
Flor.shadow #5


Open
wants to merge 176 commits into base: main

Changes from all commits (176 commits)
ade156c
Flor.shadow.alexis (#2)
rlnsanz Feb 9, 2022
dcb7bd2
flor.shadow@kaggle-nlp-disasters::2022-02-09T17:11:16.json
Feb 9, 2022
9f2a068
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T17:13:01.json
Feb 9, 2022
3d5936b
adding demo ipynb
Feb 9, 2022
70f540c
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T17:45:00.json
rlnsanz Feb 9, 2022
c500a62
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T17:52:33.json
rlnsanz Feb 9, 2022
53cecb0
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T17:53:31.json
rlnsanz Feb 9, 2022
b7412c9
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T17:55:43.json
rlnsanz Feb 9, 2022
08cfd10
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T18:13:16.json
rlnsanz Feb 9, 2022
c104f9e
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-09T18:27:09.json
rlnsanz Feb 9, 2022
1ba4e58
.
rlnsanz Feb 9, 2022
e5c85b6
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T02:29:15.json
rlnsanz Feb 10, 2022
06e5831
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:13:05.json
rlnsanz Feb 10, 2022
0ac9f4a
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:17:06.json
rlnsanz Feb 10, 2022
710c7e1
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:18:41.json
rlnsanz Feb 10, 2022
0a56d9d
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:20:08.json
rlnsanz Feb 10, 2022
b1ff2ad
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:21:24.json
rlnsanz Feb 10, 2022
0575170
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:22:25.json
rlnsanz Feb 10, 2022
b1c1e7d
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:23:19.json
rlnsanz Feb 10, 2022
9a51718
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:24:09.json
rlnsanz Feb 10, 2022
d997305
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:25:23.json
rlnsanz Feb 10, 2022
5e835c3
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:26:27.json
rlnsanz Feb 10, 2022
5ac088b
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:27:54.json
rlnsanz Feb 10, 2022
dda00f2
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:28:43.json
rlnsanz Feb 10, 2022
1d95481
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:29:39.json
rlnsanz Feb 10, 2022
8914354
flor.shadow@kaggle-nlp-disasters-rnn::2022-02-10T03:31:32.json
rlnsanz Feb 10, 2022
16fa6c3
.
rlnsanz Feb 13, 2022
d1bc9cf
.
rlnsanz Feb 13, 2022
65d2c3e
adding transformed file
rlnsanz Feb 13, 2022
4dc43e7
.
rlnsanz Feb 13, 2022
adf6211
.
rlnsanz Feb 14, 2022
f219e42
.
rlnsanz Feb 14, 2022
8b50cd8
.
rlnsanz Feb 14, 2022
f3260a3
.
rlnsanz Feb 14, 2022
7bf38f2
.
rlnsanz Feb 14, 2022
43e72db
.
rlnsanz Feb 14, 2022
2bc0531
.
rlnsanz Feb 14, 2022
0ddbdca
.
rlnsanz Feb 14, 2022
5a91d67
.
rlnsanz Feb 14, 2022
628c9b8
.
rlnsanz Feb 9, 2022
cd1dc32
.
rlnsanz Feb 16, 2022
ce4b549
.
rlnsanz Feb 16, 2022
6d711a9
.
rlnsanz Feb 16, 2022
6749c1c
swapped plotly for matplotlib
rlnsanz Feb 17, 2022
3734626
.
rlnsanz Feb 17, 2022
4fd5645
Create requirements.txt
rlnsanz Oct 20, 2022
4b7c1c9
Cornell Demo
rlnsanz Nov 1, 2022
dbc7217
demo w visuals
rlnsanz Nov 1, 2022
092531b
gitignore
rlnsanz Nov 9, 2022
64a1005
.
rlnsanz Nov 9, 2022
31dbe8a
updating requirements, need python3.9
rlnsanz Nov 9, 2022
2c65e65
.
rlnsanz Nov 10, 2022
ce975fa
updating flor version
rlnsanz Nov 10, 2022
a60c7ee
updating reqs
rlnsanz Nov 10, 2022
72dc827
.
rlnsanz Nov 10, 2022
f375fa6
.
rlnsanz Nov 10, 2022
a55939b
.
rlnsanz Nov 10, 2022
3a3b1ca
Squashing a 20 epoch version (#3)
rlnsanz Nov 11, 2022
97171c1
.
rlnsanz Nov 11, 2022
6a2733a
adding dummy file
rlnsanz Jan 2, 2023
a52718f
flor.shadow@knd_debug::2022-11-24T18:10:47.json
rlnsanz Nov 25, 2022
596bb4b
flor.shadow@knd_dbg2::2022-11-24T18:11:20.json
rlnsanz Nov 25, 2022
02b4f37
flor.shadow@knd_dbg3::2022-11-24T18:15:37.json
rlnsanz Nov 25, 2022
4293dfc
sync
rlnsanz Jan 30, 2023
e778425
adding some sql
rlnsanz Jan 30, 2023
b80d5a6
.
rlnsanz Jan 30, 2023
bcf1280
queries
rlnsanz Feb 1, 2023
2ff5f49
timing
rlnsanz Feb 11, 2023
b62db77
flor.shadow@time_kaggle_nlp::2023-02-11T06:59:28.json
rlnsanz Feb 11, 2023
054c24a
flor.shadow@time_kaggle_nlp::2023-02-11T07:04:31.json
rlnsanz Feb 11, 2023
80bf514
flor.shadow@time_kaggle_nlp::2023-02-11T07:15:28.json
rlnsanz Feb 11, 2023
5aef679
flor.shadow@time_kaggle_nlp::2023-02-11T07:16:50.json
rlnsanz Feb 11, 2023
837130e
flor.shadow@time_kaggle_nlp::2023-02-11T07:18:30.json
rlnsanz Feb 11, 2023
2bfd4a4
flor.shadow@time_kaggle_nlp::2023-02-11T07:36:11.json
rlnsanz Feb 11, 2023
a1e1468
flor.shadow@time_kaggle_nlp::2023-02-11T08:05:44.json
rlnsanz Feb 11, 2023
eb3083f
flor.shadow@time_kaggle_nlp::2023-02-14T18:36:44.json
rlnsanz Feb 15, 2023
6cd659e
flor.shadow@kaggle-feb::2023-02-14T18:41:27.json
rlnsanz Feb 15, 2023
bf5f439
flor.shadow@kaggle-feb::2023-02-14T18:43:31.json
rlnsanz Feb 15, 2023
ff024ea
flor.shadow@kaggle-feb::2023-02-14T18:50:31.json
rlnsanz Feb 15, 2023
7d23029
saving replay
rlnsanz Feb 15, 2023
e512179
flor.shadow@kaggle-feb::2023-02-14T20:25:55.json
rlnsanz Feb 15, 2023
dc1d01b
flor.shadow@kaggle-feb::2023-02-14T20:28:06.json
rlnsanz Feb 15, 2023
0025a24
flor.shadow@kaggle-feb::2023-02-14T20:34:00.json
rlnsanz Feb 15, 2023
011129c
flor.shadow@kaggle-feb::2023-02-14T20:35:09.json
rlnsanz Feb 15, 2023
219b697
new commit
rlnsanz Feb 15, 2023
d058152
kaggle-nlp-disasters:flor.shadow@dummy::/home/rogarcia/.flor/kaggle-n…
rlnsanz Feb 15, 2023
66eea8a
kaggle-nlp-disasters:flor.shadow@dummy::None
rlnsanz Feb 15, 2023
9baf6d2
cleanup
rlnsanz Feb 16, 2023
939025b
created lab_notebook
rlnsanz Feb 16, 2023
ee0c4c5
kaggle-nlp-disasters:flor.shadow@jupyter::None
rlnsanz Feb 16, 2023
6fe79dc
dropped dummy
rlnsanz Feb 16, 2023
8b4003a
manual move to .,flor
rlnsanz Feb 16, 2023
a25334f
kaggle-nlp-disasters:flor.shadow@newFlor::None
rlnsanz Feb 16, 2023
a200c25
replayjson off by one
rlnsanz Feb 16, 2023
2d13b53
RECORD::kaggle-nlp-disasters:flor.shadow@noCommitHex::None
rlnsanz Feb 16, 2023
949d673
RECORD::kaggle-nlp-disasters:flor.shadow@noCommitHex::None
rlnsanz Feb 16, 2023
8f8cf27
trying new flor version
rlnsanz Feb 16, 2023
4f93f61
RECORD::kaggle-nlp-disasters:flor.shadow@magic::None
rlnsanz Feb 16, 2023
446b772
RECORD::kaggle-nlp-disasters:flor.shadow@magic::None
rlnsanz Feb 16, 2023
84d7db3
RECORD::kaggle-nlp-disasters:flor.shadow@saveMagic::None
rlnsanz Feb 16, 2023
05d435f
RECORD::kaggle-nlp-disasters:flor.shadow@saveMagic::None
rlnsanz Feb 16, 2023
4b79663
RECORD::kaggle-nlp-disasters:flor.shadow@saveMagic::None
rlnsanz Feb 16, 2023
2d11ae0
RECORD::kaggle-nlp-disasters:flor.shadow@saveMagic::None
rlnsanz Feb 16, 2023
7443386
RECORD::kaggle-nlp-disasters:flor.shadow@runID::None
rlnsanz Feb 16, 2023
bea2744
RECORD::kaggle-nlp-disasters:flor.shadow@runID::None
rlnsanz Feb 16, 2023
ff9c1fd
RECORD::kaggle-nlp-disasters:flor.shadow@runID::None
rlnsanz Feb 16, 2023
27c7674
RECORD::kaggle-nlp-disasters:flor.shadow@runID::None
rlnsanz Feb 16, 2023
9d2b860
REPLAY::kaggle-nlp-disasters:flor.shadow@runID::None
rlnsanz Feb 16, 2023
6586999
RECORD::kaggle-nlp-disasters:flor.shadow@runID::/home/rogarcia/.flor/…
rlnsanz Feb 17, 2023
2ce7093
saving session
rlnsanz Feb 17, 2023
0b092d2
RECORD::kaggle-nlp-disasters:flor.shadow@runID::/home/rogarcia/.flor/…
rlnsanz Feb 17, 2023
daa27bc
REPLAY::runID
rlnsanz Feb 17, 2023
e154113
REPLAY::runID
rlnsanz Feb 17, 2023
235ac1b
REPLAY::runID
rlnsanz Feb 17, 2023
6723da2
REPLAY::runID
rlnsanz Feb 17, 2023
13cba5c
REPLAY::runID
rlnsanz Feb 17, 2023
9b07a87
REPLAY::runID
rlnsanz Feb 17, 2023
93bab32
REPLAY::runID
rlnsanz Feb 17, 2023
3af0af8
REPLAY::runID
rlnsanz Feb 17, 2023
7e5180c
REPLAY::runID
rlnsanz Feb 17, 2023
be18006
REPLAY::runID
rlnsanz Feb 17, 2023
3d0be9a
run ipy
rlnsanz Feb 17, 2023
8a33e71
REPLAY::runID
rlnsanz Feb 17, 2023
2c4228f
saving notebook
rlnsanz Feb 17, 2023
99ca014
cleaning notebook
rlnsanz Feb 17, 2023
0f80f82
REPLAY::runID
rlnsanz Feb 17, 2023
1e302c5
saving run.ipy
rlnsanz Feb 17, 2023
1c7cbde
lab_nb_analysis
rlnsanz Feb 17, 2023
d2d9b00
saving nb
rlnsanz Feb 17, 2023
f5e0c78
clean dir
rlnsanz Feb 17, 2023
9d67dab
saving clean nb
rlnsanz Feb 17, 2023
3eddb6e
preparing demo
rlnsanz Feb 17, 2023
b16fc45
RECORD::joeDemo
rlnsanz Feb 17, 2023
69aef7d
RECORD::joeDemo
rlnsanz Feb 17, 2023
7614353
lab notebook
rlnsanz Feb 18, 2023
c82024f
lab notebook
rlnsanz Feb 19, 2023
9c185a8
lab notebook
rlnsanz Feb 19, 2023
8347d9e
setting up
rlnsanz Feb 22, 2023
cd605b1
debugging apply
rlnsanz Feb 22, 2023
6415692
reordering pages
rlnsanz Feb 22, 2023
c785db7
saving lab notebook
rlnsanz Feb 22, 2023
ec32b66
lab notebook
rlnsanz Feb 22, 2023
2e9b6f3
lab notebook
rlnsanz Feb 23, 2023
4db54f3
joe demo
rlnsanz Feb 23, 2023
8eaf805
RECORD::joeDemo
rlnsanz Feb 24, 2023
186ba43
RECORD::joeDemo
rlnsanz Feb 24, 2023
4c9a735
RECORD::joeDemo
rlnsanz Feb 24, 2023
edf484a
joe demo
rlnsanz Feb 24, 2023
b805681
joe demo
rlnsanz Feb 24, 2023
a9eb127
joe demo
rlnsanz Feb 24, 2023
3fd0cd8
RECORD::joeDemo
rlnsanz Feb 24, 2023
bfc7742
manual drop of a logging stmt
rlnsanz Feb 27, 2023
0aacb00
gitignore
rlnsanz Feb 27, 2023
9d93e6d
lab notebook
rlnsanz Feb 27, 2023
2e4a28e
merge
rlnsanz Feb 27, 2023
b98b4f2
lab notebook
rlnsanz Feb 27, 2023
1444b8b
dropping the clr_scheduler for constant learning rate
rlnsanz Feb 28, 2023
8d5abd7
RECORD::constantlr
rlnsanz Feb 28, 2023
96b5b6e
RECORD::constantlr
rlnsanz Feb 28, 2023
635a215
lab notebook
rlnsanz Feb 28, 2023
e48fa8e
lab notebook
rlnsanz Feb 28, 2023
2423186
lab notebook
rlnsanz Feb 28, 2023
d50e2bf
lab notebook
rlnsanz Mar 1, 2023
9e319bc
lab notebook
rlnsanz Mar 1, 2023
8783ad7
saving lab notebook
rlnsanz Mar 1, 2023
a401e2c
lab notebook
rlnsanz Mar 1, 2023
16d9748
lab nb
rlnsanz Mar 1, 2023
cfd3989
cleanup nb
rlnsanz Mar 1, 2023
7e8daef
lab notebook
rlnsanz Mar 1, 2023
3ea532a
RECORD::leastLogging
rlnsanz Mar 3, 2023
6cce4a7
RECORD::CLRscheduler
rlnsanz Mar 3, 2023
6cf23d9
RECORD::CLRscheduler
rlnsanz Mar 3, 2023
dc45190
RECORD::CLRscheduler
rlnsanz Mar 3, 2023
85d8382
lab nb
rlnsanz Mar 3, 2023
b9dddcc
lab nb
rlnsanz Mar 3, 2023
d868242
saving
rlnsanz Mar 8, 2023
6 changes: 6 additions & 0 deletions .flor/.replay.json
@@ -0,0 +1,6 @@
{
"NAME": "CLRscheduler",
"TSTAMP": "2023-03-03T12:57:38.json",
"PROJID": "kaggle-nlp-disasters_flor.shadow",
"EPOCHS": 20
}
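
The replay metadata above is plain JSON, so it can be inspected with the standard library alone. A minimal sketch (the path comes from the diff header above and the field names mirror the file contents; nothing here relies on Flor's own API):

import json
from pathlib import Path

# Read the replay metadata this PR checks in under .flor/.
meta = json.loads(Path(".flor/.replay.json").read_text())

print(meta["NAME"])    # "CLRscheduler"
print(meta["EPOCHS"])  # 20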
961 changes: 961 additions & 0 deletions .flor/log_records.csv

Large diffs are not rendered by default.
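
The 961-line log_records.csv diff is collapsed above, but the file itself is a plain CSV and can be inspected locally. A minimal sketch using pandas (the path comes from the diff header; no column names are assumed, since they are not shown in this PR):

import pandas as pd

# Load the Flor log records checked in under .flor/ for ad-hoc inspection.
records = pd.read_csv(".flor/log_records.csv")
print(records.shape)
print(records.head())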

2 changes: 2 additions & 0 deletions .flor/run.ipy
@@ -0,0 +1,2 @@
# coding: utf-8
!python train_rnn.py --replay_flor 17/20
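
The leading "!" in run.ipy is IPython's shell-escape prefix, so the same replay can be launched from an ordinary shell as sketched below; the flag and the 17/20 argument are copied verbatim from the file above, and their exact semantics are defined by Flor rather than by this note.

python train_rnn.py --replay_flor 17/20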
35 changes: 6 additions & 29 deletions .gitignore
100644 → 100755
@@ -2,7 +2,7 @@
__pycache__/
*.py[cod]
*$py.class

.venv/
# C extensions
*.so

@@ -99,31 +99,8 @@ celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
.mypy_cache/**

dump/**
.DS_Store
peak_pivot.csv
7 changes: 0 additions & 7 deletions .replay.json

This file was deleted.

268 changes: 268 additions & 0 deletions .train_rnn_tfm.py
@@ -0,0 +1,268 @@
# type: ignore

import flor
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torchtext.legacy.data import Field, TabularDataset, BucketIterator
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.optim as optim
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
import flor
from multiprocessing import set_start_method
from utils import CLR_Scheduler
try:
set_start_method('spawn')
except RuntimeError:
pass
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
flor.namespace_stack.test_force(device, 'device')
device
label_field = Field(sequential=False, use_vocab=False, batch_first=True,
dtype=torch.float)
flor.namespace_stack.test_force(label_field, 'label_field')
text_field = Field(tokenize='spacy', lower=True, include_lengths=True,
batch_first=True)
flor.namespace_stack.test_force(text_field, 'text_field')
fields = [('words', text_field), ('target', label_field)]
flor.namespace_stack.test_force(fields, 'fields')
fields_test = [('words', text_field)]
flor.namespace_stack.test_force(fields_test, 'fields_test')
train, valid = TabularDataset.splits(path='data', train='train_rnn.csv',
validation='valid_rnn.csv', format='CSV', fields=fields, skip_header=True)
flor.namespace_stack.test_force(train, 'train')
flor.namespace_stack.test_force(valid, 'valid')
test = TabularDataset(path='data/test_rnn.csv', format='CSV', fields=
fields_test, skip_header=True)
flor.namespace_stack.test_force(test, 'test')
train_iter = BucketIterator(train, batch_size=200, sort_key=lambda x: len(x
.words), device=device, sort=True, sort_within_batch=True)
flor.namespace_stack.test_force(train_iter, 'train_iter')
valid_iter = BucketIterator(valid, batch_size=200, sort_key=lambda x: len(x
.words), device=device, sort=True, sort_within_batch=True)
flor.namespace_stack.test_force(valid_iter, 'valid_iter')
test_iter = BucketIterator(test, batch_size=200, sort_key=lambda x: len(x.
words), device=device, sort=True, sort_within_batch=True)
flor.namespace_stack.test_force(test_iter, 'test_iter')
text_field.build_vocab(train, min_freq=5)


class LSTM(nn.Module):

def __init__(self, dimension=128):
try:
flor.namespace_stack.new()
super(LSTM, self).__init__()
self.embedding = nn.Embedding(len(text_field.vocab), dimension)
flor.namespace_stack.test_force(self.embedding, 'self.embedding')
self.lstm = nn.LSTM(input_size=dimension, hidden_size=dimension,
num_layers=1, batch_first=True, bidirectional=True)
flor.namespace_stack.test_force(self.lstm, 'self.lstm')
self.drop = nn.Dropout(p=0.85)
flor.namespace_stack.test_force(self.drop, 'self.drop')
self.dimension = dimension
flor.namespace_stack.test_force(self.dimension, 'self.dimension')
self.fc = nn.Linear(2 * dimension, 1)
flor.namespace_stack.test_force(self.fc, 'self.fc')
self.relu = nn.ReLU()
flor.namespace_stack.test_force(self.relu, 'self.relu')
finally:
flor.namespace_stack.pop()

def forward(self, text, text_len):
try:
flor.namespace_stack.new()
text_emb = self.relu(self.embedding(text))
flor.namespace_stack.test_force(text_emb, 'text_emb')
packed_input = pack_padded_sequence(text_emb, text_len,
batch_first=True, enforce_sorted=False)
flor.namespace_stack.test_force(packed_input, 'packed_input')
packed_output, _ = self.lstm(packed_input)
flor.namespace_stack.test_force(packed_output, 'packed_output')
flor.namespace_stack.test_force(_, '_')
output, _ = pad_packed_sequence(packed_output, batch_first=True)
flor.namespace_stack.test_force(output, 'output')
flor.namespace_stack.test_force(_, '_')
out_forward = output[(range(len(output))), (text_len - 1), :
self.dimension]
flor.namespace_stack.test_force(out_forward, 'out_forward')
out_reverse = output[:, (0), self.dimension:]
flor.namespace_stack.test_force(out_reverse, 'out_reverse')
out_reduced = torch.cat((out_forward, out_reverse), 1)
flor.namespace_stack.test_force(out_reduced, 'out_reduced')
text_fea = out_reduced
flor.namespace_stack.test_force(text_fea, 'text_fea')
text_fea = self.fc(self.drop(text_fea))
flor.namespace_stack.test_force(text_fea, 'text_fea')
text_fea = torch.squeeze(text_fea, 1)
flor.namespace_stack.test_force(text_fea, 'text_fea')
text_out = torch.sigmoid(text_fea)
flor.namespace_stack.test_force(text_out, 'text_out')
return text_out
finally:
flor.namespace_stack.pop()


def train(model, optimizer, criterion=nn.BCELoss(), train_loader=train_iter,
valid_loader=valid_iter, test_loader=test_iter, num_epochs=5,
eval_every=len(train_iter) // 2, file_path='training_process',
best_valid_loss=float('Inf')):
try:
flor.namespace_stack.new()
running_loss = 0.0
flor.namespace_stack.test_force(running_loss, 'running_loss')
valid_running_loss = 0.0
flor.namespace_stack.test_force(valid_running_loss,
'valid_running_loss')
global_step = 0
flor.namespace_stack.test_force(global_step, 'global_step')
train_loss_list = []
flor.namespace_stack.test_force(train_loss_list, 'train_loss_list')
valid_loss_list = []
flor.namespace_stack.test_force(valid_loss_list, 'valid_loss_list')
global_steps_list = []
flor.namespace_stack.test_force(global_steps_list, 'global_steps_list')
best_loss = float('inf')
flor.namespace_stack.test_force(best_loss, 'best_loss')
model.train()
flor.skip_stack.new(2)
if flor.skip_stack.peek().should_execute(not flor.SKIP):
for epoch in flor.it(range(num_epochs)):
flor.log('learning_rate', str(optimizer.param_groups[0]['lr']))
flor.skip_stack.new(1)
if flor.skip_stack.peek().should_execute(not flor.SKIP):
for ((words, words_len), labels), _ in train_loader:
labels = labels.to(device)
flor.namespace_stack.test_force(labels, 'labels')
words = words.to(device)
flor.namespace_stack.test_force(words, 'words')
words_len = words_len.detach().cpu()
flor.namespace_stack.test_force(words_len,
'words_len')
output = model(words, words_len)
flor.namespace_stack.test_force(output, 'output')
loss = criterion(output, labels)
flor.namespace_stack.test_force(loss, 'loss')
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
global_step += 1
if global_step % eval_every == 0:
model.eval()
with torch.no_grad():
flor.skip_stack.new(0)
if flor.skip_stack.peek().should_execute(
not flor.SKIP):
for ((words, words_len), labels
), _ in valid_loader:
labels = labels.to(device)
flor.namespace_stack.test_force(labels,
'labels')
words = words.to(device)
flor.namespace_stack.test_force(words,
'words')
words_len = words_len.detach().cpu()
flor.namespace_stack.test_force(words_len,
'words_len')
output = model(words, words_len)
flor.namespace_stack.test_force(output,
'output')
loss = criterion(output, labels)
flor.namespace_stack.test_force(loss,
'loss')
valid_running_loss += float(loss.item())
(valid_running_loss, _, _, words_len,
output, loss) = (flor.skip_stack.
pop().proc_side_effects(
valid_running_loss, labels, words,
words_len, output, loss))
average_train_loss = running_loss / eval_every
flor.namespace_stack.test_force(
average_train_loss, 'average_train_loss')
average_valid_loss = valid_running_loss / len(
valid_loader)
flor.namespace_stack.test_force(
average_valid_loss, 'average_valid_loss')
if average_valid_loss < best_loss:
best_loss = average_valid_loss
flor.namespace_stack.test_force(best_loss,
'best_loss')
torch.save(model.state_dict(),
'best-model.pt')
train_loss_list.append(average_train_loss)
valid_loss_list.append(average_valid_loss)
global_steps_list.append(global_step)
running_loss = 0.0
flor.namespace_stack.test_force(running_loss,
'running_loss')
valid_running_loss = 0.0
flor.namespace_stack.test_force(
valid_running_loss, 'valid_running_loss')
model.train()
print(
'Epoch [{}/{}], LR: {:.3f}, Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
.format(epoch + 1, num_epochs,
optimizer.param_groups[0]['lr'],
global_step, num_epochs * len(
train_loader), average_train_loss,
average_valid_loss))
flor.log('avg_train_loss', average_train_loss)
flor.log('average_valid_loss',
average_valid_loss)
clr_scheduler.step()
(_, _, running_loss, valid_running_loss, global_step, _,
_, _, best_loss, _, _, _) = (flor.skip_stack.pop().
proc_side_effects(model, optimizer, running_loss,
valid_running_loss, global_step, train_loss_list,
valid_loss_list, global_steps_list, best_loss,
torch, flor, clr_scheduler))
(_, _, running_loss, valid_running_loss, global_step, _, _, _,
best_loss, _, _, _) = (flor.skip_stack.pop().proc_side_effects(
model, optimizer, running_loss, valid_running_loss, global_step,
train_loss_list, valid_loss_list, global_steps_list, best_loss,
flor, torch, clr_scheduler))
y_pred = []
flor.namespace_stack.test_force(y_pred, 'y_pred')
model.eval()
with torch.no_grad():
flor.skip_stack.new(3)
if flor.skip_stack.peek().should_execute(not flor.SKIP):
for (words, words_len), _ in test_loader:
words = words.to(device)
flor.namespace_stack.test_force(words, 'words')
words_len = words_len.detach().cpu()
flor.namespace_stack.test_force(words_len, 'words_len')
output = model(words, words_len)
flor.namespace_stack.test_force(output, 'output')
output = (output > 0.5).int()
flor.namespace_stack.test_force(output, 'output')
y_pred.extend(output.tolist())
_, words_len, output, _ = flor.skip_stack.pop().proc_side_effects(
words, words_len, output, y_pred)
print('Finished Training!')
return y_pred
finally:
flor.namespace_stack.pop()


EPOCHS = 80
flor.namespace_stack.test_force(EPOCHS, 'EPOCHS')
MIN_LR = 0.0001
flor.namespace_stack.test_force(MIN_LR, 'MIN_LR')
model = LSTM(8).to(device)
flor.namespace_stack.test_force(model, 'model')
optimizer = optim.SGD(model.parameters(), lr=MIN_LR)
flor.namespace_stack.test_force(optimizer, 'optimizer')
flor.log('optimizer', str(type(optimizer)))
clr_scheduler = CLR_Scheduler(optimizer, net_steps=len(train_iter) * EPOCHS,
min_lr=MIN_LR, max_lr=4.0, tail_frac=0.0)
flor.namespace_stack.test_force(clr_scheduler, 'clr_scheduler')
pred = train(model=model, optimizer=optimizer, num_epochs=EPOCHS)
flor.namespace_stack.test_force(pred, 'pred')
if not flor.SKIP:
flor.flush()
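
The script above constructs a CLR_Scheduler from utils, which is not included in this diff. As a rough, hypothetical sketch of the kind of cyclical learning-rate schedule such a class might implement (a triangular ramp from min_lr up to max_lr and back over net_steps, with an optional constant tail controlled by tail_frac); this is illustrative only and is not the PR's implementation:

class TriangularCLRSketch:
    """Hypothetical cyclical LR: min_lr -> max_lr -> min_lr over net_steps.
    Illustrative only; the PR's utils.CLR_Scheduler may differ."""

    def __init__(self, optimizer, net_steps, min_lr, max_lr, tail_frac=0.0):
        self.optimizer = optimizer
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.cycle_steps = max(int(net_steps * (1.0 - tail_frac)), 1)
        self.t = 0
        self._set_lr(min_lr)

    def _set_lr(self, lr):
        for group in self.optimizer.param_groups:
            group["lr"] = lr

    def step(self):
        self.t += 1
        if self.t >= self.cycle_steps:
            # Constant tail: hold the floor learning rate.
            self._set_lr(self.min_lr)
        else:
            # Position in the cycle: 0 at both ends, 1 at the midpoint.
            pos = 1.0 - abs(2.0 * self.t / self.cycle_steps - 1.0)
            self._set_lr(self.min_lr + pos * (self.max_lr - self.min_lr))

Constructed with the arguments used above (net_steps=len(train_iter) * EPOCHS, min_lr=MIN_LR, max_lr=4.0, tail_frac=0.0), such a schedule would peak at 4.0 near the middle of training and return toward MIN_LR by the end.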