Commit 952ab7c7 authored by Matt Harvey

Fix one-hot bug when encoding y

Showing 5 additions and 6 deletions
@@ -10,7 +10,7 @@ import sys
 import operator
 import threading
 from processor import process_image
-from keras.utils import np_utils
+from keras.utils import to_categorical

 class threadsafe_iterator:
     def __init__(self, iterator):
@@ -98,8 +98,7 @@ class DataSet():
         label_encoded = self.classes.index(class_str)

         # Now one-hot it.
-        label_hot = np_utils.to_categorical(label_encoded, len(self.classes))
-        label_hot = label_hot[0] # just get a single row
+        label_hot = to_categorical(label_encoded, len(self.classes))

         return label_hot
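Why the `[0]` row-grab could be dropped: the comment on the removed line shows the older `np_utils.to_categorical` returned a 2-D array here, so the code took row `[0]`. The current `keras.utils.to_categorical` preserves a scalar input's shape and returns a 1-D vector of shape `(num_classes,)` directly; keeping the old `[0]` on that result would have returned a single float instead of a one-hot vector, which is presumably the y-encoding bug this commit fixes. A minimal sketch of the fixed behavior (the class list is a placeholder, not from this repo):

```python
from keras.utils import to_categorical

classes = ['basketball', 'biking', 'diving']  # placeholder class list
label_encoded = classes.index('biking')       # -> 1

# Modern to_categorical on a scalar label returns a 1-D one-hot vector.
label_hot = to_categorical(label_encoded, len(classes))
print(label_hot)        # [0. 1. 0.]
print(label_hot.shape)  # (3,) -- no [0] indexing needed

# The old label_hot[0] applied to this 1-D result would have yielded
# the single float 0.0 rather than a one-hot row.
```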
@@ -78,9 +78,9 @@ class ResearchModels():
         our CNN to this model predomenently."""
         # Model.
         model = Sequential()
-        model.add(LSTM(2048, return_sequences=True, input_shape=self.input_shape,
+        model.add(LSTM(2048, return_sequences=False,
+                       input_shape=self.input_shape,
                        dropout=0.5))
-        model.add(Flatten())
         model.add(Dense(512, activation='relu'))
         model.add(Dropout(0.5))
         model.add(Dense(self.nb_classes, activation='softmax'))
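On the model change: with `return_sequences=False`, the LSTM outputs only its final timestep, shape `(batch, 2048)`, which the `Dense` layers can consume directly, so the `Flatten()` over a `(batch, timesteps, 2048)` sequence output is no longer needed. A standalone sketch of the resulting stack, with the input shape assumed for illustration (`seq_length = 40` appears in this commit's config; the 2048-D feature size is an assumption):

```python
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout

input_shape = (40, 2048)  # assumed: 40 timesteps of 2048-D features
nb_classes = 10           # matches class_limit = 10 below

model = Sequential()
# return_sequences=False -> last-timestep output of shape (batch, 2048),
# so no Flatten() is needed before the Dense layers.
model.add(LSTM(2048, return_sequences=False,
               input_shape=input_shape,
               dropout=0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
model.summary()  # final output shape: (None, 10)
```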
@@ -86,7 +86,7 @@ def main():
     # model can be one of lstm, lrcn, mlp, conv_3d, c3d
     model = 'lstm'
     saved_model = None # None or weights file
-    class_limit = 2 # int, can be 1-101 or None
+    class_limit = 10 # int, can be 1-101 or None
     seq_length = 40
     load_to_memory = False # pre-load the sequences into memory
     batch_size = 32
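For context, `class_limit` caps how many of the dataset's 101 available classes are used; `len(self.classes)` then determines the one-hot vector length in `DataSet` and presumably flows into `self.nb_classes` for the model's softmax layer. A quick hedged sketch of that relationship (placeholder class names):

```python
from keras.utils import to_categorical

class_limit = 10  # the new value in this commit
classes = ['class_%02d' % i for i in range(class_limit)]  # placeholder names

nb_classes = len(classes)  # sizes the final softmax layer
label_hot = to_categorical(classes.index('class_03'), nb_classes)
assert label_hot.shape == (nb_classes,)  # a 10-way one-hot label
```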