diff --git a/data.py b/data.py
index 44420eea5d9b5e96585c94a9a50d418ee6b3979f..fc83570b23b74e3481db030321c09f5093a46d7b 100644
--- a/data.py
+++ b/data.py
@@ -22,7 +22,7 @@ class DataSet():
         """
         self.seq_length = seq_length
         self.class_limit = class_limit
-        self.sequence_path = './data/sequences/'
+        self.sequence_path = os.path.join('data', 'sequences')  # no trailing separator; join, don't concatenate
         self.max_frames = 300  # max number of frames a video can have for us to use it
 
         # Get the data.
@@ -39,7 +39,7 @@ class DataSet():
     @staticmethod
     def get_data():
         """Load our data from file."""
-        with open('./data/data_file.csv', 'r') as fin:
+        with open(os.path.join('data', 'data_file.csv'), 'r') as fin:
             reader = csv.reader(fin)
             data = list(reader)
 
@@ -203,14 +203,14 @@ class DataSet():
     def get_frames_for_sample(sample):
         """Given a sample row from the data file, get all the corresponding frame
         filenames."""
-        path = './data/' + sample[0] + '/' + sample[1] + '/'
+        path = os.path.join('data', sample[0], sample[1])
         filename = sample[2]
-        images = sorted(glob.glob(path + filename + '*jpg'))
+        images = sorted(glob.glob(os.path.join(path, filename + '*jpg')))
         return images
 
     @staticmethod
     def get_filename_from_image(filename):
-        parts = filename.split('/')
+        parts = filename.split(os.path.sep)
         return parts[-1].replace('.jpg', '')
 
     @staticmethod
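
os.path.join assembles the platform-correct separator, and glob returns paths
using that same separator, so splitting on os.path.sep recovers the parts on
either OS. A minimal sketch of the behavior these hunks rely on:

    import os

    os.path.join('data', 'sequences')
    # 'data/sequences' on Linux/macOS, 'data\\sequences' on Windows

    # glob output uses the platform separator, so this works everywhere:
    'data/train/frame-0001.jpg'.split(os.path.sep)[-1]
    # 'frame-0001.jpg' (here with '/' as os.path.sep)

One caveat: os.path.join drops the trailing separator that './data/sequences/'
carried, so any remaining 'self.sequence_path + filename' concatenation in
data.py must likewise become os.path.join(self.sequence_path, filename).
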
diff --git a/data/1_move_files.py b/data/1_move_files.py
index 874ea1e22ccf3c3ebf8bf3cdf4114bfb6b361bb3..23f663f1c0028a675ff78aa57d23dc3c9e872f5a 100644
--- a/data/1_move_files.py
+++ b/data/1_move_files.py
@@ -13,8 +13,8 @@ def get_train_test_lists(version='01'):
     breakdowns we'll later use to move everything.
     """
     # Get our files based on version.
-    test_file = './ucfTrainTestlist/testlist' + version + '.txt'
-    train_file = './ucfTrainTestlist/trainlist' + version + '.txt'
+    test_file = os.path.join('ucfTrainTestlist', 'testlist' + version + '.txt')
+    train_file = os.path.join('ucfTrainTestlist', 'trainlist' + version + '.txt')
 
     # Build the test list.
     with open(test_file) as fin:
@@ -44,14 +44,14 @@ def move_files(file_groups):
         for video in videos:
 
             # Get the parts.
-            parts = video.split('/')
+            parts = video.split('/')  # the UCF list files use '/' regardless of OS
             classname = parts[0]
             filename = parts[1]
 
             # Check if this class exists.
-            if not os.path.exists(group + '/' + classname):
+            if not os.path.exists(os.path.join(group, classname)):
                 print("Creating folder for %s/%s" % (group, classname))
-                os.makedirs(group + '/' + classname)
+                os.makedirs(os.path.join(group, classname))
 
             # Check if we have already moved this file, or at least that it
             # exists to move.
@@ -60,7 +60,7 @@ def move_files(file_groups):
                 continue
 
             # Move it.
-            dest = group + '/' + classname + '/' + filename
+            dest = os.path.join(group, classname, filename)
             print("Moving %s to %s" % (filename, dest))
             os.rename(filename, dest)
 
diff --git a/data/2_extract_files.py b/data/2_extract_files.py
index 47c13e7c87c5cfc47a3eb0f91f747ae22c3fc262..bf7c524785e1098570c3d5f750215d5365644ae6 100644
--- a/data/2_extract_files.py
+++ b/data/2_extract_files.py
@@ -25,13 +25,13 @@ def extract_files():
     `ffmpeg -i video.mpg image-%04d.jpg`
     """
     data_file = []
-    folders = ['./train/', './test/']
+    folders = ['train', 'test']
 
     for folder in folders:
-        class_folders = glob.glob(folder + '*')
+        class_folders = glob.glob(os.path.join(folder, '*'))
 
         for vid_class in class_folders:
-            class_files = glob.glob(vid_class + '/*.avi')
+            class_files = glob.glob(os.path.join(vid_class, '*.avi'))
 
             for video_path in class_files:
                 # Get the parts of the file.
@@ -43,10 +43,9 @@ def extract_files():
                 # the info.
                 if not check_already_extracted(video_parts):
                     # Now extract it.
-                    src = train_or_test + '/' + classname + '/' + \
-                        filename
-                    dest = train_or_test + '/' + classname + '/' + \
-                        filename_no_ext + '-%04d.jpg'
+                    src = os.path.join(train_or_test, classname, filename)
+                    dest = os.path.join(train_or_test, classname,
+                        filename_no_ext + '-%04d.jpg')
                     call(["ffmpeg", "-i", src, dest])
 
                 # Now get how many frames it is.
@@ -66,25 +65,25 @@ def get_nb_frames_for_video(video_parts):
     """Given video parts of an (assumed) already extracted video, return
     the number of frames that were extracted."""
     train_or_test, classname, filename_no_ext, _ = video_parts
-    generated_files = glob.glob(train_or_test + '/' + classname + '/' +
-                                filename_no_ext + '*.jpg')
+    generated_files = glob.glob(os.path.join(train_or_test, classname,
+                                filename_no_ext + '*.jpg'))
     return len(generated_files)
 
 def get_video_parts(video_path):
     """Given a full path to a video, return its parts."""
-    parts = video_path.split('/')
-    filename = parts[3]
+    parts = video_path.split(os.path.sep)
+    filename = parts[2]
     filename_no_ext = filename.split('.')[0]
-    classname = parts[2]
-    train_or_test = parts[1]
+    classname = parts[1]
+    train_or_test = parts[0]
 
     return train_or_test, classname, filename_no_ext, filename
 
 def check_already_extracted(video_parts):
     """Check to see if we created the -0001 frame of this file."""
     train_or_test, classname, filename_no_ext, _ = video_parts
-    return bool(os.path.exists(train_or_test + '/' + classname +
-                               '/' + filename_no_ext + '-0001.jpg'))
+    return bool(os.path.exists(os.path.join(train_or_test, classname,
+                               filename_no_ext + '-0001.jpg')))
 
 def main():
     """
diff --git a/extract_features.py b/extract_features.py
index e14bce46ef3f29b2af7a8f17edc56e5ce457f279..b424e84b1dbf09cb74fce3068da951b87d2e94c4 100644
--- a/extract_features.py
+++ b/extract_features.py
@@ -32,8 +32,8 @@ pbar = tqdm(total=len(data.data))
 for video in data.data:
 
     # Get the path to the sequence for this video.
-    path = './data/sequences/' + video[2] + '-' + str(seq_length) + \
-        '-features.txt'
+    path = os.path.join('data', 'sequences', video[2] + '-' + str(seq_length) +
+        '-features.txt')
 
     # Check if we already have it.
     if os.path.isfile(path):
diff --git a/models.py b/models.py
index 5669f440771fcb160abf134d2c1e9726638575fb..c10f126b0c2e8affea8031918bb4b1a16a8806d6 100644
--- a/models.py
+++ b/models.py
@@ -47,7 +47,7 @@ class ResearchModels():
             self.model = self.lstm()
         elif model == 'lrcn':
             print("Loading CNN-LSTM model.")
-            self.input_shape = (seq_length, 150, 150, 3)
+            self.input_shape = (seq_length, 80, 80, 3)
             self.model = self.lrcn()
         elif model == 'mlp':
             print("Loading simple MLP.")
@@ -66,7 +66,7 @@ class ResearchModels():
             sys.exit()
 
         # Now compile the network.
-        optimizer = Adam(lr=1e-4, decay=1e-6)
+        optimizer = Adam(lr=1e-5, decay=1e-6)
         self.model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                            metrics=metrics)
 
diff --git a/train.py b/train.py
index b3cdb5462da1592afffc365c0e55227b5c46117e..0febe4b5c0deb1c00a5d358e91d41484993f02b8 100644
--- a/train.py
+++ b/train.py
@@ -5,31 +5,28 @@ from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogg
 from models import ResearchModels
 from data import DataSet
 import time
+import os.path
 
 def train(data_type, seq_length, model, saved_model=None,
           concat=False, class_limit=None, image_shape=None,
-          load_to_memory=False):
-    # Set variables.
-    nb_epoch = 1000000
-    batch_size = 32
-
+          load_to_memory=False, batch_size=32, nb_epoch=100):
     # Helper: Save the model.
     checkpointer = ModelCheckpoint(
-        filepath='./data/checkpoints/' + model + '-' + data_type + \
-            '.{epoch:03d}-{val_loss:.3f}.hdf5',
+        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type +
+            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
         verbose=1,
         save_best_only=True)
 
     # Helper: TensorBoard
-    tb = TensorBoard(log_dir='./data/logs')
+    tb = TensorBoard(log_dir=os.path.join('data', 'logs'))
 
     # Helper: Stop when we stop learning.
-    early_stopper = EarlyStopping(patience=100000)
+    early_stopper = EarlyStopping(patience=5)
 
     # Helper: Save results.
     timestamp = time.time()
-    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
-        str(timestamp) + '.log')
+    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-training-' +
+        str(timestamp) + '.log'))
 
     # Get the data and process it.
     if image_shape is None:
@@ -78,18 +75,20 @@ def train(data_type, seq_length, model, saved_model=None,
             steps_per_epoch=steps_per_epoch,
             epochs=nb_epoch,
             verbose=1,
-            callbacks=[tb, early_stopper, csv_logger],
+            callbacks=[tb, early_stopper, csv_logger, checkpointer],
             validation_data=val_generator,
-            validation_steps=10)
+            validation_steps=40)
 
 def main():
     """These are the main training settings. Set each before running
     this file."""
-    model = 'conv_3d'  # see `models.py` for more
+    model = 'lrcn'  # see `models.py` for more
     saved_model = None  # None or weights file
-    class_limit = 2  # int, can be 1-101 or None
+    class_limit = 10  # int, can be 1-101 or None
     seq_length = 40
-    load_to_memory = True  # pre-load the sequences into memory
+    load_to_memory = False  # pre-load the sequences into memory
+    batch_size = 32
+    nb_epoch = 1000
 
     # Chose images or features and image shape based on network.
     if model == 'conv_3d':
@@ -97,7 +96,7 @@ def main():
         image_shape = (80, 80, 3)
     elif model == 'lrcn':
         data_type = 'images'
-        image_shape = (150, 150, 3)
+        image_shape = (80, 80, 3)
     else:
         data_type = 'features'
         image_shape = None
@@ -110,7 +109,7 @@ def main():
 
     train(data_type, seq_length, model, saved_model=saved_model,
           class_limit=class_limit, concat=concat, image_shape=image_shape,
-          load_to_memory=load_to_memory)
+          load_to_memory=load_to_memory, batch_size=batch_size, nb_epoch=nb_epoch)
 
 if __name__ == '__main__':
     main()
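
With batch_size and nb_epoch promoted to parameters, callers can sweep them
without editing train() itself. A hypothetical call mirroring the lrcn
settings in main():

    train('images', 40, 'lrcn', image_shape=(80, 80, 3),
          load_to_memory=False, batch_size=32, nb_epoch=1000)
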
diff --git a/train_cnn.py b/train_cnn.py
index 898fadde57397b64f5b8e69466e3c419e60f41ee..0fb81dee369e47301517cc6352e0ebc200134383 100644
--- a/train_cnn.py
+++ b/train_cnn.py
@@ -14,12 +14,13 @@ from keras.models import Model
 from keras.layers import Dense, GlobalAveragePooling2D
 from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
 from data import DataSet
+import os.path
 
 data = DataSet()
 
 # Helper: Save the model.
 checkpointer = ModelCheckpoint(
-    filepath='./data/checkpoints/inception.{epoch:03d}-{val_loss:.2f}.hdf5',
+    filepath=os.path.join('data', 'checkpoints', 'inception.{epoch:03d}-{val_loss:.2f}.hdf5'),
     verbose=1,
     save_best_only=True)
 
@@ -27,7 +28,7 @@ checkpointer = ModelCheckpoint(
 early_stopper = EarlyStopping(patience=10)
 
 # Helper: TensorBoard
-tensorboard = TensorBoard(log_dir='./data/logs/')
+tensorboard = TensorBoard(log_dir=os.path.join('data', 'logs'))
 
 def get_generators():
     train_datagen = ImageDataGenerator(
@@ -41,14 +42,14 @@ def get_generators():
     test_datagen = ImageDataGenerator(rescale=1./255)
 
     train_generator = train_datagen.flow_from_directory(
-        './data/train/',
+        os.path.join('data', 'train'),
         target_size=(299, 299),
         batch_size=32,
         classes=data.classes,
         class_mode='categorical')
 
     validation_generator = test_datagen.flow_from_directory(
-        './data/test/',
+        os.path.join('data', 'test'),
         target_size=(299, 299),
         batch_size=32,
         classes=data.classes,
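
flow_from_directory infers one label per subdirectory, so both generators
assume the layout the data/ scripts produce, one folder per class:

    data/train/<classname>/*.jpg
    data/test/<classname>/*.jpg
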
diff --git a/validate_cnn.py b/validate_cnn.py
index 5cb7b3918dcf41948a1ebbfa2cf4d4ed47ff1cf0..7f30b40a96d038af411a7e79989e75a1c45b282f 100644
--- a/validate_cnn.py
+++ b/validate_cnn.py
@@ -5,6 +5,7 @@ import numpy as np
 import operator
 import random
 import glob
+import os.path
 from data import DataSet
 from processor import process_image
 from keras.models import load_model
@@ -15,7 +16,7 @@ def main(nb_images=5):
     model = load_model('data/checkpoints/inception.057-1.16.hdf5')
 
     # Get all our test images.
-    images = glob.glob('./data/test/**/*.jpg')
+    images = glob.glob(os.path.join('data', 'test', '**', '*.jpg'))
 
     for _ in range(nb_images):
         print('-'*80)
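
One caveat on the new pattern: glob only treats '**' as "any depth" when
called with recursive=True (Python 3.5+); otherwise '**' matches a single
path segment, which happens to suffice for the flat data/test/<class>/
layout but would miss deeper nesting. A sketch of the recursive form:

    import glob
    import os

    # match .jpg frames at any depth under data/test/
    images = glob.glob(os.path.join('data', 'test', '**', '*.jpg'), recursive=True)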