
Keras 2 upgrade

Merged Matt Harvey requested to merge keras-2-upgrade into master 8 years ago
  • Updates function calls to work with the new Keras 2 APIs (see the sketch after this list)
  • Fixes method call bugs in data.py
  • Changes LSTM network size to better fit into memory for normal-sized GPUs
  • Adds requirements.txt file to help with version issues people are having
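
For context, most of the diff below is the mechanical Keras 1 → Keras 2 rename set. A minimal sketch of the pattern, assuming Keras >= 2.0 is installed (the shapes and layer sizes here are placeholders, not values from this repository):

# Sketch of the Keras 1 -> Keras 2 renames applied throughout this MR.
# Illustrative only: shapes and layer sizes are placeholders, not repo values.
from keras.layers import Conv2D, Dense, Flatten, Input
from keras.models import Model

inp = Input(shape=(64, 64, 3))

# Keras 1: Convolution2D(32, 3, 3, init="he_normal", border_mode="valid")
x = Conv2D(32, (3, 3), kernel_initializer="he_normal", padding="valid",
           activation="relu")(inp)
x = Flatten()(x)
out = Dense(10, activation="softmax")(x)

# Keras 1: Model(input=inp, output=out)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()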
Compare: master (base) and latest version cfff076e (23 commits, 2 years ago)
7 files changed: +55 -60

data.py  +3 -3
extract_features.py  +2 -2
extractor.py  +3 -5
models.py  +32 -40
requirements.txt  +5 -0
train.py  +6 -6
train_cnn.py  +4 -4
data.py  +3 -3

@@ -159,7 +159,7 @@ class DataSet():
                 sequence = self.build_image_sequence(frames)
             else:
                 # Get the sequence from disk.
-                sequence = self.get_extracted_sequence(sample)
+                sequence = self.get_extracted_sequence(data_type, sample)

                 if sequence is None:
                     print("Can't find sequence. Did you generate them?")
@@ -179,11 +179,11 @@ class DataSet():
         """Given a set of frames (filenames), build our sequence."""
         return [process_image(x, self.image_shape) for x in frames]

-    def get_extracted_sequence(self, sample):
+    def get_extracted_sequence(self, data_type, sample):
         """Get the saved extracted features."""
         filename = sample[2]
         path = self.sequence_path + filename + '-' + str(self.seq_length) + \
-            '-features.txt'
+            '-' + data_type + '.txt'
         if os.path.isfile(path):
             # Use a dataframe/read_csv for speed increase over numpy.
             features = pd.read_csv(path, sep=" ", header=None)
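
The net effect of the data.py change is that saved sequences are keyed by data type as well as sequence length. A small sketch of the resulting path, with a made-up video name standing in for sample[2] (not a value from the repository):

# Hypothetical illustration of the new sequence filename scheme in data.py.
# The video name and sequence_path below are made-up example values.
seq_length = 40
data_type = 'features'
sequence_path = './data/sequences/'
filename = 'v_SomeClip_g01_c01'   # stands in for sample[2]

path = sequence_path + filename + '-' + str(seq_length) + \
    '-' + data_type + '.txt'
print(path)  # ./data/sequences/v_SomeClip_g01_c01-40-features.txt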
extract_features.py  +2 -2

@@ -19,7 +19,7 @@ from tqdm import tqdm
 # Set defaults.
 seq_length = 40
-class_limit = None  # integer, number of classes to extract
+class_limit = None  # Number of classes to extract. Can be 1-101 or None for all.

 # Get the dataset.
 data = DataSet(seq_length=seq_length, class_limit=class_limit)
@@ -32,7 +32,7 @@ pbar = tqdm(total=len(data.data))
 for video in data.data:

     # Get the path to the sequence for this video.
-    path = './data/sequences-ucf/' + video[2] + '-' + str(seq_length) + \
+    path = './data/sequences/' + video[2] + '-' + str(seq_length) + \
         '-features.txt'

     # Check if we already have it.
extractor.py  +3 -5

@@ -13,17 +13,15 @@ class Extractor():
         if weights is None:
             # Get model with pretrained weights.
-            input_tensor = Input(shape=(299, 299, 3))
             base_model = InceptionV3(
-                input_tensor=input_tensor,
                 weights='imagenet',
                 include_top=True
             )

             # We'll extract features at the final pool layer.
             self.model = Model(
-                input=base_model.input,
-                output=base_model.get_layer('avg_pool').output
+                inputs=base_model.input,
+                outputs=base_model.get_layer('avg_pool').output
             )

         else:
@@ -49,7 +47,7 @@ class Extractor():
         if self.weights is None:
             # For imagenet/default network:
-            features = features[0][0][0]
+            features = features[0]
         else:
             # For loaded network:
             features = features[0]
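
The features[0][0][0] → features[0] change follows from the Keras 2 version of InceptionV3: with include_top=True its 'avg_pool' layer is a global average pool, so predict() on a single image returns a 2-D (1, 2048) array rather than a 4-D one. A rough sketch of the idea, not the repository's exact Extractor class (it downloads the ImageNet weights on first run; the random array stands in for a real frame):

# Rough Keras 2 sketch of the feature-extraction idea in extractor.py.
# Not the repository's exact class; downloads ImageNet weights on first use.
import numpy as np
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model

base_model = InceptionV3(weights='imagenet', include_top=True)

# Extract features at the final pooling layer.
model = Model(inputs=base_model.input,
              outputs=base_model.get_layer('avg_pool').output)

# A random image-shaped array stands in for a real preprocessed frame.
x = preprocess_input(np.random.rand(1, 299, 299, 3) * 255.0)
features = model.predict(x)
print(features.shape)  # expected (1, 2048), hence features[0] in the diff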
models.py  +32 -40

@@ -6,7 +6,7 @@ from keras.layers.recurrent import LSTM
 from keras.models import Sequential, load_model
 from keras.optimizers import Adam
 from keras.layers.wrappers import TimeDistributed
-from keras.layers.convolutional import (Convolution2D, MaxPooling3D, Convolution3D,
+from keras.layers.convolutional import (Conv2D, MaxPooling3D, Conv3D,
     MaxPooling2D)
 from collections import deque
 import sys
@@ -71,10 +71,10 @@ class ResearchModels():
         our CNN to this model predomenently."""
         # Model.
         model = Sequential()
-        model.add(LSTM(4096, return_sequences=True, input_shape=self.input_shape,
-                       dropout_W=0.5, dropout_U=0.5))
+        model.add(LSTM(2048, return_sequences=True, input_shape=self.input_shape,
+                       dropout=0.5))
         model.add(Flatten())
-        model.add(Dense(1024, activation='relu'))
+        model.add(Dense(512, activation='relu'))
         model.add(Dropout(0.5))
         model.add(Dense(self.nb_classes, activation='softmax'))
@@ -87,41 +87,33 @@ class ResearchModels():
         steering-models/community-models/chauffeur/models.py
         """
         model = Sequential()
-        model.add(TimeDistributed(Convolution2D(32, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid'), input_shape=self.input_shape))
-        model.add(TimeDistributed(Convolution2D(32, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
+        model.add(TimeDistributed(Conv2D(32, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu'), input_shape=self.input_shape))
+        model.add(TimeDistributed(Conv2D(32, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
         model.add(TimeDistributed(MaxPooling2D()))
-        model.add(TimeDistributed(Convolution2D(48, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
-        model.add(TimeDistributed(Convolution2D(48, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
+        model.add(TimeDistributed(Conv2D(48, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
+        model.add(TimeDistributed(Conv2D(48, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
         model.add(TimeDistributed(MaxPooling2D()))
-        model.add(TimeDistributed(Convolution2D(64, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
-        model.add(TimeDistributed(Convolution2D(64, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
+        model.add(TimeDistributed(Conv2D(64, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
+        model.add(TimeDistributed(Conv2D(64, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
         model.add(TimeDistributed(MaxPooling2D()))
-        model.add(TimeDistributed(Convolution2D(128, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
-        model.add(TimeDistributed(Convolution2D(128, 3, 3,
-            init= "he_normal",
-            activation='relu',
-            border_mode='valid')))
+        model.add(TimeDistributed(Conv2D(128, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
+        model.add(TimeDistributed(Conv2D(128, (3,3),
+            kernel_initializer="he_normal",
+            activation='relu')))
         model.add(TimeDistributed(MaxPooling2D()))
         model.add(TimeDistributed(Flatten()))
         model.add(LSTM(256, return_sequences=True))
@@ -151,13 +143,13 @@ class ResearchModels():
         """
         # Model.
         model = Sequential()
-        model.add(Convolution3D(
-            32, 7, 7, 7, activation='relu', input_shape=self.input_shape
+        model.add(Conv3D(
+            32, (7,7,7), activation='relu', input_shape=self.input_shape
         ))
         model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
-        model.add(Convolution3D(64, 3, 3, 3, activation='relu'))
+        model.add(Conv3D(64, (3,3,3), activation='relu'))
         model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
-        model.add(Convolution3D(128, 2, 2, 2, activation='relu'))
+        model.add(Conv3D(128, (2,2,2), activation='relu'))
         model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
         model.add(Flatten())
         model.add(Dense(256))
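
Beyond the Convolution2D/Convolution3D renames, two details in models.py are worth calling out: Keras 2 takes kernel sizes as tuples (Conv3D(32, (7, 7, 7)) rather than Convolution3D(32, 7, 7, 7)), and the old dropout_W / dropout_U pair on LSTM maps to dropout / recurrent_dropout, of which this diff keeps only the input-side dropout. A minimal Keras 2 sketch of an LSTM head in that style (the feature length and class count are placeholders, not the repo's values):

# Minimal Keras 2 sketch of an LSTM classifier head like the one in models.py.
# seq_length, feature_len and nb_classes are placeholder values.
from keras.layers import LSTM, Dense, Dropout, Flatten
from keras.models import Sequential

seq_length, feature_len, nb_classes = 40, 2048, 10

model = Sequential()
# Keras 1: LSTM(..., dropout_W=0.5, dropout_U=0.5)
# Keras 2: dropout=0.5 (recurrent_dropout would be the dropout_U equivalent).
model.add(LSTM(2048, return_sequences=True,
               input_shape=(seq_length, feature_len),
               dropout=0.5))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()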
requirements.txt  (new file, mode 100644)  +5 -0

@@ -0,0 +1,5 @@
+Keras>=2.0.2
+numpy>=1.12.1
+pandas>=0.19.2
+tqdm>=4.11.2
+matplotlib>=2.0.0
train.py  +6 -6

@@ -46,7 +46,7 @@ def train(data_type, seq_length, model, saved_model=None,
     # Get samples per epoch.
     # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
-    samples_per_epoch = ((len(data.data) * 0.7) // batch_size) * batch_size
+    steps_per_epoch = (len(data.data) * 0.7) // batch_size

     if load_to_memory:
         # Get data.
@@ -70,18 +70,17 @@ def train(data_type, seq_length, model, saved_model=None,
             validation_data=(X_test, y_test),
             verbose=1,
             callbacks=[checkpointer, tb, early_stopper, csv_logger],
-            nb_epoch=nb_epoch,
-            samples_per_epoch=samples_per_epoch)
+            epochs=nb_epoch)
     else:
         # Use fit generator.
         rm.model.fit_generator(
             generator=generator,
-            samples_per_epoch=samples_per_epoch,
-            nb_epoch=nb_epoch,
+            steps_per_epoch=steps_per_epoch,
+            epochs=nb_epoch,
             verbose=1,
             callbacks=[checkpointer, tb, early_stopper, csv_logger],
             validation_data=val_generator,
-            nb_val_samples=256)
+            validation_steps=10)
@@ -96,6 +95,7 @@ def main():
     if model == 'conv_3d' or model == 'crnn':
         data_type = 'images'
         image_shape = (80, 80, 3)
+        load_to_memory = False
     else:
         data_type = 'features'
         image_shape = None
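
The renamed fit arguments also change meaning: Keras 1's samples_per_epoch counted samples, while Keras 2's steps_per_epoch counts generator batches, which is why the // batch_size result is now used directly. A short sketch of that arithmetic and of the Keras 2 call shape (the dataset size, batch size and generator names are placeholders, not the repo's values):

# Sketch of the samples -> steps conversion behind the train.py change.
# num_samples and batch_size are placeholder values, not from the repo.
num_samples = 13320  # roughly UCF101-sized
batch_size = 32

# Keras 1 counted samples per epoch:
samples_per_epoch = ((num_samples * 0.7) // batch_size) * batch_size

# Keras 2 counts batches (steps) per epoch:
steps_per_epoch = (num_samples * 0.7) // batch_size

print(samples_per_epoch, steps_per_epoch)  # 9312.0 291.0

# Keras 2 call shape (names here are illustrative):
# model.fit_generator(generator,
#                     steps_per_epoch=steps_per_epoch,
#                     epochs=nb_epoch,
#                     validation_data=val_generator,
#                     validation_steps=10)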
train_cnn.py  +4 -4

@@ -69,7 +69,7 @@ def get_model(weights='imagenet'):
     predictions = Dense(len(data.classes), activation='softmax')(x)

     # this is the model we will train
-    model = Model(input=base_model.input, output=predictions)
+    model = Model(inputs=base_model.input, outputs=predictions)
     return model

 def get_top_layer_model(base_model):
@@ -106,10 +106,10 @@ def train_model(model, nb_epoch, generators, callbacks=[]):
     train_generator, validation_generator = generators
     model.fit_generator(
         train_generator,
-        samples_per_epoch=128,
+        steps_per_epoch=100,
         validation_data=validation_generator,
-        nb_val_samples=768,
-        nb_epoch=nb_epoch,
+        validation_steps=10,
+        epochs=nb_epoch,
         callbacks=callbacks)
     return model
Reference: harvitronix/five-video-classification-methods!6
Source branch: keras-2-upgrade
