Develop #3

Merged: 4 commits, Nov 5, 2018
969 changes: 296 additions & 673 deletions .idea/workspace.xml

Large diffs are not rendered by default.

404 changes: 0 additions & 404 deletions .ipynb_checkpoints/gaugeReader-checkpoint.ipynb

This file was deleted.

5 changes: 5 additions & 0 deletions CustomEstimator/CLI_commands/copy_to_bucket.sh
@@ -0,0 +1,5 @@
gsutil -m cp -r ./data/ImageEveryUnit/. gs://deep_gauge/data/
gsutil -m cp -r ./trainer/. gs://deep_gauge

## make a package (the source path should be relative to the project root,
## not the filesystem root)
tar -czvf ensemble_package.tar.gz ./ensemble_package
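
## hedged sketch, not part of the repo: if the tarball should also live in the
## bucket (so it can be handed to gcloud via --packages later), a copy along
## these lines would stage it; the gs:// destination is an assumption that
## mirrors the copy commands above
gsutil cp ensemble_package.tar.gz gs://deep_gauge/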
51 changes: 51 additions & 0 deletions CustomEstimator/CLI_commands/ens.sh
@@ -0,0 +1,51 @@
python customEstimator.py --dev=True --retrain_primary_models=False --hidden_units='[100, 200, 300, 500]' --verbosity=DEBUG


#######
gcloud ml-engine local train \
--module-name trainer_from_storage.task \
--package-path /home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage \
-- \
--primary_models_directory=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/primary_models \
--ensemble_architecture_path=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/temporary_models \
--path_to_images=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/data/ImageEveryUnit \
--bin_path=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/dumps/ \
--export_dir=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/exported_model
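

## the bare "--" above separates gcloud's own flags from the arguments that are
## forwarded untouched to the trainer's task.py; a minimal sketch of the same
## pattern (module and package paths here are illustrative, not from the repo):
gcloud ml-engine local train \
    --module-name trainer.task \
    --package-path ./trainer \
    -- \
    --verbosity DEBUG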




####### on GCP
## note: gcloud expects --package-path to point at a Python source directory;
## a prebuilt tarball like the one below would normally go to --packages instead
gcloud ml-engine jobs submit training ensemble_training \
    --scale-tier basic \
    --package-path /home/khodayarim/PycharmProjects/ensemble_package.tar.gz \
    --module-name ensemble_package.task \
    --job-dir gs://deep_gauge \
    --region "us-east1" \
    -- \
    --primary_models_directory=gs://deep_gauge/ensemble_package/misc/primary_models

## leftover flag fragments, kept as notes:
# --staging-bucket gs://deep_gauge/ensemble_package \
# --packages additional-dep1.tar.gz,dep2.whl \
# -- \
# --primary_models_directory=gs://deep_gauge/ensemble_package/misc/primary_models \

## variant using the trainer_from_storage module:
# --module-name trainer_from_storage.task \
# --package-path gs://deep_gauge/ensemble_package \
# -- \
# --primary_models_directory=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/primary_models \
# --ensemble_architecture_path=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/temporary_models \
# --path_to_images=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/data/ImageEveryUnit \
# --bin_path=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/dumps/ \
# --export_dir=/home/khodayarim/PycharmProjects/DeepGauge-ML-Demo/CustomEstimator/modules/ensemble_modules/trainer_from_storage/logs/exported_model
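
## hedged follow-ups using standard gcloud subcommands (not from the repo):
## inspect the job submitted above, or cancel it if it was submitted with the
## wrong arguments
gcloud ml-engine jobs describe ensemble_training
# gcloud ml-engine jobs cancel ensemble_training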





6 files renamed without changes.
@@ -1,15 +1,18 @@
import glob
-import numpy as np
import os
-from tensorflow.python.framework import meta_graph
import tensorflow as tf
-from modules import LoadImg
import time
+import numpy as np
+from tensorflow.python.framework import meta_graph
+
+from CustomEstimator.modules.primary_models_modules import LoadImg

X_train, X_test, y_train, y_test, cls_indices = LoadImg.Dataset.prep_datasets(
-    ver_ratio=0.2, container_path='data/ImageEveryUnit',
+    ver_ratio=0.2, container_path='./CustomEstimator/data/ImageEveryUnit',
    final_img_width=224, final_img_height=224,
-    color_mode="grayscale", random_state=1911, is_trial=True)
+    color_mode="grayscale", random_state=1911,
+    is_trial=True,
+    bin_path='./CustomEstimator/modules/ensemble_modules/ensemble/bin/')


class Ensemble():
@@ -189,9 +192,9 @@ def new_fc_layer(inp,
    return raw_imgs, logits_fc


-def build_model_and_train(primary_models_directory='./logs/primary_models/',
-                          writer_path='./trial/writer',
-                          save_model_path='./trial/best_model_main',
+def build_model_and_train(primary_models_directory='./CustomEstimator/logs/primary_models/',
+                          writer_path='./CustomEstimator/modules/ensemble_modules/ensemble/trial/writer',
+                          save_model_path='./CustomEstimator/modules/ensemble_modules/ensemble/trial/best_model_main',
                          images_shape=[None, 224, 224, 3],
                          hidden_units=[500, 100],
                          X_train=X_train, X_test=X_test,
@@ -203,77 +206,101 @@ def build_model_and_train(primary_models_directory='./logs/primary_models/',
        n_output=y_train.shape[1],
        primary_models_directory=primary_models_directory,
        images_shape=images_shape)
-    tf.reset_default_graph()
-    graph = tf.get_default_graph()
-
-    ## y
-    y_true_tf = tf.placeholder(tf.float64, shape=[None, y_train.shape[1]],
-                               name='y_true_tf')
-
-    y_pred = tf.nn.softmax(logits_fc)
-    y_pred_cls = tf.argmax(y_pred, axis=1)
-    y_true_cls = tf.argmax(y_true_tf, axis=1)
-    correct_prediction = tf.equal(y_pred_cls, y_true_cls)
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-
-    ## dataset
-    dataset = tf.data.Dataset.from_tensor_slices((raw_imgs, y_true_tf))
-    dataset = dataset.shuffle(buffer_size=100)
-    dataset = dataset.batch(batch_size)
-    dataset = dataset.repeat(epochs)
-    iter = dataset.make_initializable_iterator()
-    # iter = dataset.make_one_shot_iterator()
-    get_batch = iter.get_next()
-
-    ## cost
-    cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_fc,
-                                                      labels=y_true_tf,
-                                                      name='cost_fc')
-
-    trainable_variables = [v for v in tf.trainable_variables() if 'ensemble' in v.name]
-    optimizer = tf.train.AdamOptimizer(learning_rate=3e-4, name='adam_fc'). \
-        minimize(cost, var_list=trainable_variables)
-    # variables_list = [n.name for n in tf.get_default_graph().as_graph_def().node if 'raw_imgs' in n.name]
-
-    feed_dict_train = {raw_imgs: X_train,
-                       y_true_tf: y_train}
-
-    feed_dict_test = {raw_imgs: X_test,
-                      y_true_tf: y_test}
+    # tf.reset_default_graph()
+    # graph = tf.get_default_graph()
+    #
+    # ## y
+    # y_true_tf = tf.placeholder(tf.float64, shape=[None, y_train.shape[1]],
+    #                            name='y_true_tf')
+    #
+    # y_pred = tf.nn.softmax(logits_fc)
+    # y_pred_cls = tf.argmax(y_pred, axis=1)
+    # y_true_cls = tf.argmax(y_true_tf, axis=1)
+    # correct_prediction = tf.equal(y_pred_cls, y_true_cls)
+    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+    #
+    # ## dataset
+    # dataset = tf.data.Dataset.from_tensor_slices((raw_imgs, y_true_tf))
+    # dataset = dataset.shuffle(buffer_size=100)
+    # dataset = dataset.batch(batch_size)
+    # dataset = dataset.repeat(epochs)
+    # iter = dataset.make_initializable_iterator()
+    # # iter = dataset.make_one_shot_iterator()
+    # get_batch = iter.get_next()
+    #
+    # ## cost
+    # cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_fc,
+    #                                                   labels=y_true_tf,
+    #                                                   name='cost_fc')
+    #
+    # trainable_variables = [v for v in tf.trainable_variables() if 'ensemble' in v.name]
+    # optimizer = tf.train.AdamOptimizer(learning_rate=3e-4, name='adam_fc'). \
+    #     minimize(cost, var_list=trainable_variables)
+    # # variables_list = [n.name for n in tf.get_default_graph().as_graph_def().node if 'raw_imgs' in n.name]
+    #
+    # feed_dict_train = {raw_imgs: X_train,
+    #                    y_true_tf: y_train}
+    #
+    # feed_dict_test = {raw_imgs: X_test,
+    #                   y_true_tf: y_test}

    # Start-time used for printing time-usage below.
-    start_time = time.time()
-
-    with tf.Session(graph=graph) as session:
-        # initialize weights and biases variables
-        session.run(tf.global_variables_initializer())
-        session.run(iter.initializer, feed_dict=feed_dict_train)
-        writer = tf.summary.FileWriter(writer_path, session.graph)
-
-        best_validation_accuracy = 0
-
-        for i in range(200):
-            try:
-                print('batch {} ... '.format(i))
-                Xydata = session.run(get_batch)
-                session.run(optimizer, feed_dict={raw_imgs: Xydata[0],
-                                                  y_true_tf: Xydata[1]})
-                accuracy_train = session.run(accuracy, feed_dict=feed_dict_train)
-                print('train_acc = {} ... '.format(accuracy_train))
-                accuracy_validation = session.run(accuracy, feed_dict=feed_dict_test)
-                print('validation_acc = {} ... '.format(accuracy_validation))
-                if accuracy_validation > best_validation_accuracy:
-                    saver = tf.train.Saver(max_to_keep=1)
-                    saver.save(session, save_model_path)
-            except:
-                break
-
-        writer.close()
-
-    end_time = time.time()
-
-    print("Training took {}.".format(end_time - start_time))
+    # start_time = time.time()
+
+    # with tf.Session() as session:
+    #     # initialize weights and biases variables
+    #     session.run(tf.global_variables_initializer())
+    #     session.run(iter.initializer, feed_dict=feed_dict_train)
+    #     writer = tf.summary.FileWriter(writer_path, session.graph)
+
+    #     best_validation_accuracy = 0
+    #
+    #     for i in range(200):
+    #         try:
+    #             print('batch {} ... '.format(i))
+    #             Xydata = session.run(get_batch)
+    #             session.run(optimizer, feed_dict={raw_imgs: Xydata[0],
+    #                                               y_true_tf: Xydata[1]})
+    #             accuracy_train = session.run(accuracy, feed_dict=feed_dict_train)
+    #             print('train_acc = {} ... '.format(accuracy_train))
+    #             accuracy_validation = session.run(accuracy, feed_dict=feed_dict_test)
+    #             print('validation_acc = {} ... '.format(accuracy_validation))
+    #             if accuracy_validation > best_validation_accuracy:
+    #                 saver = tf.train.Saver(max_to_keep=1)
+    #                 saver.save(session, save_model_path)
+    #         except:
+    #             break
+    #
+    #     writer.close()
+    #
+    # end_time = time.time()
+    #
+    # print("Training took {}.".format(end_time - start_time))

    return


build_model_and_train()

+graph_pred = tf.get_default_graph()
+a = tf.trainable_variables()
+trainable_variables = [v for v in tf.trainable_variables() if 'logits_tf' in v.name]
+
+
+with tf.Session() as session:
+    saver_3 = tf.train.import_meta_graph('./CustomEstimator/modules/ensemble_modules/ensemble/logs/primary_models/1st/best_model_main.meta',
+                                         clear_devices=True)
+    saver_3.restore(tf.get_default_session(),
+                    tf.train.latest_checkpoint('./CustomEstimator/modules/ensemble_modules/ensemble/logs/primary_models/1st/'))
+    graph_pred = tf.get_default_graph()
+    raw_imgs = graph_pred.get_tensor_by_name("raw_imgs:0")
+    logits_tf = graph_pred.get_tensor_by_name("logits_tf:0")
+    b = tf.trainable_variables()
+
+    c = b[0].eval()
+
+    ##
+    feed_dict_pred = {raw_imgs: X_train}
+
+    ##
+    # logits = session.run(b[0])
2 files renamed without changes.
@@ -0,0 +1,7 @@
BUCKET_NAME="tt_ttt"
echo $BUCKET_NAME
REGION=us-central1
gsutil mb -l $REGION gs://$BUCKET_NAME/

## copy the data folder to your Cloud Storage bucket
gsutil cp -r data gs://$BUCKET_NAME/data
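
## hedged sketch, not part of the repo: confirm the upload landed where
## expected with a recursive listing of the same bucket
gsutil ls -r gs://$BUCKET_NAME/data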
@@ -0,0 +1,24 @@
from setuptools import find_packages
from setuptools import setup

# note: glob, os, pickle, json, collections, multiprocessing, and argparse are
# Python standard-library modules and must not be listed in install_requires;
# 'sklearn==1.19.2' does not exist on PyPI (scikit-learn 0.19.2 was presumably meant)
REQUIRED_PACKAGES = ['tensorflow==1.10.0',
                     'scikit-learn==0.19.2',
                     'numpy==1.14.5',
                     'pandas==0.23.4',
                     'MultiColProcessor']

setup(
name='deepGauge_custom_estimator',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description=''
)
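
A hedged sketch of how this setup.py would typically be used (these commands are not in the repo): build a source distribution locally, then hand the resulting tarball to gcloud via --packages. The dist filename follows from the name and version fields above.

python setup.py sdist
ls dist/deepGauge_custom_estimator-0.1.tar.gz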
@@ -0,0 +1,28 @@
gcloud ml-engine jobs submit training "$JOB_ID" \
--stream-logs \
--module-name trainer.task \
--package-path trainer \
--staging-bucket "$BUCKET" \
--region us-central1 \
--runtime-version=1.4 \
-- \
--output_path "${GCS_PATH}/training" \
--eval_data_paths "${GCS_PATH}/preproc/eval*" \
--train_data_paths "${GCS_PATH}/preproc/train*"


##
REGION=us-central1
JOB_NAME=deep_gauge_1
BUCKET=gs://deep_gauge
PACKAGE_PATH=CustomEstimator/modules/ensemble_modules/trainer_from_storage/trainer


gcloud ml-engine jobs submit training $JOB_NAME \
--runtime-version 1.8 \
--module-name trainer.task \
--package-path "$PACKAGE_PATH" \
--staging-bucket "$BUCKET" \
--region $REGION \
-- \
--verbosity DEBUG
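
## hedged follow-up using a standard subcommand (not from the repo): the second
## command above omits --stream-logs, so the job's logs can be tailed
## separately after submission
gcloud ml-engine jobs stream-logs $JOB_NAME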