From eafe24961d9178ce5a4df4042b7a7a4b8476e524 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Thu, 28 Aug 2014 16:12:01 -0700 Subject: [PATCH 1/2] ignore caffe generated files and stop ignoring examples - ignore models and snapshots, means, and input databases - excluding exceptions to commits is easier than including exhaustively --- .gitignore | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index d162cb42bb0..07eebe34591 100644 --- a/.gitignore +++ b/.gitignore @@ -47,11 +47,16 @@ python/caffe/proto/ # User's build configuration Makefile.config -# Data and examples are either +# Data and models are either # 1. reference, and not casually committed # 2. custom, and live on their own unless they're deliberated contributed data/* -examples/* +*model +*_iter_* +*.solverstate +*.binaryproto +*leveldb +*lmdb # Generated documentation docs/_site From e79380631f036722c890430eef4eb6ebafb2a477 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Thu, 28 Aug 2014 16:34:57 -0700 Subject: [PATCH 2/2] set examples paths relative to root --- examples/cifar10/cifar10_full_solver.prototxt | 4 ++-- examples/cifar10/cifar10_full_solver_lr1.prototxt | 4 ++-- examples/cifar10/cifar10_full_solver_lr2.prototxt | 4 ++-- examples/cifar10/cifar10_full_train_test.prototxt | 8 ++++---- examples/cifar10/cifar10_quick_solver.prototxt | 4 ++-- .../cifar10/cifar10_quick_solver_lr1.prototxt | 4 ++-- .../cifar10/cifar10_quick_train_test.prototxt | 8 ++++---- examples/cifar10/convert_cifar_data.cpp | 4 ++-- examples/cifar10/create_cifar10.sh | 13 ++++++------- .../pascal_finetune_solver.prototxt | 14 ++++++++++++++ .../pascal_finetune_train.prototxt | 4 ++-- .../pascal_finetune_val.prototxt | 4 ++-- examples/imagenet/alexnet_solver.prototxt | 4 ++-- examples/imagenet/alexnet_train_val.prototxt | 8 ++++---- examples/imagenet/create_imagenet.sh | 13 +++++++------ examples/imagenet/get_caffe_alexnet_model.sh | 2 +- 
.../imagenet/get_caffe_rcnn_imagenet_model.sh | 3 ++- .../get_caffe_reference_imagenet_model.sh | 2 +- examples/imagenet/imagenet_solver.prototxt | 4 ++-- examples/imagenet/imagenet_train_val.prototxt | 8 ++++---- examples/imagenet/make_imagenet_mean.sh | 6 ++---- examples/imagenet/resume_training.sh | 8 +++----- examples/imagenet/time_imagenet.sh | 6 +++--- examples/imagenet/train_alexnet.sh | 4 +--- examples/imagenet/train_imagenet.sh | 4 +--- examples/mnist/create_mnist.sh | 15 +++++++++------ examples/mnist/lenet_consolidated_solver.prototxt | 8 ++++---- examples/mnist/lenet_solver.prototxt | 4 ++-- examples/mnist/lenet_train_test.prototxt | 4 ++-- examples/mnist/mnist_autoencoder.prototxt | 4 ++-- examples/mnist/mnist_autoencoder_solver.prototxt | 4 ++-- examples/mnist/train_lenet.sh | 4 +--- examples/mnist/train_lenet_consolidated.sh | 5 ++--- examples/mnist/train_mnist_autoencoder.sh | 6 +++--- .../pascal_finetune_solver.prototxt | 14 -------------- 35 files changed, 104 insertions(+), 111 deletions(-) create mode 100644 examples/finetune_pascal_detection/pascal_finetune_solver.prototxt rename examples/{pascal-finetuning => finetune_pascal_detection}/pascal_finetune_train.prototxt (97%) rename examples/{pascal-finetuning => finetune_pascal_detection}/pascal_finetune_val.prototxt (97%) delete mode 100644 examples/pascal-finetuning/pascal_finetune_solver.prototxt diff --git a/examples/cifar10/cifar10_full_solver.prototxt b/examples/cifar10/cifar10_full_solver.prototxt index d8e0bb27c44..f30b3986142 100644 --- a/examples/cifar10/cifar10_full_solver.prototxt +++ b/examples/cifar10/cifar10_full_solver.prototxt @@ -2,7 +2,7 @@ # then another factor of 10 after 10 more epochs (5000 iters) # The train/test net protocol buffer definition -net: "cifar10_full_train_test.prototxt" +net: "examples/cifar10/cifar10_full_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. 
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. @@ -21,6 +21,6 @@ display: 200 max_iter: 60000 # snapshot intermediate results snapshot: 10000 -snapshot_prefix: "cifar10_full" +snapshot_prefix: "examples/cifar10/cifar10_full" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/cifar10/cifar10_full_solver_lr1.prototxt b/examples/cifar10/cifar10_full_solver_lr1.prototxt index 746f4fba15a..59bc5721f4c 100644 --- a/examples/cifar10/cifar10_full_solver_lr1.prototxt +++ b/examples/cifar10/cifar10_full_solver_lr1.prototxt @@ -2,7 +2,7 @@ # then another factor of 10 after 10 more epochs (5000 iters) # The train/test net protocol buffer definition -net: "cifar10_full_train_test.prototxt" +net: "examples/cifar10/cifar10_full_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. @@ -21,6 +21,6 @@ display: 200 max_iter: 65000 # snapshot intermediate results snapshot: 5000 -snapshot_prefix: "cifar10_full" +snapshot_prefix: "examples/cifar10/cifar10_full" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/cifar10/cifar10_full_solver_lr2.prototxt b/examples/cifar10/cifar10_full_solver_lr2.prototxt index 5a549ffc96d..d4ed5d8e041 100644 --- a/examples/cifar10/cifar10_full_solver_lr2.prototxt +++ b/examples/cifar10/cifar10_full_solver_lr2.prototxt @@ -2,7 +2,7 @@ # then another factor of 10 after 10 more epochs (5000 iters) # The train/test net protocol buffer definition -net: "cifar10_full_train_test.prototxt" +net: "examples/cifar10/cifar10_full_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. 
@@ -21,6 +21,6 @@ display: 200 max_iter: 70000 # snapshot intermediate results snapshot: 5000 -snapshot_prefix: "cifar10_full" +snapshot_prefix: "examples/cifar10/cifar10_full" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/cifar10/cifar10_full_train_test.prototxt b/examples/cifar10/cifar10_full_train_test.prototxt index c01fef46ac0..ffecceb2a39 100644 --- a/examples/cifar10/cifar10_full_train_test.prototxt +++ b/examples/cifar10/cifar10_full_train_test.prototxt @@ -5,10 +5,10 @@ layers { top: "data" top: "label" data_param { - source: "cifar10-leveldb/cifar-train-leveldb" + source: "examples/cifar10/cifar10_train_leveldb" batch_size: 100 transform_param { - mean_file: "mean.binaryproto" + mean_file: "examples/cifar10/mean.binaryproto" } } include: { phase: TRAIN } @@ -19,10 +19,10 @@ layers { top: "data" top: "label" data_param { - source: "cifar10-leveldb/cifar-test-leveldb" + source: "examples/cifar10/cifar10_test_leveldb" batch_size: 100 transform_param { - mean_file: "mean.binaryproto" + mean_file: "examples/cifar10/mean.binaryproto" } } include: { phase: TEST } diff --git a/examples/cifar10/cifar10_quick_solver.prototxt b/examples/cifar10/cifar10_quick_solver.prototxt index cdd0722b3a0..14b4401ba16 100644 --- a/examples/cifar10/cifar10_quick_solver.prototxt +++ b/examples/cifar10/cifar10_quick_solver.prototxt @@ -1,7 +1,7 @@ # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 # The train/test net protocol buffer definition -net: "cifar10_quick_train_test.prototxt" +net: "examples/cifar10/cifar10_quick_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. # In the case of MNIST, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. 
@@ -20,6 +20,6 @@ display: 100 max_iter: 4000 # snapshot intermediate results snapshot: 4000 -snapshot_prefix: "cifar10_quick" +snapshot_prefix: "examples/cifar10/cifar10_quick" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/cifar10/cifar10_quick_solver_lr1.prototxt b/examples/cifar10/cifar10_quick_solver_lr1.prototxt index 2ed54ad980f..d3af70c05e7 100644 --- a/examples/cifar10/cifar10_quick_solver_lr1.prototxt +++ b/examples/cifar10/cifar10_quick_solver_lr1.prototxt @@ -1,7 +1,7 @@ # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 # The train/test net protocol buffer definition -net: "cifar10_quick_train_test.prototxt" +net: "examples/cifar10/cifar10_quick_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. # In the case of MNIST, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. @@ -20,6 +20,6 @@ display: 100 max_iter: 5000 # snapshot intermediate results snapshot: 5000 -snapshot_prefix: "cifar10_quick" +snapshot_prefix: "examples/cifar10/cifar10_quick" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/cifar10/cifar10_quick_train_test.prototxt b/examples/cifar10/cifar10_quick_train_test.prototxt index 779dacf6758..f708798c71c 100644 --- a/examples/cifar10/cifar10_quick_train_test.prototxt +++ b/examples/cifar10/cifar10_quick_train_test.prototxt @@ -5,10 +5,10 @@ layers { top: "data" top: "label" data_param { - source: "cifar10-leveldb/cifar-train-leveldb" + source: "examples/cifar10/cifar10_train_leveldb" batch_size: 100 transform_param { - mean_file: "mean.binaryproto" + mean_file: "examples/cifar10/mean.binaryproto" } } include: { phase: TRAIN } @@ -19,10 +19,10 @@ layers { top: "data" top: "label" data_param { - source: "cifar10-leveldb/cifar-test-leveldb" + source: "examples/cifar10/cifar10_test_leveldb" batch_size: 100 transform_param { - mean_file: "mean.binaryproto" + mean_file: "examples/cifar10/mean.binaryproto" 
} } include: { phase: TEST } diff --git a/examples/cifar10/convert_cifar_data.cpp b/examples/cifar10/convert_cifar_data.cpp index 2d5589bd30a..90ecb6d9a88 100644 --- a/examples/cifar10/convert_cifar_data.cpp +++ b/examples/cifar10/convert_cifar_data.cpp @@ -48,7 +48,7 @@ void convert_dataset(const string& input_folder, const string& output_folder) { LOG(INFO) << "Writing Training data"; leveldb::DB* train_db; leveldb::Status status; - status = leveldb::DB::Open(options, output_folder + "/cifar-train-leveldb", + status = leveldb::DB::Open(options, output_folder + "/cifar10_train_leveldb", &train_db); CHECK(status.ok()) << "Failed to open leveldb."; for (int fileid = 0; fileid < kCIFARTrainBatches; ++fileid) { @@ -71,7 +71,7 @@ void convert_dataset(const string& input_folder, const string& output_folder) { LOG(INFO) << "Writing Testing data"; leveldb::DB* test_db; - CHECK(leveldb::DB::Open(options, output_folder + "/cifar-test-leveldb", + CHECK(leveldb::DB::Open(options, output_folder + "/cifar10_test_leveldb", &test_db).ok()) << "Failed to open leveldb."; // Open files std::ifstream data_file((input_folder + "/test_batch.bin").c_str(), diff --git a/examples/cifar10/create_cifar10.sh b/examples/cifar10/create_cifar10.sh index 85757f345ef..ad5038e0c3e 100755 --- a/examples/cifar10/create_cifar10.sh +++ b/examples/cifar10/create_cifar10.sh @@ -1,19 +1,18 @@ #!/usr/bin/env sh # This script converts the cifar data into leveldb format. -EXAMPLES=../../build/examples/cifar10 -DATA=../../data/cifar10 -TOOLS=../../build/tools +EXAMPLE=examples/cifar10 +DATA=data/cifar10 echo "Creating leveldb..." -rm -rf cifar10-leveldb -mkdir cifar10-leveldb +rm -rf $EXAMPLE/cifar10_train_leveldb $EXAMPLE/cifar10_test_leveldb -$EXAMPLES/convert_cifar_data.bin $DATA ./cifar10-leveldb +./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE echo "Computing image mean..." 
-$TOOLS/compute_image_mean.bin ./cifar10-leveldb/cifar-train-leveldb mean.binaryproto +./build/tools/compute_image_mean $EXAMPLE/cifar10_train_leveldb \ + $EXAMPLE/mean.binaryproto echo "Done." diff --git a/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt b/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt new file mode 100644 index 00000000000..54ba6350448 --- /dev/null +++ b/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt @@ -0,0 +1,14 @@ +train_net: "examples/finetune_pascal_detection/pascal_finetune_train.prototxt" +test_net: "examples/finetune_pascal_detection/pascal_finetune_val.prototxt" +test_iter: 100 +test_interval: 1000 +base_lr: 0.001 +lr_policy: "step" +gamma: 0.1 +stepsize: 20000 +display: 20 +max_iter: 100000 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 10000 +snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune" diff --git a/examples/pascal-finetuning/pascal_finetune_train.prototxt b/examples/finetune_pascal_detection/pascal_finetune_train.prototxt similarity index 97% rename from examples/pascal-finetuning/pascal_finetune_train.prototxt rename to examples/finetune_pascal_detection/pascal_finetune_train.prototxt index dfc60fe4b8a..ae7ee4ccf88 100644 --- a/examples/pascal-finetuning/pascal_finetune_train.prototxt +++ b/examples/finetune_pascal_detection/pascal_finetune_train.prototxt @@ -5,8 +5,8 @@ layers { top: "data" top: "label" window_data_param { - source: "window_file_2007_trainval.txt" - mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto" + source: "examples/finetune_pascal_detection/window_file_2007_trainval.txt" + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" batch_size: 128 crop_size: 227 mirror: true diff --git a/examples/pascal-finetuning/pascal_finetune_val.prototxt b/examples/finetune_pascal_detection/pascal_finetune_val.prototxt similarity index 97% rename from examples/pascal-finetuning/pascal_finetune_val.prototxt rename to 
examples/finetune_pascal_detection/pascal_finetune_val.prototxt index 91ded585d85..30dd164c4fa 100644 --- a/examples/pascal-finetuning/pascal_finetune_val.prototxt +++ b/examples/finetune_pascal_detection/pascal_finetune_val.prototxt @@ -5,8 +5,8 @@ layers { top: "data" top: "label" window_data_param { - source: "window_file_2007_test.txt" - mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto" + source: "examples/finetune_pascal_detection/window_file_2007_test.txt" + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" batch_size: 128 crop_size: 227 mirror: true diff --git a/examples/imagenet/alexnet_solver.prototxt b/examples/imagenet/alexnet_solver.prototxt index a3aeae07dc7..94bda7f36a5 100644 --- a/examples/imagenet/alexnet_solver.prototxt +++ b/examples/imagenet/alexnet_solver.prototxt @@ -1,4 +1,4 @@ -net: "alexnet_train_val.prototxt" +net: "examples/imagenet/alexnet_train_val.prototxt" test_iter: 1000 test_interval: 1000 base_lr: 0.01 @@ -10,5 +10,5 @@ max_iter: 450000 momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "caffe_alexnet_train" +snapshot_prefix: "examples/imagenet/caffe_alexnet" solver_mode: GPU diff --git a/examples/imagenet/alexnet_train_val.prototxt b/examples/imagenet/alexnet_train_val.prototxt index 8ffdb7a8f69..b0f5815e007 100644 --- a/examples/imagenet/alexnet_train_val.prototxt +++ b/examples/imagenet/alexnet_train_val.prototxt @@ -5,11 +5,11 @@ layers { top: "data" top: "label" data_param { - source: "ilsvrc12_train_leveldb" + source: "examples/imagenet/ilsvrc12_train_leveldb" batch_size: 256 transform_param { crop_size: 227 - mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto" + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" mirror: true } } @@ -21,11 +21,11 @@ layers { top: "data" top: "label" data_param { - source: "ilsvrc12_val_leveldb" + source: "examples/imagenet/ilsvrc12_val_leveldb" batch_size: 50 transform_param { crop_size: 227 - mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto" + 
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" mirror: false } } diff --git a/examples/imagenet/create_imagenet.sh b/examples/imagenet/create_imagenet.sh index 79cb17c4692..a286b8fe74c 100755 --- a/examples/imagenet/create_imagenet.sh +++ b/examples/imagenet/create_imagenet.sh @@ -2,8 +2,9 @@ # Create the imagenet leveldb inputs # N.B. set the path to the imagenet train + val data dirs -TOOLS=../../build/tools -DATA=../../data/ilsvrc12 +EXAMPLE=examples/imagenet +DATA=data/ilsvrc12 +TOOLS=build/tools TRAIN_DATA_ROOT=/path/to/imagenet/train/ VAL_DATA_ROOT=/path/to/imagenet/val/ @@ -35,22 +36,22 @@ fi echo "Creating train leveldb..." -GLOG_logtostderr=1 $TOOLS/convert_imageset.bin \ +GLOG_logtostderr=1 $TOOLS/convert_imageset \ --resize_height=$RESIZE_HEIGHT \ --resize_width=$RESIZE_WIDTH \ --shuffle \ $TRAIN_DATA_ROOT \ $DATA/train.txt \ - ilsvrc12_train_leveldb + $EXAMPLE/ilsvrc12_train_leveldb echo "Creating val leveldb..." -GLOG_logtostderr=1 $TOOLS/convert_imageset.bin \ +GLOG_logtostderr=1 $TOOLS/convert_imageset \ --resize_height=$RESIZE_HEIGHT \ --resize_width=$RESIZE_WIDTH \ --shuffle \ $VAL_DATA_ROOT \ $DATA/val.txt \ - ilsvrc12_val_leveldb + $EXAMPLE/ilsvrc12_val_leveldb echo "Done." diff --git a/examples/imagenet/get_caffe_alexnet_model.sh b/examples/imagenet/get_caffe_alexnet_model.sh index b680a32d1cc..7312ed93070 100755 --- a/examples/imagenet/get_caffe_alexnet_model.sh +++ b/examples/imagenet/get_caffe_alexnet_model.sh @@ -23,6 +23,6 @@ fi echo "Downloading..." -wget http://dl.caffe.berkeleyvision.org/caffe_alexnet_model +wget -O examples/imagenet/$MODEL http://dl.caffe.berkeleyvision.org/$MODEL echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 
diff --git a/examples/imagenet/get_caffe_rcnn_imagenet_model.sh b/examples/imagenet/get_caffe_rcnn_imagenet_model.sh index c7f36edcbf0..9a8d0a155a0 100755 --- a/examples/imagenet/get_caffe_rcnn_imagenet_model.sh +++ b/examples/imagenet/get_caffe_rcnn_imagenet_model.sh @@ -23,5 +23,6 @@ fi echo "Downloading..." -wget http://dl.caffe.berkeleyvision.org/$MODEL +wget -O examples/imagenet/$MODEL http://dl.caffe.berkeleyvision.org/$MODEL + echo "Done. Please run this command again to verify that checksum = $CHECKSUM." diff --git a/examples/imagenet/get_caffe_reference_imagenet_model.sh b/examples/imagenet/get_caffe_reference_imagenet_model.sh index 85007994051..f687ebfa79e 100755 --- a/examples/imagenet/get_caffe_reference_imagenet_model.sh +++ b/examples/imagenet/get_caffe_reference_imagenet_model.sh @@ -23,6 +23,6 @@ fi echo "Downloading..." -wget http://dl.caffe.berkeleyvision.org/$MODEL +wget -O examples/imagenet/$MODEL http://dl.caffe.berkeleyvision.org/$MODEL echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 
diff --git a/examples/imagenet/imagenet_solver.prototxt b/examples/imagenet/imagenet_solver.prototxt index 1ad40fe71e7..5b5be4bb8a9 100644 --- a/examples/imagenet/imagenet_solver.prototxt +++ b/examples/imagenet/imagenet_solver.prototxt @@ -1,4 +1,4 @@ -net: "imagenet_train_val.prototxt" +net: "examples/imagenet/imagenet_train_val.prototxt" test_iter: 1000 test_interval: 1000 base_lr: 0.01 @@ -10,5 +10,5 @@ max_iter: 450000 momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "caffe_imagenet_train" +snapshot_prefix: "examples/imagenet/caffe_imagenet" solver_mode: GPU diff --git a/examples/imagenet/imagenet_train_val.prototxt b/examples/imagenet/imagenet_train_val.prototxt index 5f9c83949f5..766d546a385 100644 --- a/examples/imagenet/imagenet_train_val.prototxt +++ b/examples/imagenet/imagenet_train_val.prototxt @@ -5,11 +5,11 @@ layers { top: "data" top: "label" data_param { - source: "ilsvrc12_train_leveldb" + source: "examples/imagenet/ilsvrc12_train_leveldb" batch_size: 256 transform_param { crop_size: 227 - mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto" + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" mirror: true } } @@ -21,11 +21,11 @@ layers { top: "data" top: "label" data_param { - source: "ilsvrc12_val_leveldb" + source: "examples/imagenet/ilsvrc12_val_leveldb" batch_size: 50 transform_param { crop_size: 227 - mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto" + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" mirror: false } } diff --git a/examples/imagenet/make_imagenet_mean.sh b/examples/imagenet/make_imagenet_mean.sh index f3d69cf2f64..d3d0c9af5d2 100755 --- a/examples/imagenet/make_imagenet_mean.sh +++ b/examples/imagenet/make_imagenet_mean.sh @@ -2,9 +2,7 @@ # Compute the mean image from the imagenet training leveldb # N.B. 
this is available in data/ilsvrc12 -TOOLS=../../build/tools -DATA=../../data/ilsvrc12 - -$TOOLS/compute_image_mean.bin ilsvrc12_train_leveldb $DATA/imagenet_mean.binaryproto +./build/tools/compute_image_mean examples/imagenet/ilsvrc12_train_leveldb \ + data/ilsvrc12/imagenet_mean.binaryproto echo "Done." diff --git a/examples/imagenet/resume_training.sh b/examples/imagenet/resume_training.sh index 9e00d93c469..3c964b56ffc 100755 --- a/examples/imagenet/resume_training.sh +++ b/examples/imagenet/resume_training.sh @@ -1,9 +1,7 @@ #!/usr/bin/env sh -TOOLS=../../build/tools - -$TOOLS/caffe train\ - --solver=imagenet_solver.prototxt \ - --snapshot=caffe_imagenet_train_10000.solverstate +./build/tools/caffe train \ + --solver=examples/imagenet/imagenet_solver.prototxt \ + --snapshot=examples/imagenet/caffe_imagenet_10000.solverstate echo "Done." diff --git a/examples/imagenet/time_imagenet.sh b/examples/imagenet/time_imagenet.sh index c448b4977ce..3f46e0e0f97 100755 --- a/examples/imagenet/time_imagenet.sh +++ b/examples/imagenet/time_imagenet.sh @@ -1,7 +1,5 @@ #!/usr/bin/env sh -TOOLS=../../build/tools - if [ -z "$1" ]; then echo "Using CPU! To time GPU mode, use:" echo " ./time_imagenet.sh " @@ -12,6 +10,8 @@ else GPU="--gpu=$1" fi -$TOOLS/caffe time --model=imagenet_train_val.prototxt ${GPU} +./build/tools/caffe time \ + --model=examples/imagenet/imagenet_train_val.prototxt \ + ${GPU} echo "Done." diff --git a/examples/imagenet/train_alexnet.sh b/examples/imagenet/train_alexnet.sh index 6a7c0577413..1ddcbeee4b0 100755 --- a/examples/imagenet/train_alexnet.sh +++ b/examples/imagenet/train_alexnet.sh @@ -1,7 +1,5 @@ #!/usr/bin/env sh -TOOLS=../../build/tools - -$TOOLS/caffe train --solver=alexnet_solver.prototxt +./build/tools/caffe train --solver=examples/imagenet/alexnet_solver.prototxt echo "Done." 
diff --git a/examples/imagenet/train_imagenet.sh b/examples/imagenet/train_imagenet.sh index 008b96c01a1..cba2ad59581 100755 --- a/examples/imagenet/train_imagenet.sh +++ b/examples/imagenet/train_imagenet.sh @@ -1,7 +1,5 @@ #!/usr/bin/env sh -TOOLS=../../build/tools - -$TOOLS/caffe train --solver=imagenet_solver.prototxt +./build/tools/caffe train --solver=examples/imagenet/imagenet_solver.prototxt echo "Done." diff --git a/examples/mnist/create_mnist.sh b/examples/mnist/create_mnist.sh index ae75bec2ab2..ed6e36903c2 100755 --- a/examples/mnist/create_mnist.sh +++ b/examples/mnist/create_mnist.sh @@ -1,15 +1,18 @@ #!/usr/bin/env sh # This script converts the mnist data into leveldb format. -EXAMPLES=../../build/examples/mnist -DATA=../../data/mnist +EXAMPLE=examples/mnist +DATA=data/mnist +BUILD=build/examples/mnist echo "Creating leveldb..." -rm -rf mnist-train-leveldb -rm -rf mnist-test-leveldb +rm -rf $EXAMPLE/mnist_train_leveldb +rm -rf $EXAMPLE/mnist_test_leveldb -$EXAMPLES/convert_mnist_data.bin $DATA/train-images-idx3-ubyte $DATA/train-labels-idx1-ubyte mnist-train-leveldb -$EXAMPLES/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte $DATA/t10k-labels-idx1-ubyte mnist-test-leveldb +$BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \ + $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_leveldb +$BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \ + $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_leveldb echo "Done." diff --git a/examples/mnist/lenet_consolidated_solver.prototxt b/examples/mnist/lenet_consolidated_solver.prototxt index 2e69589c67f..8ff593f2781 100644 --- a/examples/mnist/lenet_consolidated_solver.prototxt +++ b/examples/mnist/lenet_consolidated_solver.prototxt @@ -22,7 +22,7 @@ display: 100 max_iter: 10000 # snapshot intermediate results snapshot: 5000 -snapshot_prefix: "lenet" +snapshot_prefix: "examples/mnist/lenet" # Set a random_seed for repeatable results. 
# (For results that vary due to random initialization, comment out the below # line, or set to a negative integer -- e.g. "random_seed: -1") @@ -50,7 +50,7 @@ net_param { top: "data" top: "label" data_param { - source: "mnist-train-leveldb" + source: "examples/mnist/mnist_train_leveldb" batch_size: 64 transform_param { scale: 0.00390625 @@ -64,7 +64,7 @@ net_param { top: "data" top: "label" data_param { - source: "mnist-test-leveldb" + source: "examples/mnist/mnist_test_leveldb" batch_size: 100 transform_param { scale: 0.00390625 @@ -81,7 +81,7 @@ net_param { top: "data" top: "label" data_param { - source: "mnist-train-leveldb" + source: "examples/mnist/mnist_train_leveldb" batch_size: 100 transform_param { scale: 0.00390625 diff --git a/examples/mnist/lenet_solver.prototxt b/examples/mnist/lenet_solver.prototxt index a3b33090472..2dfbc834f41 100644 --- a/examples/mnist/lenet_solver.prototxt +++ b/examples/mnist/lenet_solver.prototxt @@ -1,5 +1,5 @@ # The train/test net protocol buffer definition -net: "lenet_train_test.prototxt" +net: "examples/mnist/lenet_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. # In the case of MNIST, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. 
@@ -20,6 +20,6 @@ display: 100 max_iter: 10000 # snapshot intermediate results snapshot: 5000 -snapshot_prefix: "lenet" +snapshot_prefix: "examples/mnist/lenet" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/mnist/lenet_train_test.prototxt b/examples/mnist/lenet_train_test.prototxt index 578641d5b2d..8369af1daa0 100644 --- a/examples/mnist/lenet_train_test.prototxt +++ b/examples/mnist/lenet_train_test.prototxt @@ -5,7 +5,7 @@ layers { top: "data" top: "label" data_param { - source: "mnist-train-leveldb" + source: "examples/mnist/mnist_train_leveldb" batch_size: 64 transform_param { scale: 0.00390625 @@ -19,7 +19,7 @@ layers { top: "data" top: "label" data_param { - source: "mnist-test-leveldb" + source: "examples/mnist/mnist_test_leveldb" batch_size: 100 transform_param { scale: 0.00390625 diff --git a/examples/mnist/mnist_autoencoder.prototxt b/examples/mnist/mnist_autoencoder.prototxt index 3af6b7afcb3..d0abc685f0b 100644 --- a/examples/mnist/mnist_autoencoder.prototxt +++ b/examples/mnist/mnist_autoencoder.prototxt @@ -4,7 +4,7 @@ layers { name: "data" type: DATA data_param { - source: "mnist-train-leveldb" + source: "examples/mnist/mnist_train_leveldb" batch_size: 100 transform_param { scale: 0.0039215684 @@ -17,7 +17,7 @@ layers { name: "data" type: DATA data_param { - source: "mnist-test-leveldb" + source: "examples/mnist/mnist_test_leveldb" batch_size: 100 transform_param { scale: 0.0039215684 diff --git a/examples/mnist/mnist_autoencoder_solver.prototxt b/examples/mnist/mnist_autoencoder_solver.prototxt index ae1ddebccd2..af1202fc1fd 100644 --- a/examples/mnist/mnist_autoencoder_solver.prototxt +++ b/examples/mnist/mnist_autoencoder_solver.prototxt @@ -1,4 +1,4 @@ -net: "mnist_autoencoder.prototxt" +net: "examples/mnist/mnist_autoencoder.prototxt" test_iter: 50 test_interval: 100 test_compute_loss: true @@ -8,7 +8,7 @@ display: 20 max_iter: 4000000 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "mnist_autoencoder_train" 
+snapshot_prefix: "examples/mnist/mnist_autoencoder" momentum: 0.9 # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/mnist/train_lenet.sh b/examples/mnist/train_lenet.sh index b93e48fb629..1b6bf7d978d 100755 --- a/examples/mnist/train_lenet.sh +++ b/examples/mnist/train_lenet.sh @@ -1,5 +1,3 @@ #!/usr/bin/env sh -TOOLS=../../build/tools - -$TOOLS/caffe train --solver=lenet_solver.prototxt +./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt diff --git a/examples/mnist/train_lenet_consolidated.sh b/examples/mnist/train_lenet_consolidated.sh index 83fe895ba0b..c855467897e 100755 --- a/examples/mnist/train_lenet_consolidated.sh +++ b/examples/mnist/train_lenet_consolidated.sh @@ -1,5 +1,4 @@ #!/usr/bin/env sh -TOOLS=../../build/tools - -$TOOLS/caffe train --solver=lenet_consolidated_solver.prototxt +./build/tools/caffe train \ + --solver=examples/mnist/lenet_consolidated_solver.prototxt diff --git a/examples/mnist/train_mnist_autoencoder.sh b/examples/mnist/train_mnist_autoencoder.sh index 628c74b969a..cfd67e82fda 100755 --- a/examples/mnist/train_mnist_autoencoder.sh +++ b/examples/mnist/train_mnist_autoencoder.sh @@ -1,4 +1,4 @@ -#!/bin/bash -TOOLS=../../build/tools +#!/usr/bin/env sh -$TOOLS/caffe.bin train --solver=mnist_autoencoder_solver.prototxt +./build/tools/caffe train \ + --solver=examples/mnist/mnist_autoencoder_solver.prototxt diff --git a/examples/pascal-finetuning/pascal_finetune_solver.prototxt b/examples/pascal-finetuning/pascal_finetune_solver.prototxt deleted file mode 100644 index f2b80111e2f..00000000000 --- a/examples/pascal-finetuning/pascal_finetune_solver.prototxt +++ /dev/null @@ -1,14 +0,0 @@ -train_net: "pascal_finetune_train.prototxt" -test_net: "pascal_finetune_val.prototxt" -test_iter: 100 -test_interval: 1000 -base_lr: 0.001 -lr_policy: "step" -gamma: 0.1 -stepsize: 20000 -display: 20 -max_iter: 100000 -momentum: 0.9 -weight_decay: 0.0005 -snapshot: 10000 -snapshot_prefix: "pascal_finetune_train"