8000 Standardize Examples by shelhamer · Pull Request #1003 · BVLC/caffe · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

Standardize Examples #1003

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Aug 29, 2014
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -47,11 +47,16 @@ python/caffe/proto/
# User's build configuration
Makefile.config

# Data and examples are either
# Data and models are either
# 1. reference, and not casually committed
# 2. custom, and live on their own unless they're deliberately contributed
data/*
examples/*
*model
*_iter_*
*.solverstate
*.binaryproto
*leveldb
*lmdb

# Generated documentation
docs/_site
Expand Down
4 changes: 2 additions & 2 deletions examples/cifar10/cifar10_full_solver.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# then another factor of 10 after 10 more epochs (5000 iters)

# The train/test net protocol buffer definition
net: "cifar10_full_train_test.prototxt"
net: "examples/cifar10/cifar10_full_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
Expand All @@ -21,6 +21,6 @@ display: 200
max_iter: 60000
# snapshot intermediate results
snapshot: 10000
snapshot_prefix: "cifar10_full"
snapshot_prefix: "examples/cifar10/cifar10_full"
# solver mode: CPU or GPU
solver_mode: GPU
4 changes: 2 additions & 2 deletions examples/cifar10/cifar10_full_solver_lr1.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# then another factor of 10 after 10 more epochs (5000 iters)

# The train/test net protocol buffer definition
net: "cifar10_full_train_test.prototxt"
net: "examples/cifar10/cifar10_full_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
Expand All @@ -21,6 +21,6 @@ display: 200
max_iter: 65000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "cifar10_full"
snapshot_prefix: "examples/cifar10/cifar10_full"
# solver mode: CPU or GPU
solver_mode: GPU
4 changes: 2 additions & 2 deletions examples/cifar10/cifar10_full_solver_lr2.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# then another factor of 10 after 10 more epochs (5000 iters)

# The train/test net protocol buffer definition
net: "cifar10_full_train_test.prototxt"
net: "examples/cifar10/cifar10_full_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
Expand All @@ -21,6 +21,6 @@ display: 200
max_iter: 70000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "cifar10_full"
snapshot_prefix: "examples/cifar10/cifar10_full"
# solver mode: CPU or GPU
solver_mode: GPU
8 changes: 4 additions & 4 deletions examples/cifar10/cifar10_full_train_test.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@ layers {
top: "data"
top: "label"
data_param {
source: "cifar10-leveldb/cifar-train-leveldb"
source: "examples/cifar10/cifar10_train_leveldb"
batch_size: 100
transform_param {
mean_file: "mean.binaryproto"
mean_file: "examples/cifar10/mean.binaryproto"
}
}
include: { phase: TRAIN }
Expand All @@ -19,10 +19,10 @@ layers {
top: "data"
top: "label"
data_param {
source: "cifar10-leveldb/cifar-test-leveldb"
source: "examples/cifar10/cifar10_test_leveldb"
batch_size: 100
transform_param {
mean_file: "mean.binaryproto"
mean_file: "examples/cifar10/mean.binaryproto"
}
}
include: { phase: TEST }
Expand Down
4 changes: 2 additions & 2 deletions examples/cifar10/cifar10_quick_solver.prototxt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10

# The train/test net protocol buffer definition
net: "cifar10_quick_train_test.prototxt"
net: "examples/cifar10/cifar10_quick_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
Expand All @@ -20,6 +20,6 @@ display: 100
max_iter: 4000
# snapshot intermediate results
snapshot: 4000
snapshot_prefix: "cifar10_quick"
snapshot_prefix: "examples/cifar10/cifar10_quick"
# solver mode: CPU or GPU
solver_mode: GPU
4 changes: 2 additions & 2 deletions examples/cifar10/cifar10_quick_solver_lr1.prototxt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10

# The train/test net protocol buffer definition
net: "cifar10_quick_train_test.prototxt"
net: "examples/cifar10/cifar10_quick_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
Expand All @@ -20,6 +20,6 @@ display: 100
max_iter: 5000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "cifar10_quick"
snapshot_prefix: "examples/cifar10/cifar10_quick"
# solver mode: CPU or GPU
solver_mode: GPU
8 changes: 4 additions & 4 deletions examples/cifar10/cifar10_quick_train_test.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@ layers {
top: "data"
top: "label"
data_param {
source: "cifar10-leveldb/cifar-train-leveldb"
source: "examples/cifar10/cifar10_train_leveldb"
batch_size: 100
transform_param {
mean_file: "mean.binaryproto"
mean_file: "examples/cifar10/mean.binaryproto"
}
}
include: { phase: TRAIN }
Expand All @@ -19,10 +19,10 @@ layers {
top: "data"
top: "label"
data_param {
source: "cifar10-leveldb/cifar-test-leveldb"
source: "examples/cifar10/cifar10_test_leveldb"
batch_size: 100
transform_param {
mean_file: "mean.binaryproto"
mean_file: "examples/cifar10/mean.binaryproto"
}
}
include: { phase: TEST }
Expand Down
4 changes: 2 additions & 2 deletions examples/cifar10/convert_cifar_data.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ void convert_dataset(const string& input_folder, const string& output_folder) {
LOG(INFO) << "Writing Training data";
leveldb::DB* train_db;
leveldb::Status status;
status = leveldb::DB::Open(options, output_folder + "/cifar-train-leveldb",
status = leveldb::DB::Open(options, output_folder + "/cifar10_train_leveldb",
&train_db);
CHECK(status.ok()) << "Failed to open leveldb.";
for (int fileid = 0; fileid < kCIFARTrainBatches; ++fileid) {
Expand All @@ -71,7 +71,7 @@ void convert_dataset(const string& input_folder, const string& output_folder) {

LOG(INFO) << "Writing Testing data";
leveldb::DB* test_db;
CHECK(leveldb::DB::Open(options, output_folder + "/cifar-test-leveldb",
CHECK(leveldb::DB::Open(options, output_folder + "/cifar10_test_leveldb",
&test_db).ok()) << "Failed to open leveldb.";
// Open files
std::ifstream data_file((input_folder + "/test_batch.bin").c_str(),
Expand Down
13 changes: 6 additions & 7 deletions examples/cifar10/create_cifar10.sh
Original file line number Diff line number Diff line change
@@ -1,19 +1,18 @@
#!/usr/bin/env sh
# This script converts the cifar data into leveldb format.

EXAMPLES=../../build/examples/cifar10
DATA=../../data/cifar10
TOOLS=../../build/tools
EXAMPLE=examples/cifar10
DATA=data/cifar10

echo "Creating leveldb..."

rm -rf cifar10-leveldb
mkdir cifar10-leveldb
rm -rf $EXAMPLE/cifar10_train_leveldb $EXAMPLE/cifar10_test_leveldb

$EXAMPLES/convert_cifar_data.bin $DATA ./cifar10-leveldb
./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE

echo "Computing image mean..."

$TOOLS/compute_image_mean.bin ./cifar10-leveldb/cifar-train-leveldb mean.binaryproto
./build/tools/compute_image_mean $EXAMPLE/cifar10_train_leveldb \
$EXAMPLE/mean.binaryproto

echo "Done."
14 changes: 14 additions & 0 deletions examples/finetune_pascal_detection/pascal_finetune_solver.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
train_net: "examples/finetune_pascal_detection/pascal_finetune_train.prototxt"
test_net: "examples/finetune_pascal_detection/pascal_finetune_val.prototxt"
test_iter: 100
test_interval: 1000
base_lr: 0.001
lr_policy: "step"
gamma: 0.1
stepsize: 20000
display: 20
max_iter: 100000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune"
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ layers {
top: "data"
top: "label"
window_data_param {
source: "window_file_2007_trainval.txt"
mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
source: "examples/finetune_pascal_detection/window_file_2007_trainval.txt"
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
batch_size: 128
crop_size: 227
mirror: true
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ layers {
top: "data"
top: "label"
window_data_param {
source: "window_file_2007_test.txt"
mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
source: "examples/finetune_pascal_detection/window_file_2007_test.txt"
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
batch_size: 128
crop_size: 227
mirror: true
Expand Down
4 changes: 2 additions & 2 deletions examples/imagenet/alexnet_solver.prototxt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
net: "alexnet_train_val.prototxt"
net: "examples/imagenet/alexnet_train_val.prototxt"
test_iter: 1000
test_interval: 1000
base_lr: 0.01
Expand All @@ -10,5 +10,5 @@ max_iter: 450000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "caffe_alexnet_train"
snapshot_prefix: "examples/imagenet/caffe_alexnet"
solver_mode: GPU
8 changes: 4 additions & 4 deletions examples/imagenet/alexnet_train_val.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,11 @@ layers {
top: "data"
top: "label"
data_param {
source: "ilsvrc12_train_leveldb"
source: "examples/imagenet/ilsvrc12_train_leveldb"
batch_size: 256
transform_param {
crop_size: 227
mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: true
}
}
Expand All @@ -21,11 +21,11 @@ layers {
top: "data"
top: "label"
data_param {
source: "ilsvrc12_val_leveldb"
source: "examples/imagenet/ilsvrc12_val_leveldb"
batch_size: 50
transform_param {
crop_size: 227
mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: false
}
}
Expand Down
13 changes: 7 additions & 6 deletions examples/imagenet/create_imagenet.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,9 @@
# Create the imagenet leveldb inputs
# N.B. set the path to the imagenet train + val data dirs

TOOLS=../../build/tools
DATA=../../data/ilsvrc12
EXAMPLE=examples/imagenet
DATA=data/ilsvrc12
TOOLS=build/tools

TRAIN_DATA_ROOT=/path/to/imagenet/train/
VAL_DATA_ROOT=/path/to/imagenet/val/
Expand Down Expand Up @@ -35,22 +36,22 @@ fi

echo "Creating train leveldb..."

GLOG_logtostderr=1 $TOOLS/convert_imageset.bin \
GLOG_logtostderr=1 $TOOLS/convert_imageset \
--resize_height=$RESIZE_HEIGHT \
--resize_width=$RESIZE_WIDTH \
--shuffle \
$TRAIN_DATA_ROOT \
$DATA/train.txt \
ilsvrc12_train_leveldb
$EXAMPLE/ilsvrc12_train_leveldb

echo "Creating val leveldb..."

GLOG_logtostderr=1 $TOOLS/convert_imageset.bin \
GLOG_logtostderr=1 $TOOLS/convert_imageset \
--resize_height=$RESIZE_HEIGHT \
--resize_width=$RESIZE_WIDTH \
--shuffle \
$VAL_DATA_ROOT \
$DATA/val.txt \
ilsvrc12_val_leveldb
$EXAMPLE/ilsvrc12_val_leveldb

echo "Done."
2 changes: 1 addition & 1 deletion examples/imagenet/get_caffe_alexnet_model.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,6 @@ fi

echo "Downloading..."

wget http://dl.caffe.berkeleyvision.org/caffe_alexnet_model
wget -O examples/imagenet/$MODEL http://dl.caffe.berkeleyvision.org/$MODEL

echo "Done. Please run this command again to verify that checksum = $CHECKSUM."
3 changes: 2 additions & 1 deletion examples/imagenet/get_caffe_rcnn_imagenet_model.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,5 +23,6 @@ fi

echo "Downloading..."

wget http://dl.caffe.berkeleyvision.org/$MODEL
wget -O examples/imagenet/$MODEL http://dl.caffe.berkeleyvision.org/$MODEL

echo "Done. Please run this command again to verify that checksum = $CHECKSUM."
2 changes: 1 addition & 1 deletion examples/imagenet/get_caffe_reference_imagenet_model.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,6 @@ fi

echo "Downloading..."

wget http://dl.caffe.berkeleyvision.org/$MODEL
wget -O examples/imagenet/$MODEL http://dl.caffe.berkeleyvision.org/$MODEL

echo "Done. Please run this command again to verify that checksum = $CHECKSUM."
4 changes: 2 additions & 2 deletions examples/imagenet/imagenet_solver.prototxt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
net: "imagenet_train_val.prototxt"
net: "examples/imagenet/imagenet_train_val.prototxt"
test_iter: 1000
test_interval: 1000
base_lr: 0.01
Expand All @@ -10,5 +10,5 @@ max_iter: 450000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "caffe_imagenet_train"
snapshot_prefix: "examples/imagenet/caffe_imagenet"
solver_mode: GPU
8 changes: 4 additions & 4 deletions examples/imagenet/imagenet_train_val.prototxt
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,11 @@ layers {
top: "data"
top: "label"
data_param {
source: "ilsvrc12_train_leveldb"
source: "examples/imagenet/ilsvrc12_train_leveldb"
batch_size: 256
transform_param {
crop_size: 227
mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: true
}
}
Expand All @@ -21,11 +21,11 @@ layers {
top: "data"
top: "label"
data_param {
source: "ilsvrc12_val_leveldb"
source: "examples/imagenet/ilsvrc12_val_leveldb"
batch_size: 50
transform_param {
crop_size: 227
mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: false
}
}
Expand Down
6 changes: 2 additions & 4 deletions examples/imagenet/make_imagenet_mean.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,7 @@
# Compute the mean image from the imagenet training leveldb
# N.B. this is available in data/ilsvrc12

TOOLS=../../build/tools
DATA=../../data/ilsvrc12

$TOOLS/compute_image_mean.bin ilsvrc12_train_leveldb $DATA/imagenet_mean.binaryproto
./build/tools/compute_image_mean examples/imagenet/ilsvrc12_train_leveldb \
data/ilsvrc12/imagenet_mean.binaryproto

echo "Done."
Loading
0