From 3ecbdae7457ffce52287bf9e7405dca83501da89 Mon Sep 17 00:00:00 2001 From: Jason Yosinski Date: Thu, 30 Apr 2015 22:48:02 -0700 Subject: [PATCH 01/54] Added deconv to InnerProductLayer, SoftmaxLayer, DropoutLayer, ReLULayer, ConvolutionLayer, LRNLayer, and PoolingLayer. Added Python bindings for deconv, zeroing, and partial backward/deconv passes. --- include/caffe/common.hpp | 15 +++ include/caffe/common_layers.hpp | 16 +++ include/caffe/layer.hpp | 48 +++++++++ include/caffe/net.hpp | 10 ++ include/caffe/neuron_layers.hpp | 13 +++ include/caffe/util/device_alternate.hpp | 19 ++++ include/caffe/vision_layers.hpp | 31 ++++++ python/caffe/_caffe.cpp | 1 + python/caffe/pycaffe.py | 131 ++++++++++++++++++++++++ src/caffe/layers/lrn_layer.cpp | 29 +++++- src/caffe/layers/relu_layer.cpp | 19 +++- src/caffe/layers/relu_layer.cu | 30 +++++- src/caffe/net.cpp | 51 +++++++++ src/caffe/proto/caffe.proto | 7 ++ 14 files changed, 417 insertions(+), 3 deletions(-) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 6cf80a37bc1..7cb9aa5035d 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -57,10 +57,25 @@ private:\ const std::vector& propagate_down, \ const std::vector*>& bottom) +#define INSTANTIATE_LAYER_GPU_DECONV(classname) \ + template void classname::Deconv_gpu( \ + const std::vector*>& top, \ + const std::vector& propagate_down, \ + const std::vector*>& bottom); \ + template void classname::Deconv_gpu( \ + const std::vector*>& top, \ + const std::vector& propagate_down, \ + const std::vector*>& bottom) + #define INSTANTIATE_LAYER_GPU_FUNCS(classname) \ INSTANTIATE_LAYER_GPU_FORWARD(classname); \ INSTANTIATE_LAYER_GPU_BACKWARD(classname) +#define INSTANTIATE_LAYER_GPU_FUNCS_WITH_DECONV(classname) \ + INSTANTIATE_LAYER_GPU_FORWARD(classname); \ + INSTANTIATE_LAYER_GPU_BACKWARD(classname); \ + INSTANTIATE_LAYER_GPU_DECONV(classname) + // A simple macro to mark codes that are not implemented, so that when the code // is executed we will see a fatal log. 
#define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet" diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index cae1c3e4ee6..8c9fc976a89 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -256,6 +256,14 @@ class InnerProductLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_cpu(top, propagate_down, bottom); + } + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_gpu(top, propagate_down, bottom); + } int M_; int K_; @@ -352,6 +360,14 @@ class SoftmaxLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_cpu(top, propagate_down, bottom); + } + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_gpu(top, propagate_down, bottom); + } int outer_num_; int inner_num_; diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 2d13ef97c05..2e4f3e8f822 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -145,6 +145,19 @@ class Layer { const vector& propagate_down, const vector*>& bottom); + /** + * @brief Given the top blob deconv info, compute the bottom blob deconv. Similar to Backward. + * + * The Deconv wrapper calls the relevant device wrapper function + * (Deconv_cpu or Deconv_gpu) to compute the bottom blob diffs given the + * top blob diffs. + * + * Your layer should implement Deconv_cpu and (optionally) Deconv_gpu. + */ + inline void Deconv(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + /** * @brief Returns the vector of learnable parameter blobs. */ @@ -332,6 +345,25 @@ class Layer { Backward_cpu(top, propagate_down, bottom); } + /** + * @brief Using the CPU device, compute the deconv (Zeiler et al, 2013) for the bottom blobs. + */ + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + NOT_IMPLEMENTED; + } + /** + * @brief Using the GPU device, compute the deconv (Zeiler et al, 2013) for the bottom blobs. + * Fall back to Deconv_cpu() if unavailable. 
+ */ + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + // LOG(WARNING) << "Using CPU code as backup."; + Deconv_cpu(top, propagate_down, bottom); + } + /** * Called by the parent Layer's SetUp to check that the number of bottom * and top Blobs provided as input match the expected numbers specified by @@ -453,6 +485,22 @@ inline void Layer::Backward(const vector*>& top, } } +template +inline void Layer::Deconv(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + switch (Caffe::mode()) { + case Caffe::CPU: + Deconv_cpu(top, propagate_down, bottom); + break; + case Caffe::GPU: + Deconv_gpu(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown caffe mode."; + } +} + // Serialize LayerParameter to protocol buffer template void Layer::ToProto(LayerParameter* param, bool write_diff) { diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 075afebc9b0..3887652db41 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -67,6 +67,14 @@ class Net { void BackwardFrom(int start); void BackwardTo(int end); + /** + * The network deconv works similarly to backward and also takes no input and output. + */ + void Deconv(); + void DeconvFromTo(int start, int end); + void DeconvFrom(int start); + void DeconvTo(int end); + /** * @brief Reshape all layers from bottom to top. * @@ -203,6 +211,8 @@ class Net { void ForwardDebugInfo(const int layer_id); /// @brief Helper for displaying debug info in Backward. void BackwardDebugInfo(const int layer_id); + /// @brief Helper for displaying debug info in Deconv. + void DeconvDebugInfo(const int layer_id); /// @brief Helper for displaying debug info in Update. void UpdateDebugInfo(const int param_id); diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 323215134c7..626718b93b5 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -194,6 +194,14 @@ class DropoutLayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_cpu(top, propagate_down, bottom); + } + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_gpu(top, propagate_down, bottom); + } /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ Blob rand_vec_; @@ -408,6 +416,11 @@ class ReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 6ea595dba2d..0a3a916a66a 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -18,6 +18,19 @@ void classname::Backward_gpu(const vector*>& top, \ const vector& propagate_down, \ const vector*>& bottom) { NO_GPU; } \ +#define STUB_GPU_WITH_DECONV(classname) \ +template \ +void classname::Forward_gpu(const vector*>& bottom, \ + const vector*>& top) { NO_GPU; } 
\ +template \ +void classname::Backward_gpu(const vector*>& top, \ + const vector& propagate_down, \ + const vector*>& bottom) { NO_GPU; } \ +template \ +void classname::Deconv_gpu(const vector*>& top, \ + const vector& propagate_down, \ + const vector*>& bottom) { NO_GPU; } \ + #define STUB_GPU_FORWARD(classname, funcname) \ template \ void classname::funcname##_##gpu(const vector*>& bottom, \ @@ -29,6 +42,12 @@ void classname::funcname##_##gpu(const vector*>& top, \ const vector& propagate_down, \ const vector*>& bottom) { NO_GPU; } \ +#define STUB_GPU_DECONV(classname, funcname) \ +template \ +void classname::funcname##_##gpu(const vector*>& top, \ + const vector& propagate_down, \ + const vector*>& bottom) { NO_GPU; } \ + #else // Normal GPU + CPU Caffe. #include diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index cd0ab8babb0..0ee3b3615dc 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -172,6 +172,15 @@ class ConvolutionLayer : public BaseConvolutionLayer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_cpu(top, propagate_down, bottom); + } + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_gpu(top, propagate_down, bottom); + } + virtual inline bool reverse_dimensions() { return false; } virtual void compute_output_shape(); }; @@ -327,6 +336,17 @@ class LRNLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_passthrough_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_passthrough_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_cpu(const vector*>& bottom, const vector*>& top); virtual void CrossChannelForward_gpu(const vector*>& bottom, @@ -371,6 +391,9 @@ class LRNLayer : public Layer { shared_ptr > product_layer_; Blob product_input_; vector*> product_bottom_vec_; + + // Fields used for deconv + bool deconv_ignore_; }; @@ -408,6 +431,14 @@ class PoolingLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + virtual void Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_cpu(top, propagate_down, bottom); + } + virtual void Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Backward_gpu(top, propagate_down, bottom); + } int kernel_h_, kernel_w_; int stride_h_, stride_w_; diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index dff7f627016..08a5eb8e118 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -206,6 +206,7 @@ BOOST_PYTHON_MODULE(_caffe) { .def("__init__", bp::make_constructor(&Net_Init_Load)) .def("_forward", &Net::ForwardFromTo) .def("_backward", &Net::BackwardFromTo) + .def("_deconv", &Net::DeconvFromTo) .def("reshape", &Net::Reshape) // The cast is to 
select a particular overload. .def("copy_from", static_cast::*)(const string)>( diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 3c19261f690..7b737d280c1 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -39,6 +39,133 @@ def _Net_params(self): if len(lr.blobs) > 0]) +def _Net_zero(self, zero_param_diffs = True): + """ + Set all activations (data and diffs) in the net to zero. + + Take + zero_param_diffs: If True, also zero the parameter blob diffs, + else skip parameter blobs. + """ + + for blob_name, blob in self.blobs.items(): + blob.data[...] = 0 + blob.diff[...] = 0 + if zero_param_diffs: + for param_name, blob_vec in self.params.items(): + for blob in blob_vec: + blob.diff[...] = 0 + + +def _Net_backward_from_layer(self, start_name, start_diff, diffs=None, zero_higher=False): + """ + Backward pass starting from somewhere in the middle of the + network, starting with the provided diffs. + + Take + start_name: layer at which to begin the backward pass + start_diff: diff to set at start_name layer + diffs: list of diffs to return in addition to bottom diffs. + zero_higher: whether or not to zero out higher layers to reflect the true 0 derivative or leave them alone to save time. + + Give + outs: {blob name: diff ndarray} dict. + """ + + if start_diff.shape != self.blobs[start_name].diff.shape: + raise Exception('Expected start_diff of shape %s but got %s' % (self.blobs[start_name].diff.shape, start_diff.shape)) + + self.blobs[start_name].diff[...] = start_diff + + if zero_higher: + past_start = False + for blob_name, blob in self.blobs.items(): + if past_start: + blob.diff[...] = 0 + if blob_name == start_name: + past_start = True + + return self.backward(start=start_name, diffs=diffs) + + +def _Net_deconv_from_layer(self, start_name, start_diff, diffs=None, zero_higher=False): + """ + Deconv pass starting from somewhere in the middle of the + network, starting with the provided diffs. + + Take + start_name: layer at which to begin the deconv pass + start_diff: diff to set at start_name layer + diffs: list of diffs to return in addition to bottom diffs. + zero_higher: whether or not to zero out higher layers to reflect the true 0 derivative or leave them alone to save time. + + Give + outs: {blob name: diff ndarray} dict. + """ + + if start_diff.shape != self.blobs[start_name].diff.shape: + raise Exception('Expected start_diff of shape %s but got %s' % (self.blobs[start_name].diff.shape, start_diff.shape)) + + self.blobs[start_name].diff[...] = start_diff + + if zero_higher: + past_start = False + for blob_name, blob in self.blobs.items(): + if past_start: + blob.diff[...] = 0 + if blob_name == start_name: + past_start = True + + return self.deconv(start=start_name, diffs=diffs) + + +def _Net_deconv(self, diffs=None, start=None, end=None, **kwargs): + """ + Deconv pass: prepare diffs and run the net backward in deconv mode. Just like _Net_Backward but calls Deconv instead. + + Take + diffs: list of diffs to return in addition to bottom diffs. + kwargs: Keys are output blob names and values are diff ndarrays. + If None, top diffs are taken from forward loss. + start: optional name of layer at which to begin the backward pass + end: optional name of layer at which to finish the backward pass (inclusive) + + Give + outs: {blob name: diff ndarray} dict. 
+ """ + if diffs is None: + diffs = [] + + if start is not None: + start_ind = list(self._layer_names).index(start) + else: + start_ind = len(self.layers) - 1 + + if end is not None: + end_ind = list(self._layer_names).index(end) + outputs = set([end] + diffs) + else: + end_ind = 0 + outputs = set(self.inputs + diffs) + + if kwargs: + if set(kwargs.keys()) != set(self.outputs): + raise Exception('Top diff arguments do not match net outputs.') + # Set top diffs according to defined shapes and make arrays single and + # C-contiguous as Caffe expects. + for top, diff in kwargs.iteritems(): + if diff.ndim != 4: + raise Exception('{} diff is not 4-d'.format(top)) + if diff.shape[0] != self.blobs[top].num: + raise Exception('Diff is not batch sized') + self.blobs[top].diff[...] = diff + + self._deconv(start_ind, end_ind) + + # Unpack diffs to extract + return {out: self.blobs[out].diff for out in outputs} + + @property def _Net_inputs(self): return [list(self.blobs.keys())[i] for i in self._inputs] @@ -259,8 +386,12 @@ def _Net_batch(self, blobs): # Attach methods to Net. Net.blobs = _Net_blobs Net.params = _Net_params +Net.zero = _Net_zero +Net.backward_from_layer = _Net_backward_from_layer +Net.deconv_from_layer = _Net_deconv_from_layer Net.forward = _Net_forward Net.backward = _Net_backward +Net.deconv = _Net_deconv Net.forward_all = _Net_forward_all Net.forward_backward_all = _Net_forward_backward_all Net.set_input_arrays = _Net_set_input_arrays diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 36c1ace4c99..f86a0704b07 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -15,6 +15,7 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, alpha_ = this->layer_param_.lrn_param().alpha(); beta_ = this->layer_param_.lrn_param().beta(); k_ = this->layer_param_.lrn_param().k(); + deconv_ignore_ = this->layer_param_.lrn_param().deconv_ignore(); if (this->layer_param_.lrn_param().norm_region() == LRNParameter_NormRegion_WITHIN_CHANNEL) { // Set up split_layer_ to use inputs in the numerator and denominator. 
@@ -247,10 +248,36 @@ void LRNLayer::WithinChannelBackward( } } +template +void LRNLayer::Deconv_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (deconv_ignore_) { + // Deconv Option 1: pass through (ignore LRN layer): + Deconv_passthrough_cpu(top, propagate_down, bottom); + } else { + // Deconv Option 2: compute derivatives via backprop: + Backward_cpu(top, propagate_down, bottom); + } +} + +template +void LRNLayer::Deconv_passthrough_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + bottom_diff[i] = top_diff[i]; + } + } +} + #ifdef CPU_ONLY -STUB_GPU(LRNLayer); +STUB_GPU_WITH_DECONV(LRNLayer); STUB_GPU_FORWARD(LRNLayer, CrossChannelForward); STUB_GPU_BACKWARD(LRNLayer, CrossChannelBackward); +STUB_GPU_DECONV(LRNLayer, Deconv_passthrough); #endif INSTANTIATE_CLASS(LRNLayer); diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp index cc00319a578..fba3a0c91dd 100644 --- a/src/caffe/layers/relu_layer.cpp +++ b/src/caffe/layers/relu_layer.cpp @@ -36,9 +36,26 @@ void ReLULayer::Backward_cpu(const vector*>& top, } } +template +void ReLULayer::Deconv_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + if (negative_slope != Dtype(0)) + LOG(WARNING) << "negative_slope parameter = " << negative_slope << " but nonzero negative_slope params are not supported for Deconv through RELU."; + for (int i = 0; i < count; ++i) { + bottom_diff[i] = std::max(top_diff[i], Dtype(0)); + } + } +} + #ifdef CPU_ONLY -STUB_GPU(ReLULayer); +STUB_GPU_WITH_DECONV(ReLULayer); #endif INSTANTIATE_CLASS(ReLULayer); diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index b8924c855e5..ce3b41a53cf 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -58,8 +58,36 @@ void ReLULayer::Backward_gpu(const vector*>& top, } } +template +__global__ void ReLUDeconv(const int n, const Dtype* in_diff, + Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index] > 0 ? 
in_diff[index] : 0; + } +} + +template +void ReLULayer::Deconv_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + if (negative_slope != Dtype(0)) + LOG(WARNING) << "negative_slope parameter = " << negative_slope << " but nonzero negative_slope params are not supported for Deconv through RELU."; + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUDeconv<<>>( + count, top_diff, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + -INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); +//INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); +INSTANTIATE_LAYER_GPU_FUNCS_WITH_DECONV(ReLULayer); } // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index fd00b122630..80c01b81f5c 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -542,6 +542,19 @@ void Net::BackwardFromTo(int start, int end) { } } +template +void Net::DeconvFromTo(int start, int end) { + CHECK_GE(end, 0); + CHECK_LT(start, layers_.size()); + for (int i = start; i >= end; --i) { + if (layer_need_backward_[i]) { + layers_[i]->Deconv( + top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); + if (debug_info_) { DeconvDebugInfo(i); } + } + } +} + template void Net::InputDebugInfo(const int input_id) { const Blob& blob = *net_input_blobs_[input_id]; @@ -596,6 +609,29 @@ void Net::BackwardDebugInfo(const int layer_id) { } } +template +void Net::DeconvDebugInfo(const int layer_id) { + const vector*>& bottom_vec = bottom_vecs_[layer_id]; + for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { + if (!bottom_need_backward_[layer_id][bottom_id]) { continue; } + const Blob& blob = *bottom_vec[bottom_id]; + const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + LOG(INFO) << " [Deconv] " + << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name + << " diff: " << diff_abs_val_mean; + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } + const Blob& blob = *layers_[layer_id]->blobs()[param_id]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + LOG(INFO) << " [Deconv] " + << "Layer " << layer_names_[layer_id] << ", param blob " << param_id + << " diff: " << diff_abs_val_mean; + } +} + template void Net::UpdateDebugInfo(const int param_id) { const Blob& blob = *params_[param_id]; @@ -677,6 +713,21 @@ void Net::Backward() { } } +template +void Net::DeconvFrom(int start) { + DeconvFromTo(start, 0); +} + +template +void Net::DeconvTo(int end) { + DeconvFromTo(layers_.size() - 1, end); +} + +template +void Net::Deconv() { + DeconvFromTo(layers_.size() - 1, 0); +} + template void Net::Reshape() { for (int i = 0; i < layers_.size(); ++i) { diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 5b21cf20028..42fd6fd6aea 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -447,6 +447,7 @@ message DataParameter { // be larger than the number of keys in the database. optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; + // DEPRECATED. See TransformationParameter. 
For data pre-processing, we can do // simple scaling and subtracting the data mean, if provided. Note that the // mean subtraction is always carried out before scaling. @@ -603,6 +604,12 @@ message LRNParameter { } optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; optional float k = 5 [default = 1.]; + // Whether or not to skip the LRN layer during a deconv pass. If + // this is true, activations in a deconv will pass through the LRN + // layer unaffected. If it is false, deconv activations will be + // affected by LRN layers the same as backprop diffs are (will pass + // through the derivative of the layer). + optional bool deconv_ignore = 6 [default = false]; } // Message that stores parameters used by MemoryDataLayer From 968803233471bb8d3c837ec1aca32319f7855c2f Mon Sep 17 00:00:00 2001 From: Jason Yosinski Date: Mon, 4 May 2015 23:13:31 -0700 Subject: [PATCH 02/54] Added missing functions --- src/caffe/layers/lrn_layer.cu | 44 ++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 24aa6a30130..9b599cace44 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -192,8 +192,50 @@ template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom); +template +void LRNLayer::Deconv_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (deconv_ignore_) { + // Deconv Option 1: pass through (ignore LRN layer): + Deconv_passthrough_gpu(top, propagate_down, bottom); + } else { + // Deconv Option 2: compute derivatives via backprop: + Backward_gpu(top, propagate_down, bottom); + } +} + +template +__global__ void LRNDeconv_passthrough(const int n, const Dtype* in_diff, + Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index]; + } +} + +template +void LRNLayer::Deconv_passthrough_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + // Option 2: pass through (ignore LRN layer) + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + LRNDeconv_passthrough<<>>( + count, top_diff, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} +template void LRNLayer::Deconv_passthrough_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom); +template void LRNLayer::Deconv_passthrough_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom); + -INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); +//INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); +INSTANTIATE_LAYER_GPU_FUNCS_WITH_DECONV(LRNLayer); } // namespace caffe From c1c559c2cb98d6de955f1d469c6104cb265f5dc5 Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Mon, 29 Feb 2016 12:28:15 -0800 Subject: [PATCH 03/54] Don't force datum.label=0 in array_to_datum --- python/caffe/io.py | 5 +++-- python/caffe/test/test_io.py | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/python/caffe/io.py b/python/caffe/io.py index 75310589cec..cee5ace2e88 100644 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -63,7 +63,7 @@ def blobprotovector_str_to_arraylist(str): return [blobproto_to_array(blob) for blob in vec.blobs] -def array_to_datum(arr, label=0): +def array_to_datum(arr, label=None): """Converts a 3-dimensional array to datum. 
If the array has dtype uint8, the output data will be encoded as a string. Otherwise, the output data will be stored in float format. @@ -76,7 +76,8 @@ def array_to_datum(arr, label=0): datum.data = arr.tostring() else: datum.float_data.extend(arr.flat) - datum.label = label + if label is not None: + datum.label = label return datum diff --git a/python/caffe/test/test_io.py b/python/caffe/test/test_io.py index 8c86ef75fb2..4a16b5b9128 100644 --- a/python/caffe/test/test_io.py +++ b/python/caffe/test/test_io.py @@ -39,3 +39,18 @@ def test_scalar(self): arr = caffe.io.blobproto_to_array(blob) self.assertEqual(arr, 123) + + +class TestArrayToDatum(unittest.TestCase): + + def test_label_none_size(self): + # Set label + d1 = caffe.io.array_to_datum( + np.ones((10,10,3)), label=1) + # Don't set label + d2 = caffe.io.array_to_datum( + np.ones((10,10,3))) + # Not setting the label should result in a smaller object + self.assertGreater( + len(d1.SerializeToString()), + len(d2.SerializeToString())) From 542d216bb28343111e6b7df2c24824c3f90e435a Mon Sep 17 00:00:00 2001 From: JacekR Date: Tue, 15 Mar 2016 10:43:34 +0100 Subject: [PATCH 04/54] Update Makefile: Changed MKL_DIR to MKLROOT MKLROOT variable is set by MKL scripts, so it also should be used in Makefile. --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2f81aca84e7..5424c3a1858 100644 --- a/Makefile +++ b/Makefile @@ -364,9 +364,9 @@ ifeq ($(BLAS), mkl) # MKL LIBRARIES += mkl_rt COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 + MKLROOT ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKLROOT)/include + BLAS_LIB ?= $(MKLROOT)/lib $(MKLROOT)/lib/intel64 else ifeq ($(BLAS), open) # OpenBLAS LIBRARIES += openblas From 55cda91e068565d6832a081266f49245e2bf61e1 Mon Sep 17 00:00:00 2001 From: Jason Yosinski Date: Wed, 16 Mar 2016 14:06:51 -0400 Subject: [PATCH 05/54] Merged in latest Caffe master and refactored and cleaned up Deconv vs. Backward code --- include/caffe/layer.hpp | 17 +++++++++++++---- include/caffe/layers/conv_layer.hpp | 8 -------- include/caffe/layers/dropout_layer.hpp | 8 -------- include/caffe/layers/inner_product_layer.hpp | 8 -------- include/caffe/layers/input_layer.hpp | 2 -- include/caffe/layers/pooling_layer.hpp | 8 -------- include/caffe/layers/softmax_layer.hpp | 8 -------- 7 files changed, 13 insertions(+), 46 deletions(-) diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index e91963cd588..703d92cf157 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -183,7 +183,15 @@ class Layer { * (Deconv_cpu or Deconv_gpu) to compute the bottom blob diffs given the * top blob diffs. * - * Your layer should implement Deconv_cpu and (optionally) Deconv_gpu. + * Your layer should implement Deconv_cpu and Deconv_gpu. + * + * Note: By default, Deconv_gpu will just call Backward_gpu, and + * Deconv_cpu will just call Backward_cpu. In many cases this + * behavior is desired, e.g. for convolution or innerproduct or + * pooling layers. If this is not the desired behavior, override + * Deconv_cpu AND Deconv_gpu. If only one of Deconv_{cpu,gpu} is + * overridden, the other will still defer to Backward_{cpu,gpu}, + * which will lead to confusing and inconsistent behavior! 
*/ inline void Deconv(const vector*>& top, const vector& propagate_down, @@ -382,7 +390,8 @@ class Layer { virtual void Deconv_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - NOT_IMPLEMENTED; + // LOG(WARNING) << "Explicit Deconv_cpu not implemented for " << type() << " yet; falling back to backward_cpu."; + Backward_cpu(top, propagate_down, bottom); } /** * @brief Using the GPU device, compute the deconv (Zeiler et al, 2013) for the bottom blobs. @@ -391,8 +400,8 @@ class Layer { virtual void Deconv_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - // LOG(WARNING) << "Using CPU code as backup."; - Deconv_cpu(top, propagate_down, bottom); + // LOG(WARNING) << "Explicit Deconv_gpu not implemented for " << type() << " yet; falling back to backward_gpu."; + Backward_gpu(top, propagate_down, bottom); } /** diff --git a/include/caffe/layers/conv_layer.hpp b/include/caffe/layers/conv_layer.hpp index 7c8ddf77ec8..93a618ddd72 100644 --- a/include/caffe/layers/conv_layer.hpp +++ b/include/caffe/layers/conv_layer.hpp @@ -75,14 +75,6 @@ class ConvolutionLayer : public BaseConvolutionLayer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - virtual void Deconv_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_cpu(top, propagate_down, bottom); - } - virtual void Deconv_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_gpu(top, propagate_down, bottom); - } virtual inline bool reverse_dimensions() { return false; } virtual void compute_output_shape(); }; diff --git a/include/caffe/layers/dropout_layer.hpp b/include/caffe/layers/dropout_layer.hpp index bf1476ac78f..e83143bc3cc 100644 --- a/include/caffe/layers/dropout_layer.hpp +++ b/include/caffe/layers/dropout_layer.hpp @@ -65,14 +65,6 @@ class DropoutLayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - virtual void Deconv_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_cpu(top, propagate_down, bottom); - } - virtual void Deconv_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_gpu(top, propagate_down, bottom); - } /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ Blob rand_vec_; diff --git a/include/caffe/layers/inner_product_layer.hpp b/include/caffe/layers/inner_product_layer.hpp index 8a9d282c6a7..18d0d6192eb 100644 --- a/include/caffe/layers/inner_product_layer.hpp +++ b/include/caffe/layers/inner_product_layer.hpp @@ -38,14 +38,6 @@ class InnerProductLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - virtual void Deconv_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_cpu(top, propagate_down, bottom); - } - virtual void Deconv_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_gpu(top, propagate_down, bottom); - } int M_; int K_; diff --git a/include/caffe/layers/input_layer.hpp b/include/caffe/layers/input_layer.hpp index e146b8aed6a..f4472678c69 100644 --- a/include/caffe/layers/input_layer.hpp +++ 
b/include/caffe/layers/input_layer.hpp @@ -37,8 +37,6 @@ class InputLayer : public Layer { const vector*>& top) {} virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) {} - virtual void Deconv_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} }; } // namespace caffe diff --git a/include/caffe/layers/pooling_layer.hpp b/include/caffe/layers/pooling_layer.hpp index 4920443649a..f4d6803ba8e 100644 --- a/include/caffe/layers/pooling_layer.hpp +++ b/include/caffe/layers/pooling_layer.hpp @@ -43,14 +43,6 @@ class PoolingLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - virtual void Deconv_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_cpu(top, propagate_down, bottom); - } - virtual void Deconv_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_gpu(top, propagate_down, bottom); - } int kernel_h_, kernel_w_; int stride_h_, stride_w_; diff --git a/include/caffe/layers/softmax_layer.hpp b/include/caffe/layers/softmax_layer.hpp index 5582e61a4f9..46f57de033b 100644 --- a/include/caffe/layers/softmax_layer.hpp +++ b/include/caffe/layers/softmax_layer.hpp @@ -35,14 +35,6 @@ class SoftmaxLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - virtual void Deconv_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_cpu(top, propagate_down, bottom); - } - virtual void Deconv_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Backward_gpu(top, propagate_down, bottom); - } int outer_num_; From 7a8183642cb1a12945d0a9ad2bddf8304428b4c8 Mon Sep 17 00:00:00 2001 From: Daniel Gordon Date: Wed, 30 Mar 2016 14:27:19 -0700 Subject: [PATCH 06/54] Use lazy initialization to reuse ordered dict/list creations to save time on repeated calls.
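For reference, a minimal self-contained sketch of the hasattr-based caching pattern this change applies to pycaffe's Net properties (the class and attribute names below are illustrative only, not part of the patch):

    from collections import OrderedDict

    class CachedProps(object):
        """Toy stand-in for Net: build each OrderedDict once, then reuse it."""
        def __init__(self, names, values):
            self._names, self._values = names, values

        @property
        def blobs(self):
            # The first access builds and caches the dict; later accesses reuse it.
            if not hasattr(self, '_blobs_dict'):
                self._blobs_dict = OrderedDict(zip(self._names, self._values))
            return self._blobs_dict

    props = CachedProps(['data', 'conv1'], [0, 1])
    assert props.blobs is props.blobs  # repeated accesses return the same cached object

The diff below applies the same guard to blobs, blob_loss_weights, params, inputs, and outputs.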
--- python/caffe/pycaffe.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index c5c0b824a77..ca6d050e2bd 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -27,7 +27,9 @@ def _Net_blobs(self): An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name """ - return OrderedDict(zip(self._blob_names, self._blobs)) + if not hasattr(self, '_blobs_dict'): + self._blobs_dict = OrderedDict(zip(self._blob_names, self._blobs)) + return self._blobs_dict @property @@ -36,7 +38,10 @@ def _Net_blob_loss_weights(self): An OrderedDict (bottom to top, i.e., input to output) of network blob loss weights indexed by name """ - return OrderedDict(zip(self._blob_names, self._blob_loss_weights)) + if not hasattr(self, '_blobs_loss_weights_dict'): + self._blob_loss_weights_dict = OrderedDict(zip(self._blob_names, + self._blob_loss_weights)) + return self._blob_loss_weights_dict @property @@ -46,19 +51,28 @@ def _Net_params(self): parameters indexed by name; each is a list of multiple blobs (e.g., weights and biases) """ - return OrderedDict([(name, lr.blobs) - for name, lr in zip(self._layer_names, self.layers) - if len(lr.blobs) > 0]) + if not hasattr(self, '_params_dict'): + self._params_dict = OrderedDict([(name, lr.blobs) + for name, lr in zip( + self._layer_names, self.layers) + if len(lr.blobs) > 0]) + return self._params_dict @property def _Net_inputs(self): - return [list(self.blobs.keys())[i] for i in self._inputs] + if not hasattr(self, '_input_list'): + keys = list(self.blobs.keys()) + self._input_list = [keys[i] for i in self._inputs] + return self._input_list @property def _Net_outputs(self): - return [list(self.blobs.keys())[i] for i in self._outputs] + if not hasattr(self, '_output_list'): + keys = list(self.blobs.keys()) + self._output_list = [keys[i] for i in self._outputs] + return self._output_list def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): From dee01c8b5f90a69fd3e73ee455f89aab56e2dbb7 Mon Sep 17 00:00:00 2001 From: Jeff Donahue Date: Mon, 4 Apr 2016 11:36:15 -0700 Subject: [PATCH 07/54] test_net.cpp: add TestForcePropagateDown --- src/caffe/test/test_net.cpp | 102 ++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 1e0788ec127..92fd317fee8 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -716,6 +716,61 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } + virtual void InitForcePropNet(bool test_force_true) { + string proto = + "name: 'ForcePropTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " bottom: 'data' " + " top: 'innerproduct' "; + if (test_force_true) { + proto += " propagate_down: true "; + } + proto += + "} " + "layer { " + " name: 'loss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'cross_entropy_loss' " + " type: 
'SigmoidCrossEntropyLoss' " + "} "; + InitNetFromProtoString(proto); + } + int seed_; shared_ptr > net_; }; @@ -2371,4 +2426,51 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { } } +TYPED_TEST(NetTest, TestForcePropagateDown) { + this->InitForcePropNet(false); + vector layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + const string& layer_name = this->net_->layer_names()[layer_id]; + const vector need_backward = + this->net_->bottom_need_backward()[layer_id]; + if (layer_name == "data") { + ASSERT_EQ(need_backward.size(), 0); + EXPECT_FALSE(layer_need_backward[layer_id]); + } else if (layer_name == "innerproduct") { + ASSERT_EQ(need_backward.size(), 1); + EXPECT_FALSE(need_backward[0]); // data + EXPECT_TRUE(layer_need_backward[layer_id]); + } else if (layer_name == "loss") { + ASSERT_EQ(need_backward.size(), 2); + EXPECT_TRUE(need_backward[0]); // innerproduct + EXPECT_FALSE(need_backward[1]); // label + EXPECT_TRUE(layer_need_backward[layer_id]); + } else { + LOG(FATAL) << "Unknown layer: " << layer_name; + } + } + this->InitForcePropNet(true); + layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + const string& layer_name = this->net_->layer_names()[layer_id]; + const vector need_backward = + this->net_->bottom_need_backward()[layer_id]; + if (layer_name == "data") { + ASSERT_EQ(need_backward.size(), 0); + EXPECT_FALSE(layer_need_backward[layer_id]); + } else if (layer_name == "innerproduct") { + ASSERT_EQ(need_backward.size(), 1); + EXPECT_TRUE(need_backward[0]); // data + EXPECT_TRUE(layer_need_backward[layer_id]); + } else if (layer_name == "loss") { + ASSERT_EQ(need_backward.size(), 2); + EXPECT_TRUE(need_backward[0]); // innerproduct + EXPECT_FALSE(need_backward[1]); // label + EXPECT_TRUE(layer_need_backward[layer_id]); + } else { + LOG(FATAL) << "Unknown layer: " << layer_name; + } + } +} + } // namespace caffe From 77cde9c84126cb108f59e2673c2e6f59b33180fa Mon Sep 17 00:00:00 2001 From: Jeff Donahue Date: Wed, 27 Jan 2016 12:55:41 -0800 Subject: [PATCH 08/54] Net: setting `propagate_down: true` forces backprop --- src/caffe/net.cpp | 9 ++++----- src/caffe/proto/caffe.proto | 7 ++++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 23d94c97c07..f0bf594936c 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -427,12 +427,11 @@ int Net::AppendBottom(const NetParameter& param, const int layer_id, bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - bool propagate_down = true; + bool need_backward = blob_need_backward_[blob_id]; // Check if the backpropagation on bottom_id should be skipped - if (layer_param.propagate_down_size() > 0) - propagate_down = layer_param.propagate_down(bottom_id); - const bool need_backward = blob_need_backward_[blob_id] && - propagate_down; + if (layer_param.propagate_down_size() > 0) { + need_backward = layer_param.propagate_down(bottom_id); + } bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 6900bb71482..650c87ae3a6 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -328,7 +328,12 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. 
repeated BlobProto blobs = 7; - // Specifies on which bottoms the backpropagation should be skipped. + // Specifies whether to backpropagate to each bottom. If unspecified, + // Caffe will automatically infer whether each input needs backpropagation + // to compute parameter gradients. If set to true for some inputs, + // backpropagation to those inputs is forced; if set false for some inputs, + // backpropagation to those inputs is skipped. + // // The size must be either 0 or equal to the number of bottoms. repeated bool propagate_down = 11; From 3c3dc95766c8caa374c643b51bd92a27f787b8b5 Mon Sep 17 00:00:00 2001 From: emmanuel maggiori Date: Fri, 8 Apr 2016 10:25:12 +0200 Subject: [PATCH 09/54] Solving issue with exp layer with base e --- src/caffe/layers/exp_layer.cpp | 3 ++- src/caffe/test/test_neuron_layer.cpp | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/caffe/layers/exp_layer.cpp b/src/caffe/layers/exp_layer.cpp index 1f4a309fe25..0c1b463ae12 100644 --- a/src/caffe/layers/exp_layer.cpp +++ b/src/caffe/layers/exp_layer.cpp @@ -23,7 +23,8 @@ void ExpLayer::LayerSetUp(const vector*>& bottom, const Dtype input_scale = this->layer_param_.exp_param().scale(); const Dtype input_shift = this->layer_param_.exp_param().shift(); inner_scale_ = log_base * input_scale; - outer_scale_ = (input_shift == Dtype(0)) ? Dtype(1) : pow(base, input_shift); + outer_scale_ = (input_shift == Dtype(0)) ? Dtype(1) : + ( (base != Dtype(-1)) ? pow(base, input_shift) : exp(input_shift) ); } template diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index dd591f7d204..342f825cec3 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -394,6 +394,26 @@ TYPED_TEST(NeuronLayerTest, TestExpGradient) { this->TestExpGradient(kBase, kScale, kShift); } +TYPED_TEST(NeuronLayerTest, TestExpLayerWithShift) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e, + // with a non-zero shift + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestExpForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestExpGradientWithShift) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e, + // with a non-zero shift + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestExpGradient(kBase, kScale, kShift); +} + TYPED_TEST(NeuronLayerTest, TestExpLayerBase2) { typedef typename TypeParam::Dtype Dtype; const Dtype kBase = 2; From 09130ce35604a991cee41c942ff8845468cacfa7 Mon Sep 17 00:00:00 2001 From: Thomas Date: Mon, 11 Apr 2016 12:52:34 -0500 Subject: [PATCH 10/54] Fix protobuf message generation The latest versions of protobuf do not reveal empty message fields with dir(). This uses the documented way of determining all of a message's fields and so is compatible with past and future versions of protobuf. 
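As an illustration (assuming the generated caffe.proto.caffe_pb2 module is importable), these are the two approaches the patch below switches between:

    from caffe.proto import caffe_pb2

    layer = caffe_pb2.LayerParameter()
    # dir()-based approach: recent protobuf releases no longer expose empty
    # message fields this way, so some '*_param' names can be missed.
    old_names = [s for s in dir(layer) if s.endswith('_param')]
    # Descriptor-based approach: enumerate every declared field explicitly.
    new_names = [f.name for f in layer.DESCRIPTOR.fields
                 if f.name.endswith('_param')]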
--- python/caffe/net_spec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py index 63de4cce4b2..5fb1f0b3fb1 100644 --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -32,7 +32,7 @@ def param_name_dict(): # get all parameter names (typically underscore case) and corresponding # type names (typically camel case), which contain the layer names # (note that not all parameters correspond to layers, but we'll ignore that) - param_names = [s for s in dir(layer) if s.endswith('_param')] + param_names = [f.name for f in layer.DESCRIPTOR.fields if f.name.endswith('_param')] param_type_names = [type(getattr(layer, s)).__name__ for s in param_names] # strip the final '_param' or 'Parameter' param_names = [s[:-len('_param')] for s in param_names] From 219532f5552fb48931776f5236b5ec3d99eccb2a Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Tue, 12 Apr 2016 23:19:27 +0900 Subject: [PATCH 11/54] Fix typo in help text for "-model" option --- tools/caffe.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 305cfc3635d..d121fefc9d7 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -32,7 +32,7 @@ DEFINE_string(gpu, "", DEFINE_string(solver, "", "The solver definition protocol buffer text file."); DEFINE_string(model, "", - "The model definition protocol buffer text file.."); + "The model definition protocol buffer text file."); DEFINE_string(snapshot, "", "Optional; the snapshot solver state to resume training."); DEFINE_string(weights, "", From b265134710d78db4007471ccbe376c2c4221441a Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Wed, 13 Apr 2016 16:40:30 -0700 Subject: [PATCH 12/54] [docs] install: CUDA 7+ and cuDNN v4 compatible Latest CUDA versions are all compatible, and Caffe has been compatible with cuDNN v4 since PR #3439 --- docs/installation.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 893164584d9..e6c6886df52 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -20,7 +20,7 @@ When updating Caffe, it's best to `make clean` before re-compiling. Caffe has several dependencies: * [CUDA](https://developer.nvidia.com/cuda-zone) is required for GPU mode. - * library version 7.0 and the latest driver version are recommended, but 6.* is fine too + * library version 7+ and the latest driver version are recommended, but 6.* is fine too * 5.5, and 5.0 are compatible but considered legacy * [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) via ATLAS, MKL, or OpenBLAS. * [Boost](http://www.boost.org/) >= 1.55 @@ -30,14 +30,14 @@ Optional dependencies: * [OpenCV](http://opencv.org/) >= 2.4 including 3.0 * IO libraries: `lmdb`, `leveldb` (note: leveldb requires `snappy`) -* cuDNN for GPU acceleration (v3) +* cuDNN for GPU acceleration (v4) Pycaffe and Matcaffe interfaces have their own natural needs. * For Python Caffe: `Python 2.7` or `Python 3.3+`, `numpy (>= 1.7)`, boost-provided `boost.python` * For MATLAB Caffe: MATLAB with the `mex` compiler. -**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v3; older versions are supported in older Caffe. 
+**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v4; older versions are supported in older Caffe. **CPU-only Caffe**: for cold-brewed CPU-only Caffe uncomment the `CPU_ONLY := 1` flag in `Makefile.config` to configure and build Caffe without CUDA. This is helpful for cloud or cluster deployment. From 462a688fb8997f87b19c3c51860eb32d5458b246 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Wed, 13 Apr 2016 16:43:39 -0700 Subject: [PATCH 13/54] [docs] install: include latest versions and platforms, highlight guides Caffe runs on Ubuntu, OS X, and RHEL (+ company) in master with branches for OpenCL and Windows. Docker is a nice route to out-of-the-box brewing. --- docs/installation.md | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index e6c6886df52..9aa83527fd5 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -5,13 +5,23 @@ title: Installation # Installation Prior to installing, have a glance through this guide and take note of the details for your platform. -We install and run Caffe on Ubuntu 14.04 and 12.04, OS X 10.10 / 10.9 / 10.8, and AWS. -The official Makefile and `Makefile.config` build are complemented by an automatic CMake build from the community. +We install and run Caffe on Ubuntu 16.04–12.04, OS X 10.11–10.8, and through Docker and AWS. +The official Makefile and `Makefile.config` build are complemented by a [community CMake build](#cmake-build). + +**Step-by-step Instructions**: + +- [Docker setup](https://github.com/BVLC/caffe/tree/master/docker) *out-of-the-box brewing* +- [Ubuntu installation](install_apt.html) *the standard platform* +- [OS X installation](install_osx.html) +- [RHEL / CentOS / Fedora installation](install_yum.html) +- [Windows](https://github.com/BVLC/caffe/tree/windows) *see the Windows branch led by Microsoft* +- [OpenCL](https://github.com/BVLC/caffe/tree/opencl) *see the OpenCL branch led by Fabian Tschopp* + +**Overview**: - [Prerequisites](#prerequisites) - [Compilation](#compilation) - [Hardware](#hardware) -- Platforms: [Ubuntu guide](install_apt.html), [OS X guide](install_osx.html), and [RHEL / CentOS / Fedora guide](install_yum.html) When updating Caffe, it's best to `make clean` before re-compiling. @@ -82,10 +92,6 @@ Install MATLAB, and make sure that its `mex` is in your `$PATH`. *Caffe's MATLAB interface works with versions 2015a, 2014a/b, 2013a/b, and 2012b.* -#### Windows - -There is an unofficial Windows port of Caffe at [niuzhiheng/caffe:windows](https://github.com/niuzhiheng/caffe). Thanks [@niuzhiheng](https://github.com/niuzhiheng)! - ## Compilation Caffe can be compiled with either Make or CMake. Make is officially supported while CMake is supported by the community. @@ -113,7 +119,7 @@ Be sure to set your MATLAB and Python paths in `Makefile.config` first! Now that you have installed Caffe, check out the [MNIST tutorial](gathered/examples/mnist.html) and the [reference ImageNet model tutorial](gathered/examples/imagenet.html). -### Compilation with CMake +### CMake Build In lieu of manually editing `Makefile.config` to configure the build, Caffe offers an unofficial CMake build thanks to @Nerei, @akosiorek, and other members of the community. 
It requires CMake version >= 2.8.7. The basic steps are as follows: From 0ef5918bbb7cb6e6d733ef91acff5349febc2bc7 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Wed, 13 Apr 2016 18:52:50 -0700 Subject: [PATCH 14/54] [docs] install: be more firm about compute capability >= 3.0 --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index 9aa83527fd5..95a57fdffda 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -137,7 +137,7 @@ See [PR #1667](https://github.com/BVLC/caffe/pull/1667) for options and details. **Laboratory Tested Hardware**: Berkeley Vision runs Caffe with K40s, K20s, and Titans including models at ImageNet/ILSVRC scale. We also run on GTX series cards (980s and 770s) and GPU-equipped MacBook Pros. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. -**CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Your mileage may vary. +**CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Brew with caution; we recommend compute capbility >= 3.0. Once installed, check your times against our [reference performance numbers](performance_hardware.html) to make sure everything is configured properly. From b9164503ff51e8167cac9feb3f9a3d99778f13a8 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Wed, 13 Apr 2016 18:53:28 -0700 Subject: [PATCH 15/54] [docs] install: include more lab tested hardware --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index 95a57fdffda..aa946911c63 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -135,7 +135,7 @@ See [PR #1667](https://github.com/BVLC/caffe/pull/1667) for options and details. ## Hardware -**Laboratory Tested Hardware**: Berkeley Vision runs Caffe with K40s, K20s, and Titans including models at ImageNet/ILSVRC scale. We also run on GTX series cards (980s and 770s) and GPU-equipped MacBook Pros. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. +**Laboratory Tested Hardware**: Berkeley Vision runs Caffe with Titan Xs, K80s, GTX 980s, K40s, K20s, Titans, and GTX 770s including models at ImageNet/ILSVRC scale. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. **CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Brew with caution; we recommend compute capbility >= 3.0. 
From e867e60fa24985b112af9885ec553d5dd62f49bf Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Thu, 14 Apr 2016 22:56:37 -0700 Subject: [PATCH 16/54] [test] CropLayer: test dimensions check to reveal bounds checking bug --- src/caffe/test/test_crop_layer.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/caffe/test/test_crop_layer.cpp b/src/caffe/test/test_crop_layer.cpp index 45f24e2ee8d..ce2c736f644 100644 --- a/src/caffe/test/test_crop_layer.cpp +++ b/src/caffe/test/test_crop_layer.cpp @@ -91,6 +91,24 @@ TYPED_TEST(CropLayerTest, TestSetupShapeNegativeIndexing) { } } +TYPED_TEST(CropLayerTest, TestDimensionsCheck) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + // Reshape size blob to have incompatible sizes for uncropped dimensions: + // the size blob has more channels than the data blob, but this is fine + // since the channels dimension is not cropped in this configuration. + this->blob_bottom_1_->Reshape(2, 5, 4, 2); + CropLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->num_axes(); ++i) { + if (i < 2) { + EXPECT_EQ(this->blob_bottom_0_->shape(i), this->blob_top_->shape(i)); + } else { + EXPECT_EQ(this->blob_bottom_1_->shape(i), this->blob_top_->shape(i)); + } + } +} + TYPED_TEST(CropLayerTest, TestCropAll) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; From 75b0d40a856dda87f2e0de77b2c6626753e1e231 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Thu, 14 Apr 2016 22:16:07 -0700 Subject: [PATCH 17/54] [fix] CropLayer: check dimension bounds only for cropped dimensions check only the dimensions to be cropped for compatible sizes and offsets --- src/caffe/layers/crop_layer.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/caffe/layers/crop_layer.cpp b/src/caffe/layers/crop_layer.cpp index e81bdd732f3..849208f5666 100644 --- a/src/caffe/layers/crop_layer.cpp +++ b/src/caffe/layers/crop_layer.cpp @@ -61,12 +61,11 @@ void CropLayer::Reshape(const vector*>& bottom, // following axis crop_offset = param.offset(i - start_axis); } + // check that the crop and offset are within the dimension bounds + CHECK_GE(bottom[0]->shape(i) - crop_offset, bottom[1]->shape(i)) + << "the crop for dimension " << i << " is out-of-bounds with " + << "size " << bottom[1]->shape(i) << " and offset " << crop_offset; } - // Check that the image we are cropping minus the margin is bigger - // than the destination image. - CHECK_GE(bottom[0]->shape(i) - crop_offset, - bottom[1]->shape(i)) - << "invalid crop parameters in dimension: " << i; // Now set new size and offsets new_shape[i] = new_size; offsets[i] = crop_offset; From 00dc3d1ced4467be00ccc82b8509e4a25d54808d Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Thu, 14 Apr 2016 22:31:38 -0700 Subject: [PATCH 18/54] CropLayer: groom comments --- include/caffe/layers/crop_layer.hpp | 9 +++++++++ src/caffe/layers/crop_layer.cpp | 22 ++++++++-------------- src/caffe/layers/crop_layer.cu | 9 --------- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/include/caffe/layers/crop_layer.hpp b/include/caffe/layers/crop_layer.hpp index 5c605b2ae9e..c4fda1220c3 100644 --- a/include/caffe/layers/crop_layer.hpp +++ b/include/caffe/layers/crop_layer.hpp @@ -44,6 +44,7 @@ class CropLayer : public Layer { vector offsets; private: + // Recursive copy function. 
void crop_copy(const vector*>& bottom, const vector*>& top, const vector& offsets, @@ -53,6 +54,14 @@ class CropLayer : public Layer { Dtype* dest_data, bool is_forward); + // Recursive copy function: this is similar to crop_copy() but loops over all + // but the last two dimensions to allow for ND cropping while still relying on + // a CUDA kernel for the innermost two dimensions for performance reasons. An + // alterantive implementation could rely on the kernel more by passing + // offsets, but this is problematic because of its variable length. + // Since in the standard (N,C,W,H) case N,C are usually not cropped a speedup + // could be achieved by not looping the application of the copy_kernel around + // these dimensions. void crop_copy_gpu(const vector*>& bottom, const vector*>& top, const vector& offsets, diff --git a/src/caffe/layers/crop_layer.cpp b/src/caffe/layers/crop_layer.cpp index 849208f5666..aecdcd63194 100644 --- a/src/caffe/layers/crop_layer.cpp +++ b/src/caffe/layers/crop_layer.cpp @@ -15,8 +15,7 @@ namespace caffe { template void CropLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - // All logic that depends only on the number of dimensions is here, - // the rest is in Reshape because it depends on Blob size. + // LayerSetup() handles the number of dimensions; Reshape() handles the sizes. // bottom[0] supplies the data // bottom[1] supplies the size const CropParameter& param = this->layer_param_.crop_param(); @@ -40,40 +39,35 @@ void CropLayer::Reshape(const vector*>& bottom, int input_dim = bottom[0]->num_axes(); const int start_axis = bottom[0]->CanonicalAxisIndex(param.axis()); - // initialize all offsets to 0 + // Initialize offsets to 0 and the new shape to the current shape of the data. offsets = vector(input_dim, 0); - // initialize new shape to bottom[0] vector new_shape(bottom[0]->shape()); - // apply crops + // Determine crop offsets and the new shape post-crop. for (int i = 0; i < input_dim; ++i) { int crop_offset = 0; - int new_size = bottom[0]->shape(i); + int new_size = bottom[0]->shape(i); if (i >= start_axis) { new_size = bottom[1]->shape(i); - if (param.offset_size() == 1) { - // if only one crop value is supplied, crop all dimensions after axis - // by this crop value + // If only one offset is given, all crops have the same offset. crop_offset = param.offset(0); } else if (param.offset_size() > 1) { - // crop values specified must be equal to the number of dimensions - // following axis + // For several offsets, the number of offsets must be equal to the + // number of dimensions to crop, that is dimensions after the axis. crop_offset = param.offset(i - start_axis); } - // check that the crop and offset are within the dimension bounds + // Check that the crop and offset are within the dimension's bounds. 
CHECK_GE(bottom[0]->shape(i) - crop_offset, bottom[1]->shape(i)) << "the crop for dimension " << i << " is out-of-bounds with " << "size " << bottom[1]->shape(i) << " and offset " << crop_offset; } - // Now set new size and offsets new_shape[i] = new_size; offsets[i] = crop_offset; } top[0]->Reshape(new_shape); } -// recursive copy function template void CropLayer::crop_copy(const vector*>& bottom, const vector*>& top, diff --git a/src/caffe/layers/crop_layer.cu b/src/caffe/layers/crop_layer.cu index 9ed8f7cce57..f78cecbbeee 100644 --- a/src/caffe/layers/crop_layer.cu +++ b/src/caffe/layers/crop_layer.cu @@ -22,15 +22,6 @@ __global__ void copy_kernel(const int n, const int height, const int width, } } -// recursive copy function, this function is similar to crop_copy but loops -// over all but the last two dimensions. It is implemented this way to allow -// for ND cropping while still relying on a CUDA kernel for the innermost -// two dimensions for performance reasons. -// An alternative way to implement ND cropping relying more on the kernel -// would require passing offsets to the kernel, which is a bit problematic -// because it is of variable length. Since in the standard (N,C,W,H) case -// N,C are usually not cropped a speedup could be achieved by not looping -// the application of the copy_kernel around these dimensions. template void CropLayer::crop_copy_gpu(const vector*>& bottom, const vector*>& top, From 1c49130c33ebdec042ff6da18d03b7c5f6ad8c93 Mon Sep 17 00:00:00 2001 From: ZhouYzzz Date: Fri, 15 Apr 2016 22:51:49 +0800 Subject: [PATCH 19/54] Allow the python layer have attribute "phase" --- include/caffe/layers/python_layer.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/include/caffe/layers/python_layer.hpp b/include/caffe/layers/python_layer.hpp index b839d52684e..66dbbdf13b8 100644 --- a/include/caffe/layers/python_layer.hpp +++ b/include/caffe/layers/python_layer.hpp @@ -26,6 +26,7 @@ class PythonLayer : public Layer { } self_.attr("param_str") = bp::str( this->layer_param_.python_param().param_str()); + self_.attr("phase") = static_cast(this->phase_); self_.attr("setup")(bottom, top); } virtual void Reshape(const vector*>& bottom, From 458928a3bc1ee94e5f12bb254a5de819c449fc0a Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Mon, 18 Apr 2016 08:54:21 -0700 Subject: [PATCH 20/54] Typo in docs/installation.md --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index aa946911c63..1e29a49d82d 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -137,7 +137,7 @@ See [PR #1667](https://github.com/BVLC/caffe/pull/1667) for options and details. **Laboratory Tested Hardware**: Berkeley Vision runs Caffe with Titan Xs, K80s, GTX 980s, K40s, K20s, Titans, and GTX 770s including models at ImageNet/ILSVRC scale. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. -**CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Brew with caution; we recommend compute capbility >= 3.0. +**CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Brew with caution; we recommend compute capability >= 3.0. 
Once installed, check your times against our [reference performance numbers](performance_hardware.html) to make sure everything is configured properly. From bd762101dba321146d2d9cb747c79c4c678cbfdb Mon Sep 17 00:00:00 2001 From: Achal Dave Date: Wed, 20 Apr 2016 17:34:29 -0400 Subject: [PATCH 21/54] Explicitly point out -weights flag in tutorial The -weights flag is somewhat easy to miss as it's only in one command, but is the crucial thing that anyone searching for 'how to finetune' is looking for. Hopefully this more clearly points out the '-weights' flag, which might otherwise be overlooked in this tutorial. --- examples/finetune_flickr_style/readme.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/finetune_flickr_style/readme.md b/examples/finetune_flickr_style/readme.md index 9ba4c9217ff..188dedf1b9a 100644 --- a/examples/finetune_flickr_style/readme.md +++ b/examples/finetune_flickr_style/readme.md @@ -57,7 +57,11 @@ The prototxts in this example assume this, and also assume the presence of the I We'll also need the ImageNet-trained model, which you can obtain by running `./scripts/download_model_binary.py models/bvlc_reference_caffenet`. -Now we can train! (You can fine-tune in CPU mode by leaving out the `-gpu` flag.) +Now we can train! The key to fine-tuning is the `-weights` argument in the +command below, which tells Caffe that we want to load weights from a pre-trained +Caffe model. + +(You can fine-tune in CPU mode by leaving out the `-gpu` flag.) caffe % ./build/tools/caffe train -solver models/finetune_flickr_style/solver.prototxt -weights models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel -gpu 0 From 90426645c36ad71c778c4ac3688ec164242a50a1 Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Thu, 25 Feb 2016 19:58:01 -0800 Subject: [PATCH 22/54] Don't set map_size=1TB in util/db_lmdb Instead, double the map size on the MDB_MAP_FULL exception. 
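The idea, taken on its own: buffer the writes, attempt the transaction, and whenever LMDB reports MDB_MAP_FULL, grow the memory map and retry instead of reserving 1 TB up front. Below is a rough standalone sketch of that retry loop, not the Caffe code itself; it assumes liblmdb is installed and that the "./testdb" directory already exists, the helper name put_with_growth and the single key/value pair are purely illustrative, and error handling is trimmed for brevity.

```
#include <lmdb.h>
#include <cstring>

// Sketch: keep doubling the LMDB map size until the write commits.
static void put_with_growth(MDB_env* env, const char* key, const char* val) {
  while (true) {
    MDB_txn* txn = NULL;
    MDB_dbi dbi;
    mdb_txn_begin(env, NULL, 0, &txn);
    mdb_dbi_open(txn, NULL, 0, &dbi);

    MDB_val k, v;
    k.mv_size = strlen(key);  k.mv_data = const_cast<char*>(key);
    v.mv_size = strlen(val);  v.mv_data = const_cast<char*>(val);

    int rc = mdb_put(txn, dbi, &k, &v, 0);
    if (rc == 0) {
      rc = mdb_txn_commit(txn);   // commit can also return MDB_MAP_FULL
    } else {
      mdb_txn_abort(txn);
    }
    if (rc != MDB_MAP_FULL) return;  // success, or an unrelated error

    // Out of space: read the current map size, double it, and retry.
    MDB_envinfo info;
    mdb_env_info(env, &info);
    mdb_env_set_mapsize(env, info.me_mapsize * 2);
  }
}

int main() {
  MDB_env* env = NULL;
  mdb_env_create(&env);
  mdb_env_open(env, "./testdb", 0, 0664);  // directory must already exist
  put_with_growth(env, "key", "value");
  mdb_env_close(env);
  return 0;
}
```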
--- include/caffe/util/db_lmdb.hpp | 13 ++++--- src/caffe/util/db_lmdb.cpp | 65 +++++++++++++++++++++++++++------- 2 files changed, 60 insertions(+), 18 deletions(-) diff --git a/include/caffe/util/db_lmdb.hpp b/include/caffe/util/db_lmdb.hpp index 4e1568ace50..ee370322383 100644 --- a/include/caffe/util/db_lmdb.hpp +++ b/include/caffe/util/db_lmdb.hpp @@ -3,6 +3,7 @@ #define CAFFE_UTIL_DB_LMDB_HPP #include +#include #include "lmdb.h" @@ -54,14 +55,16 @@ class LMDBCursor : public Cursor { class LMDBTransaction : public Transaction { public: - explicit LMDBTransaction(MDB_dbi* mdb_dbi, MDB_txn* mdb_txn) - : mdb_dbi_(mdb_dbi), mdb_txn_(mdb_txn) { } + explicit LMDBTransaction(MDB_env* mdb_env) + : mdb_env_(mdb_env) { } virtual void Put(const string& key, const string& value); - virtual void Commit() { MDB_CHECK(mdb_txn_commit(mdb_txn_)); } + virtual void Commit(); private: - MDB_dbi* mdb_dbi_; - MDB_txn* mdb_txn_; + MDB_env* mdb_env_; + vector keys, values; + + void DoubleMapSize(); DISABLE_COPY_AND_ASSIGN(LMDBTransaction); }; diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index 0bc82b53e2b..df83a52a633 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -7,11 +7,8 @@ namespace caffe { namespace db { -const size_t LMDB_MAP_SIZE = 1099511627776; // 1 TB - void LMDB::Open(const string& source, Mode mode) { MDB_CHECK(mdb_env_create(&mdb_env_)); - MDB_CHECK(mdb_env_set_mapsize(mdb_env_, LMDB_MAP_SIZE)); if (mode == NEW) { CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << "failed"; } @@ -48,19 +45,61 @@ LMDBCursor* LMDB::NewCursor() { } LMDBTransaction* LMDB::NewTransaction() { - MDB_txn* mdb_txn; - MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn)); - MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); - return new LMDBTransaction(&mdb_dbi_, mdb_txn); + return new LMDBTransaction(mdb_env_); } void LMDBTransaction::Put(const string& key, const string& value) { - MDB_val mdb_key, mdb_value; - mdb_key.mv_data = const_cast(key.data()); - mdb_key.mv_size = key.size(); - mdb_value.mv_data = const_cast(value.data()); - mdb_value.mv_size = value.size(); - MDB_CHECK(mdb_put(mdb_txn_, *mdb_dbi_, &mdb_key, &mdb_value, 0)); + keys.push_back(key); + values.push_back(value); +} + +void LMDBTransaction::Commit() { + MDB_dbi mdb_dbi; + MDB_val mdb_key, mdb_data; + MDB_txn *mdb_txn; + + // Initialize MDB variables + MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn)); + MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi)); + + bool out_of_memory = false; + for (int i = 0; i < keys.size(); i++) { + mdb_key.mv_size = keys[i].size(); + mdb_key.mv_data = const_cast(keys[i].data()); + mdb_data.mv_size = values[i].size(); + mdb_data.mv_data = const_cast(values[i].data()); + + int put_rc = mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0); + if (put_rc == MDB_MAP_FULL) { + out_of_memory = true; + break; + } else { + // Failed for some other reason + MDB_CHECK(put_rc); + } + } + + if (!out_of_memory) { + // Commit the transaction + MDB_CHECK(mdb_txn_commit(mdb_txn)); + mdb_dbi_close(mdb_env_, mdb_dbi); + keys.clear(); + values.clear(); + } else { + // Double the map size and retry + mdb_txn_abort(mdb_txn); + mdb_dbi_close(mdb_env_, mdb_dbi); + DoubleMapSize(); + Commit(); + } +} + +void LMDBTransaction::DoubleMapSize() { + struct MDB_envinfo current_info; + MDB_CHECK(mdb_env_info(mdb_env_, ¤t_info)); + size_t new_size = current_info.me_mapsize * 2; + DLOG(INFO) << "Doubling LMDB map size to " << (new_size>>20) << "MB ..."; + 
MDB_CHECK(mdb_env_set_mapsize(mdb_env_, new_size)); } } // namespace db From f30c61cfdfc0d254ec233b972ff4b6b0aa2f5d4c Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Thu, 25 Feb 2016 20:02:25 -0800 Subject: [PATCH 23/54] Print to stderr for example LMDB code --- examples/cifar10/convert_cifar_data.cpp | 2 ++ examples/mnist/convert_mnist_data.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/examples/cifar10/convert_cifar_data.cpp b/examples/cifar10/convert_cifar_data.cpp index e1b89f42fb6..7385a74a679 100644 --- a/examples/cifar10/convert_cifar_data.cpp +++ b/examples/cifar10/convert_cifar_data.cpp @@ -91,6 +91,8 @@ void convert_dataset(const string& input_folder, const string& output_folder, } int main(int argc, char** argv) { + FLAGS_alsologtostderr = 1; + if (argc != 4) { printf("This script converts the CIFAR dataset to the leveldb format used\n" "by caffe to perform classification.\n" diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index 16d28093dd5..32bee5269ba 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -178,6 +178,8 @@ int main(int argc, char** argv) { namespace gflags = google; #endif + FLAGS_alsologtostderr = 1; + gflags::SetUsageMessage("This script converts the MNIST dataset to\n" "the lmdb/leveldb format used by Caffe to load data.\n" "Usage:\n" From 74040cb2ed9d46a267a16870e9878f3b6911d644 Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Thu, 25 Feb 2016 20:14:02 -0800 Subject: [PATCH 24/54] Update MNIST example to use new DB classes --- examples/mnist/convert_mnist_data.cpp | 87 ++++----------------------- 1 file changed, 12 insertions(+), 75 deletions(-) diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index 32bee5269ba..57ddef77074 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -22,12 +22,15 @@ #include // NOLINT(readability/streams) #include +#include "boost/scoped_ptr.hpp" #include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" #include "caffe/util/format.hpp" #if defined(USE_LEVELDB) && defined(USE_LMDB) using namespace caffe; // NOLINT(build/namespaces) +using boost::scoped_ptr; using std::string; DEFINE_string(backend, "lmdb", "The backend for storing the result"); @@ -67,43 +70,10 @@ void convert_dataset(const char* image_filename, const char* label_filename, image_file.read(reinterpret_cast(&cols), 4); cols = swap_endian(cols); - // lmdb - MDB_env *mdb_env; - MDB_dbi mdb_dbi; - MDB_val mdb_key, mdb_data; - MDB_txn *mdb_txn; - // leveldb - leveldb::DB* db; - leveldb::Options options; - options.error_if_exists = true; - options.create_if_missing = true; - options.write_buffer_size = 268435456; - leveldb::WriteBatch* batch = NULL; - - // Open db - if (db_backend == "leveldb") { // leveldb - LOG(INFO) << "Opening leveldb " << db_path; - leveldb::Status status = leveldb::DB::Open( - options, db_path, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_path - << ". 
Is it already existing?"; - batch = new leveldb::WriteBatch(); - } else if (db_backend == "lmdb") { // lmdb - LOG(INFO) << "Opening lmdb " << db_path; - CHECK_EQ(mkdir(db_path, 0744), 0) - << "mkdir " << db_path << "failed"; - CHECK_EQ(mdb_env_create(&mdb_env), MDB_SUCCESS) << "mdb_env_create failed"; - CHECK_EQ(mdb_env_set_mapsize(mdb_env, 1099511627776), MDB_SUCCESS) // 1TB - << "mdb_env_set_mapsize failed"; - CHECK_EQ(mdb_env_open(mdb_env, db_path, 0, 0664), MDB_SUCCESS) - << "mdb_env_open failed"; - CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) - << "mdb_txn_begin failed"; - CHECK_EQ(mdb_open(mdb_txn, NULL, 0, &mdb_dbi), MDB_SUCCESS) - << "mdb_open failed. Does the lmdb already exist? "; - } else { - LOG(FATAL) << "Unknown db backend " << db_backend; - } + + scoped_ptr db(db::GetDB(db_backend)); + db->Open(db_path, db::NEW); + scoped_ptr txn(db->NewTransaction()); // Storing to db char label; @@ -125,52 +95,19 @@ void convert_dataset(const char* image_filename, const char* label_filename, string key_str = caffe::format_int(item_id, 8); datum.SerializeToString(&value); - // Put in db - if (db_backend == "leveldb") { // leveldb - batch->Put(key_str, value); - } else if (db_backend == "lmdb") { // lmdb - mdb_data.mv_size = value.size(); - mdb_data.mv_data = reinterpret_cast(&value[0]); - mdb_key.mv_size = key_str.size(); - mdb_key.mv_data = reinterpret_cast(&key_str[0]); - CHECK_EQ(mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0), MDB_SUCCESS) - << "mdb_put failed"; - } else { - LOG(FATAL) << "Unknown db backend " << db_backend; - } + txn->Put(key_str, value); if (++count % 1000 == 0) { - // Commit txn - if (db_backend == "leveldb") { // leveldb - db->Write(leveldb::WriteOptions(), batch); - delete batch; - batch = new leveldb::WriteBatch(); - } else if (db_backend == "lmdb") { // lmdb - CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) - << "mdb_txn_commit failed"; - CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) - << "mdb_txn_begin failed"; - } else { - LOG(FATAL) << "Unknown db backend " << db_backend; - } + txn->Commit(); } } // write the last batch if (count % 1000 != 0) { - if (db_backend == "leveldb") { // leveldb - db->Write(leveldb::WriteOptions(), batch); - delete batch; - delete db; - } else if (db_backend == "lmdb") { // lmdb - CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; - mdb_close(mdb_env, mdb_dbi); - mdb_env_close(mdb_env); - } else { - LOG(FATAL) << "Unknown db backend " << db_backend; - } - LOG(ERROR) << "Processed " << count << " files."; + txn->Commit(); } + LOG(INFO) << "Processed " << count << " files."; delete[] pixels; + db->Close(); } int main(int argc, char** argv) { From bff14b47c58cffa28a71b9e3caba93da2354ab07 Mon Sep 17 00:00:00 2001 From: HeGaoYuan <273230305@qq.com> Date: Sat, 23 Apr 2016 14:48:41 +0800 Subject: [PATCH 25/54] Fixed #4029: test the network every 500 iterations, not 1000 iterations --- examples/mnist/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mnist/readme.md b/examples/mnist/readme.md index b87a0f53c7a..35952155a30 100644 --- a/examples/mnist/readme.md +++ b/examples/mnist/readme.md @@ -248,7 +248,7 @@ These messages tell you the details about each layer, its connections and its ou I1203 solver.cpp:36] Solver scaffolding done. I1203 solver.cpp:44] Solving LeNet -Based on the solver setting, we will print the training loss function every 100 iterations, and test the network every 1000 iterations. 
You will see messages like this: +Based on the solver setting, we will print the training loss function every 100 iterations, and test the network every 500 iterations. You will see messages like this: I1203 solver.cpp:204] Iteration 100, lr = 0.00992565 I1203 solver.cpp:66] Iteration 100, loss = 0.26044 From 0e145c5af91bf42e20cf8c8a295816b06905ee4e Mon Sep 17 00:00:00 2001 From: ebadawy Date: Sun, 24 Apr 2016 20:24:41 +0200 Subject: [PATCH 26/54] Read the data as a binary Appending 'b' in the file mode as hashlib functions require to pass in bytes --- scripts/download_model_binary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/download_model_binary.py b/scripts/download_model_binary.py index 66f72f2477e..fcdbb5a91a2 100755 --- a/scripts/download_model_binary.py +++ b/scripts/download_model_binary.py @@ -60,7 +60,7 @@ def valid_dirname(dirname): # Closure-d function for checking SHA1. def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']): - with open(filename, 'r') as f: + with open(filename, 'rb') as f: return hashlib.sha1(f.read()).hexdigest() == sha1 # Check if model exists. From 8619fbb90f2b5546ea8cb7c4021216d978d4cbc4 Mon Sep 17 00:00:00 2001 From: Sammy Sidhu Date: Wed, 27 Apr 2016 03:05:30 -0700 Subject: [PATCH 27/54] fixed typo in download script command cpp_classification --- examples/cpp_classification/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/cpp_classification/readme.md b/examples/cpp_classification/readme.md index a086db1a035..0de2885b53c 100644 --- a/examples/cpp_classification/readme.md +++ b/examples/cpp_classification/readme.md @@ -42,7 +42,7 @@ script: The ImageNet labels file (also called the *synset file*) is also required in order to map a prediction to the name of the class: ``` -./data/ilsvrc12/get_ilsvrc_aux.sh. +./data/ilsvrc12/get_ilsvrc_aux.sh ``` Using the files that were downloaded, we can classify the provided cat image (`examples/images/cat.jpg`) using this command: From 859cf6e1c3f965b4029b7940b861038031014ed7 Mon Sep 17 00:00:00 2001 From: Kun Wang Date: Wed, 27 Apr 2016 21:09:31 +0800 Subject: [PATCH 28/54] Fix an error in the example of ReshapeParameter. * this small mistake may confuse newer. 
--- src/caffe/proto/caffe.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 650c87ae3a6..ea40e60aa34 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -987,7 +987,7 @@ message ReshapeParameter { // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } // optional BlobShape shape = 1; From 8714b53719165e42f7844126f671f32ecc9b2e2f Mon Sep 17 00:00:00 2001 From: Drew Abbot Date: Wed, 27 Apr 2016 23:25:09 -0700 Subject: [PATCH 29/54] avoid non-integer array indices --- python/caffe/classifier.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py index 537193db8f8..ea29fed86f9 100644 --- a/python/caffe/classifier.py +++ b/python/caffe/classifier.py @@ -79,6 +79,7 @@ def predict(self, inputs, oversample=True): -self.crop_dims / 2.0, self.crop_dims / 2.0 ]) + crop = crop.astype(int) input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :] # Classify From 673e8cfc0b8f05f9fa3ebbad7cc6202822e5d9c5 Mon Sep 17 00:00:00 2001 From: Sean Bell Date: Thu, 28 Apr 2016 13:06:51 -0400 Subject: [PATCH 30/54] Suppress boost registration warnings in pycaffe (Based on #3960) --- python/caffe/_caffe.cpp | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index a2c46a123aa..32b5d921094 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -26,6 +26,19 @@ #define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x)) #endif +/* Fix to avoid registration warnings in pycaffe (#3960) */ +#define BP_REGISTER_SHARED_PTR_TO_PYTHON(PTR) do { \ + const boost::python::type_info info = \ + boost::python::type_id >(); \ + const boost::python::converter::registration* reg = \ + boost::python::converter::registry::query(info); \ + if (reg == NULL) { \ + bp::register_ptr_to_python >(); \ + } else if ((*reg).m_to_python == NULL) { \ + bp::register_ptr_to_python >(); \ + } \ +} while (0) + namespace bp = boost::python; namespace caffe { @@ -255,7 +268,7 @@ BOOST_PYTHON_MODULE(_caffe) { .def("_set_input_arrays", &Net_SetInputArrays, bp::with_custodian_and_ward<1, 2, bp::with_custodian_and_ward<1, 3> >()) .def("save", &Net_Save); - bp::register_ptr_to_python > >(); + BP_REGISTER_SHARED_PTR_TO_PYTHON(Net); bp::class_, shared_ptr >, boost::noncopyable>( "Blob", bp::no_init) @@ -275,7 +288,7 @@ BOOST_PYTHON_MODULE(_caffe) { NdarrayCallPolicies())) .add_property("diff", bp::make_function(&Blob::mutable_cpu_diff, NdarrayCallPolicies())); - bp::register_ptr_to_python > >(); + BP_REGISTER_SHARED_PTR_TO_PYTHON(Blob); bp::class_, shared_ptr >, boost::noncopyable>("Layer", bp::init()) @@ -284,7 +297,7 @@ BOOST_PYTHON_MODULE(_caffe) { .def("setup", &Layer::LayerSetUp) .def("reshape", &Layer::Reshape) .add_property("type", bp::make_function(&Layer::type)); - bp::register_ptr_to_python > >(); + BP_REGISTER_SHARED_PTR_TO_PYTHON(Layer); bp::class_("LayerParameter", bp::no_init); @@ -299,7 +312,7 @@ BOOST_PYTHON_MODULE(_caffe) { .def("step", &Solver::Step) .def("restore", &Solver::Restore) .def("snapshot", &Solver::Snapshot); - bp::register_ptr_to_python > >(); + BP_REGISTER_SHARED_PTR_TO_PYTHON(Solver); bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( From 
2da8600acdc922d03b667ef691279cb52c7226ed Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Fri, 29 Apr 2016 02:04:02 +0000 Subject: [PATCH 31/54] draw_net: accept prototxt without name Fixes #3819 --- python/caffe/draw.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/caffe/draw.py b/python/caffe/draw.py index cfa3fc5b1fb..61205ca9f37 100644 --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -142,7 +142,7 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True): ------- pydot graph object """ - pydot_graph = pydot.Dot(caffe_net.name, + pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net', graph_type='digraph', rankdir=rankdir) pydot_nodes = {} From cb3c992a2ae00ec634313a394361214d868f9bd2 Mon Sep 17 00:00:00 2001 From: Sheng Zha Date: Sat, 30 Apr 2016 16:40:05 -0700 Subject: [PATCH 32/54] fix grep in CUDA version detection to accomodate OSX's grep (and other grep that doesn't support \d extension) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5424c3a1858..568d9c2774d 100644 --- a/Makefile +++ b/Makefile @@ -272,7 +272,7 @@ endif ifeq ($(OSX), 1) CXX := /usr/bin/clang++ ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release [0-9.]*' | grep -o '[0-9.]*') ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) CXXFLAGS += -stdlib=libstdc++ LINKFLAGS += -stdlib=libstdc++ From 5d423b7a63718decf04bad93a481ebd56291ec7b Mon Sep 17 00:00:00 2001 From: Felix Abecassis Date: Mon, 2 May 2016 16:20:00 -0700 Subject: [PATCH 33/54] Pin the base image version for the GPU Dockerfile The previous Dockerfile can break if image nvidia/cuda:cudnn is updated to any of the following: - Ubuntu 16.04 LTS (already released) - cuDNN v5 (soon) - CUDA 8.0 (soon) --- docker/Makefile | 2 +- docker/standalone/gpu/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Makefile b/docker/Makefile index 725208c6b2b..0de887d0e19 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -22,7 +22,7 @@ docker_files: standalone_files standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile -FROM_GPU = "nvidia/cuda:cudnn" +FROM_GPU = "nvidia/cuda:7.5-cudnn4-devel-ubuntu14.04" FROM_CPU = "ubuntu:14.04" GPU_CMAKE_ARGS = -DUSE_CUDNN=1 CPU_CMAKE_ARGS = -DCPU_ONLY=1 diff --git a/docker/standalone/gpu/Dockerfile b/docker/standalone/gpu/Dockerfile index 1ddc6560d16..371aad5b1e9 100644 --- a/docker/standalone/gpu/Dockerfile +++ b/docker/standalone/gpu/Dockerfile @@ -1,4 +1,4 @@ -FROM nvidia/cuda:cudnn +FROM nvidia/cuda:7.5-cudnn4-devel-ubuntu14.04 MAINTAINER caffe-maint@googlegroups.com RUN apt-get update && apt-get install -y --no-install-recommends \ From c2dba923b82c669f2998a3174310fbbb5c64c39f Mon Sep 17 00:00:00 2001 From: ZhouYzzz Date: Wed, 4 May 2016 18:00:12 +0800 Subject: [PATCH 34/54] Add test for attribute "phase" in python layer --- python/caffe/test/test_python_layer.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py index e46b7118014..899514e90f1 100644 --- a/python/caffe/test/test_python_layer.py +++ b/python/caffe/test/test_python_layer.py @@ -44,6 +44,18 @@ def forward(self, bottom, top): def backward(self, top, propagate_down, bottom): self.blobs[0].diff[0] = 1 +class PhaseLayer(caffe.Layer): + """A layer for checking attribute 
`phase`""" + + def setup(self, bottom, top): + pass + + def reshape(self, bootom, top): + top[0].reshape() + + def forward(self, bottom, top): + top[0].data[()] = self.phase + def python_net_file(): with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: f.write("""name: 'pythonnet' force_backward: true @@ -76,6 +88,14 @@ def parameter_net_file(): """) return f.name +def phase_net_file(): + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + layer { type: 'Python' name: 'layer' top: 'phase' + python_param { module: 'test_python_layer' layer: 'PhaseLayer' } } + """) + return f.name + @unittest.skipIf('Python' not in caffe.layer_type_list(), 'Caffe built without Python layer support') @@ -140,3 +160,9 @@ def test_parameter(self): self.assertEqual(layer.blobs[0].data[0], 1) os.remove(net_file) + + def test_phase(self): + net_file = phase_net_file() + for phase in caffe.TRAIN, caffe.TEST: + net = caffe.Net(net_file, phase) + self.assertEqual(net.forward()['phase'], phase) From 5acc17a5bfe010d92cc20766f88eff70d4ae92cc Mon Sep 17 00:00:00 2001 From: Achal Dave Date: Wed, 4 May 2016 11:51:00 -0400 Subject: [PATCH 35/54] Exit on error and report argument error details. The statement 'exit' has no effect in Python scripts. Use 'sys.exit()' instead. --- tools/extra/plot_training_log.py.example | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example index 4d3ed0d15a9..d98c52d33d5 100755 --- a/tools/extra/plot_training_log.py.example +++ b/tools/extra/plot_training_log.py.example @@ -160,7 +160,7 @@ Supported chart types:""" % (len(get_supported_chart_types()) - 1, num = len(supported_chart_types) for i in xrange(num): print ' %d: %s' % (i, supported_chart_types[i]) - exit + sys.exit() def is_valid_chart_type(chart_type): return chart_type >= 0 and chart_type < len(get_supported_chart_types()) @@ -171,17 +171,19 @@ if __name__ == '__main__': else: chart_type = int(sys.argv[1]) if not is_valid_chart_type(chart_type): + print '%s is not a valid chart type.' % chart_type print_help() path_to_png = sys.argv[2] if not path_to_png.endswith('.png'): print 'Path must ends with png' % path_to_png - exit + sys.exit() path_to_logs = sys.argv[3:] for path_to_log in path_to_logs: if not os.path.exists(path_to_log): print 'Path does not exist: %s' % path_to_log - exit + sys.exit() if not path_to_log.endswith(get_log_file_suffix()): + print 'Log file must end in %s.' 
% get_log_file_suffix() print_help() ## plot_chart accpets multiple path_to_logs plot_chart(chart_type, path_to_png, path_to_logs) From 4f22fceda92a0370f21f64d45d71ef3e354a0312 Mon Sep 17 00:00:00 2001 From: Achal Dave Date: Wed, 4 May 2016 11:52:06 -0400 Subject: [PATCH 36/54] Remove trailing spaces --- tools/extra/plot_training_log.py.example | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example index d98c52d33d5..c3b47a81664 100755 --- a/tools/extra/plot_training_log.py.example +++ b/tools/extra/plot_training_log.py.example @@ -68,9 +68,9 @@ def get_field_descriptions(chart_type): get_chart_type_description_separator()) y_axis_field = description[0] x_axis_field = description[1] - return x_axis_field, y_axis_field + return x_axis_field, y_axis_field -def get_field_indecies(x_axis_field, y_axis_field): +def get_field_indecies(x_axis_field, y_axis_field): data_file_type = get_data_file_type(chart_type) fields = create_field_index()[0][data_file_type] return fields[x_axis_field], fields[y_axis_field] @@ -138,8 +138,8 @@ def plot_chart(chart_type, path_to_png, path_to_log_list): plt.legend(loc = legend_loc, ncol = 1) # ajust ncol to fit the space plt.title(get_chart_type_description(chart_type)) plt.xlabel(x_axis_field) - plt.ylabel(y_axis_field) - plt.savefig(path_to_png) + plt.ylabel(y_axis_field) + plt.savefig(path_to_png) plt.show() def print_help(): @@ -164,7 +164,7 @@ Supported chart types:""" % (len(get_supported_chart_types()) - 1, def is_valid_chart_type(chart_type): return chart_type >= 0 and chart_type < len(get_supported_chart_types()) - + if __name__ == '__main__': if len(sys.argv) < 4: print_help() From 938918c3f5d0a1a738d2229a337774cea92be95a Mon Sep 17 00:00:00 2001 From: Achal Dave Date: Wed, 4 May 2016 11:55:43 -0400 Subject: [PATCH 37/54] Reformat to fit in 79 columns --- tools/extra/plot_training_log.py.example | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example index c3b47a81664..3ea66e38075 100755 --- a/tools/extra/plot_training_log.py.example +++ b/tools/extra/plot_training_log.py.example @@ -10,7 +10,8 @@ import matplotlib.legend as lgd import matplotlib.markers as mks def get_log_parsing_script(): - dirname = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + dirname = os.path.dirname(os.path.abspath(inspect.getfile( + inspect.currentframe()))) return dirname + '/parse_log.sh' def get_log_file_suffix(): @@ -61,7 +62,8 @@ def get_data_file_type(chart_type): return data_file_type def get_data_file(chart_type, path_to_log): - return os.path.basename(path_to_log) + '.' + get_data_file_type(chart_type).lower() + return (os.path.basename(path_to_log) + '.' 
+ + get_data_file_type(chart_type).lower()) def get_field_descriptions(chart_type): description = get_chart_type_description(chart_type).split( From c2656f0bc7e1f51b4a82a79e7a5516f0f1fb012f Mon Sep 17 00:00:00 2001 From: Achal Dave Date: Wed, 4 May 2016 11:56:05 -0400 Subject: [PATCH 38/54] Fix typo (indecies->indices) --- tools/extra/plot_training_log.py.example | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example index 3ea66e38075..79924ae5a5a 100755 --- a/tools/extra/plot_training_log.py.example +++ b/tools/extra/plot_training_log.py.example @@ -72,7 +72,7 @@ def get_field_descriptions(chart_type): x_axis_field = description[1] return x_axis_field, y_axis_field -def get_field_indecies(x_axis_field, y_axis_field): +def get_field_indices(x_axis_field, y_axis_field): data_file_type = get_data_file_type(chart_type) fields = create_field_index()[0][data_file_type] return fields[x_axis_field], fields[y_axis_field] @@ -113,7 +113,7 @@ def plot_chart(chart_type, path_to_png, path_to_log_list): os.system('%s %s' % (get_log_parsing_script(), path_to_log)) data_file = get_data_file(chart_type, path_to_log) x_axis_field, y_axis_field = get_field_descriptions(chart_type) - x, y = get_field_indecies(x_axis_field, y_axis_field) + x, y = get_field_indices(x_axis_field, y_axis_field) data = load_data(data_file, x, y) ## TODO: more systematic color cycle for lines color = [random.random(), random.random(), random.random()] From e6fc797f3be59a12f26d247e2f1f79bf7d8086c4 Mon Sep 17 00:00:00 2001 From: Evan Shelhamer Date: Wed, 4 May 2016 13:31:35 -0700 Subject: [PATCH 39/54] [build] note that `make clean` clears build and distribute dirs --- Makefile.config.example | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile.config.example b/Makefile.config.example index 8fd49c9c1a7..07bed63ae40 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -98,6 +98,7 @@ LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib # (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.) # USE_PKG_CONFIG := 1 +# N.B. 
both build and distribute dirs are cleared on `make clean` BUILD_DIR := build DISTRIBUTE_DIR := distribute From c419f8517b1e1b3d7a07fe212fc6c90a70b519ea Mon Sep 17 00:00:00 2001 From: Jonathan L Long Date: Thu, 9 Jul 2015 15:49:48 -0700 Subject: [PATCH 40/54] add parameter layer for learning any bottom --- include/caffe/layers/parameter_layer.hpp | 45 ++++++++++++++++++++++++ src/caffe/layers/parameter_layer.cpp | 8 +++++ src/caffe/proto/caffe.proto | 7 +++- 3 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 include/caffe/layers/parameter_layer.hpp create mode 100644 src/caffe/layers/parameter_layer.cpp diff --git a/include/caffe/layers/parameter_layer.hpp b/include/caffe/layers/parameter_layer.hpp new file mode 100644 index 00000000000..188b92acbe2 --- /dev/null +++ b/include/caffe/layers/parameter_layer.hpp @@ -0,0 +1,45 @@ +#ifndef CAFFE_PARAMETER_LAYER_HPP_ +#define CAFFE_PARAMETER_LAYER_HPP_ + +#include + +#include "caffe/layer.hpp" + +namespace caffe { + +template +class ParameterLayer : public Layer { + public: + explicit ParameterLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top) { + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + this->blobs_.resize(1); + this->blobs_[0].reset(new Blob()); + this->blobs_[0]->Reshape(this->layer_param_.parameter_param().shape()); + } + top[0]->Reshape(this->layer_param_.parameter_param().shape()); + } + virtual void Reshape(const vector*>& bottom, + const vector*>& top) { } + virtual inline const char* type() const { return "Parameter"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) { + top[0]->ShareData(*(this->blobs_[0])); + top[0]->ShareDiff(*(this->blobs_[0])); + } + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + { } +}; + +} // namespace caffe + +#endif diff --git a/src/caffe/layers/parameter_layer.cpp b/src/caffe/layers/parameter_layer.cpp new file mode 100644 index 00000000000..fbd326f8469 --- /dev/null +++ b/src/caffe/layers/parameter_layer.cpp @@ -0,0 +1,8 @@ +#include "caffe/layers/parameter_layer.hpp" + +namespace caffe { + +INSTANTIATE_CLASS(ParameterLayer); +REGISTER_LAYER_CLASS(Parameter); + +} // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index ea40e60aa34..15810718631 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -306,7 +306,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. 
// -// LayerParameter next available layer-specific ID: 145 (last added: crop_param) +// LayerParameter next available layer-specific ID: 146 (last added: parameter_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -385,6 +385,7 @@ message LayerParameter { optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; + optional ParameterParameter parameter_param = 145; optional PoolingParameter pooling_param = 121; optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; @@ -873,6 +874,10 @@ message MVNParameter { optional float eps = 3 [default = 1e-9]; } +message ParameterParameter { + optional BlobShape shape = 1; +} + message PoolingParameter { enum PoolMethod { MAX = 0; From 4e690b22ae30b0d483ccbe971007f2c6732cceb0 Mon Sep 17 00:00:00 2001 From: crazytan Date: Thu, 28 Apr 2016 18:45:13 -0400 Subject: [PATCH 41/54] fix problems in net_surgery.ipynb --- examples/net_surgery.ipynb | 45 +++++++++++++++----------------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/examples/net_surgery.ipynb b/examples/net_surgery.ipynb index a6092db0c40..d50d503bfe0 100644 --- a/examples/net_surgery.ipynb +++ b/examples/net_surgery.ipynb @@ -22,7 +22,6 @@ "import numpy as np\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline\n", - "import Image\n", "\n", "# Make sure that caffe is on the python path:\n", "caffe_root = '../' # this file is expected to be in {caffe_root}/examples\n", @@ -3511,7 +3510,7 @@ "print(\"blobs {}\\nparams {}\".format(net.blobs.keys(), net.params.keys()))\n", "\n", "# load image and prepare as a single input batch for Caffe\n", - "im = np.array(Image.open('images/cat_gray.jpg'))\n", + "im = np.array(caffe.io.load_image('images/cat_gray.jpg', color=False)).squeeze()\n", "plt.title(\"original image\")\n", "plt.imshow(im)\n", "plt.axis('off')\n", @@ -4480,8 +4479,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "pre-surgery output mean -12.93\n", - "post-surgery output mean -11.93\n" + "pre-surgery output mean -0.02\n", + "post-surgery output mean 0.98\n" ] } ], @@ -4489,7 +4488,7 @@ "# pick first filter output\n", "conv0 = net.blobs['conv'].data[0, 0]\n", "print(\"pre-surgery output mean {:.2f}\".format(conv0.mean()))\n", - "# set first filter bias to 10\n", + "# set first filter bias to 1\n", "net.params['conv'][1].data[0] = 1.\n", "net.forward()\n", "print(\"post-surgery output mean {:.2f}\".format(conv0.mean()))" @@ -5494,13 +5493,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "1,2c1,2\r\n", + "1,2c1\r\n", "< # Fully convolutional network version of CaffeNet.\r\n", "< name: \"CaffeNetConv\"\r\n", "---\r\n", "> name: \"CaffeNet\"\r\n", - "> input: \"data\"\r\n", - "7,11c7\r\n", + "7,11c6\r\n", "< input_param {\r\n", "< # initial shape for a fully convolutional network:\r\n", "< # the shape can be set for each input by reshape.\r\n", @@ -5508,33 +5506,33 @@ "< }\r\n", "---\r\n", "> input_param { shape: { dim: 10 dim: 3 dim: 227 dim: 227 } }\r\n", - "157,158c153,154\r\n", + "157,158c152,153\r\n", "< name: \"fc6-conv\"\r\n", "< type: \"Convolution\"\r\n", "---\r\n", "> name: \"fc6\"\r\n", "> type: \"InnerProduct\"\r\n", - "160,161c156,157\r\n", + "160,161c155,156\r\n", "< top: \"fc6-conv\"\r\n", "< convolution_param {\r\n", "---\r\n", "> top: \"fc6\"\r\n", "> inner_product_param {\r\n", - "163d158\r\n", + "163d157\r\n", "< kernel_size: 6\r\n", - 
"169,170c164,165\r\n", + "169,170c163,164\r\n", "< bottom: \"fc6-conv\"\r\n", "< top: \"fc6-conv\"\r\n", "---\r\n", "> bottom: \"fc6\"\r\n", "> top: \"fc6\"\r\n", - "175,176c170,171\r\n", + "175,176c169,170\r\n", "< bottom: \"fc6-conv\"\r\n", "< top: \"fc6-conv\"\r\n", "---\r\n", "> bottom: \"fc6\"\r\n", "> top: \"fc6\"\r\n", - "182,186c177,181\r\n", + "182,186c176,180\r\n", "< name: \"fc7-conv\"\r\n", "< type: \"Convolution\"\r\n", "< bottom: \"fc6-conv\"\r\n", @@ -5546,21 +5544,21 @@ "> bottom: \"fc6\"\r\n", "> top: \"fc7\"\r\n", "> inner_product_param {\r\n", - "188d182\r\n", + "188d181\r\n", "< kernel_size: 1\r\n", - "194,195c188,189\r\n", + "194,195c187,188\r\n", "< bottom: \"fc7-conv\"\r\n", "< top: \"fc7-conv\"\r\n", "---\r\n", "> bottom: \"fc7\"\r\n", "> top: \"fc7\"\r\n", - "200,201c194,195\r\n", + "200,201c193,194\r\n", "< bottom: \"fc7-conv\"\r\n", "< top: \"fc7-conv\"\r\n", "---\r\n", "> bottom: \"fc7\"\r\n", "> top: \"fc7\"\r\n", - "207,211c201,205\r\n", + "207,211c200,204\r\n", "< name: \"fc8-conv\"\r\n", "< type: \"Convolution\"\r\n", "< bottom: \"fc7-conv\"\r\n", @@ -5572,9 +5570,9 @@ "> bottom: \"fc7\"\r\n", "> top: \"fc8\"\r\n", "> inner_product_param {\r\n", - "213d206\r\n", + "213d205\r\n", "< kernel_size: 1\r\n", - "219c212\r\n", + "219c211\r\n", "< bottom: \"fc8-conv\"\r\n", "---\r\n", "> bottom: \"fc8\"\r\n" @@ -5610,13 +5608,6 @@ } ], "source": [ - "# Make sure that caffe is on the python path:\n", - "caffe_root = '../' # this file is expected to be in {caffe_root}/examples\n", - "import sys\n", - "sys.path.insert(0, caffe_root + 'python')\n", - "\n", - "import caffe\n", - "\n", "# Load the original network and extract the fully connected layers' parameters.\n", "net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt', \n", " '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', \n", From da004d7c4e5d52b701762ecc8e20b4a4544a3457 Mon Sep 17 00:00:00 2001 From: Eric Tzeng Date: Thu, 5 May 2016 18:29:30 -0700 Subject: [PATCH 42/54] Allow reshaping blobs to size 0. Also add a test that reshapes a blob to shape (0, 5). 
--- src/caffe/blob.cpp | 4 +++- src/caffe/test/test_blob.cpp | 8 ++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index c86fd5d1d94..4a34e4c5856 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -30,7 +30,9 @@ void Blob::Reshape(const vector& shape) { int* shape_data = static_cast(shape_data_->mutable_cpu_data()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); - CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; + if (count_ != 0) { + CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; + } count_ *= shape[i]; shape_[i] = shape[i]; shape_data[i] = shape[i]; diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index a9d7d519e45..b88562223d0 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -51,6 +51,14 @@ TYPED_TEST(BlobSimpleTest, TestReshape) { EXPECT_EQ(this->blob_->count(), 120); } +TYPED_TEST(BlobSimpleTest, TestReshapeZero) { + vector shape(2); + shape[0] = 0; + shape[1] = 5; + this->blob_->Reshape(shape); + EXPECT_EQ(this->blob_->count(), 0); +} + TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { BlobProto blob_proto; From 42642936c2c29e539022e33bc0c691564d7e522d Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Mon, 9 May 2016 11:21:26 -0700 Subject: [PATCH 43/54] Catch MDB_MAP_FULL errors from mdb_txn_commit --- src/caffe/util/db_lmdb.cpp | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index df83a52a633..4567cd7b93a 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -62,36 +62,42 @@ void LMDBTransaction::Commit() { MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn)); MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi)); - bool out_of_memory = false; for (int i = 0; i < keys.size(); i++) { mdb_key.mv_size = keys[i].size(); mdb_key.mv_data = const_cast(keys[i].data()); mdb_data.mv_size = values[i].size(); mdb_data.mv_data = const_cast(values[i].data()); + // Add data to the transaction int put_rc = mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0); if (put_rc == MDB_MAP_FULL) { - out_of_memory = true; - break; - } else { - // Failed for some other reason - MDB_CHECK(put_rc); + // Out of memory - double the map size and retry + mdb_txn_abort(mdb_txn); + mdb_dbi_close(mdb_env_, mdb_dbi); + DoubleMapSize(); + Commit(); + return; } + // May have failed for some other reason + MDB_CHECK(put_rc); } - if (!out_of_memory) { - // Commit the transaction - MDB_CHECK(mdb_txn_commit(mdb_txn)); - mdb_dbi_close(mdb_env_, mdb_dbi); - keys.clear(); - values.clear(); - } else { - // Double the map size and retry - mdb_txn_abort(mdb_txn); + // Commit the transaction + int commit_rc = mdb_txn_commit(mdb_txn); + if (commit_rc == MDB_MAP_FULL) { + // Out of memory - double the map size and retry mdb_dbi_close(mdb_env_, mdb_dbi); DoubleMapSize(); Commit(); + return; } + // May have failed for some other reason + MDB_CHECK(commit_rc); + + // Cleanup after successful commit + mdb_dbi_close(mdb_env_, mdb_dbi); + keys.clear(); + values.clear(); } void LMDBTransaction::DoubleMapSize() { From a934ca54f3633479ea0573346c510df4f757df6c Mon Sep 17 00:00:00 2001 From: ray glover Date: Tue, 10 May 2016 15:44:47 +0100 Subject: [PATCH 44/54] [build] (CMake) customisable Caffe version/soversion --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt 
b/CMakeLists.txt index c5d99cef9dd..da7142c9b3c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,8 +10,8 @@ endif() project(Caffe C CXX) # ---[ Caffe version -set(CAFFE_TARGET_VERSION "1.0.0-rc3") -set(CAFFE_TARGET_SOVERSION "1.0.0-rc3") +set(CAFFE_TARGET_VERSION "1.0.0-rc3" CACHE STRING "Caffe logical version") +set(CAFFE_TARGET_SOVERSION "1.0.0-rc3" CACHE STRING "Caffe soname version") add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION}) # ---[ Using cmake scripts and modules From bb6ca4720ea41b8e9bdf162f63eb2757571a2e17 Mon Sep 17 00:00:00 2001 From: gdh1995 Date: Wed, 11 May 2016 20:51:07 +0800 Subject: [PATCH 45/54] a comment misses a space char --- src/caffe/util/db_lmdb.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index 4567cd7b93a..fb1d4956aa1 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -10,7 +10,7 @@ namespace caffe { namespace db { void LMDB::Open(const string& source, Mode mode) { MDB_CHECK(mdb_env_create(&mdb_env_)); if (mode == NEW) { - CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << "failed"; + CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << " failed"; } int flags = 0; if (mode == READ) { From 078d9981a2c64b19834decdef3ce3dd032b667c0 Mon Sep 17 00:00:00 2001 From: Kyle Mills Date: Fri, 13 May 2016 11:15:33 -0400 Subject: [PATCH 46/54] fixed typo in io.py --- python/caffe/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/caffe/io.py b/python/caffe/io.py index cee5ace2e88..e1759beb587 100644 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -46,7 +46,7 @@ def array_to_blobproto(arr, diff=None): return blob -def arraylist_to_blobprotovecor_str(arraylist): +def arraylist_to_blobprotovector_str(arraylist): """Converts a list of arrays to a serialized blobprotovec, which could be then passed to a network for processing. 
""" From 87c9dc397081248dd3d40e0dabce191557bcfc15 Mon Sep 17 00:00:00 2001 From: Yale Song Date: Fri, 13 May 2016 16:06:59 -0400 Subject: [PATCH 47/54] Fix Makefile CUDA_VERSION extraction on OSX Yosemite --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 568d9c2774d..403e00a38a1 100644 --- a/Makefile +++ b/Makefile @@ -272,7 +272,7 @@ endif ifeq ($(OSX), 1) CXX := /usr/bin/clang++ ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release [0-9.]*' | grep -o '[0-9.]*') + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release [0-9.]*' | tr -d '[a-z ]') ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) CXXFLAGS += -stdlib=libstdc++ LINKFLAGS += -stdlib=libstdc++ From e8ec9f806bd0051f2ee8d1d2737afdafe314f9e4 Mon Sep 17 00:00:00 2001 From: Bob Poekert Date: Fri, 13 May 2016 22:06:33 -0700 Subject: [PATCH 48/54] add check for background and foreground window size > 0 in WindowData layer --- src/caffe/layers/window_data_layer.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index 4ca8315d791..103dd4b6af8 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -265,6 +265,9 @@ void WindowDataLayer::load_batch(Batch* batch) { const int num_samples[2] = { batch_size - num_fg, num_fg }; int item_id = 0; + CHECK_GT(fg_windows_.size(), 0); + CHECK_GT(bg_windows_.size(), 0); + // sample from bg set then fg set for (int is_fg = 0; is_fg < 2; ++is_fg) { for (int dummy = 0; dummy < num_samples[is_fg]; ++dummy) { From b43c8e43a95608a00033f8f8867d32a201e5eed4 Mon Sep 17 00:00:00 2001 From: Felix Abecassis Date: Mon, 16 May 2016 14:03:38 -0700 Subject: [PATCH 49/54] Add cuDNN v5 support, drop cuDNN v3 support cuDNN v4 is still supported. 
--- include/caffe/layers/cudnn_relu_layer.hpp | 1 + include/caffe/layers/cudnn_sigmoid_layer.hpp | 1 + include/caffe/layers/cudnn_tanh_layer.hpp | 1 + include/caffe/util/cudnn.hpp | 24 +++++++++++++++++--- src/caffe/layers/cudnn_conv_layer.cu | 12 ++-------- src/caffe/layers/cudnn_relu_layer.cpp | 1 + src/caffe/layers/cudnn_relu_layer.cu | 23 +++++++++++++++++-- src/caffe/layers/cudnn_sigmoid_layer.cpp | 2 ++ src/caffe/layers/cudnn_sigmoid_layer.cu | 23 +++++++++++++++++-- src/caffe/layers/cudnn_tanh_layer.cpp | 1 + src/caffe/layers/cudnn_tanh_layer.cu | 23 +++++++++++++++++-- 11 files changed, 93 insertions(+), 19 deletions(-) diff --git a/include/caffe/layers/cudnn_relu_layer.hpp b/include/caffe/layers/cudnn_relu_layer.hpp index e01f568abc9..a1cb29e7c5f 100644 --- a/include/caffe/layers/cudnn_relu_layer.hpp +++ b/include/caffe/layers/cudnn_relu_layer.hpp @@ -37,6 +37,7 @@ class CuDNNReLULayer : public ReLULayer { cudnnHandle_t handle_; cudnnTensorDescriptor_t bottom_desc_; cudnnTensorDescriptor_t top_desc_; + cudnnActivationDescriptor_t activ_desc_; }; #endif diff --git a/include/caffe/layers/cudnn_sigmoid_layer.hpp b/include/caffe/layers/cudnn_sigmoid_layer.hpp index 9c597958b0b..7b3486f8a7e 100644 --- a/include/caffe/layers/cudnn_sigmoid_layer.hpp +++ b/include/caffe/layers/cudnn_sigmoid_layer.hpp @@ -37,6 +37,7 @@ class CuDNNSigmoidLayer : public SigmoidLayer { cudnnHandle_t handle_; cudnnTensorDescriptor_t bottom_desc_; cudnnTensorDescriptor_t top_desc_; + cudnnActivationDescriptor_t activ_desc_; }; #endif diff --git a/include/caffe/layers/cudnn_tanh_layer.hpp b/include/caffe/layers/cudnn_tanh_layer.hpp index c0f0053f71e..59e758d7031 100644 --- a/include/caffe/layers/cudnn_tanh_layer.hpp +++ b/include/caffe/layers/cudnn_tanh_layer.hpp @@ -37,6 +37,7 @@ class CuDNNTanHLayer : public TanHLayer { cudnnHandle_t handle_; cudnnTensorDescriptor_t bottom_desc_; cudnnTensorDescriptor_t top_desc_; + cudnnActivationDescriptor_t activ_desc_; }; #endif diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index 8a7e17c6cd4..a7d8dbbad4c 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -91,8 +91,13 @@ template inline void createFilterDesc(cudnnFilterDescriptor_t* desc, int n, int c, int h, int w) { CUDNN_CHECK(cudnnCreateFilterDescriptor(desc)); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType::type, - n, c, h, w)); + CUDNN_TENSOR_NCHW, n, c, h, w)); +#else + CUDNN_CHECK(cudnnSetFilter4dDescriptor_v4(*desc, dataType::type, + CUDNN_TENSOR_NCHW, n, c, h, w)); +#endif } template @@ -123,8 +128,21 @@ inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, LOG(FATAL) << "Unknown pooling method."; } CUDNN_CHECK(cudnnCreatePoolingDescriptor(pool_desc)); - CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode, h, w, - pad_h, pad_w, stride_h, stride_w)); +#if CUDNN_VERSION_MIN(5, 0, 0) + CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode, + CUDNN_PROPAGATE_NAN, h, w, pad_h, pad_w, stride_h, stride_w)); +#else + CUDNN_CHECK(cudnnSetPooling2dDescriptor_v4(*pool_desc, *mode, + CUDNN_PROPAGATE_NAN, h, w, pad_h, pad_w, stride_h, stride_w)); +#endif +} + +template +inline void createActivationDescriptor(cudnnActivationDescriptor_t* activ_desc, + cudnnActivationMode_t mode) { + CUDNN_CHECK(cudnnCreateActivationDescriptor(activ_desc)); + CUDNN_CHECK(cudnnSetActivationDescriptor(*activ_desc, mode, + CUDNN_PROPAGATE_NAN, Dtype(0))); } } // namespace cudnn diff --git a/src/caffe/layers/cudnn_conv_layer.cu 
b/src/caffe/layers/cudnn_conv_layer.cu index 42c4fd0260c..8bc5346248c 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -30,19 +30,11 @@ void CuDNNConvolutionLayer::Forward_gpu( // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); -#if CUDNN_VERSION_MIN(4, 0, 0) CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType::one, top_descs_[i], top_data + top_offset_ * g)); -#else - CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, - cudnn::dataType::one, - bias_desc_, bias_data + bias_offset_ * g, - cudnn::dataType::one, - top_descs_[i], top_data + top_offset_ * g)); -#endif } } @@ -82,7 +74,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); - CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3( + CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, @@ -100,7 +92,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - CUDNN_CHECK(cudnnConvolutionBackwardData_v3( + CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType::one, filter_desc_, weight + this->weight_offset_ * g, diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index c86c6907113..795e0a9efb0 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -13,6 +13,7 @@ void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, CUDNN_CHECK(cudnnCreate(&handle_)); cudnn::createTensor4dDesc(&bottom_desc_); cudnn::createTensor4dDesc(&top_desc_); + cudnn::createActivationDescriptor(&activ_desc_, CUDNN_ACTIVATION_RELU); handles_setup_ = true; } diff --git a/src/caffe/layers/cudnn_relu_layer.cu b/src/caffe/layers/cudnn_relu_layer.cu index 9f617183baa..e7928bbd6e0 100644 --- a/src/caffe/layers/cudnn_relu_layer.cu +++ b/src/caffe/layers/cudnn_relu_layer.cu @@ -15,12 +15,21 @@ void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_RELU, + activ_desc_, cudnn::dataType::one, this->bottom_desc_, bottom_data, cudnn::dataType::zero, this->top_desc_, top_data)); +#else + CUDNN_CHECK(cudnnActivationForward_v4(this->handle_, + activ_desc_, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); +#endif } template @@ -40,13 +49,23 @@ void CuDNNReLULayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_RELU, + activ_desc_, cudnn::dataType::one, this->top_desc_, top_data, this->top_desc_, top_diff, this->bottom_desc_, bottom_data, cudnn::dataType::zero, this->bottom_desc_, bottom_diff)); +#else + CUDNN_CHECK(cudnnActivationBackward_v4(this->handle_, + activ_desc_, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + 
this->bottom_desc_, bottom_diff)); +#endif } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNReLULayer); diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index ccb955cdaff..3ce6aef1764 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -13,6 +13,8 @@ void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, CUDNN_CHECK(cudnnCreate(&handle_)); cudnn::createTensor4dDesc(&bottom_desc_); cudnn::createTensor4dDesc(&top_desc_); + cudnn::createActivationDescriptor(&activ_desc_, + CUDNN_ACTIVATION_SIGMOID); handles_setup_ = true; } diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cu b/src/caffe/layers/cudnn_sigmoid_layer.cu index e2a4b460c6c..48d6cbab6de 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cu +++ b/src/caffe/layers/cudnn_sigmoid_layer.cu @@ -10,12 +10,21 @@ void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_SIGMOID, + activ_desc_, cudnn::dataType::one, this->bottom_desc_, bottom_data, cudnn::dataType::zero, this->top_desc_, top_data)); +#else + CUDNN_CHECK(cudnnActivationForward_v4(this->handle_, + activ_desc_, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); +#endif } template @@ -30,13 +39,23 @@ void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_SIGMOID, + activ_desc_, cudnn::dataType::one, this->top_desc_, top_data, this->top_desc_, top_diff, this->bottom_desc_, bottom_data, cudnn::dataType::zero, this->bottom_desc_, bottom_diff)); +#else + CUDNN_CHECK(cudnnActivationBackward_v4(this->handle_, + activ_desc_, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); +#endif } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSigmoidLayer); diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index 1a56418227c..e87dd9de0ab 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -13,6 +13,7 @@ void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, CUDNN_CHECK(cudnnCreate(&handle_)); cudnn::createTensor4dDesc(&bottom_desc_); cudnn::createTensor4dDesc(&top_desc_); + cudnn::createActivationDescriptor(&activ_desc_, CUDNN_ACTIVATION_TANH); handles_setup_ = true; } diff --git a/src/caffe/layers/cudnn_tanh_layer.cu b/src/caffe/layers/cudnn_tanh_layer.cu index 89df28a3e8b..6b5d7ae7ea7 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cu +++ b/src/caffe/layers/cudnn_tanh_layer.cu @@ -10,12 +10,21 @@ void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_TANH, + activ_desc_, cudnn::dataType::one, this->bottom_desc_, bottom_data, cudnn::dataType::zero, this->top_desc_, top_data)); +#else + CUDNN_CHECK(cudnnActivationForward_v4(this->handle_, + activ_desc_, + 
cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); +#endif } template @@ -31,13 +40,23 @@ void CuDNNTanHLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); +#if CUDNN_VERSION_MIN(5, 0, 0) CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_TANH, + activ_desc_, cudnn::dataType::one, this->top_desc_, top_data, this->top_desc_, top_diff, this->bottom_desc_, bottom_data, cudnn::dataType::zero, this->bottom_desc_, bottom_diff)); +#else + CUDNN_CHECK(cudnnActivationBackward_v4(this->handle_, + activ_desc_, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); +#endif } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNTanHLayer); From 8730b146b7e19af189b9086e59fd1d5bc4214698 Mon Sep 17 00:00:00 2001 From: Felix Abecassis Date: Mon, 16 May 2016 14:32:34 -0700 Subject: [PATCH 50/54] Update Dockerfile to cuDNN v5 --- docker/Makefile | 2 +- docker/standalone/gpu/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Makefile b/docker/Makefile index 0de887d0e19..3a6575b0c43 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -22,7 +22,7 @@ docker_files: standalone_files standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile -FROM_GPU = "nvidia/cuda:7.5-cudnn4-devel-ubuntu14.04" +FROM_GPU = "nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04" FROM_CPU = "ubuntu:14.04" GPU_CMAKE_ARGS = -DUSE_CUDNN=1 CPU_CMAKE_ARGS = -DCPU_ONLY=1 diff --git a/docker/standalone/gpu/Dockerfile b/docker/standalone/gpu/Dockerfile index 371aad5b1e9..daf6a7223ff 100644 --- a/docker/standalone/gpu/Dockerfile +++ b/docker/standalone/gpu/Dockerfile @@ -1,4 +1,4 @@ -FROM nvidia/cuda:7.5-cudnn4-devel-ubuntu14.04 +FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER caffe-maint@googlegroups.com RUN apt-get update && apt-get install -y --no-install-recommends \ From 1c3af7078b64ef71a5bb0c2cef6fee528917adac Mon Sep 17 00:00:00 2001 From: Felix Abecassis Date: Mon, 16 May 2016 14:35:40 -0700 Subject: [PATCH 51/54] Update supported cuDNN version in the documentation --- docs/installation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 1e29a49d82d..4aac7c42d27 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -40,14 +40,14 @@ Optional dependencies: * [OpenCV](http://opencv.org/) >= 2.4 including 3.0 * IO libraries: `lmdb`, `leveldb` (note: leveldb requires `snappy`) -* cuDNN for GPU acceleration (v4) +* cuDNN for GPU acceleration (v5) Pycaffe and Matcaffe interfaces have their own natural needs. * For Python Caffe: `Python 2.7` or `Python 3.3+`, `numpy (>= 1.7)`, boost-provided `boost.python` * For MATLAB Caffe: MATLAB with the `mex` compiler. -**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v4; older versions are supported in older Caffe. +**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). 
To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v5; older versions are supported in older Caffe. **CPU-only Caffe**: for cold-brewed CPU-only Caffe uncomment the `CPU_ONLY := 1` flag in `Makefile.config` to configure and build Caffe without CUDA. This is helpful for cloud or cluster deployment. From a8cc860d6bef79edcdfa07d5da4195ba67714991 Mon Sep 17 00:00:00 2001 From: crazytan Date: Wed, 27 Apr 2016 01:01:30 -0400 Subject: [PATCH 52/54] handle image names with spaces --- examples/images/cat gray.jpg | Bin 0 -> 92726 bytes src/caffe/layers/image_data_layer.cpp | 9 +++-- src/caffe/test/test_image_data_layer.cpp | 44 +++++++++++++++++++++-- tools/convert_imageset.cpp | 9 +++-- 4 files changed, 53 insertions(+), 9 deletions(-) create mode 100644 examples/images/cat gray.jpg diff --git a/examples/images/cat gray.jpg b/examples/images/cat gray.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43c5ce377167a49624c3e9d61e83a04becb8df9e GIT binary patch literal 92726 [binary JPEG data omitted]
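Note on the cuDNN v5 activation changes above: the CuDNNReLULayer, CuDNNSigmoidLayer, and CuDNNTanHLayer patches all apply the same compatibility pattern, since cuDNN v5 replaces the mode-enum form of cudnnActivationForward/Backward with calls that take a cudnnActivationDescriptor_t. Each layer now creates a descriptor in LayerSetUp and selects the entry point with a CUDNN_VERSION_MIN guard, falling back to the _v4-suffixed functions on cuDNN v4. The sketch below illustrates just that guard for a ReLU forward pass; it is a standalone illustration, not the patched layer code: the CUDNN_CHECK wrapper is omitted, plain float scaling factors stand in for cudnn::dataType<Dtype>::one/zero, and the macro is restated locally to mirror the one in include/caffe/util/cudnn.hpp.

    #include <cudnn.h>

    // Version guard, mirroring the macro defined in include/caffe/util/cudnn.hpp.
    #define CUDNN_VERSION_MIN(major, minor, patch) \
        (CUDNN_VERSION >= ((major) * 1000 + (minor) * 100 + (patch)))

    // Done once at setup time (cf. CuDNNReLULayer<Dtype>::LayerSetUp): build a
    // reusable ReLU activation descriptor instead of passing CUDNN_ACTIVATION_RELU
    // to every forward/backward call.
    void setup_relu_descriptor(cudnnActivationDescriptor_t* activ_desc) {
      cudnnCreateActivationDescriptor(activ_desc);
      cudnnSetActivationDescriptor(*activ_desc, CUDNN_ACTIVATION_RELU,
                                   CUDNN_PROPAGATE_NAN, /*relu_ceiling=*/0.0);
    }

    // Forward pass: the descriptor is the same either way; only the entry point
    // changes with the installed cuDNN version.
    void relu_forward(cudnnHandle_t handle,
                      cudnnActivationDescriptor_t activ_desc,
                      cudnnTensorDescriptor_t bottom_desc, const float* bottom_data,
                      cudnnTensorDescriptor_t top_desc, float* top_data) {
      const float one = 1.0f, zero = 0.0f;
    #if CUDNN_VERSION_MIN(5, 0, 0)
      // cuDNN v5: the descriptor-based call is the only signature available.
      cudnnActivationForward(handle, activ_desc,
                             &one, bottom_desc, bottom_data,
                             &zero, top_desc, top_data);
    #else
      // cuDNN v4: the same descriptor-based call is exposed with a _v4 suffix.
      cudnnActivationForward_v4(handle, activ_desc,
                                &one, bottom_desc, bottom_data,
                                &zero, top_desc, top_data);
    #endif
    }

In the patches themselves the backward passes follow the identical #if/#else split, and the sigmoid and tanh layers differ only in the mode passed to cudnnSetActivationDescriptor (CUDNN_ACTIVATION_SIGMOID and CUDNN_ACTIVATION_TANH, respectively).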
zXMWGpv=B?YlJBr99D%n1eEq$hSTOli)JYZiEkgCDj7c2Touh(Q<+qT`8C!_(cm95* z*HB(tQC7#}{-RDrMV*|MPzWl4izys;9__aysLfC6wH+4q*HYtNR+2Sg91*+B;Epfb zNcY%*`ipo6u8;k{X_l(8;k1qRat`Xzx{%v%$f;6J%k$&O>-ow0b*yzYdQwAw6tj%N zp|0_8E#2T_cqQh$ob@sQlCe3WteGlkSVJnX8wBDPky0CR)!WmLMJweo z(&}A0@->QK-bzeD*akK|+S7(zqeJc=8-U$UYJ5DlG~JO7A{ZVZK?--V3z-h^qU=MU zJdc6!{U_<9^U>-w$mc3pIK+`7inLX%yvnNK!nARz_ki01K6!EJkZ()!n)0F@Kcw?^ zrj9U?ESTw}C73Git;du^?Y`E%4K*nWX z(g+@SuP&NT!-wIg)Xj5|7;DrnVzJA@8LVY15j-r<;Tafv6a(%)IqkO*^Va5czxaN7 zU6Im|>0eT8Hl)Y3SmBc%WpP?K#H(kx7A8`EGi|)@vhn@8F<^hfTkv6f9~MFkEgWTu zWR5WWCRSaJ&G}-UElT}_zm;T@ZYz(Bq{cf#i4n}qzzu?NCx4&q)gv^y9Z9P!L2l#~YR0Q7 zJvn1WhA`cv_3;mU$*@nbY-)Mr(QbU%duMbwVCGLL>q0$sT+{W4ZmDU@od4X)|Jaj zgJ4ypVm~psozJuadL~rIW_1Qpau`XjNl3#Oj>K}x(T8Q&5{JRu{Qkgyzgd=aPI^0n z^^59QCeuVC7I z)D>aVbY<^QhSXKgKC*VH<#NTi#a6}Gvm}+IjHt0D%u(trWooztkcvc# z51;y;emW3(#b8N_zX6Ybrj})r7Cu!bB=@#E76p^Z`0LJJt=LcE{-IrhqL{5ok}GDh zA*;v(afZT;xA!m4+;RBn!1R|?Y2QrnUrlcq{{X~c#dq($g)K|ABjn);@K~ShN5}H{ zxJT;N8(ZnQrPDKTwD#Co?Sitw+9bDZ$lvne5CYD32&=gFFGQ^o3O!vrPad-5?BwRZ zdU+f_zU=+La@SC|F&t=yI=k}Zs?K=t?QHa=d`>>BnRww@aoCwMH7-iP{Y>}VcHOPa z$7t)3EG&d{;rCXRIPBSw_VyqD#kM;j}h5@v5*^uj||@)s|*NWsyR!R7P zM0=8X0oJ(muDuJh0rIr}#--N?!xUIJRmtI9kKLB_P#B^}0t2CM#GwJO~ zr7{vnVPa!%htxTLYK4ixB=*O~!{N4Wi8^OoUcZ>hRL)~`-5eV5k}RbRf+4-&DBd4E zr{b@${$4|uo{G5pct~l>4l!v)^DvPunWAmt3(d+l-?8!RP(q$ z8!q1*wqdcDPfu_-FuJGcqRrfcy&BiA3p%usf=?xH%ap5lCw=&zj;ZkhR&;levU4Y#WbH1>Ryu!6bzcdzwS@ z;B3Bq+jP!oOE{lQAz5;w9NXFg6AHv`MutEPuNjl{@f+>9_#dl&u6ca!gUM8qYS?^k zHqE$M)u)LfkA2FhP|^Si+_6*V+wi?AxnE6m+vJ}ocTv^H&odGtr8KJ?h{dEVRb*k! zRg?mwiwEP-pVh4!e>sjCdroBLv%Gtjtp#_GA#Kt|eYnm7@5%Q&2J`&AP<=nrUZCkp z&q`bNX{c`6+D3^(9zZ(HviR8#&wvAUDVV`cOCt@urYh#9+557}D6N@QVk~!vabw>A zY&Ilq)u&AQ_ou1Y{s~V`>gpD206w1167_15aM=Qf`ce+($N-JE=&MWAO>tt~S?J(i zEMe_;M5{VX>&UkbM!0VPc_59C{&HHsT2D2YY{jG={g#ZDd8*qe8NBW}1MLI)dF9n> z6OPq+S(_mh2>lvF54U6r>XJbzBVpQokQ^A_^B${8$dk;RSo$7Wdj zJ4puRJDm0fNMq#qZHM2c1JoZ(EMf#br;Cczc@Y%L6z7idp(KO$@ymZ3 z9*NApyXjMQ7-p}XiZ_NDqG+SAmN*g4xk0`nFGIavOg9l^=|uz+#ewM>f>MV z-|1y5$dxqp8KH0jFt1y<#O?`DskqqqApCSs#QGsa6>h8G@OaAgQ)iMXApnHf6Uh}v zB6e-JJ~}Dh(i(cy5Y-w>74?0-26*a6xN-u9-H+6R%zz!#Ez5f8$_UfZo)SX2Y zG{~$Dv}ssZxEb4X#fLDe2IpXRJ9+l)*3tbB^(z6Lz+$ad)cSc1no*^g=dlXd9LC4> zo%i4N{{SxC9J1J*C4Ly0?BjA52n8M+mBCWOVn-L>Zy#^B>iOtjm(zM@A7ZtsZAU^X z@&dveK#9G@m3R&JAo(BFx}@d(QpDq~)Wn*1slj!dY^9ls!`nl@vzLxXZTx%>^>rhr zwdNlEn=>w{{XNXj)z$t zRgR;LgBep31JAwuT_b5 zW4J#gcs_pK4?_ID2U%Iv7>b$9e1m%`mFg@K*&gTOttQ8B3+1=m5AD{{v_^_uJ?Z== zz^fharAa4a8pct$1~Kqo)OO?jdIf4p@{-{$*@731Y~Gf#$|Nfxf+iu^8N4EpJ_pB1 zMzOVQ-L(W(WNL_HS!vBFSV0^)Ow%^+zz;to`gBnAs|hssF((C$ryS9=SLeTCKHSA6 zlYpEFM;g(xtM+$2!n?lRHyURm_40X0sQpE|bTR(`PbGHKww;-0P!8`cnz<_l6-L}gY_`i;0HkIvF^@wZfOQStHRvGn$usz4Eq_?^AdZmIfu+J~l)u066@%WV}}(PCmu zc$2t4CP~&5tqeeN?>$DENe14;WJAZ8>5HXyrJK15ki}jqnG01fz`k^g+s(lVd%#|4l80nZY z&BQmlK3kAOrsKydy)itGqS}F^bG&4a2Ov5;ylQ>U=Tl%(to@hx&hn*0~G1lG7~sOx4VBISAy( zy$PyKG({e%W#)U)Y=k$#ALi;Rub}laO>0x=2rkUjN-|8gAT(?-UQXgAa#;el<8Ryi z-B$G%rWv}FvL^OxCN7vhu+@&tl#&-w?H+6mwjV0Hl0Fx1uNqq?oYXpZ1qC~q>$x0Q zN>WQf*i(@dh|0vOH2u(XnM#aO0aX3=ZYL z)79?@WAPD7w~WnAVy+^zK_MvQF_nC*uD>J}9DFyo=lQy%>hD;85;}5tskf&%T|)#C z`?Gg)J@;5a2Xu_b%a?C;KRa^$`o8J?Q|jJoxyA?a=y<;>RspTFInx*t4ctJBwE%k#Y1BA_f5Wdt+_e#^d#L!(*v>la{Xe{C1~3B&s?HSI#;m`Lg(PLzy&`m<6+1A z$D&pnP1mbDH*tecWb2sG;763&b>jWS0a;hKCz1ml{Ev^fM9R9;Ok-JQOeJeKoOb7q z3c-Yp%z&i_AiEE1YzEvs@A`VX<6_de>|L9e^!&8uXyq#Iq9Qz<-?k3zx7~m5*LEvK zVlBeYM#Lg&RO9U$;>tqtyXSr%ju~qGb!DJ6_0g4*J8duS6|9<_O#A_ z#x9E&(^MfFu!RBAl*V|&m@9K<>4%?8xPeX00K}S zpSQc?uDe=uTI6Z0I;x%Aj3}s)&*c#Yz=bM&gd1!QF60*7I_n)VcZ0qqK~#hRIj1w|h-3CZ}d8eVa3g%E!NMK&Nr%amfAKp|($3`a@Ep 
z(Yvg&nCzqxw5g1?V=Ez2dBmS2sg0O_O~1|71%Fo-JXp!-ZBp0KgH>E^Lb6Q_6XgeQ z+6xVbgZm$MSeidrb$Jxc##p0xc>sGCJ;I9_UAbVU4lFSE% z-;-=lpYwGi>CdE^Z`2u0{Zn_bXhIyGU6?OI#a*lWU0JLL_bcpFgU79hdUN=8^ur}y z3}~BEX<8A|Yb}*n>RiU%c!~o(shf9=WA>5(;!u0IsOn=q_+ifAtvtFyOh#Uc%_c_H zTOW0L{F1Vpd=ya84S*T_qtuidSYT&y{ZuXVS`RJR`mn#5B5 zC5r~S$0f;ZFPM-X++3T$Db$8zAr-juMff#blrT*3ui-L z_`rn6EZE96yG?FZj87=+_V$a6k{4#$INNi`gRo`mQ+e-0a@i&RAiB+%+mXOjTNb(T zJa>hV9f2dmZ?Qc&UxQyq)6@GG^ZC4llL;B@JeP~}nAD7j>qP|a6iUwPyEnEaUPs;5 z)39-jtoXTc$^1ykJv(hmrx>2Ze7V-44|8RuiEB=uQ9AO8im zglS8%WhSqWg4J@fwYbwEa>O|FW?%mRWf4@fGZxsYFC`l{PA-}B$64c-7Zv?0k;+QT zUOw7KBv~nu(ToK4figd0K0uwF0>a7a*1%eU`UbW|=FsbZIep zxQjG*pyc}`u^vd{i+y zqC64Est;&NzZ>n$55HP({B>vzN0Rk&>C;%eb<7pKT}$~`FV`=NT)n!hEbVu-A95*V z3El1vLwP?=`}CDy@i~npr1JWMMc2+tdlw8-UemaCu%l*XTN2Wa(E?bN!)yQ?m-&44 zTB+);N7g!`#(M_;09S#vUmqa&=|yUG|edp6hI|#1kpL&M(HKu zm!(^{%27`g6)1_gfkeeA9`^a)@Av76tCG!MHS!OVwMqeGp<6WsvBP3L!iBR{+hiw? z>K@U*FP@GVOB$K4FZ^JXvRG?Xm1$%#5=`>P!X-snV{aU{W%1zby-nqG#zI^bk@VFl zU~0QdEsTd6mgk5P+*Wy;d8FHk0Dqfp`iRl`*FAJPMU^a-_6nN}lLq8_xRz$M3gROhRd#RIpEN>tbcGDp!hYGer$fB!C-&A0f8` z#>e%3p09ZMG|S|!!GwmMU8vU&a(a&xp=ES(9zM`x^1gTS0+01|1(Me~PYG_EdV?s` zFA4tu_!s|evm3frJickp|g{kpx|*Lrr_RlSAH!EVL4Rw(PkDu|h{r>VvkWzplJXMGDIPZRclbN;<=2M# zUY%+G08sQ~z5H!j`i@9syY=*n!(m=+xx=(yqlja<+j1M*&K;5VTDz@ONQ2*!KHDKvrG%8!yjW^7I4n*^$I!1>HyyUeYOHC{f!yRf3}KRTs|N zdHCtqtLW`qZa%%b3rGnV2cAM5fY^Js^Rn;beYX9&vuTY7TPGycZcj#(eAz{Wa0;&@ zyM+tk_g%lY>FAGRLgO14wdvTCBq+|R);Sa0VIc~zjfWLGd>{N>NZ7>7fhi@#W{$)$ z1b`0^DIV6@RP7-r%I)W@e06@r83CB>R)v^8J0}u`XN-VWIX?T3XdeTh->LapmLbQ} zt3oMlChRP#6<6D0S;UGr0C^n$rKJChxJVhsp2%wTGP@3K9xfxjYqPS>fuk_XebKY_rVv3`1J`Wy%TC!aBVq13M z)maJ^a$T4K03`q0+Qm^iR!3Fh?7ri?=hz*#@Ol@|>E@%c6gF_WqQ&{%Na4SbwP;Bs zOsu8m?oQjm7+w9O4gTI+p$2zMYPHqXg$&-F)GWwjnrW$5wA7VZl(A^kt8N1iBzfQ5 z-A-A;TEqA5U@rEW%u&*wmDuXkq-{s;vt}OVz=7^42gd8x6|Gg4r;9Y}RH=~4EFl9& zli<%UBgy{&G;D~VZ^!*V@6bor-&OrZUrv6H8>RA@$=;l``}%oKLD&e-9h5fUhYUV` zdPuzo^`}~D9cfEV>rFYTu+M%Kq`5!K*Q+l<^rnc? zweXncjx4QuSlX6m?$$?u%`9mB*ru`>`6zMi*bTpKw#VsLkToewj!}(Du2i z0mXR@y`j1uY5ijtu3A>i^l4)A6zS?5mJS@G)Sl$BWO2@6%3qE&fEebAO6?SqE4L=! zX5-ZZ)c*iWVXLJr$e60tg;?Y@-lT4kA>7E*d%*3fVZX-yJoV-8@x7=r&}1@|tIHhs zEB6wbOgPuEBvOGA&noT~yDr?f{{Si9&yJQYGp{DNnk!G+y(AigCb1jUD%OfBD#;LP z^=ZrcRvvy?-GC#{$6HOy&CdltTc7LsoE+EY{aMs{KtPgY~bjxUGtaqE?ZYt(;2&%tcBSklN|Qt zfqs$SbZXt`%2+VQ{Nn@M0V*~fQ#JRl8gJFTPnN`JYn1BV((5%nRXmhsxg?XzAO7s7 z+~6Bq?JR@GormcIszuB!_zgRzwQgStN)0b>Ct)(zEXy7?fo*M?)Q7T{iN_W zBU@P9wxY&McG!w&ara;in`>4G6;cI^{&j^Rc*Y5R{@oFv^v~4KN^fJb+PYV>qqPQ3 zym?%8DlhzMgjd3IziyLCMQBxcmu3mLW%l@Pt{VL-;-Z=K2C>w6+;mMDuV$rXSZnO6 z{1O>eO@`!nmQ_C+ZPH`vr{ZH>OEfxvE1ooQwWo@;i+QYV*rhRsRC1K13T1M4_k31?v?i^1v& z_F;!g#8`y>7Xqfim=@R*zeFrHGuA9Hp$<<*XQDueR>U-khRoAAMSAkg4%^6}jkX}C zyQXgq=+>at7+YW>*uE8HW&m`nrqEUi9|Z$78YioQds%+S;ImlxEpVv;DtMhU)ikPU8JOhO9cf9Y#7CMu{<*JJpSQ**i86Ij>GQ)OPF! 
zWg)E4Qwznux8AeMI`_>s1zE11!A&B<)>iLKC&q7I%vn>ok zNmig1=Iuo-7}a-p!U+|I?7Ye2?FGM^?egwV352l}Qep&EXGDgy)v+>6wR?k%bJ&q1 z0!~Bxqiw$H)#>iyaTOB8h8b4%AXd-trsD6#%SAFn-jEdFH?PO(v&Bs zUnP?N0Nl*NNu4ZNX~H276ErgOm&c9v+z*bnarig&lT~V*jGD(=;;Naj4;Za1kFc{W zZk%k%6@=v;{G0Xo1%2Im9`vtVMe5}YO{{t#$8w4ra91naLmLGw8Bd>Z-*9#XM%#hY zl5toZRy4@6{Xu9}1%g?*0K?ijF8gosu^%U_DKws+p!j@~ScYZDNjbY(NsLKV3hY7J zK*w)CKl%Li&K+-}w7t`hitTYpgp$Nxc?47c0F-P#Hc~u(ul{ckdY{$&C5V2f9fp?a z8?Sa~=6J{lV8wSIe~%k~FI78PZDW(hoTR#b^=xG*d%sIcHJWMCPlFPcCvvBC2gma$ z>X)aq#wSwhI}qe@mg!PJC10tt98#px@$R54=Zd-F;CNrvxE|h_I=VkWX`MY&_(q4D zF{>Wq!wzxeT)?Xj9nwH5?(oRoJ3+Pt{kq|T!eFaraf4Cj+Z`rtUhZzyimQdpBz?mg zQjTd)wM1jy03Z){x2v*xv8S|-A2X$NwjQ<~8d>6%FRV7Q2UO$4$Mb96Nfj$xRy>K@ z-MHvqlJyHs=5V(2c^vj(GL~|7Y9^S@VjkuqI8_XDT83#0IV_&-{{VLO`+Ba)>pDK1 z{{V*O&eww$$Pxc-cCVw17YX-^;`b{iLYDbi-OK3rY?CUgt*Jn ztzKUIVnNx4W6c$#Ra6IWKlpkc*L^`AVy-dk<>}MPWI4LxMPrSlMH}g3y?WmKLP({U zgXO+<>kCy<{{V(83vlarR2b)T41pNDyvAzcK77{7Me;WCe@|Wa^0^|G z4xaS!g-dWVRhu15;?>?Tc*S~YNM-?#oxaQao`bEb=v7Kl>n%T#ud1an#f`_yRN2Hu zByL2HT7CG+L~1?GRG;hTtel`(Nb(V@ofu!v>4l+r|`BSk6`MRzyN`&_*%N{1Z+fg_*>?CxR zD~JlW)!de2<@PGufLr{(9c?qw4S61Nyt1wOuPiWDX(Hq$mxW5|jV*$%K}K=9ug1&P z(|--Qui`$RYU_Gpx@EH(Zr!ZY((cCr$n66gqFGs38GwuO&~B)a1@gzILrGY}YYhci zp$io}o;7&KGdfOK#Lrvr1!~H%VH>$E&oOP;j z3iPz|32e`et$MTsb`sYnlau**EOKH==yCX@*EDJAJdNZ^)z%2+?!gR-ptHrq@0uMS_1k5aGYJyp3|O*C@iDA+SY5S7=nd8Co1iJCBXG0n?m@-}WKrC&j6ooQD! z7F5x*a@$CdRfdgk&9{ETEKyX5INC5~W#=O7LU>Qr*QuYv%=qmk=%$~~>j~;t$<1C9 zm9I?Lzh&-Pk~qyuvL?@A*1?Sms3={yF8enZD@L{RtTm*PNZ-REyNZJq2W4zelD7qF z_HqK$$^QW4$g|50ZcGBi1MwaQTM_gv^yabk3pJ+}3)M2(B0N>hor)6J6ky0@X?;v} zKB11;1N0TRkje?;++ZDm>AUpPC^c4}t*PayZmhR1<>QVElC!||YGo&ay}F(T^=2@~ zDi#V^A8n4^5}n(=RU9cu(YA1n9`d^>D51Hd@kRijVc+-PueYtiLWRtQuHmanrHhuk zQAs?e3b4G>cHoIwr+(aQ0w^rR?4xtHUUU6k(O#VTi|R&GQRuBBqqP1@GF_#mf}U|h zQ^wOXPU|W)f=#lt&{6BhYl9t|KaP{zc9WjuS1{v6yp_Rzm z##;*%owF@rWVB(O2s>=OI~Jv%6PK<0DY9MXuOdZC3yRo@%`nLY;0g$Ft0wHMrGvM} zLb}-;by)KA)zLFzu2{Q1Vtnnob&`foSoY>xT5~CpW06@@xex9wze8;9Q&(KOIL*&y zjaw^Z+Q-zkmod_-!X<|#40a`zt;C=xK$`_+Wj@}!bdBupCXmYIG{tQ}ilFmBPUJQ5 zl+wWM8GfE&LR9)$b}7Fo%duY{&DMTT7jI5t;q|VLM@8UijD#tV)A>P7?c2|z3liPH zb3ainx{&sxn0XsWr{}1fy3lW=c@#uvyA{zPj)>cG zs=Pe>aq81k)9p*1!o#g;WAj!h!yhUlsC8h-3;^onCSg& zYAAKT{8Mu&9Oz-IkZg5Kja3_QjaC@Xec16|J8$}%&rDyW--qu@biO60qky%D$z>;K zq#5yFFJMWqDQ#q3-mO+Vf9ajNX0RH6LSyTu*o+nIo;7SV)~!=34L6QI465-Kl0xsx z-M;?s-Fl^J3pKGb*s}&dPiCi(!zA<4TNwARG^I>&J$o?mk+`zr{_IG#8j57 z_9^8%@Xr%XK1U`d#9^f1IM#S$jI2?+gUfD1&-uEn>aS8psfLt!oGunTOo=JCm%`_3 zR?JjW8RLc#^qOW^UzXXS@o6=9S%E7>--O?n2DgtRfiXj$VG;|hGy<| zY@CK9PN zAzbYr1qf5)e;qKx);zzdk7V>QiB+;+miRJviZThij?^3@P zbm8Z!vN~3*^Fm_&6NSdtFyzny5(zD-Egf_|#$uqb=&4^&`p=uCEgKYdCT{tKmc@KM zcx!zGd!A8JyC7g;1hL$Zc3wwrqAL1%lzFj|%hfrQ|Yz(lh?*IYN6{%H!`)&ntl4qYM8Yqqe#hRE2}Jv@BW_`+uvhtuc(%8jgDw^ErF@2`D_Pau_9& zJZ3*(u`e?KC6jVb5x4{Tb!TWX>uxM`s*=lT8@L2QmtCmAzLqL<=8t~kG(8+kIwxt`HUyf%Ptce z7%V5fmB|7a_CP|B#H145M;@-YiyBEMqb^4M3iHJ$e#5>Fd9s%-R%qpxy(UV`vDCFG z@!aA#Atd#YtbIsoxh+e3Lq(0yF+DnRX1|75sBh(Eaw3jPX4`fvylf=cClCjp+o0}S zSN{OTi~Kfxy%^%g*Rzl#Cj7EiCcK@MlM!l1-bmfzXOR%?vv~IL>#OK8IZQ1qnYT(T zWlVxt$Jp;!iWt(gVm5~DC>h+Am&o^SO8dRNbG??eC7iLhLl+)BDah1p!D*@7UepRK zPVXx>31o6gENEGN{bAuXt(*1l77)J_(jsnK))01nr*@p;*1i7V5~*sl3Ji0#F?psupW(dC{VC&@v# z>Djz>X2AMA9YDdXx@N$SUn!q@Ot?2j^&lANQvO({LJ8y%*Q=WNB5@b@FgR7KDdS z)T=$n)<}f1LkuemNGB}I`Z%+vzivGMGM<^&^w+hEpAnav30R5KHrNU3=CQHtor-y+ zj6c<5*-G)WyKqM3C+X`mO?o$h)OguqdBiVbd3*88ILp~+tUQw<(?(`;ts5s94&aaU zj-qn^0EVno8jBmJ7D}||%^)~R5TyxjUYd|CblIq?4RDbbcioE-@DEkY$Kk)$-6;l3 zFX;wKoEIZmp|ejZKk(XDmN?-(QS^sZ 
z(~eHfyBXYVc{0%5PGzmfR;D}`Mf+^a$ozTtba3?3{86Z3AW4BtW{7QZ^J#_UG1h{<}Pdp*b2P;h@Ksd}$aTwDelk54(zDl)W&Z$-S(2@VyB-yyy=NN&Mr~EGX1q#F*w>LJ{{WbX zkqd@NLN`Y&PpaBKQfZw(sc>0}_GoIXd9{RXpOo`S6m4E9i+9(;=9{S?w^tr>o*2Zrz+W)X629?HG-f?C>D$ zr=pIO^zT=_k%_gb>QYvj(%B*_p$@+@1B18=fFSXo^a=>|&fJ13-lJjz{yV^&1UWwMs^?hb67IKSmB zRi64P<#I8?{$s7nbH6?Kw^`Y*PU=z1;B$G*z6yJqf-0$|yIN-ynqN-Utjf3a{y!rn ziDAE(n&a}ZFp(y+@fR)|bWxMh%}eynmc#3}!$n$SJseh;fh%G1FJXYOOF2UHGs0tu zxd^~2ptrp3k3F|0i=!|RhHrb`v5OV`3#f@TpMrYA3YP*yfqEEXp`Kg-nSb5-ej zscMydM@F+_DfWfRbT8MhYOzBlQx$}YRk2B>3)z@356hPxgEhTK)LOO(B+-|k%GiRd zrMYm~%CAM18NKFd=Eoy7{$5;riiQ4OyR*YwE7r+s{aAF?BF(~|#I55fWU*CYs|Pz$ zPF>77+oH6CM7VKsgRDLYj1bTT$9VK3Hc3d34FWOmdE zq>y`E)wUm}sVe@MdSxx6Eo}o09L(HliK{T43-#4|ajaQC%u^v@!DE#Ihh4hOn@oCg z*wd%-*w|}iezEGe*5jZ^qK*_cXM{Zw63(%rsPVfqKMxyq6`ZkNt_LEIrt1A{bQ1$Z zF^<0Y<9OJG&N_t5j`G}9iDPGpWmZt;K4F)tCXN39+pR-d&I4QN9VbRzJz|}xa#~7Q zANXRHuWZpbClIZ;19E9>7vFGLow;>U%<4S;fWh9Q>PN3{sxdKInAgbJmrrFO#j8ZN z;DELw{119T%)D@cxm5#xE7f00dZ&T)FCF!I6E$O2>pH7;jR~6s*lP~cOkM3uR^pXv z&XMiiclvnUloxJYTITfw)R?ei1z%rh{V4SV9gs-o#cFLHf*+u-#f@6|icm?9vsTm+ z2XgGr@c>lq)t?!ye-YEK(#K~?Yi7q}>y1gD7@8OANl{Qu5Aacll^Jcv9^aWNvc<&r zO7;A`tHY?7zlgjSI}oEGidQDLXnAI0n<_7#9|SM<>vn(PmH2$tUZ!cxj-$@vMjuJp z$U@zWv1p{qQ+D<}7zy2?Vu#2(uwXd+^}oFlqI3_Z@yAW)rm#XCp0rfi;#C_eMrDX` zSxfO7u}~NK^hj+@fu01pB(h$bw35k>#N$fE0>l$%HyL>MT~wkJh;*Q;e9u=!fm+p(pv^ln*}Rpb(_ zySHOQy)}@Z5wK?C=c_!FG!~u@xw0CXz@p7|douztP{r>d*lPMv&BtJmIq7H;}pqeUJX_LfJIO4MbJaER3Ynstyk z8?fD!l^qzdH+0=hgiB8yUbaxEHUt#}HcmR%-ca(fa_pLW`vYZ1eG)=w7=kU*Wtlqrm?Y^Sz3lvD2NCdQ14?w)Dt=2%xbE!4K{AwA zM_^V|*-IW<^^>Xa*_@5~$}nOKbxzz?gfJzDPBu9CKh{P9OyOC$0Z(FNmx1KI?!J0m zpfaADYg+awVyIxTdYNvdnVT1|i-?L_N=0cWnxu1F7TyG8L@tdRHwjy&3QZ*hbKQHH z*mLn;4+ODKW*gBJg`Om;)@)s6XyvZk5K(tA?pNA6Px1KM*qnB!hXe)|d_HRYc$Z|c zBr)Qt*>#5y`6>*?jJwOiJ`u4I#;y8#`=YdFw$^^4;`P>N7BjkI2biB3inC%niCen# z;)gYFm86K;$iXPu7KsEKFY7GFru#3ZX=ZUvj+yS`1nIhDVx=`ZwdQF;JhaG(+9^?$ zdr}r|AIjTxhZct&(2&KL%Fm2S_2@QKYLCY+QKiwEJQP$X|6Lc_xIUpos%7>UHbCDflY% zt1K}&9apDxJ~K?;5jtutW>s<)Ber0YS)uR51%Y%o1dXFtj%SdFw&2gz0@wIYS+DcxfubsTn54V?$3n#6V65tPRTj#=&H>e`yHvud+6++??2$K*`T zoEy(>RFRX;-APcy!;XrL41F3g!+oZmMT#)hu=Bzl!lYHxc0&|yVJ#yr+*|&qR2@ly zv07^`uPv(f5*C^m=Vi*72PzVsYjiWLR|TuviWW2^rF61>>$HL7H_ z1}bbn{UxcFlEP0mm66;;-P)LPz~Wb9O{7LsiB?Gfqbe_GJO zUGBVb1;)*k#7BzCLe+yIZbUfz-4im(BUs0F#}!i&N_TASZKbff%Q0UiAEo;5{{U#2S~RgU zW^ZZS^hsAU6@`-rRQqCTbsdCnq z)jD@T-Nxke)~mevD|K>ufK8G5Q&*7Yv#94-_Tpw!*M0pvgp&n_`ppk(O~aCcw5fXu zt8l%A$=gWk%PpNNYSoMQngyfmqr+*!bMB%=$bcc$UY>f{q%}oso|nVqt!W$URtmU0 z_3S1F4I4ZvQSZ>c?nbZ6UNOe7$2?**lENr*-rlqEx;t3mFuC1Ls&tmH)R-o!@@i~0 zVr*>;&Y!mLZpKG1ijd_dt9unJe@-pc`e`OsjTRD|!gMRtSMZ*m`c;3{YV+D%4UCCq zj@#+GPPxphSW4G4!NOv(jUanc$yx-p3_@C6wv&&SBA)ge}5I!|ZyH$^IR6R#rV7Yft*YpTS9|EnUIm zaoCejOX2L{F4m4J_N082E|+X~rP?_x6@!FL$=BpqFdv5=Ry^U9lmh3;c(9Sd!oHPmaK`PD&C>>ODqscLoa1D z43_OpkAW#<87x&xwbs3W_G*1aa`Eh_oich$OI^?D9Ih`%)ym^E#!*3!{vp6;@lA!N zkqhoFbo^3s65qb2iSe-L!sTs=H%|U+`nWk>7 bWv@?B+jK_~y&P~SxO literal 0 HcmV?d00001 diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 62fda4accce..56d354655dc 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -37,10 +37,13 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const string& source = this->layer_param_.image_data_param().source(); LOG(INFO) << "Opening file " << source; std::ifstream infile(source.c_str()); - string filename; + string line; + size_t pos; int label; - while (infile >> filename >> label) { - lines_.push_back(std::make_pair(filename, label)); + while (std::getline(infile, line)) 
{ + pos = line.find_last_of(' '); + label = atoi(line.substr(pos + 1).c_str()); + lines_.push_back(std::make_pair(line.substr(0, pos), label)); } if (this->layer_param_.image_data_param().shuffle()) { diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp index a4080ccd145..ce5e0bc62d6 100644 --- a/src/caffe/test/test_image_data_layer.cpp +++ b/src/caffe/test/test_image_data_layer.cpp @@ -34,16 +34,24 @@ class ImageDataLayerTest : public MultiDeviceTest { std::ofstream outfile(filename_.c_str(), std::ofstream::out); LOG(INFO) << "Using temporary file " << filename_; for (int i = 0; i < 5; ++i) { - outfile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << i; + outfile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << i << std::endl; } outfile.close(); // Create test input file for images of distinct sizes. MakeTempFilename(&filename_reshape_); std::ofstream reshapefile(filename_reshape_.c_str(), std::ofstream::out); LOG(INFO) << "Using temporary file " << filename_reshape_; - reshapefile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << 0; - reshapefile << EXAMPLES_SOURCE_DIR "images/fish-bike.jpg " << 1; + reshapefile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << 0 << std::endl; + reshapefile << EXAMPLES_SOURCE_DIR "images/fish-bike.jpg " << 1 + << std::endl; reshapefile.close(); + // Create test input file for images with space in names + MakeTempFilename(&filename_space_); + std::ofstream spacefile(filename_space_.c_str(), std::ofstream::out); + LOG(INFO) << "Using temporary file " << filename_space_; + spacefile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << 0 << std::endl; + spacefile << EXAMPLES_SOURCE_DIR "images/cat gray.jpg " << 1 << std::endl; + spacefile.close(); } virtual ~ImageDataLayerTest() { @@ -54,6 +62,7 @@ class ImageDataLayerTest : public MultiDeviceTest { int seed_; string filename_; string filename_reshape_; + string filename_space_; Blob* const blob_top_data_; Blob* const blob_top_label_; vector*> blob_bottom_vec_; @@ -177,5 +186,34 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) { } } +TYPED_TEST(ImageDataLayerTest, TestSpace) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(1); + image_data_param->set_source(this->filename_space_.c_str()); + image_data_param->set_shuffle(false); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_label_->num(), 1); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + // cat.jpg + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 1); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + EXPECT_EQ(this->blob_top_label_->cpu_data()[0], 0); + // cat gray.jpg + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 1); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + EXPECT_EQ(this->blob_top_label_->cpu_data()[0], 1); +} + } // namespace caffe #endif // USE_OPENCV diff --git a/tools/convert_imageset.cpp b/tools/convert_imageset.cpp index 9c52bfa0ef8..90cdb15d427 100644 --- a/tools/convert_imageset.cpp +++ b/tools/convert_imageset.cpp @@ 
-73,10 +73,13 @@ int main(int argc, char** argv) { std::ifstream infile(argv[2]); std::vector > lines; - std::string filename; + std::string line; + size_t pos; int label; - while (infile >> filename >> label) { - lines.push_back(std::make_pair(filename, label)); + while (std::getline(infile, line)) { + pos = line.find_last_of(' '); + label = atoi(line.substr(pos + 1).c_str()); + lines.push_back(std::make_pair(line.substr(0, pos), label)); } if (FLAGS_shuffle) { // randomly shuffle data From 4bf4b186076b054a0fa06103bc8989a3577468ba Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Tue, 24 May 2016 10:36:23 -0700 Subject: [PATCH 53/54] Overhaul TravisCI * Run on Ubuntu 14.04 * Test cuDNN builds * Build with OpenBLAS NOTE: Python3 build only works with CMake --- .travis.yml | 58 ++++++---- scripts/travis/build.sh | 13 +++ scripts/travis/configure-cmake.sh | 32 ++++++ scripts/travis/configure-make.sh | 36 ++++++ scripts/travis/configure.sh | 11 ++ scripts/travis/defaults.sh | 10 ++ scripts/travis/install-deps.sh | 105 ++++++++++++++++++ scripts/travis/install-python-deps.sh | 14 +++ scripts/travis/setup-venv.sh | 18 +++ scripts/travis/test.sh | 19 ++++ scripts/travis/travis_build_and_test.sh | 54 --------- scripts/travis/travis_install.sh | 101 ----------------- .../travis/travis_setup_makefile_config.sh | 31 ------ 13 files changed, 292 insertions(+), 210 deletions(-) create mode 100755 scripts/travis/build.sh create mode 100644 scripts/travis/configure-cmake.sh create mode 100644 scripts/travis/configure-make.sh create mode 100755 scripts/travis/configure.sh create mode 100755 scripts/travis/defaults.sh create mode 100755 scripts/travis/install-deps.sh create mode 100755 scripts/travis/install-python-deps.sh create mode 100755 scripts/travis/setup-venv.sh create mode 100755 scripts/travis/test.sh delete mode 100755 scripts/travis/travis_build_and_test.sh delete mode 100755 scripts/travis/travis_install.sh delete mode 100755 scripts/travis/travis_setup_makefile_config.sh diff --git a/.travis.yml b/.travis.yml index 4dc7ed72d6c..92d740cd88b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,40 +1,50 @@ -# Use a build matrix to do two builds in parallel: -# one using CMake, and one using make. +dist: trusty +sudo: required + +language: cpp +compiler: gcc + env: + global: + - NUM_THREADS=4 matrix: - - WITH_CUDA=false WITH_CMAKE=false WITH_IO=true - - WITH_CUDA=false WITH_CMAKE=true WITH_IO=true PYTHON_VERSION=3 - - WITH_CUDA=true WITH_CMAKE=false WITH_IO=true - - WITH_CUDA=true WITH_CMAKE=true WITH_IO=true - - WITH_CUDA=false WITH_CMAKE=false WITH_IO=false - - WITH_CUDA=false WITH_CMAKE=true WITH_IO=false PYTHON_VERSION=3 + # Use a build matrix to test many builds in parallel + # envvar defaults: + # WITH_CMAKE: false + # WITH_PYTHON3: false + # WITH_IO: true + # WITH_CUDA: false + # WITH_CUDNN: false + - BUILD_NAME="default-make" +# - BUILD_NAME="python3-make" WITH_PYTHON3=true + - BUILD_NAME="no-io-make" WITH_IO=false + - BUILD_NAME="cuda-make" WITH_CUDA=true + - BUILD_NAME="cudnn-make" WITH_CUDA=true WITH_CUDNN=true -language: cpp + - BUILD_NAME="default-cmake" WITH_CMAKE=true + - BUILD_NAME="python3-cmake" WITH_CMAKE=true WITH_PYTHON3=true + - BUILD_NAME="no-io-cmake" WITH_CMAKE=true WITH_IO=false + - BUILD_NAME="cuda-cmake" WITH_CMAKE=true WITH_CUDA=true + - BUILD_NAME="cudnn-cmake" WITH_CMAKE=true WITH_CUDA=true WITH_CUDNN=true -# Cache Ubuntu apt packages. 
cache: apt: true - directories: - - /home/travis/miniconda - - /home/travis/miniconda2 - - /home/travis/miniconda3 - -compiler: gcc before_install: - - export NUM_THREADS=4 - - export SCRIPTS=./scripts/travis - - export CONDA_DIR="/home/travis/miniconda$PYTHON_VERSION" + - source ./scripts/travis/defaults.sh install: - - sudo -E $SCRIPTS/travis_install.sh + - sudo -E ./scripts/travis/install-deps.sh + - ./scripts/travis/setup-venv.sh ~/venv + - source ~/venv/bin/activate + - ./scripts/travis/install-python-deps.sh before_script: - - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64:$CONDA_DIR/lib - - export PATH=$CONDA_DIR/bin:$PATH - - if ! $WITH_CMAKE; then $SCRIPTS/travis_setup_makefile_config.sh; fi + - ./scripts/travis/configure.sh -script: $SCRIPTS/travis_build_and_test.sh +script: + - ./scripts/travis/build.sh + - ./scripts/travis/test.sh notifications: # Emails are sent to the committer's git-configured email address by default, diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh new file mode 100755 index 00000000000..bb9406f046c --- /dev/null +++ b/scripts/travis/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# build the project + +BASEDIR=$(dirname $0) +source $BASEDIR/defaults.sh + +if ! $WITH_CMAKE ; then + make --jobs $NUM_THREADS all test pycaffe warn +else + cd build + make --jobs $NUM_THREADS all test.testbin +fi +make lint diff --git a/scripts/travis/configure-cmake.sh b/scripts/travis/configure-cmake.sh new file mode 100644 index 00000000000..772f1e2ce8d --- /dev/null +++ b/scripts/travis/configure-cmake.sh @@ -0,0 +1,32 @@ +# CMake configuration + +mkdir -p build +cd build + +ARGS="-DCMAKE_BUILD_TYPE=Release -DBLAS=Open" + +if $WITH_PYTHON3 ; then + ARGS="$ARGS -Dpython_version=3" +fi + +if $WITH_IO ; then + ARGS="$ARGS -DUSE_OPENCV=On -DUSE_LMDB=On -DUSE_LEVELDB=On" +else + ARGS="$ARGS -DUSE_OPENCV=Off -DUSE_LMDB=Off -DUSE_LEVELDB=Off" +fi + +if $WITH_CUDA ; then + # Only build SM50 + ARGS="$ARGS -DCPU_ONLY=Off -DCUDA_ARCH_NAME=Manual -DCUDA_ARCH_BIN=\"50\" -DCUDA_ARCH_PTX=\"\"" +else + ARGS="$ARGS -DCPU_ONLY=On" +fi + +if $WITH_CUDNN ; then + ARGS="$ARGS -DUSE_CUDNN=On" +else + ARGS="$ARGS -DUSE_CUDNN=Off" +fi + +cmake .. $ARGS + diff --git a/scripts/travis/configure-make.sh b/scripts/travis/configure-make.sh new file mode 100644 index 00000000000..ddc40fffa9d --- /dev/null +++ b/scripts/travis/configure-make.sh @@ -0,0 +1,36 @@ +# raw Makefile configuration + +LINE () { + echo "$@" >> Makefile.config +} + +cp Makefile.config.example Makefile.config + +LINE "BLAS := open" +LINE "WITH_PYTHON_LAYER := 1" + +if $WITH_PYTHON3 ; then + # TODO(lukeyeager) this path is currently disabled because of test errors like: + # ImportError: dynamic module does not define init function (PyInit__caffe) + LINE "PYTHON_LIBRARIES := python3.4m boost_python-py34" + LINE "PYTHON_INCLUDE := /usr/include/python3.4 /usr/lib/python3/dist-packages/numpy/core/include" + LINE "INCLUDE_DIRS := \$(INCLUDE_DIRS) \$(PYTHON_INCLUDE)" +fi + +if ! 
$WITH_IO ; then + LINE "USE_OPENCV := 0" + LINE "USE_LEVELDB := 0" + LINE "USE_LMDB := 0" +fi + +if $WITH_CUDA ; then + # Only build SM50 + LINE "CUDA_ARCH := -gencode arch=compute_50,code=sm_50" +else + LINE "CPU_ONLY := 1" +fi + +if $WITH_CUDNN ; then + LINE "USE_CUDNN := 1" +fi + diff --git a/scripts/travis/configure.sh b/scripts/travis/configure.sh new file mode 100755 index 00000000000..ef740c8982e --- /dev/null +++ b/scripts/travis/configure.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# configure the project + +BASEDIR=$(dirname $0) +source $BASEDIR/defaults.sh + +if ! $WITH_CMAKE ; then + source $BASEDIR/configure-make.sh +else + source $BASEDIR/configure-cmake.sh +fi diff --git a/scripts/travis/defaults.sh b/scripts/travis/defaults.sh new file mode 100755 index 00000000000..d69c0a7d964 --- /dev/null +++ b/scripts/travis/defaults.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# set default environment variables + +set -e + +WITH_CMAKE=${WITH_CMAKE:-false} +WITH_PYTHON3=${WITH_PYTHON3:-false} +WITH_IO=${WITH_IO:-true} +WITH_CUDA=${WITH_CUDA:-false} +WITH_CUDNN=${WITH_CUDNN:-false} diff --git a/scripts/travis/install-deps.sh b/scripts/travis/install-deps.sh new file mode 100755 index 00000000000..f7bfe4c4df9 --- /dev/null +++ b/scripts/travis/install-deps.sh @@ -0,0 +1,105 @@ +#!/bin/bash +# install dependencies +# (this script must be run as root) + +BASEDIR=$(dirname $0) +source $BASEDIR/defaults.sh + +apt-get -y update +apt-get install -y --no-install-recommends \ + build-essential \ + libboost-filesystem-dev \ + libboost-python-dev \ + libboost-system-dev \ + libboost-thread-dev \ + libgflags-dev \ + libgoogle-glog-dev \ + libhdf5-serial-dev \ + libopenblas-dev \ + python-virtualenv \ + wget + +if $WITH_CMAKE ; then + apt-get install -y --no-install-recommends cmake +fi + +if ! $WITH_PYTHON3 ; then + # Python2 + apt-get install -y --no-install-recommends \ + libprotobuf-dev \ + protobuf-compiler \ + python-dev \ + python-numpy \ + python-protobuf \ + python-skimage +else + # Python3 + apt-get install -y --no-install-recommends \ + python3-dev \ + python3-numpy \ + python3-skimage + + # build Protobuf3 since it's needed for Python3 + echo "Building protobuf3 from source ..." + pushd . 
+ PROTOBUF3_DIR=~/protobuf3-build + rm -rf $PROTOBUF3_DIR + mkdir $PROTOBUF3_DIR + + # install some more dependencies required to build protobuf3 + apt-get install -y --no-install-recommends \ + curl \ + dh-autoreconf \ + unzip + + wget https://github.com/google/protobuf/archive/v3.0.0-beta-3.tar.gz -O protobuf3.tar.gz + tar -xzf protobuf3.tar.gz -C $PROTOBUF3_DIR --strip 1 + rm protobuf3.tar.gz + cd $PROTOBUF3_DIR + ./autogen.sh + ./configure --prefix=/usr + make --jobs=$NUM_THREADS + make install + popd +fi + +if $WITH_IO ; then + apt-get install -y --no-install-recommends \ + libleveldb-dev \ + liblmdb-dev \ + libopencv-dev \ + libsnappy-dev +fi + +if $WITH_CUDA ; then + # install repo packages + CUDA_REPO_PKG=cuda-repo-ubuntu1404_7.5-18_amd64.deb + wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/$CUDA_REPO_PKG + dpkg -i $CUDA_REPO_PKG + rm $CUDA_REPO_PKG + + if $WITH_CUDNN ; then + ML_REPO_PKG=nvidia-machine-learning-repo_4.0-2_amd64.deb + wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64/$ML_REPO_PKG + dpkg -i $ML_REPO_PKG + fi + + # update package lists + apt-get -y update + + # install packages + CUDA_PKG_VERSION="7-5" + CUDA_VERSION="7.5" + apt-get install -y --no-install-recommends \ + cuda-core-$CUDA_PKG_VERSION \ + cuda-cudart-dev-$CUDA_PKG_VERSION \ + cuda-cublas-dev-$CUDA_PKG_VERSION \ + cuda-curand-dev-$CUDA_PKG_VERSION + # manually create CUDA symlink + ln -s /usr/local/cuda-$CUDA_VERSION /usr/local/cuda + + if $WITH_CUDNN ; then + apt-get install -y --no-install-recommends libcudnn5-dev + fi +fi + diff --git a/scripts/travis/install-python-deps.sh b/scripts/travis/install-python-deps.sh new file mode 100755 index 00000000000..eeec302791f --- /dev/null +++ b/scripts/travis/install-python-deps.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# install extra Python dependencies +# (must come after setup-venv) + +BASEDIR=$(dirname $0) +source $BASEDIR/defaults.sh + +if ! $WITH_PYTHON3 ; then + # Python2 + : +else + # Python3 + pip install --pre protobuf==3.0.0b3 +fi diff --git a/scripts/travis/setup-venv.sh b/scripts/travis/setup-venv.sh new file mode 100755 index 00000000000..81245f146da --- /dev/null +++ b/scripts/travis/setup-venv.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# setup a Python virtualenv +# (must come after install-deps) + +BASEDIR=$(dirname $0) +source $BASEDIR/defaults.sh + +VENV_DIR=${1:-~/venv} + +# setup our own virtualenv +if $WITH_PYTHON3; then + PYTHON_EXE='/usr/bin/python3' +else + PYTHON_EXE='/usr/bin/python2' +fi + +# use --system-site-packages so that Python will use deb packages +virtualenv $VENV_DIR -p $PYTHON_EXE --system-site-packages diff --git a/scripts/travis/test.sh b/scripts/travis/test.sh new file mode 100755 index 00000000000..fedd7e6b56e --- /dev/null +++ b/scripts/travis/test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# test the project + +BASEDIR=$(dirname $0) +source $BASEDIR/defaults.sh + +if $WITH_CUDA ; then + echo "Skipping tests for CUDA build" + exit 0 +fi + +if ! $WITH_CMAKE ; then + make runtest + make pytest +else + cd build + make runtest + make pytest +fi diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh deleted file mode 100755 index 174f1ee5a0a..00000000000 --- a/scripts/travis/travis_build_and_test.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# Script called by Travis to build and test Caffe. -# Travis CI tests are CPU-only for lack of compatible hardware. 
- -set -e -MAKE="make --jobs=$NUM_THREADS --keep-going" - -if $WITH_CMAKE; then - mkdir build - cd build - CPU_ONLY=" -DCPU_ONLY=ON" - if ! $WITH_CUDA; then - CPU_ONLY=" -DCPU_ONLY=OFF" - fi - PYTHON_ARGS="" - if [ "$PYTHON_VERSION" = "3" ]; then - PYTHON_ARGS="$PYTHON_ARGS -Dpython_version=3 -DBOOST_LIBRARYDIR=$CONDA_DIR/lib/" - fi - if $WITH_IO; then - IO_ARGS="-DUSE_OPENCV=ON -DUSE_LMDB=ON -DUSE_LEVELDB=ON" - else - IO_ARGS="-DUSE_OPENCV=OFF -DUSE_LMDB=OFF -DUSE_LEVELDB=OFF" - fi - cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" $IO_ARGS .. - $MAKE - $MAKE pytest - if ! $WITH_CUDA; then - $MAKE runtest - $MAKE lint - fi - $MAKE clean - cd - -else - if ! $WITH_CUDA; then - export CPU_ONLY=1 - fi - if $WITH_IO; then - export USE_LMDB=1 - export USE_LEVELDB=1 - export USE_OPENCV=1 - fi - $MAKE all test pycaffe warn lint || true - if ! $WITH_CUDA; then - $MAKE runtest - fi - $MAKE all - $MAKE test - $MAKE pycaffe - $MAKE pytest - $MAKE warn - if ! $WITH_CUDA; then - $MAKE lint - fi -fi diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh deleted file mode 100755 index 091e92431f0..00000000000 --- a/scripts/travis/travis_install.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/bash -# This script must be run with sudo. - -set -e - -MAKE="make --jobs=$NUM_THREADS" -# Install apt packages where the Ubuntu 12.04 default and ppa works for Caffe - -# This ppa is for gflags and glog -add-apt-repository -y ppa:tuleu/precise-backports -apt-get -y update -apt-get install \ - wget git curl \ - python-dev python-numpy python3-dev\ - libleveldb-dev libsnappy-dev libopencv-dev \ - libprotobuf-dev protobuf-compiler \ - libatlas-dev libatlas-base-dev \ - libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ - bc - -# Add a special apt-repository to install CMake 2.8.9 for CMake Caffe build, -# if needed. By default, Aptitude in Ubuntu 12.04 installs CMake 2.8.7, but -# Caffe requires a minimum CMake version of 2.8.8. -if $WITH_CMAKE; then - # cmake 3 will make sure that the python interpreter and libraries match - wget --no-check-certificate http://www.cmake.org/files/v3.2/cmake-3.2.3-Linux-x86_64.sh -O cmake3.sh - chmod +x cmake3.sh - ./cmake3.sh --prefix=/usr/ --skip-license --exclude-subdir -fi - -# Install CUDA, if needed -if $WITH_CUDA; then - CUDA_URL=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1204/x86_64/cuda-repo-ubuntu1204_6.5-14_amd64.deb - CUDA_FILE=/tmp/cuda_install.deb - curl $CUDA_URL -o $CUDA_FILE - dpkg -i $CUDA_FILE - rm -f $CUDA_FILE - apt-get -y update - # Install the minimal CUDA subpackages required to test Caffe build. - # For a full CUDA installation, add 'cuda' to the list of packages. - apt-get -y install cuda-core-6-5 cuda-cublas-6-5 cuda-cublas-dev-6-5 cuda-cudart-6-5 cuda-cudart-dev-6-5 cuda-curand-6-5 cuda-curand-dev-6-5 - # Create CUDA symlink at /usr/local/cuda - # (This would normally be created by the CUDA installer, but we create it - # manually since we did a partial installation.) - ln -s /usr/local/cuda-6.5 /usr/local/cuda -fi - -# Install LMDB -LMDB_URL=https://github.com/LMDB/lmdb/archive/LMDB_0.9.14.tar.gz -LMDB_FILE=/tmp/lmdb.tar.gz -pushd . -wget $LMDB_URL -O $LMDB_FILE -tar -C /tmp -xzvf $LMDB_FILE -cd /tmp/lmdb*/libraries/liblmdb/ -$MAKE -$MAKE install -popd -rm -f $LMDB_FILE - -# Install the Python runtime dependencies via miniconda (this is much faster -# than using pip for everything). 
-export PATH=$CONDA_DIR/bin:$PATH -# clear any cached conda (see #3786) -rm -rf $CONDA_DIR -if [ ! -d $CONDA_DIR ]; then - if [ "$PYTHON_VERSION" -eq "3" ]; then - wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - else - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh - fi - chmod +x miniconda.sh - ./miniconda.sh -b -p $CONDA_DIR - - conda update --yes conda - # The version of boost we're using for Python 3 depends on 3.4 for now. - if [ "$PYTHON_VERSION" -eq "3" ]; then - conda install --yes python=3.4 - fi - conda install --yes numpy scipy matplotlib scikit-image pip - # Let conda install boost (so that boost_python matches) - conda install --yes -c https://conda.binstar.org/menpo boost=1.56.0 -fi - -# install protobuf 3 (just use the miniconda3 directory to avoid having to setup the path again) -if [ "$PYTHON_VERSION" -eq "3" ] && [ ! -e "$CONDA_DIR/bin/protoc" ]; then - pushd . - wget https://github.com/google/protobuf/archive/v3.0.0-alpha-3.1.tar.gz -O protobuf-3.tar.gz - tar -C /tmp -xzvf protobuf-3.tar.gz - cd /tmp/protobuf-3*/ - ./autogen.sh - ./configure --prefix=$CONDA_DIR - $MAKE - $MAKE install - popd -fi - -if [ "$PYTHON_VERSION" -eq "3" ]; then - pip install --pre protobuf==3.0.0b2 -else - pip install protobuf -fi diff --git a/scripts/travis/travis_setup_makefile_config.sh b/scripts/travis/travis_setup_makefile_config.sh deleted file mode 100755 index 83aacf11fb0..00000000000 --- a/scripts/travis/travis_setup_makefile_config.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -set -e - -mv Makefile.config.example Makefile.config - -if $WITH_CUDA; then - # Only generate compute_50. - GENCODE="-gencode arch=compute_50,code=sm_50" - GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50" - echo "CUDA_ARCH := $GENCODE" >> Makefile.config -fi - -# Remove IO library settings from Makefile.config -# to avoid conflicts with CI configuration -sed -i -e '/USE_LMDB/d' Makefile.config -sed -i -e '/USE_LEVELDB/d' Makefile.config -sed -i -e '/USE_OPENCV/d' Makefile.config - -cat << 'EOF' >> Makefile.config -# Travis' nvcc doesn't like newer boost versions -NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used -ANACONDA_HOME := $(CONDA_DIR) -PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ - $(ANACONDA_HOME)/include/python2.7 \ - $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include -PYTHON_LIB := $(ANACONDA_HOME)/lib -INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include -LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib -WITH_PYTHON_LAYER := 1 -EOF From 26879320898aacfcb5236c725938e259788c10fc Mon Sep 17 00:00:00 2001 From: Luke Yeager Date: Wed, 25 May 2016 16:39:55 -0700 Subject: [PATCH 54/54] Remove misleading comment from a test file --- src/caffe/test/test_caffe_main.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index fccf6f1613b..6473b74d0a6 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -1,6 +1,3 @@ -// The main caffe test code. Your test cpp code should include this hpp -// to allow a main function to be compiled into the binary. - #include "caffe/caffe.hpp" #include "caffe/test/test_caffe_main.hpp"
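
For reference, the listing-file change in the image-data patches above replaces `infile >> filename >> label` with a split at the last space, so image paths may themselves contain spaces while the label remains the final whitespace-delimited token (existing listing files keep working unchanged). The following standalone program is a minimal sketch of that parsing scheme only; it is not part of the patches, and the main() wrapper, the malformed-line guard, and the example usage are illustrative additions.

// parse_listing.cpp -- sketch of the "split at last space" listing parser.
// Build: g++ -o parse_listing parse_listing.cpp
// Usage: ./parse_listing listing.txt
// where listing.txt contains lines such as: images/cat gray.jpg 1
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " listing.txt" << std::endl;
    return 1;
  }
  std::ifstream infile(argv[1]);
  std::vector<std::pair<std::string, int> > lines;
  std::string line;
  while (std::getline(infile, line)) {
    // Everything after the last space is the label; everything before it is
    // the filename, which may contain spaces.
    size_t pos = line.find_last_of(' ');
    if (pos == std::string::npos) continue;  // skip malformed lines (guard added here)
    int label = atoi(line.substr(pos + 1).c_str());
    lines.push_back(std::make_pair(line.substr(0, pos), label));
  }
  for (size_t i = 0; i < lines.size(); ++i) {
    std::cout << lines[i].first << " -> " << lines[i].second << std::endl;
  }
  return 0;
}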