if not os.path.exists(IMAGES_DIR):
    subprocess.run(['mkdir', '-p', IMAGES_DIR])
Loading the image data
with open(ANNOTATIONS_FILE) as json_file:
    data = json.load(json_file)

images = data['images']
categories = data['categories']
annotations = data['annotations']

images_annotations = []
for idx, image in enumerate(images):
    image_id = int(image['id'])
    random_number = idx
    file_name = image['file_name']

    # rename files to unique numbers
    new_file_name = '%s.jpg' % str(random_number)
    file_location = '%s/%s' % (DATA_DIR, file_name)
    new_file_location = '%s/%s' % (IMAGES_DIR, new_file_name)

    if os.path.isfile(file_location):
        print('renamed: %s to %s' % (file_location, new_file_location))
        shutil.copy(file_location, new_file_location)

    image['file_name'] = new_file_name
    image['folder'] = DATA_DIR

    # get annotations for the image
    _annotations = [a for a in annotations if int(a['image_id']) == image_id]

    # something wrong with y coordinates in data
    # for a in _annotations:
    #     (x, y, w, h) = a['bbox']
    #     a['bbox'][1] = image['height'] - y - h

    images_annotations.append((image, _annotations))
Execution result (click to expand)
renamed: ./datasets/TACO/data//batch_1/000006.jpg to ./datasets/TACO/images/0.jpg
renamed: ./datasets/TACO/data//batch_1/000008.jpg to ./datasets/TACO/images/1.jpg
renamed: ./datasets/TACO/data//batch_1/000010.jpg to ./datasets/TACO/images/2.jpg
renamed: ./datasets/TACO/data//batch_1/000019.jpg to ./datasets/TACO/images/3.jpg
renamed: ./datasets/TACO/data//batch_1/000026.jpg to ./datasets/TACO/images/4.jpg
renamed: ./datasets/TACO/data//batch_1/000047.jpg to ./datasets/TACO/images/5.jpg
renamed: ./datasets/TACO/data//batch_1/000055.jpg to ./datasets/TACO/images/6.jpg
:
renamed: ./datasets/TACO/data//batch_9/000099.jpg to ./datasets/TACO/images/1499.jpg
## Creating the label file
LABEL_PATH = os.path.join(TACO_DIR, 'TACO', 'labelmap.pbtxt')

if not os.path.exists(LABEL_PATH):
    print('Building label map from examples')

    from object_detection.protos import string_int_label_map_pb2
    from google.protobuf import text_format

    labelmap = string_int_label_map_pb2.StringIntLabelMap()
    for category in categories:
        item = labelmap.item.add()
        # label map id 0 is reserved for the background label
        item.id = int(category['id']) + 1
        item.name = category['name']

    with open(LABEL_PATH, 'w') as f:
        f.write(text_format.MessageToString(labelmap))

    print('Label map written to labelmap.pbtxt')
else:
    print('Reusing existing labelmap.pbtxt')
    with open(LABEL_PATH, 'r') as f:
        pprint.pprint(f.readlines())
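For reference, `text_format.MessageToString` writes each category to labelmap.pbtxt as a plain-text `item` block like the one below. The category name shown here is only an illustration of the format, not necessarily an actual TACO label.

```
item {
  id: 1
  name: "Aluminium foil"
}
```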
Building TFRecord files for dataset: train
0 of 1200 annotations
100 of 1200 annotations
200 of 1200 annotations
300 of 1200 annotations
400 of 1200 annotations
500 of 1200 annotations
600 of 1200 annotations
700 of 1200 annotations
800 of 1200 annotations
900 of 1200 annotations
1000 of 1200 annotations
1100 of 1200 annotations
Done!
Building TFRecord files for dataset: test
0 of 151 annotations
100 of 151 annotations
Done!
Building TFRecord files for dataset: val
0 of 149 annotations
100 of 149 annotations
Done!
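The conversion code that produced this output is not shown in this section. The sketch below only illustrates the standard Object Detection API feature keys that each `train.record` entry carries; the helper name `build_tf_example`, its arguments, and the reuse of the image/annotation dicts from above are assumptions, not the article's actual implementation.

```python
import os
import tensorflow as tf
from object_detection.utils import dataset_util

def build_tf_example(image, annotations, images_dir, cat_names):
    """Sketch: pack one image and its annotations into a tf.train.Example."""
    with tf.io.gfile.GFile(os.path.join(images_dir, image['file_name']), 'rb') as fid:
        encoded_jpg = fid.read()
    width, height = image['width'], image['height']

    xmins, xmaxs, ymins, ymaxs, classes, classes_text = [], [], [], [], [], []
    for a in annotations:
        x, y, w, h = a['bbox']  # COCO-style [x, y, width, height] in pixels
        xmins.append(x / width)
        xmaxs.append((x + w) / width)
        ymins.append(y / height)
        ymaxs.append((y + h) / height)
        classes.append(int(a['category_id']) + 1)  # +1: label map id 0 is the background
        classes_text.append(cat_names[int(a['category_id'])].encode('utf8'))

    feature = {
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(image['file_name'].encode('utf8')),
        'image/source_id': dataset_util.bytes_feature(image['file_name'].encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(b'jpeg'),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))

# cat_names maps a category id to its name, e.g. {c['id']: c['name'] for c in categories}
```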
## Downloading the pretrained model
Download the pretrained model that this tutorial uses.
The model is downloaded from the TensorFlow 2 Detection Model Zoo and saved under the ./experience/pretrained/centernet_hg104_1024x1024_coco17_tpu-32/ directory.
Create a function that downloads the pretrained model
def download_model(model, output_dir):
    URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/%s.tar.gz' % model
    DOWNLOAD_PATH = '%s/%s.tar.gz' % (output_dir, model)
    MODEL_DIR = os.path.join(output_dir, model)

    # create the model directory (recreate it if it already exists)
    if not os.path.exists(MODEL_DIR):
        subprocess.run(['mkdir', '-p', MODEL_DIR])
    if os.path.exists(MODEL_DIR):
        subprocess.run(['rm', '-r', MODEL_DIR])
    subprocess.run(['mkdir', MODEL_DIR])

    print("Downloading %s.." % model)
    p = subprocess.run(['wget', '--show-progress', '--progress=bar:force', '-O', DOWNLOAD_PATH, URL])

    print("Unpacking..")
    p = subprocess.run(['tar', 'zxvf', DOWNLOAD_PATH, '-C', output_dir])
    p = subprocess.run(['rm', DOWNLOAD_PATH])

    print("Checking..")
    pbfile = os.path.join(MODEL_DIR, 'saved_model')
    print(pbfile)

    print("Success!")
    return MODEL_DIR
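A usage sketch for the model named above; the output directory follows the text, everything else is an assumption (note that download_model hard-codes the 20200711 release date in the URL, which may need adjusting to match the model's actual entry in the zoo):

```python
# Usage sketch: fetch and unpack the pretrained model into ./experience/pretrained/.
PRETRAINED_DIR = './experience/pretrained'
pretrained_model_dir = download_model('centernet_hg104_1024x1024_coco17_tpu-32', PRETRAINED_DIR)
print(pretrained_model_dir)
```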
2021-11-19 16:40:13.801441: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
2021-11-19 16:40:15.353817: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1
2021-11-19 16:40:15.382263: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.382598: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce RTX 3070 computeCapability: 8.6
coreClock: 1.725GHz coreCount: 46 deviceMemorySize: 7.79GiB deviceMemoryBandwidth: 417.29GiB/s
2021-11-19 16:40:15.382624: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
2021-11-19 16:40:15.384785: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublas.so.11
2021-11-19 16:40:15.384822: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublasLt.so.11
2021-11-19 16:40:15.385442: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcufft.so.10
2021-11-19 16:40:15.385597: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcurand.so.10
2021-11-19 16:40:15.386207: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusolver.so.11
2021-11-19 16:40:15.386701: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusparse.so.11
2021-11-19 16:40:15.386811: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudnn.so.8
2021-11-19 16:40:15.386872: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.387159: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.387412: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0
2021-11-19 16:40:15.387608: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-11-19 16:40:15.388072: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.388305: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce RTX 3070 computeCapability: 8.6
coreClock: 1.725GHz coreCount: 46 deviceMemorySize: 7.79GiB deviceMemoryBandwidth: 417.29GiB/s
2021-11-19 16:40:15.388351: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.388601: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.388822: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0
2021-11-19 16:40:15.388843: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
2021-11-19 16:40:15.684362: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-11-19 16:40:15.684394: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] 0
2021-11-19 16:40:15.684417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1277] 0: N
2021-11-19 16:40:15.684562: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.684809: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.685025: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-19 16:40:15.685224: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1418] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5959 MB memory) -> physical GPU (device: 0, name: GeForce RTX 3070, pci bus id: 0000:01:00.0, compute capability: 8.6)
WARNING:tensorflow:Collective ops is not configured at program startup. Some performance features may not be enabled.
W1119 16:40:15.687392 139819042772800 mirrored_strategy.py:379] Collective ops is not configured at program startup. Some performance features may not be enabled.
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
I1119 16:40:15.731214 139819042772800 mirrored_strategy.py:369] Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
INFO:tensorflow:Maybe overwriting train_steps: 300000
I1119 16:40:15.734288 139819042772800 config_util.py:552] Maybe overwriting train_steps: 300000
INFO:tensorflow:Maybe overwriting use_bfloat16: False
I1119 16:40:15.734368 139819042772800 config_util.py:552] Maybe overwriting use_bfloat16: False
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/object_detection/model_lib_v2.py:558: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
W1119 16:40:15.769551 139819042772800 deprecation.py:336] From /usr/local/lib/python3.6/dist-packages/object_detection/model_lib_v2.py:558: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
INFO:tensorflow:Reading unweighted datasets: ['/notebooks/git/DeepLearning/object_detection/ObjectDetectionAPI/local/tf2/datasets/TACO/train.record']
I1119 16:40:15.780686 139819042772800 dataset_builder.py:163] Reading unweighted datasets: ['/notebooks/git/DeepLearning/object_detection/ObjectDetectionAPI/local/tf2/datasets/TACO/train.record']
INFO:tensorflow:Reading record datasets for input file: ['/notebooks/git/DeepLearning/object_detection/ObjectDetectionAPI/local/tf2/datasets/TACO/train.record']
I1119 16:40:15.780814 139819042772800 dataset_builder.py:80] Reading record datasets for input file: ['/notebooks/git/DeepLearning/object_detection/ObjectDetectionAPI/local/tf2/datasets/TACO/train.record']
INFO:tensorflow:Number of filenames to read: 1
I1119 16:40:15.780904 139819042772800 dataset_builder.py:81] Number of filenames to read: 1
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
W1119 16:40:15.780972 139819042772800 dataset_builder.py:88] num_readers has been reduced to 1 to match input file shards.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/object_detection/builders/dataset_builder.py:105: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.experimental_deterministic`.
W1119 16:40:15.782581 139819042772800 deprecation.py:336] From /usr/local/lib/python3.6/dist-packages/object_detection/builders/dataset_builder.py:105: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.experimental_deterministic`.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/object_detection/builders/dataset_builder.py:237: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
W1119 16:40:15.800168 139819042772800 deprecation.py:336] From /usr/local/lib/python3.6/dist-packages/object_detection/builders/dataset_builder.py:237: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:206: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
W1119 16:40:20.734972 139819042772800 deprecation.py:336] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:206: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:206: sample_distorted_bounding_box (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.
Instructions for updating:
`seed2` arg is deprecated.Use sample_distorted_bounding_box_v2 instead.
W1119 16:40:22.906435 139819042772800 deprecation.py:336] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:206: sample_distorted_bounding_box (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.
Instructions for updating:
`seed2` arg is deprecated.Use sample_distorted_bounding_box_v2 instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py:464: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W1119 16:40:24.083608 139819042772800 deprecation.py:336] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py:464: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
2021-11-19 16:40:25.574937: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:176] None of the MLIR Optimization Passes are enabled (registered 2)
2021-11-19 16:40:25.596201: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2799925000 Hz
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py:435: UserWarning: `tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.
warnings.warn('`tf.keras.backend.set_learning_phase` is deprecated and '
2021-11-19 16:40:40.428122: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudnn.so.8
2021-11-19 16:40:40.787256: I tensorflow/stream_executor/cuda/cuda_dnn.cc:359] Loaded cuDNN version 8100
2021-11-19 16:40:41.305566: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublas.so.11
2021-11-19 16:40:41.662345: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublasLt.so.11
2021-11-19 16:40:41.845904: I tensorflow/stream_executor/cuda/cuda_blas.cc:1838] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.612802 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.613519 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.614856 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.615420 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.616741 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.617264 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.618753 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.619304 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.620307 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1119 16:40:43.620836 139819042772800 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py:602: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Use fn_output_signature instead
W1119 16:40:44.154879 139808903001856 deprecation.py:534] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py:602: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Use fn_output_signature instead
2021-11-19 16:41:05.277797: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.04GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.277851: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.04GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.413508: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.06GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.413550: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.06GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.755117: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.97GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.756092: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.97GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.794086: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.99GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.794161: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.99GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.838792: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.08GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2021-11-19 16:41:05.839048: W tensorflow/core/common_runtime/bfc_allocator.cc:271] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.08GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
INFO:tensorflow:Step 100 per-step time 0.786s
I1119 16:42:02.429563 139819042772800 model_lib_v2.py:700] Step 100 per-step time 0.786s
INFO:tensorflow:{'Loss/classification_loss': 1.4968293, 'Loss/localization_loss': 0.88128555, 'Loss/regularization_loss': 0.29720917, 'Loss/total_loss': 2.675324, 'learning_rate': 0.014666351}
:
I1121 15:41:59.130978 139819042772800 model_lib_v2.py:701] {'Loss/classification_loss': 0.19363031, 'Loss/localization_loss': 0.06761657, 'Loss/regularization_loss': 0.19008857, 'Loss/total_loss': 0.45133543, 'learning_rate': 0.0}
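The training log above (MirroredStrategy, train_steps 300000, loss dictionaries from model_lib_v2.py) is the output of the Object Detection API training loop, which is normally started with model_main_tf2.py. A launch sketch is shown below; the script location and the pipeline/model_dir paths are assumptions based on this article's layout, not taken from the article itself.

```python
import subprocess

# Sketch: launch training with the Object Detection API's model_main_tf2.py.
# The script path, pipeline config, and model_dir below are assumptions.
subprocess.run([
    'python', 'models/research/object_detection/model_main_tf2.py',
    '--pipeline_config_path=./experience/TACO_model/pipeline.config',
    '--model_dir=./experience/TACO_model',
    '--alsologtostderr',
])
```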
2021-11-21 15:42:02.114067: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
2021-11-21 15:42:03.556327: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1
2021-11-21 15:42:03.587644: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.587958: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce RTX 3070 computeCapability: 8.6
coreClock: 1.725GHz coreCount: 46 deviceMemorySize: 7.79GiB deviceMemoryBandwidth: 417.29GiB/s
2021-11-21 15:42:03.587985: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
2021-11-21 15:42:03.590198: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublas.so.11
2021-11-21 15:42:03.590239: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublasLt.so.11
2021-11-21 15:42:03.590926: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcufft.so.10
2021-11-21 15:42:03.591108: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcurand.so.10
2021-11-21 15:42:03.591821: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusolver.so.11
2021-11-21 15:42:03.592308: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusparse.so.11
2021-11-21 15:42:03.592425: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudnn.so.8
2021-11-21 15:42:03.592481: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.592747: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.592949: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0
2021-11-21 15:42:03.593138: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-11-21 15:42:03.593603: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.593811: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce RTX 3070 computeCapability: 8.6
coreClock: 1.725GHz coreCount: 46 deviceMemorySize: 7.79GiB deviceMemoryBandwidth: 417.29GiB/s
2021-11-21 15:42:03.593854: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.594079: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.594275: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0
2021-11-21 15:42:03.594296: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0
2021-11-21 15:42:03.891568: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-11-21 15:42:03.891599: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] 0
2021-11-21 15:42:03.891606: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1277] 0: N
2021-11-21 15:42:03.891825: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.892101: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.892342: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-11-21 15:42:03.892568: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1418] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5957 MB memory) -> physical GPU (device: 0, name: GeForce RTX 3070, pci bus id: 0000:01:00.0, compute capability: 8.6)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py:463: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with back_prop=False is deprecated and will be removed in a future version.
Instructions for updating:
back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))
W1121 15:42:04.055024 139994763663168 deprecation.py:601] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py:463: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with back_prop=False is deprecated and will be removed in a future version.
Instructions for updating:
back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f51ec14c320>, because it is not built.
W1121 15:42:24.267393 139994763663168 save_impl.py:77] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f51ec14c320>, because it is not built.
2021-11-21 15:42:30.240980: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
W1121 15:42:44.091759 139994763663168 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 520). These functions will not be directly callable after loading.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/generic_utils.py:497: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.
category=CustomMaskWarning)
WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file "keras.metadata" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).
FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.
W1121 15:42:47.660120 139994763663168 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file "keras.metadata" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).
FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.
INFO:tensorflow:Assets written to: ./experience/TACO_model/output_model/saved_model/assets
I1121 15:42:48.317938 139994763663168 builder_impl.py:775] Assets written to: ./experience/TACO_model/output_model/saved_model/assets
INFO:tensorflow:Writing pipeline config file to ./experience/TACO_model/output_model/pipeline.config
I1121 15:42:48.894930 139994763663168 config_util.py:254] Writing pipeline config file to ./experience/TACO_model/output_model/pipeline.config
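The log above ends with a SavedModel and pipeline.config written under ./experience/TACO_model/output_model/; this kind of export is normally done with the Object Detection API's exporter_main_v2.py. A sketch follows; the checkpoint and config paths are assumptions about where training wrote its files.

```python
import subprocess

# Sketch: export the trained checkpoint to a SavedModel with exporter_main_v2.py.
# --pipeline_config_path and --trained_checkpoint_dir below are assumptions.
subprocess.run([
    'python', 'models/research/object_detection/exporter_main_v2.py',
    '--input_type=image_tensor',
    '--pipeline_config_path=./experience/TACO_model/pipeline.config',
    '--trained_checkpoint_dir=./experience/TACO_model',
    '--output_directory=./experience/TACO_model/output_model',
])
```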
# Specify the test images
IMAGE_PATHS = ['./datasets/TACO/images/28.jpg', './datasets/TACO/images/27.jpg']

# Load saved model and build the detection function
detect_fn = tf.saved_model.load(model_path)

for image_path in IMAGE_PATHS:
    print('Running inference for {}... '.format(image_path), end='')
    image_np = load_image_into_numpy_array(image_path)

    # Things to try:
    # Flip horizontally
    # image_np = np.fliplr(image_np).copy()

    # Convert image to grayscale
    # image_np = np.tile(
    #     np.mean(image_np, 2, keepdims=True), (1, 1, 3)).astype(np.uint8)

    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image_np)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]

    # input_tensor = np.expand_dims(image_np, 0)
    detections = detect_fn(input_tensor)

    # All outputs are batch tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections

    # detection_classes should be ints.
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)

    image_np_with_detections = image_np.copy()

    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'],
        detections['detection_classes'],
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.30,
        agnostic_mode=False)

    plt.figure(figsize=(20, 20))
    plt.imshow(image_np_with_detections)
    print('Done')
    plt.savefig('figure.jpg')
    plt.show()
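The code above relies on load_image_into_numpy_array, category_index, and model_path, which are defined earlier in the notebook. Minimal sketches are shown below; the model_path value is an assumption based on the export log above.

```python
import numpy as np
from PIL import Image
from object_detection.utils import label_map_util

# Assumption: the SavedModel exported earlier in this article.
model_path = './experience/TACO_model/output_model/saved_model'

# Map class ids to display names using the label map created earlier.
category_index = label_map_util.create_category_index_from_labelmap(
    LABEL_PATH, use_display_name=True)

def load_image_into_numpy_array(path):
    """Load an image from file into a (height, width, 3) uint8 numpy array."""
    return np.array(Image.open(path))
```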
Execution result (click to expand)
Running inference for ./datasets/TACO/images/28.jpg... Done
Running inference for ./datasets/TACO/images/27.jpg... Done