Comments (5)
One more comment: after all, it looks like defining the labels' shape causes additional problems in some networks. We will back out that change; you can address your problem by calling:
label = tf.reshape(label, (BATCH_SIZE,))
for the output from the daliop.
from dali.
Hi,
Could you provide full minimal case to reproduce this (standalone one we could run without any additional conditions)?
Honestly, we have not targeted (tested) TensorFlow-Slim, so it could be a bug or just a limitation.
Tracked as DALI-209
from dali.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali.tfrecord as tfrec
import tensorflow as tf
import nvidia.dali.plugin.tf as dali_tf
from subprocess import call
import os.path
import numpy as np
slim = tf.contrib.slim
lmdb_folder = "/data/ilsvrc12_train_lmdb"
tfrecord = "/data/imagenet/train-00001-of-01024"
tfrecord_idx = "idx_files/train-00001-of-01024.idx"
tfrecord2idx_script = "tfrecord2idx"
N = 4 # number of GPUs
BATCH_SIZE = 128 # batch size per GPU
ITERATIONS = 32
IMAGE_SIZE = 3
if not os.path.exists("idx_files"):
os.mkdir("idx_files")
if not os.path.isfile(tfrecord_idx):
call([tfrecord2idx_script, tfrecord, tfrecord_idx])
class CommonPipeline(Pipeline):
def init(self, batch_size, num_threads, device_id):
super(CommonPipeline, self).init(batch_size, num_threads, device_id)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB)
self.resize = ops.Resize(device = "gpu",
image_type = types.RGB,
interp_type = types.INTERP_LINEAR)
self.cmn = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
crop = (227, 227),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.uniform = ops.Uniform(range = (0.0, 1.0))
self.resize_rng = ops.Uniform(range = (256, 480))
def base_define_graph(self, inputs, labels):
images = self.decode(inputs)
images = self.resize(images, resize_shorter = self.resize_rng())
output = self.cmn(images, crop_pos_x = self.uniform(),
crop_pos_y = self.uniform())
return (output, labels.gpu())
class CaffeReadPipeline(CommonPipeline):
def init(self, batch_size, num_threads, device_id, num_gpus):
super(CaffeReadPipeline, self).init(batch_size, num_threads, device_id)
self.input = ops.CaffeReader(path = lmdb_folder,
random_shuffle = True, shard_id = device_id, num_shards = num_gpus)
def define_graph(self):
images, labels = self.input()
return self.base_define_graph(images, labels)
class TFRecordPipeline(CommonPipeline):
def init(self, batch_size, num_threads, device_id, num_gpus):
super(TFRecordPipeline, self).init(batch_size, num_threads, device_id)
self.input = ops.TFRecordReader(path = tfrecord,
index_path = tfrecord_idx,
features = {"image/encoded" : tfrec.FixedLenFeature((), tfrec.string, ""),
"image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)
})
def define_graph(self):
inputs = self.input()
images = inputs["image/encoded"]
labels = inputs["image/class/label"]
return self.base_define_graph(images, labels)
def get_batch_test_dali(batch_size, pipe_type):
pipe_name, label_type, _ = pipe_type
pipes = [pipe_name(batch_size=batch_size, num_threads=2, device_id = device_id, num_gpus = N) for device_id in range(N)]
serialized_pipes = [pipe.serialize() for pipe in pipes]
del pipes
daliop = dali_tf.DALIIterator()
images = []
labels = []
for d in range(N):
with tf.device('/gpu:%i' % d):
image, label = daliop(serialized_pipeline = serialized_pipes[d],
shape = [BATCH_SIZE, 3, 227, 227],
image_type = tf.int32,
label_type = label_type,
device_id = d)
images.append(image)
labels.append(label)
return [images, labels]
pipe_types = [[CaffeReadPipeline, tf.int32, (0, 999)], [TFRecordPipeline, tf.int64, (1, 1000)]]
for pipe_name in pipe_types:
print ("RUN: " + pipe_name[0].name)
test_batch = get_batch_test_dali(BATCH_SIZE, pipe_name)
batch_queue = slim.prefetch_queue.prefetch_queue(test_batch)
x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3], name='x')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
config = tf.ConfigProto(gpu_options=gpu_options)
with tf.Session(config=config) as sess:
for i in range(ITERATIONS):
imgs, labels = sess.run(test_batch)
# Testing correctness of labels
for label in labels:
## labels need to be integers
assert(np.equal(np.mod(label, 1), 0).all())
## labels need to be in range pipe_name[2]
assert((label >= pipe_name[2][0]).all())
assert((label <= pipe_name[2][1]).all())
print("OK : " + pipe_name[0].__name__)
from dali.
Hi,
Thank you for the full sample - it really sped up my investigation. There are two things:
- The error is caused by the fact that prefetch_queue expects a list of tensors, while test_batch is a list of lists of tensors. The images and labels returned from define_graph are themselves lists. To make that work use:
for elm in zip(test_batch[0], test_batch[1]):
slim.prefetch_queue.prefetch_queue(elm)
- prefetch_queue expects that all tensors have their sizes defined, in our case only image tensor is, while labels are not. PR #142 should address this problem
from dali.
Hmmm, the issues @JanuszL pointed out aside, I don't think there is a reason to use prefetch queue with DALI @gon9031 . DALI provides you with prefetching out of the box, without any additional work.
from dali.
Related Issues (20)
- A100 hardware decoder HOT 1
- Extract motion vectors HOT 7
- Segmentation fault when using 'mixed' HOT 5
- Bbox Pruning Too Aggressive? HOT 5
- Indexing video with binary mask HOT 1
- source_info tensor not guaranteed to contain correct data HOT 1
- 16 bit gray scale Image read error HOT 1
- COCO Reader pixelwise_masks Empty Output HOT 7
- Dali on Jetson: nvidia.dali.fn.readers.video_resize is missing HOT 4
- Numpy reader test (GDS) HOT 4
- How to add a scalar value to the loader? HOT 1
- Can DALI be integrated into HuggingFace Trainer? HOT 9
- Bug in creating `TensorGPU` when `stream` key is `None` in CUDA array interface HOT 2
- Configure max image size HOT 3
- Webdataset reader behavior with many sources HOT 1
- ModuleNotFoundError: No module named 'nvidia.dali.python_function_plugin' HOT 3
- Speed up Dino with DALI HOT 3
- error using webdataset
- webdataset cannot stop cycling at end of epoch HOT 11
- Get audio data from external data sources and start iteration HOT 7
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from dali.