Criando TfRecords a partir de uma lista de strings e alimentando um gráfico no fluxo tensor após decodificação

O objetivo era criar um banco de dados de TfRecords. Dado: tenho 23 pastas, cada uma contendo 7500 imagens, e 23 arquivos de texto, cada um com 7500 linhas descrevendo os recursos das 7500 imagens das pastas correspondentes.

Eu criei o banco de dados através deste código:

import tensorflow as tf
import numpy as np
from PIL import Image

def _Float_feature(value):
    """Wrap a single float in a tf.train.Feature holding a FloatList."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)

def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)

def _int64_feature(value):
    """Wrap a single integer in a tf.train.Feature holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)

def create_image_annotation_data():
    """Load the whole dataset into memory.

    Returns:
        images: list of numpy arrays, one per image.
        features_labels: list of strings, each one describing the full
            feature set of the corresponding image.

    NOTE(review): placeholder — `images` and `features_labels` are not
    defined in this body; the real loading code was omitted by the author,
    so calling this function as-is raises NameError.
    """
    # Code to read images and features.
    # images represent a list of numpy array of images, and features_labels represent a list of strings
    # where each string represent the whole set of features for each image. 
    return images, features_labels

# This is the starting point of the program.
# Now I have the images stored as a list of numpy arrays, and the features
# as a list of strings. Serialize each (image, annotation) pair into one
# tf.train.Example and write them all to a single TFRecord file.
images, annotations = create_image_annotation_data()

tfrecords_filename = "database.tfrecords"
writer = tf.python_io.TFRecordWriter(tfrecords_filename)

for img, ann in zip(images, annotations):

    # Height and width are stored so the flat byte buffer can be reshaped
    # back into the original image on the reading side.
    height = img.shape[0]
    width = img.shape[1]

    # BUG FIX: ndarray.tostring() is a deprecated alias of tobytes();
    # tobytes() produces the identical raw byte string.
    img_raw = img.tobytes()

    example = tf.train.Example(features=tf.train.Features(feature={
        'height': _int64_feature(height),
        'width': _int64_feature(width),
        'image_raw': _bytes_feature(img_raw),
        # The annotation string is encoded to UTF-8 bytes; it must later be
        # read back as a string feature, NOT reinterpreted as numbers.
        'annotation_raw': _bytes_feature(tf.compat.as_bytes(ann))
    }))

    writer.write(example.SerializeToString())

writer.close()

# Sanity check: iterate the written TFRecord file eagerly (no graph needed)
# and rebuild every (image, annotation) pair in pure Python.
reconstructed_images = []

record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)

for string_record in record_iterator:
    example = tf.train.Example()
    example.ParseFromString(string_record)

    height = int(example.features.feature['height']
                 .int64_list
                 .value[0])

    width = int(example.features.feature['width']
                .int64_list
                .value[0])

    img_string = (example.features.feature['image_raw']
                  .bytes_list
                  .value[0])

    annotation_string = (example.features.feature['annotation_raw']
                         .bytes_list
                         .value[0])

    # BUG FIX: np.fromstring is deprecated for binary data; np.frombuffer
    # reads the same bytes without copying and without the deprecation.
    img_1d = np.frombuffer(img_string, dtype=np.uint8)
    # -1 lets numpy infer the channel count from the buffer size.
    reconstructed_img = img_1d.reshape((height, width, -1))
    # The annotation was stored as UTF-8 bytes, so decoding yields the
    # original Python string.
    annotation_reconstructed = annotation_string.decode('utf-8')

    # BUG FIX: the results list was declared but never populated.
    reconstructed_images.append((reconstructed_img, annotation_reconstructed))

Portanto, depois de converter imagens e texto em tfRecords e depois de poder lê-los e converter imagens em numpy e o (texto binário) em string em python, tentei ir além usando um filename_queue com um leitor (o objetivo era fornecer ao gráfico lotes de dados, em vez de uma peça de dados por vez. Além disso, o objetivo era enfileirar e desenfileirar a fila de exemplos através de diferentes threads, tornando o treinamento da rede mais rápido)

Portanto, usei o seguinte código:

import tensorflow as tf
import numpy as np
import time

image_file_list = ["database.tfrecords"]
batch_size = 16

# Queue of TFRecord file names to feed the reader.
# NOTE: num_epochs creates a LOCAL variable (the epoch counter), which is
# why tf.local_variables_initializer() must be run below.
filename_queue = tf.train.string_input_producer(image_file_list, num_epochs=1, shuffle=False)

reader = tf.TFRecordReader()

# Read one serialized Example from the queue; the first returned value is
# the record key, which we ignore.
_, serialized_example = reader.read(filename_queue)

features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since all keys are required.
      features={
          'height': tf.FixedLenFeature([], tf.int64),
          'width': tf.FixedLenFeature([], tf.int64),
          'image_raw': tf.FixedLenFeature([], tf.string),
          'annotation_raw': tf.FixedLenFeature([], tf.string)
      })

image = tf.decode_raw(features['image_raw'], tf.uint8)
# BUG FIX: the annotation was written as UTF-8 text bytes, so it must stay
# a string tensor. Reinterpreting the bytes with
# tf.decode_raw(..., tf.float32) produces garbage floats.
annotation = features['annotation_raw']

height = tf.cast(features['height'], tf.int32)
width = tf.cast(features['width'], tf.int32)

image = tf.reshape(image, [height, width, 3])

# BUG FIX: summary ops must exist BEFORE tf.summary.merge_all() is called,
# otherwise merge_all() returns None and the in-loop tf.summary.image call
# only adds dead nodes to the graph. tf.summary.image also expects a 4-D
# [batch, height, width, channels] tensor, hence the expand_dims.
tf.summary.image("image_batch", tf.expand_dims(image, 0))

# Note that min_after_dequeue is needed to make sure that the queue is not
# empty after dequeuing so that we don't run into errors
'''
min_after_dequeue = 100
capacity = min_after_dequeue + 3 * batch_size
ann, images_batch = tf.train.batch([annotation, image],
                                   shapes=[[1], [112, 112, 3]],
                                   batch_size=batch_size,
                                   capacity=capacity,
                                   num_threads=1)
'''

# Start a new session to show example output.
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('C:/Users/user/Documents/tensorboard_logs/New_Runs', sess.graph)

    tf.global_variables_initializer().run()
    # BUG FIX: this is what caused the reported OutOfRangeError. The epoch
    # counter created by num_epochs=1 is a local variable; left
    # uninitialized, the string_input_producer queue closes immediately
    # ("FIFOQueue ... is closed and has insufficient elements").
    tf.local_variables_initializer().run()

    # Coordinate the loading of records across the queue-runner threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for steps in range(16):
        t1 = time.time()
        annotation_string, batch, summary = sess.run([annotation, image, merged])
        t2 = time.time()
        print('time to fetch 16 faces:', (t2 - t1))
        print(annotation_string)
        train_writer.add_summary(summary, steps)

    # Finish off the filename queue coordinator.
    coord.request_stop()
    coord.join(threads)

Finalmente, depois de executar o código acima, recebi o seguinte erro: OutOfRangeError (see above for traceback): FIFOQueue '_0_input_producer' is closed and has insufficient elements (requested 1, current size 0) [[Node: ReaderReadV2 = ReaderReadV2[_device="/job:localhost/replica:0/task:0/cpu:0"](TFRecordReaderV2, input_producer)]]

Outra pergunta:

Como decodificar o banco de dados binário (tfrecords) para recuperar os recursos armazenados "como estrutura de dados de string do python"? Como usar o tf.train.batch para criar um lote de exemplos para alimentar a rede?

Obrigado!! Qualquer ajuda é muito apreciada.

questionAnswers(1)

yourAnswerToTheQuestion