The error:
File system scheme '[local]' not implemented (file: '/content/drive/MyDrive/foop/train/beet_salad/2056837.jpg')[[{{node ReadFile}}]]
...
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.preprocessing import image_dataset_from_directory

from google.colab import drive
drive.mount('/content/drive', force_remount=True)
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
strategy = tf.distribute.TPUStrategy(resolver)
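# Optional sanity check (added here, not in the original code): a Colab v2-8 TPU
# should normally report 8 replicas at this point.
print("Number of replicas:", strategy.num_replicas_in_sync)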
train_dir = os.path.join('/content', 'drive', 'MyDrive', 'foop', 'train')
test_dir = os.path.join('/content', 'drive', 'MyDrive', 'foop', 'testt')
BATCH_SIZE = 32
IMG_SIZE = (160, 160)
train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE)
test_dataset = image_dataset_from_directory(test_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE)
class_names = train_dataset.class_names
print(class_names)
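# Optional sanity check (added, not in the original code): the element spec should
# show batched image tensors of shape (None, 160, 160, 3) plus integer labels.
print(train_dataset.element_spec)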
val_batches = tf.data.experimental.cardinality(test_dataset)
val_dataset = test_dataset.take(val_batches // 5)
test_dataset = test_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(val_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
val_dataset = val_dataset.prefetch(buffer_size=AUTOTUNE)
preprocess_input = tf.keras.applications.resnet50.preprocess_input
rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset=-1)  # defined but not used below
IMG_SHAPE = IMG_SIZE + (3,)
#base_model = tf.keras.applications.ResNet50()
image_batch, label_batch = next(iter(train_dataset))
with strategy.scope():
    base_model = tf.keras.applications.ResNet50(include_top=False, weights='imagenet',
                                                input_tensor=None, input_shape=None,
                                                pooling=None, classes=101)
    image_batch, label_batch = next(iter(train_dataset))  # <-- the line the error points to
    feature_batch = base_model(image_batch)
    print(feature_batch.shape)
base_model.trainable = False
#base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
inputs = tf.keras.Input(shape=(160, 160, 3))
#x = data_augmentation(inputs)
x = inputs
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
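# Optional sanity check (added, not in the original code): confirm the assembled
# model builds and ends in the single-unit Dense prediction layer defined above.
model.summary()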
...
I am new to distributed TensorFlow on the Google Colab TPU runtime. Please help me resolve this issue, and if there are any other mistakes in what I am doing, please point them out.
As @Andrey pointed out, how do I modify `image_batch, label_batch = next(iter(train_dataset))`?