Diffstat (limited to '.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb')
-rw-r--r-- | .virtual_documents/notebooks/TensorFlow_QuickStart.ipynb | 82 |
1 files changed, 0 insertions, 82 deletions
diff --git a/.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb b/.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb
deleted file mode 100644
index 5f6d4b3..0000000
--- a/.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
-
-
-
-
-# pip3 install tensorflow
-
-
-import tensorflow as tf
-print("TensorFlow version:", tf.__version__)
-
-
-
-
-
-# Load and prepare the MNIST dataset. The pixel values of the images range from 0 through 255.
-# Scale these values to a range of 0 to 1 by dividing the values by 255.0.
-# This also converts the sample data from integers to floating-point numbers:
-mnist = tf.keras.datasets.mnist
-
-(x_train, y_train), (x_test, y_test) = mnist.load_data()
-x_train, x_test = x_train / 255.0, x_test / 255.0
-
-
-# You can preview the raw data prior to training the model
-print(mnist.load_data())
-
-
-
-
-
-# Build a tf.keras.Sequential model:
-model = tf.keras.models.Sequential([
-  tf.keras.layers.Flatten(input_shape=(28, 28)),
-  tf.keras.layers.Dense(128, activation='relu'),
-  tf.keras.layers.Dropout(0.2),
-  tf.keras.layers.Dense(10)
-])
-
-
-# For each example, the model returns a vector of logits or log-odds scores, one for each class.
-predictions = model(x_train[:1]).numpy()
-predictions
-
-
-# The tf.nn.softmax function converts these logits to probabilities for each class:
-tf.nn.softmax(predictions).numpy()
-
-
-# Define a loss function for training using losses.SparseCategoricalCrossentropy:
-loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
-
-
-# Configure and compile the model
-model.compile(optimizer='adam',
-              loss=loss_fn,
-              metrics=['accuracy'])
-
-
-
-
-
-
-# Use the Model.fit method to adjust your model parameters and minimize the loss:
-model.fit(x_train, y_train, epochs=5)
-
-
-# The Model.evaluate method checks the model's performance, usually on a validation set or test set.
-model.evaluate(x_test, y_test, verbose=2)
-
-
-# If you want your model to return a probability, you can wrap the trained model, and attach the softmax to it:
-
-probability_model = tf.keras.Sequential([
-  model,
-  tf.keras.layers.Softmax()
-])
-probability_model(x_test[:5])
-
-
-
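The removed file is the .virtual_documents mirror of the standard TensorFlow 2 / Keras MNIST quickstart notebook. For reference, a minimal, self-contained sketch of the same workflow, assuming only that TensorFlow 2.x is installed (this sketch is illustrative and not part of the change itself):

# Minimal sketch of the workflow the removed notebook covered
# (assumes TensorFlow 2.x; mirrors the Keras MNIST quickstart).
import tensorflow as tf

# Load MNIST and scale pixel values from [0, 255] to [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Small feed-forward classifier that outputs one raw logit per digit class.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10),
])

# from_logits=True because the final layer applies no softmax.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)

# Train, then report loss and accuracy on the held-out test set.
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)

# Optionally wrap the trained model with a Softmax layer so it returns probabilities.
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
print(probability_model(x_test[:5]))

Keeping the final Dense layer as raw logits and passing from_logits=True to the loss is numerically more stable than baking a softmax into the model; the Softmax wrapper is only attached afterwards for inference.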