author     Christian Cleberg <hello@cleberg.net>    2023-09-18 20:53:06 -0500
committer  Christian Cleberg <hello@cleberg.net>    2023-09-18 20:53:06 -0500
commit     5e2bb53d528d60e0a44607377fa3d09553630d5b (patch)
tree       1bba9eb02c09f8c758a64d24fb9f80fc4cf24726 /.virtual_documents/notebooks
parent     a26d0140151902c594def7e0f6a234b973ddee0d (diff)
add tensorflow notebook
Diffstat (limited to '.virtual_documents/notebooks')
-rw-r--r--  .virtual_documents/notebooks/IBM Watson Visual Recognition.ipynb  75
-rw-r--r--  .virtual_documents/notebooks/TensorFlow_QuickStart.ipynb          82
-rw-r--r--  .virtual_documents/notebooks/Untitled.ipynb                       47
3 files changed, 204 insertions, 0 deletions
diff --git a/.virtual_documents/notebooks/IBM Watson Visual Recognition.ipynb b/.virtual_documents/notebooks/IBM Watson Visual Recognition.ipynb
new file mode 100644
index 0000000..adeabf5
--- /dev/null
+++ b/.virtual_documents/notebooks/IBM Watson Visual Recognition.ipynb
@@ -0,0 +1,75 @@
+
+
+
+pip install --upgrade --user "ibm-watson>=4.5.0"
+
+
+apikey = "<your-apikey>"  # IBM Cloud IAM API key for your service instance
+version = "2018-03-19"    # Watson API version date (YYYY-MM-DD), not an SDK version
+url = "<your-url>"        # service endpoint URL from your IBM Cloud credentials
+
+
+import json
+from ibm_watson import VisualRecognitionV3
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
+
+authenticator = IAMAuthenticator(apikey)
+visual_recognition = VisualRecognitionV3(
+ version=version,
+ authenticator=authenticator
+)
+
+visual_recognition.set_service_url(url)
+
+
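+# Opt out of IBM's default data collection: the 'x-watson-learning-opt-out'
+# header tells Watson not to use this request data to improve the service.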
+visual_recognition.set_default_headers({'x-watson-learning-opt-out': "true"})
+
+
+data = [
+    {
+        "title": "Bear Country, South Dakota",
+        "url": "https://example.com/photos/highres/20140717.jpg"
+    },
+    {
+        "title": "Pactola Lake",
+        "url": "https://example.com/photos/highres/20140718.jpg"
+    },
+    {
+        "title": "Welcome to Utah",
+        "url": "https://example.com/photos/highres/20190608_02.jpg"
+    },
+    {
+        "title": "Honey Badger",
+        "url": "https://example.com/photos/highres/20190611_03.jpg"
+    },
+    {
+        "title": "Grand Canyon Lizard",
+        "url": "https://example.com/photos/highres/20190612.jpg"
+    },
+    {
+        "title": "The Workhouse",
+        "url": "https://example.com/photos/highres/20191116_01.jpg"
+    }
+]
+
+
+from ibm_watson import ApiException
+
+# Classify each image by URL and print the classes the service returns.
+for item in data:
+    try:
+        image_url = item["url"]  # avoid shadowing the service 'url' defined above
+        images_filename = item["title"]
+        classes = visual_recognition.classify(
+            url=image_url,
+            images_filename=images_filename,
+            threshold='0.6',
+            owners=["IBM"]).get_result()
+        print("-" * 120)
+        print("Image Title: ", item["title"], "\n")
+        print("Image URL: ", image_url, "\n")
+        classification_results = classes["images"][0]["classifiers"][0]["classes"]
+        for result in classification_results:
+            print(result["class"], "(", result["score"], ")")
+        print("-" * 120)
+    except ApiException as ex:
+        print("Method failed with status code " + str(ex.code) + ": " + ex.message)
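+
+
+# A minimal sketch (not from the original notebook): the v3 API can also
+# classify a local image file via the 'images_file' parameter. The path
+# 'photos/example.jpg' below is a hypothetical placeholder.
+with open('photos/example.jpg', 'rb') as images_file:
+    classes = visual_recognition.classify(
+        images_file=images_file,
+        threshold='0.6',
+        owners=["IBM"]).get_result()
+print(json.dumps(classes, indent=2))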
diff --git a/.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb b/.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb
new file mode 100644
index 0000000..5f6d4b3
--- /dev/null
+++ b/.virtual_documents/notebooks/TensorFlow_QuickStart.ipynb
@@ -0,0 +1,82 @@
+
+
+
+
+
+
+# pip3 install tensorflow
+
+
+import tensorflow as tf
+print("TensorFlow version:", tf.__version__)
+
+
+
+
+
+# Load and prepare the MNIST dataset. The pixel values of the images range from 0 through 255.
+# Scale these values to a range of 0 to 1 by dividing the values by 255.0.
+# This also converts the sample data from integers to floating-point numbers:
+mnist = tf.keras.datasets.mnist
+
+(x_train, y_train), (x_test, y_test) = mnist.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+
+# You can preview the shape of the raw data prior to training the model:
+print("Train:", x_train.shape, y_train.shape)
+print("Test: ", x_test.shape, y_test.shape)
+
+
+
+
+
+# Build a tf.keras.Sequential model:
+model = tf.keras.models.Sequential([
+ tf.keras.layers.Flatten(input_shape=(28, 28)),
+ tf.keras.layers.Dense(128, activation='relu'),
+ tf.keras.layers.Dropout(0.2),
+ tf.keras.layers.Dense(10)
+])
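+
+
+# Optionally inspect the layer stack and parameter counts before training:
+model.summary()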
+
+
+# For each example, the model returns a vector of logits or log-odds scores, one for each class.
+predictions = model(x_train[:1]).numpy()
+predictions
+
+
+# The tf.nn.softmax function converts these logits to probabilities for each class:
+tf.nn.softmax(predictions).numpy()
+
+
+# Define a loss function for training using losses.SparseCategoricalCrossentropy:
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
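+
+
+# Sanity check: the untrained model should give roughly uniform probabilities
+# (1/10 for each class), so the initial loss should be close to
+# -log(1/10) ~= 2.3:
+loss_fn(y_train[:1], predictions).numpy()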
+
+
+# Configure and compile the model
+model.compile(optimizer='adam',
+ loss=loss_fn,
+ metrics=['accuracy'])
+
+
+
+
+
+
+# Use the Model.fit method to adjust your model parameters and minimize the loss:
+model.fit(x_train, y_train, epochs=5)
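+
+
+# Optional (an addition, not part of the original notebook): persist the
+# trained model to disk; the filename 'mnist_model.keras' is arbitrary.
+model.save("mnist_model.keras")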
+
+
+# The Model.evaluate method checks the model's performance, usually on a validation set or test set.
+model.evaluate(x_test, y_test, verbose=2)
+
+
+# If you want your model to return a probability, you can wrap the trained
+# model and attach the softmax layer to it:
+probability_model = tf.keras.Sequential([
+ model,
+ tf.keras.layers.Softmax()
+])
+probability_model(x_test[:5])
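+
+
+# A short usage sketch (an addition, not from the original notebook): take the
+# argmax of the probabilities to get a predicted digit for each of the five
+# test images and compare against the true labels.
+print("Predicted:", tf.argmax(probability_model(x_test[:5]), axis=1).numpy())
+print("Actual:   ", y_test[:5])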
+
+
+
diff --git a/.virtual_documents/notebooks/Untitled.ipynb b/.virtual_documents/notebooks/Untitled.ipynb
new file mode 100644
index 0000000..7d6f130
--- /dev/null
+++ b/.virtual_documents/notebooks/Untitled.ipynb
@@ -0,0 +1,47 @@
+
+
+
+# pip3 install tensorflow
+
+
+import tensorflow as tf
+print("TensorFlow version:", tf.__version__)
+
+
+# Load and prepare the MNIST dataset. The pixel values of the images range from 0 through 255.
+# Scale these values to a range of 0 to 1 by dividing the values by 255.0.
+# This also converts the sample data from integers to floating-point numbers:
+mnist = tf.keras.datasets.mnist
+
+(x_train, y_train), (x_test, y_test) = mnist.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+
+# Build a tf.keras.Sequential model:
+model = tf.keras.models.Sequential([
+ tf.keras.layers.Flatten(input_shape=(28, 28)),
+ tf.keras.layers.Dense(128, activation='relu'),
+ tf.keras.layers.Dropout(0.2),
+ tf.keras.layers.Dense(10)
+])
+
+
+# For each example, the model returns a vector of logits or log-odds scores, one for each class.
+predictions = model(x_train[:1]).numpy()
+predictions
+
+
+# The tf.nn.softmax function converts these logits to probabilities for each class:
+tf.nn.softmax(predictions).numpy()
+
+
+
+
+
+
+
+
+
+
+
+