From 52576a80754206dc4b668143d38e9ce53f5d545c Mon Sep 17 00:00:00 2001
From: Christian Cleberg
Date: Mon, 18 Sep 2023 20:54:34 -0500
Subject: add .gitignore

---
 .../IBM Watson Visual Recognition-checkpoint.ipynb | 209 ----------
 .../TensorFlow_QuickStart-checkpoint.ipynb         | 433 ---------------------
 2 files changed, 642 deletions(-)
 delete mode 100644 notebooks/.ipynb_checkpoints/IBM Watson Visual Recognition-checkpoint.ipynb
 delete mode 100644 notebooks/.ipynb_checkpoints/TensorFlow_QuickStart-checkpoint.ipynb

(limited to 'notebooks/.ipynb_checkpoints')

diff --git a/notebooks/.ipynb_checkpoints/IBM Watson Visual Recognition-checkpoint.ipynb b/notebooks/.ipynb_checkpoints/IBM Watson Visual Recognition-checkpoint.ipynb
deleted file mode 100644
index 86d5862..0000000
--- a/notebooks/.ipynb_checkpoints/IBM Watson Visual Recognition-checkpoint.ipynb
+++ /dev/null
@@ -1,209 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# IBM Watson Visual Recognition\n",
-    "Create an account on [IBM Watson Studio](https://www.ibm.com/cloud/watson-studio) and add the [Watson Visual Recognition](https://www.ibm.com/cloud/watson-visual-recognition) service to your free account."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "pip install --upgrade --user \"ibm-watson>=4.5.0\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 23,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "apikey = \"\"\n",
-    "version = \"2018-03-19\"\n",
-    "url = \"\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 24,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import json\n",
-    "from ibm_watson import VisualRecognitionV3\n",
-    "from ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n",
-    "\n",
-    "authenticator = IAMAuthenticator(apikey)\n",
-    "visual_recognition = VisualRecognitionV3(\n",
-    "    version=version,\n",
-    "    authenticator=authenticator\n",
-    ")\n",
-    "\n",
-    "visual_recognition.set_service_url(url)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 25,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "visual_recognition.set_default_headers({'x-watson-learning-opt-out': \"true\"})"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 60,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data = [\n",
-    "{\n",
-    " \"title\": \"Bear Country, South Dakota\",\n",
-    " \"url\": \"https://example.com/photos/highres/20140717.jpg\"\n",
-    "},\n",
-    "{\n",
-    " \"title\": \"Pactola Lake\",\n",
-    " \"url\": \"https://example.com/photos/highres/20140718.jpg\"\n",
-    "},\n",
-    "{\n",
-    " \"title\": \"Welcome to Utah\",\n",
-    " \"url\": \"https://example.com/photos/highres/20190608_02.jpg\"\n",
-    "},\n",
-    "{\n",
-    " \"title\": \"Honey Badger\",\n",
-    " \"url\": \"https://example.com/photos/highres/20190611_03.jpg\"\n",
-    "},\n",
-    "{\n",
-    " \"title\": \"Grand Canyon Lizard\",\n",
-    " \"url\": \"https://example.com/photos/highres/20190612.jpg\"\n",
-    "},\n",
-    "{\n",
-    " \"title\": \"The Workhouse\",\n",
-    " \"url\": \"https://example.com/photos/highres/20191116_01.jpg\"\n",
-    "}\n",
-    "]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 59,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "Image Title: Bear Country, South Dakota \n",
-      "\n",
-      "brown bear ( 0.944 )\n",
-      "bear ( 1 )\n",
-      "carnivore ( 1 )\n",
-      "mammal ( 1 )\n",
-      "animal ( 1 )\n",
-      "Alaskan brown bear ( 0.759 )\n",
-      "greenishness color ( 0.975 )\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "Image Title: Pactola Lake \n",
-      "\n",
-      "ponderosa pine ( 0.763 )\n",
-      "pine tree ( 0.867 )\n",
-      "tree ( 0.867 )\n",
-      "plant ( 0.867 )\n",
-      "blue color ( 0.959 )\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "Image Title: Welcome to Utah \n",
-      "\n",
-      "signboard ( 0.953 )\n",
-      "building ( 0.79 )\n",
-      "blue color ( 0.822 )\n",
-      "purplish blue color ( 0.619 )\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "Image Title: Honey Badger \n",
-      "\n",
-      "American badger ( 0.689 )\n",
-      "carnivore ( 0.689 )\n",
-      "mammal ( 0.864 )\n",
-      "animal ( 0.864 )\n",
-      "armadillo ( 0.618 )\n",
-      "light brown color ( 0.9 )\n",
-      "reddish brown color ( 0.751 )\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "Image Title: Grand Canyon Lizard \n",
-      "\n",
-      "western fence lizard ( 0.724 )\n",
-      "lizard ( 0.93 )\n",
-      "reptile ( 0.93 )\n",
-      "animal ( 0.93 )\n",
-      "ultramarine color ( 0.633 )\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n",
-      "Image Title: The Workhouse \n",
-      "\n",
-      "castle ( 0.896 )\n",
-      "fortification ( 0.905 )\n",
-      "defensive structure ( 0.96 )\n",
-      "stronghold ( 0.642 )\n",
-      "building ( 0.799 )\n",
-      "mound ( 0.793 )\n",
-      "blue color ( 0.745 )\n",
-      "-------------------------------------------------------------------------------------------------------------------------------------\n"
-     ]
-    }
-   ],
-   "source": [
-    "from ibm_watson import ApiException\n",
-    "\n",
-    "for x in range(len(data)):\n",
-    "    try:\n",
-    "        url = data[x][\"url\"]\n",
-    "        images_filename = data[x][\"title\"]\n",
-    "        classes = visual_recognition.classify(\n",
-    "            url=url,\n",
-    "            images_filename=images_filename,\n",
-    "            threshold='0.6',\n",
-    "            owners=[\"IBM\"]).get_result()\n",
-    "        print(\"-------------------------------------------------------------------------------------------------------------------------------------\")\n",
-    "        print(\"Image Title: \", data[x][\"title\"], \"\\n\")\n",
-    "        print(\"Image URL: \", data[x][\"url\"], \"\\n\")\n",
-    "        classification_results = classes[\"images\"][0][\"classifiers\"][0][\"classes\"]\n",
-    "        for result in classification_results:\n",
-    "            print(result[\"class\"], \"(\", result[\"score\"], \")\")\n",
-    "        print(\"-------------------------------------------------------------------------------------------------------------------------------------\")\n",
-    "    except ApiException as ex:\n",
-    "        print(\"Method failed with status code \" + str(ex.code) + \": \" + ex.message)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.5"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/notebooks/.ipynb_checkpoints/TensorFlow_QuickStart-checkpoint.ipynb b/notebooks/.ipynb_checkpoints/TensorFlow_QuickStart-checkpoint.ipynb
deleted file mode 100644
index 9323a3f..0000000
--- a/notebooks/.ipynb_checkpoints/TensorFlow_QuickStart-checkpoint.ipynb
+++ /dev/null
@@ -1,433 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "id": "368ae4ce-f2d4-4d31-b4b6-4c95c01c472c",
-   "metadata": {},
-   "source": [
-    "# TensorFlow Quickstart\n",
-    "\n",
-    "Getting started with neural network machine learning models in TensorFlow."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "f97fa6a5-b5db-45ac-98f4-54a4fee7ddaf",
-   "metadata": {},
-   "source": [
-    "## Set up TensorFlow"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "id": "3ce17707-7c32-4ccf-8ef1-fbad5a78db7b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# pip3 install tensorflow"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "id": "9e0d2030-33c0-4da7-bf65-919cdb3c113c",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "TensorFlow version: 2.13.0\n"
-     ]
-    }
-   ],
-   "source": [
-    "import tensorflow as tf\n",
-    "print(\"TensorFlow version:\", tf.__version__)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "d23e7ecb-d531-426d-85dd-1d1d0f926439",
-   "metadata": {},
-   "source": [
-    "## Load a dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "id": "d04bb60f-346b-44f5-bae8-16bb1cc60f87",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Load and prepare the MNIST dataset. The pixel values of the images range from 0 through 255.\n",
-    "# Scale these values to a range of 0 to 1 by dividing the values by 255.0.\n",
-    "# This also converts the sample data from integers to floating-point numbers:\n",
-    "mnist = tf.keras.datasets.mnist\n",
-    "\n",
-    "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
-    "x_train, x_test = x_train / 255.0, x_test / 255.0"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "id": "7895b2b4-3666-4a00-9536-8cdf4c4357da",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "((array([[[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       ...,\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]]], dtype=uint8), array([5, 0, 4, ..., 5, 6, 8], dtype=uint8)), (array([[[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       ...,\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]],\n",
-      "\n",
-      "       [[0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        ...,\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0],\n",
-      "        [0, 0, 0, ..., 0, 0, 0]]], dtype=uint8), array([7, 2, 1, ..., 4, 5, 6], dtype=uint8)))\n"
-     ]
-    }
-   ],
-   "source": [
-    "# You can preview the raw data prior to training the model\n",
-    "print(mnist.load_data())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "82f07fd0-3341-4ac7-b2cc-ddc99802896f",
-   "metadata": {},
-   "source": [
-    "## Build a machine learning model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "id": "e3903d22-f584-4305-85a7-d7e1494cf909",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Build a tf.keras.Sequential model:\n",
-    "model = tf.keras.models.Sequential([\n",
-    "  tf.keras.layers.Flatten(input_shape=(28, 28)),\n",
-    "  tf.keras.layers.Dense(128, activation='relu'),\n",
-    "  tf.keras.layers.Dropout(0.2),\n",
-    "  tf.keras.layers.Dense(10)\n",
-    "])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "id": "f4dd7df9-feb6-48b3-b332-4652812571d4",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([[ 0.28218323, -0.2626474 , -0.16938315,  0.15272117, -0.2957897 ,\n",
-       "        -0.0528494 ,  0.02909562,  0.06403146,  0.67431676, -0.35960984]],\n",
-       "      dtype=float32)"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "# For each example, the model returns a vector of logits or log-odds scores, one for each class.\n",
-    "predictions = model(x_train[:1]).numpy()\n",
-    "predictions"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "id": "b9a5a663-8d95-4fc5-a569-efb6362454e9",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([[0.12565382, 0.07287167, 0.07999501, 0.1103954 , 0.07049612,\n",
-       "        0.08988202, 0.0975576 , 0.1010261 , 0.18598464, 0.0661376 ]],\n",
-       "      dtype=float32)"
-      ]
-     },
-     "execution_count": 9,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "# The tf.nn.softmax function converts these logits to probabilities for each class: \n",
-    "tf.nn.softmax(predictions).numpy()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "id": "c11f1e4e-c6a8-4a65-a4cb-36486209797c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Define a loss function for training using losses.SparseCategoricalCrossentropy:\n",
-    "loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "id": "b23213f0-7818-4c58-9672-495bc6bd240a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Configure and compile the model\n",
-    "model.compile(optimizer='adam',\n",
-    "              loss=loss_fn,\n",
-    "              metrics=['accuracy'])\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "74edfcf4-7b45-407f-8523-40cfab8cabc7",
-   "metadata": {},
-   "source": [
-    "## Train and evaluate your model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "id": "c0438b2f-78f5-469f-a2f9-d31198ac6411",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Epoch 1/5\n",
-      "1875/1875 [==============================] - 1s 509us/step - loss: 0.3016 - accuracy: 0.9124\n",
-      "Epoch 2/5\n",
-      "1875/1875 [==============================] - 1s 514us/step - loss: 0.1462 - accuracy: 0.9572\n",
-      "Epoch 3/5\n",
-      "1875/1875 [==============================] - 1s 505us/step - loss: 0.1087 - accuracy: 0.9663\n",
-      "Epoch 4/5\n",
-      "1875/1875 [==============================] - 1s 512us/step - loss: 0.0893 - accuracy: 0.9718\n",
-      "Epoch 5/5\n",
-      "1875/1875 [==============================] - 1s 499us/step - loss: 0.0774 - accuracy: 0.9758\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       ""
-      ]
-     },
-     "execution_count": 13,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "# Use the Model.fit method to adjust your model parameters and minimize the loss: \n",
-    "model.fit(x_train, y_train, epochs=5)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "id": "7778bac2-ebd4-43eb-94a8-03b79152c58a",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "313/313 - 0s - loss: 0.0790 - accuracy: 0.9757 - 126ms/epoch - 403us/step\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "[0.07904709875583649, 0.9757000207901001]"
-      ]
-     },
-     "execution_count": 14,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "# The Model.evaluate method checks the model's performance, usually on a validation set or test set.\n",
-    "model.evaluate(x_test, y_test, verbose=2)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 15,
-   "id": "96d86df4-4f22-4d76-ac4b-720192239015",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       ""
-      ]
-     },
-     "execution_count": 15,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "# If you want your model to return a probability, you can wrap the trained model, and attach the softmax to it:\n",
-    "\n",
-    "probability_model = tf.keras.Sequential([\n",
-    "  model,\n",
-    "  tf.keras.layers.Softmax()\n",
-    "])\n",
-    "probability_model(x_test[:5])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ab098ffa-ab7d-4a76-90e0-255aa0763d22",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.5"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
--
cgit v1.2.3-70-g09d2