
TensorFlow basics: how a beginner can quickly build linear regression and classification models

TensorFlow's current release is version 2.5. Read the official documentation before starting; I recently went through it myself.
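You can check which version you have installed from Python (a trivial snippet; the exact version string will vary):

import tensorflow as tf
print(tf.__version__)  # e.g. '2.5.0'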

TensorFlow is a very powerful tool with a huge ecosystem.

TensorFlow ships with Keras as its high-level API (tf.keras).

Keras Sequential modeling tutorials are not covered here.

About the environment: on Ubuntu with a GPU, you need CUDA and nvcc.

If you don't know how to install them, see: Complete Ubuntu 18.04 deep learning GPU environment configuration (NVIDIA graphics driver installation, CUDA 9.0 installation, cuDNN installation, Anaconda installation).

Alternatively, skip installation entirely and use Colab (accessing it may require a proxy in some regions).

Test GPU

>>> from tensorflow.python.client import device_lib
>>> device_lib.list_local_devices()

The output should list your GPU if the graphics card is hooked up correctly.
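In TensorFlow 2.x you can also ask tf.config directly; a minimal sketch:

import tensorflow as tf
# Lists the physical GPUs visible to TensorFlow; an empty list means CPU only.
gpus = tf.config.list_physical_devices('GPU')
print(gpus)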

Check the official documentation for details: https://www.tensorflow.org/install

Run Jupyter on the server to follow along.

Define tensor constants.

import tensorflow as tf
# Create a Tensor.
hello = tf.constant("hello world")
hello
# Define tensor constants.
a = tf.constant(1)
b = tf.constant(6)
c = tf.constant(9)
# Tensor operations
# (+, -, *, /, ...)
add = tf.add(a, b)
sub = tf.subtract(a, b)
mul = tf.multiply(a, b)
div = tf.divide(a, b)
# Access values via .numpy(), the same as in torch.
print("add =", add.numpy())
print("sub =", sub.numpy())
print("mul =", mul.numpy())
print("div =", div.numpy())
add = 7
sub = -5
mul = 6
div = 0.16666666666666666
mean = tf.reduce_mean([a, b, c])
sum_ = tf.reduce_sum([a, b, c])
# Access tensor values.
print("mean =", mean.numpy())
print("sum =", sum_.numpy())
mean = 5
sum = 16
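Note that a, b, and c are integer tensors, so tf.reduce_mean does integer arithmetic: (1 + 6 + 9) / 3 truncates to 5. With float inputs you get the fractional mean; a quick sketch:

import tensorflow as tf
# Float tensors give the true mean instead of the truncated integer one.
mean_f = tf.reduce_mean([1.0, 6.0, 9.0])
print("mean =", mean_f.numpy())  # ~5.3333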
# Matrix multiplications.
matrix1 = tf.constant([[1., 2.], [3., 4.]])
matrix2 = tf.constant([[5., 6.], [7., 8.]])
product = tf.matmul(matrix1, matrix2)
product
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[19., 22.],
       [43., 50.]], dtype=float32)>
# Tensor to NumPy.
np_product = product.numpy()
print(type(np_product), np_product)
<class 'numpy.ndarray'> [[19. 22.]
 [43. 50.]]
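Going the other way, a NumPy array can be wrapped back into a tensor with tf.convert_to_tensor; a minimal sketch:

import numpy as np
import tensorflow as tf
arr = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
# Most tf ops accept NumPy arrays directly, but this makes the conversion explicit.
tensor = tf.convert_to_tensor(arr)
print(tensor)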

Linear Regression

The following quickly builds a linear regression model in TensorFlow. Instead of using Keras' Sequential model, the model is defined by hand, in the style of torch (a Sequential sketch is shown after this example for comparison).

import numpy as np
import tensorflow as tf
# Parameters:
learning_rate = 0.01
training_steps = 1000
display_step = 50
# Training Data.
X = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
Y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
random = np.random
# Weights and biases, randomly initialized.
W = tf.Variable(random.randn(), name="weight")
b = tf.Variable(random.randn(), name="bias")
# Linear regression (Wx + b).
def linear_regression(x):
    return W * x + b
# Mean square error.
def mean_square(y_pred, y_true):
    return tf.reduce_mean(tf.square(y_pred - y_true))
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# Optimization process.
def run_optimization():
    # Wrap calculations in GradientTape for automatic differentiation.
    with tf.GradientTape() as g:
        pred = linear_regression(X)
        loss = mean_square(pred, Y)
    # Calculate the gradients.
    gradients = g.gradient(loss, [W, b])
    # Update W and b following the gradients.
    optimizer.apply_gradients(zip(gradients, [W, b]))
# Train for the given number of steps.
for step in range(1, training_steps + 1):
    # Run optimization to update W and b values.
    run_optimization()
    if step % display_step == 0:
        pred = linear_regression(X)
        loss = mean_square(pred, Y)
        print("Step: %i, loss: %f, W: %f, b: %f" % (step, loss, W.numpy(), b.numpy()))


import matplotlib.pyplot as plt
plt.plot(X, Y, 'ro', label='Original data')
plt.plot(X, np.array(W * X + b), label='Fitted line')
plt.legend()
plt.show()
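For comparison, here is a minimal sketch of the same model written with the Keras Sequential API that this article otherwise avoids (it reuses X and Y from above; an illustration, not the approach used in this article):

import tensorflow as tf
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1, input_shape=(1,))  # a single weight W and bias b, i.e. Wx + b
])
model.compile(optimizer=tf.keras.optimizers.SGD(0.01), loss='mse')
# Keras expects a 2-D feature matrix, hence the reshape.
model.fit(X.reshape(-1, 1), Y, epochs=1000, verbose=0)
print(model.layers[0].get_weights())  # [W, b]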

Classification Model

This example uses the MNIST handwritten digit dataset.

The dataset contains 60,000 training examples and 10,000 test examples.

The digits have been size-normalized and centered in fixed-size 28x28 pixel images, with pixel values from 0 to 255.

In this example, each image is converted to float32, normalized to [0, 1], and flattened into a one-dimensional array of 784 features (28×28).

import numpy as np
import tensorflow as tf
#  MNIST data
num_classes = 10      # 0->9 digits
num_features = 784    # 28 * 28
# Parameters 
lr = 0.01
batch_size = 256
display_step = 100
training_steps = 1000
# Prepare MNIST data
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Convert to float32.
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten images into 1-D vectors of 784 dimensions (28 * 28).
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])
# Normalize from [0, 255] to [0, 1].
x_train, x_test = x_train / 255, x_test / 255
# Use the tf.data API to shuffle and batch the data.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.repeat().shuffle(5000).batch(batch_size=batch_size).prefetch(1)
# Weight of shape [784, 10] ~= [num_features, num_classes]
W = tf.Variable(tf.ones([num_features, num_classes]), name='weight')
# Bias of shape [10] ~= [num_classes]
b = tf.Variable(tf.zeros([num_classes]), name='bias')
# Logistic regression: W*x + b
def logistic_regression(x):
    # Apply softmax to normalize the logits to a probability distribution.
    out = tf.nn.softmax(tf.matmul(x, W) + b)
    return out
# Cross-entropy loss function.
def cross_entropy(y_pred, y_true):
    # Encode the label as a one-hot vector.
    y_true = tf.one_hot(y_true, depth=num_classes)
    # Clip prediction values to avoid log(0) errors.
    y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
    # Calculate the cross entropy.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred), 1))
    return cross_entropy
# Accuracy metric.
def accuracy(y_pred, y_true):
    # The predicted class is the index of the highest score in the prediction vector.
    correct = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
    return tf.reduce_mean(tf.cast(correct, tf.float32))
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(lr)
# Optimization process.
def run_optimization(x, y):
    with tf.GradientTape() as g:
        pred = logistic_regression(x)
        loss = cross_entropy(y_pred=pred, y_true=y)
    gradients = g.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))
# Training loop.
for step, (batch_x, batch_y) in enumerate(train_dataset.take(training_steps), 1):
    # Run the optimization to update W and b.
    run_optimization(x=batch_x, y=batch_y)
    if step % display_step == 0:
        pred = logistic_regression(batch_x)
        loss = cross_entropy(y_pred=pred, y_true=batch_y)
        acc = accuracy(y_pred=pred, y_true=batch_y)
        print("Step: %i, loss: %f, accuracy: %f" % (step, loss, acc))

pred = logistic_regression(x_test)
print(f"Test Accuracy: {accuracy(pred, y_test)}")

Test Accuracy: 0.892300009727478

import matplotlib.pyplot as plt
n_images = 5
test_images = x_test[:n_images]
predictions = logistic_regression(test_images)
# Display the image and the model prediction for the first five test examples.
for i in range(n_images):
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    plt.show()
    print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))

Model prediction: 7

Model prediction: 2

Model prediction: 1

Model prediction: 0

Model prediction: 4

The above is a detailed look at TensorFlow basic operations for beginners to quickly build linear regression and classification models. For more on building linear regression and classification models with TensorFlow, please see my other related articles!