SoFunction
Updated on 2024-11-17

Python + TensorFlow + CNN sample code for license plate recognition

I. Project overview

The objective of this project is to recognize automatically generated license plates corrupted by various kinds of noise. Because character segmentation is difficult under noise interference, all 7 characters of the plate are trained simultaneously here. The character set comprises 31 province abbreviations, 10 Arabic numerals, and 24 letters ('O' and 'I' are excluded), 65 classes in total; each of the 7 character positions is trained with its own loss function.
(Runtime environment: TensorFlow 1.14.0, GPU version)
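
To make the label layout concrete, here is a minimal sketch (hypothetical helper functions, not part of the original code) of how a 7-character plate maps to 7 class indices over the shared 65-class alphabet, using the chars list defined in the generator below:

# Hypothetical helpers for illustration; `chars` is the 65-entry list defined below.
def encode_plate(plate):
  # e.g. "京A12345" -> [0, 41, 32, 33, 34, 35, 36]
  return [chars.index(c) for c in plate]

def decode_label(label):
  # [0, 41, 32, 33, 34, 35, 36] -> "京A12345"
  return "".join(chars[i] for i in label)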

II. Generating license plate data sets

import os
import cv2 as cv
import numpy as np
from math import *
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw


index = {"Kyo.": 0, "Shanghai.": 1, "Tsin.": 2, "Yu": 3, "Ji.": 4, "Jin": 5, "Mon.": 6, "Liao": 7, "Gee.": 8, "Black.": 9,
       "Sue.": 10, "Zhe": 11, "Anhui": 12, "Min.": 13, "Gan": 14, "Ru": 15, "Yu.": 16, "E.": 17, "Xiang": 18, "Cantonese": 19,
       "Gui.": 20, "Joan.": 21, "Chuan.": 22, "Expensive.": 23, "Clouds.": 24, "Hide.": 25, "Shaanxi": 26, "Gan.": 27, "Green.": 28, "Ning.": 29,
       "New": 30, "0": 31, "1": 32, "2": 33, "3": 34, "4": 35, "5": 36, "6": 37, "7": 38, "8": 39,
       "9": 40, "A": 41, "B": 42, "C": 43, "D": 44, "E": 45, "F": 46, "G": 47, "H": 48, "J": 49,
       "K": 50, "L": 51, "M": 52, "N": 53, "P": 54, "Q": 55, "R": 56, "S": 57, "T": 58, "U": 59,
       "V": 60, "W": 61, "X": 62, "Y": 63, "Z": 64}

chars = ["Kyo.", "Shanghai.", "Tsin.", "Yu", "Ji.", "Jin", "Mon.", "Liao", "Gee.", "Black.",
       "Sue.", "Zhe", "Anhui", "Min.", "Gan", "Ru", "Yu.", "E.", "Xiang", "Cantonese",
       "Gui.", "Joan.", "Chuan.", "Expensive.", "Clouds.", "Hide.", "Shaanxi", "Gan.", "Green.", "Ning.",
       "New", "0", "1", "2", "3", "4", "5", "6", "7", "8",
       "9", "A", "B", "C", "D", "E", "F", "G", "H", "J",
       "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U",
       "V", "W", "X", "Y", "Z"]


def AddSmudginess(img, Smu):
  """
  Smudge processing
  :param img: input image
  :param Smu: smudge texture image
  :return: image with smudging added
  """
  rows = r(Smu.shape[0] - 50)
  cols = r(Smu.shape[1] - 50)
  adder = Smu[rows:rows + 50, cols:cols + 50]
  adder = cv.resize(adder, (50, 50))
  img = cv.resize(img, (50, 50))
  img = cv.bitwise_not(img)
  img = cv.bitwise_and(adder, img)
  img = cv.bitwise_not(img)
  return img


def rot(img, angel, shape, max_angel):
  """
  Add perspective distortion
  """
  size_o = [shape[1], shape[0]]
  size = (shape[1] + int(shape[0] * cos((float(max_angel) / 180) * 3.14)), shape[0])
  interval = abs(int(sin((float(angel) / 180) * 3.14) * shape[0]))
  pts1 = np.float32([[0, 0], [0, size_o[1]], [size_o[0], 0], [size_o[0], size_o[1]]])
  if angel > 0:
    pts2 = np.float32([[interval, 0], [0, size[1]], [size[0], 0], [size[0] - interval, size_o[1]]])
  else:
    pts2 = np.float32([[0, 0], [interval, size[1]], [size[0] - interval, 0], [size[0], size_o[1]]])
  M = cv.getPerspectiveTransform(pts1, pts2)
  dst = cv.warpPerspective(img, M, size)
  return dst


def rotRandrom(img, factor, size):
  """
  Add random perspective distortion
  :param img: input image
  :param factor: distortion parameter
  :param size: target size of the image
  :return: image after distortion
  """
  shape = size
  pts1 = np.float32([[0, 0], [0, shape[0]], [shape[1], 0], [shape[1], shape[0]]])
  pts2 = np.float32([[r(factor), r(factor)], [r(factor), shape[0] - r(factor)], [shape[1] - r(factor), r(factor)],
            [shape[1] - r(factor), shape[0] - r(factor)]])
  M = cv.getPerspectiveTransform(pts1, pts2)
  dst = cv.warpPerspective(img, M, size)
  return dst


def tfactor(img):
  """
  Add noise by jittering hue, saturation, and lighting
  """
  hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
  hsv[:, :, 0] = hsv[:, :, 0] * (0.8 + np.random.random() * 0.2)
  hsv[:, :, 1] = hsv[:, :, 1] * (0.3 + np.random.random() * 0.7)
  hsv[:, :, 2] = hsv[:, :, 2] * (0.2 + np.random.random() * 0.8)
  img = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
  return img


def random_envirment(img, noplate_bg):
  """
  Add natural-environment noise; noplate_bg is a list of background images without license plates
  """
  bg_index = r(len(noplate_bg))
  env = cv.imread(noplate_bg[bg_index])
  env = cv.resize(env, (img.shape[1], img.shape[0]))
  bak = (img == 0)
  bak = bak.astype(np.uint8) * 255
  inv = cv.bitwise_and(bak, env)
  img = cv.bitwise_or(inv, img)
  return img

 
def GenCh(f, val):
  """
  Draw a Chinese character
  """
  img = Image.new("RGB", (45, 70), (255, 255, 255))
  draw = ImageDraw.Draw(img)
  draw.text((0, 3), val, (0, 0, 0), font=f)
  img = img.resize((23, 70))
  A = np.array(img)
  return A


def GenCh1(f, val):
  """
  Draw an English letter or digit
  """
  img = Image.new("RGB", (23, 70), (255, 255, 255))
  draw = ImageDraw.Draw(img)
  draw.text((0, 2), val, (0, 0, 0), font=f)  # val.decode('utf-8') under Python 2
  A = np.array(img)
  return A

 
def AddGauss(img, level):
  """
  Add Gaussian blur
  """
  return cv.blur(img, (level * 2 + 1, level * 2 + 1))


def r(val):
  return int(np.random.random() * val)


def AddNoiseSingleChannel(single):
  """
  Add Gaussian noise to a single channel
  """
  diff = 255 - single.max()
  noise = np.random.normal(0, 1 + r(6), single.shape)
  noise = (noise - noise.min()) / (noise.max() - noise.min())
  noise *= diff
  # noise = noise.astype(np.uint8)
  dst = single + noise
  return dst


def addNoise(img):  # sdev = 0.5,avg=10
  img[:, :, 0] = AddNoiseSingleChannel(img[:, :, 0])
  img[:, :, 1] = AddNoiseSingleChannel(img[:, :, 1])
  img[:, :, 2] = AddNoiseSingleChannel(img[:, :, 2])
  return img
 
 
class GenPlate:
  def __init__(self, fontCh, fontEng, NoPlates):
    self.fontC = ImageFont.truetype(fontCh, 43, 0)
    self.fontE = ImageFont.truetype(fontEng, 60, 0)
    self.img = np.array(Image.new("RGB", (226, 70), (255, 255, 255)))
    # the image file names below were elided in the original post and are assumed here
    self.bg = cv.resize(cv.imread("data\\images\\template.bmp"), (226, 70))  # license plate background image
    self.smu = cv.imread("data\\images\\smu2.jpg")  # smudge image used for blurring
    self.noplates_path = []
    for parent, parent_folder, filenames in os.walk(NoPlates):
      for filename in filenames:
        path = parent + "\\" + filename
        self.noplates_path.append(path)

  def draw(self, val):
    offset = 2
    self.img[0:70, offset+8:offset+8+23] = GenCh(self.fontC, val[0])
    self.img[0:70, offset+8+23+6:offset+8+23+6+23] = GenCh1(self.fontE, val[1])
    for i in range(5):
      base = offset + 8 + 23 + 6 + 23 + 17 + i * 23 + i * 6
      self.img[0:70, base:base+23] = GenCh1(self.fontE, val[i+2])
    return self.img

  def generate(self, text):
    if len(text) == 7:
      fg = self.draw(text)  # text.decode(encoding="utf-8") under Python 2
      fg = cv.bitwise_not(fg)
      com = cv.bitwise_or(fg, self.bg)
      com = rot(com, r(60)-30, com.shape, 30)
      com = rotRandrom(com, 10, (com.shape[1], com.shape[0]))
      com = tfactor(com)
      com = random_envirment(com, self.noplates_path)
      com = AddGauss(com, 1+r(4))
      com = addNoise(com)
      return com

  @staticmethod
  def genPlateString(pos, val):
    """
    Generate the license plate string (saved as the image) and
    the license plate list (saved as the label)
    """
    plateStr = ""
    plateList = []
    box = [0, 0, 0, 0, 0, 0, 0]
    if pos != -1:
      box[pos] = 1
    for unit, cpos in zip(box, range(len(box))):
      if unit == 1:
        plateStr += val
        plateList.append(val)
      else:
        if cpos == 0:
          plateStr += chars[r(31)]
          plateList.append(plateStr)
        elif cpos == 1:
          plateStr += chars[41 + r(24)]
          plateList.append(plateStr)
        else:
          plateStr += chars[31 + r(34)]
          plateList.append(plateStr)
    plate = [plateList[0]]
    b = [plateList[i][-1] for i in range(len(plateList))]
    plate.extend(b[1:7])
    return plateStr, plate

  def genBatch(self, batchsize, outputPath, size):
    """
    Write the generated license plate images to a folder together with the matching labels
    (an instance method, since it needs generate() and genPlateString())
    :param batchsize: batch size
    :param outputPath: the path where the output images will be stored
    :param size: size of the output image
    :return: None
    """
    if not os.path.exists(outputPath):
      os.mkdir(outputPath)
    outfile = open('data\\plate\\label.txt', 'w', encoding='utf-8')  # label file name assumed; elided in the original
    for i in range(batchsize):
      plateStr, plate = self.genPlateString(-1, -1)
      # print(plateStr, plate)
      img = self.generate(plateStr)
      img = cv.resize(img, size)
      cv.imwrite(outputPath + "\\" + str(i).zfill(2) + ".jpg", img)
      outfile.write(str(plate) + "\n")


if __name__ == '__main__':
  # font file names were elided in the original; typical names are assumed here
  G = GenPlate("data\\font\\platech.ttf", 'data\\font\\platechar.ttf', "data\\NoPlates")
  G.genBatch(101, 'data\\plate', (272, 72))

Try to keep the generated plate image dimensions below about 300 pixels; this project uses 272 × 72.

Files required for plate generation:

  • Font files: one for the Chinese characters, one for the letters and digits
  • Background images: cropped from photos of vehicles without license plates
  • A license plate background template (blue)
  • Smudge/noise images
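
The exact file names were elided in the original post; the code above assumes a layout along these lines:

data\
  font\      platech.ttf, platechar.ttf   (assumed font file names)
  images\    template.bmp, smu2.jpg       (plate template and smudge image, names assumed)
  NoPlates\  background crops without plates
  plate\     generated images and the label file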

The generated license plates are saved to the plate folder.


III. Data import

from genplate import *
import matplotlib.pyplot as plt

# Generate data for training
class OCRIter:
  def __init__(self, batch_size, width, height):
    super(OCRIter, self).__init__()
     = GenPlate("data\\font\\", 'data\\font\\', "data\\NoPlates")
    self.batch_size = batch_size
     = height
     = width

  def iter(self):
    data = []
    label = []
    for i in range(self.batch_size):
      img, num = self.gen_sample(, , )
      (img)
      (num)
    return (data), (label)

  @staticmethod
  def rand_range(lo, hi):
    return lo + r(hi - lo)

  def gen_rand(self):
    name = ""
    label = []
    label.append(self.rand_range(0, 31))   # first position: one of the 31 province abbreviations
    label.append(self.rand_range(41, 65))  # second position: a letter
    for i in range(5):
      label.append(self.rand_range(31, 65))  # remaining 5 positions: digits or letters
    name += chars[label[0]]
    name += chars[label[1]]
    for i in range(5):
      name += chars[label[i+2]]
    return name, label

  def gen_sample(self, genplate, width, height):
    num, label = self.gen_rand()
    img = genplate.generate(num)
    img = cv.resize(img, (width, height))  # cv.resize expects (width, height)
    img = np.multiply(img, 1 / 255.0)
    return img, label  # label: class indices; img: normalized license plate image

'''
# Test code
O = OCRIter(2, 272, 72)
img, lbl = O.iter()
for im in img:
  plt.imshow(im, cmap='gray')
  plt.show()
print(img.shape)
print(lbl)
'''
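
If the test block is uncommented, the returned arrays should have the following shapes (a sanity check derived from the parameters above, not output reproduced from the original post):

# img: (2, 72, 272, 3)  -- float images scaled to [0, 1]
# lbl: (2, 7)           -- integer class indices in the range [0, 64]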

IV. CNN model construction

import tensorflow as tf


def cnn_inference(images, keep_prob):
  W_conv = {
    'conv1': tf.Variable(tf.truncated_normal([3, 3, 3, 32], stddev=0.1)),
    'conv2': tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1)),
    'conv3': tf.Variable(tf.truncated_normal([3, 3, 32, 64], stddev=0.1)),
    'conv4': tf.Variable(tf.truncated_normal([3, 3, 64, 64], stddev=0.1)),
    'conv5': tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1)),
    'conv6': tf.Variable(tf.truncated_normal([3, 3, 128, 128], stddev=0.1)),
    'fc1_1': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    'fc1_2': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    'fc1_3': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    'fc1_4': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    'fc1_5': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    'fc1_6': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    'fc1_7': tf.Variable(tf.truncated_normal([5*30*128, 65], stddev=0.01)),
    }

  b_conv = {
    'conv1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[32])),
    'conv2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[32])),
    'conv3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
    'conv4': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
    'conv5': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[128])),
    'conv6': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[128])),
    'fc1_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    'fc1_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    'fc1_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    'fc1_4': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    'fc1_5': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    'fc1_6': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    'fc1_7': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[65])),
    }


  # 1st convolutional layer
  conv1 = tf.nn.conv2d(images, W_conv['conv1'], strides=[1, 1, 1, 1], padding='VALID')
  conv1 = tf.nn.bias_add(conv1, b_conv['conv1'])
  conv1 = tf.nn.relu(conv1)

  # 2nd convolutional layer
  conv2 = tf.nn.conv2d(conv1, W_conv['conv2'], strides=[1, 1, 1, 1], padding='VALID')
  conv2 = tf.nn.bias_add(conv2, b_conv['conv2'])
  conv2 = tf.nn.relu(conv2)
  # 1st pooling layer
  pool1 = tf.nn.max_pool2d(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

  # 3rd convolutional layer
  conv3 = tf.nn.conv2d(pool1, W_conv['conv3'], strides=[1, 1, 1, 1], padding='VALID')
  conv3 = tf.nn.bias_add(conv3, b_conv['conv3'])
  conv3 = tf.nn.relu(conv3)

  # 4th convolutional layer
  conv4 = tf.nn.conv2d(conv3, W_conv['conv4'], strides=[1, 1, 1, 1], padding='VALID')
  conv4 = tf.nn.bias_add(conv4, b_conv['conv4'])
  conv4 = tf.nn.relu(conv4)
  # 2nd pooling layer
  pool2 = tf.nn.max_pool2d(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

  # 5th convolutional layer
  conv5 = tf.nn.conv2d(pool2, W_conv['conv5'], strides=[1, 1, 1, 1], padding='VALID')
  conv5 = tf.nn.bias_add(conv5, b_conv['conv5'])
  conv5 = tf.nn.relu(conv5)

  # 6th convolutional layer
  conv6 = tf.nn.conv2d(conv5, W_conv['conv6'], strides=[1, 1, 1, 1], padding='VALID')
  conv6 = tf.nn.bias_add(conv6, b_conv['conv6'])
  conv6 = tf.nn.relu(conv6)
  # 3rd pooling layer
  pool3 = tf.nn.max_pool2d(conv6, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

  # fully connected layer 1_1 (a shared dropout input feeds one head per character)
  # print(pool3.shape)
  reshape = tf.reshape(pool3, [-1, 5 * 30 * 128])
  fc1 = tf.nn.dropout(reshape, keep_prob)
  fc1_1 = tf.add(tf.matmul(fc1, W_conv['fc1_1']), b_conv['fc1_1'])

  # fully connected layer 1_2
  fc1_2 = tf.add(tf.matmul(fc1, W_conv['fc1_2']), b_conv['fc1_2'])

  # fully connected layer 1_3
  fc1_3 = tf.add(tf.matmul(fc1, W_conv['fc1_3']), b_conv['fc1_3'])

  # fully connected layer 1_4
  fc1_4 = tf.add(tf.matmul(fc1, W_conv['fc1_4']), b_conv['fc1_4'])

  # fully connected layer 1_5
  fc1_5 = tf.add(tf.matmul(fc1, W_conv['fc1_5']), b_conv['fc1_5'])

  # fully connected layer 1_6
  fc1_6 = tf.add(tf.matmul(fc1, W_conv['fc1_6']), b_conv['fc1_6'])

  # fully connected layer 1_7
  fc1_7 = tf.add(tf.matmul(fc1, W_conv['fc1_7']), b_conv['fc1_7'])

  return fc1_1, fc1_2, fc1_3, fc1_4, fc1_5, fc1_6, fc1_7


def calc_loss(logit1, logit2, logit3, logit4, logit5, logit6, logit7, labels):
  labels = tf.convert_to_tensor(labels, tf.int32)

  loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit1, labels=labels[:, 0]))
  tf.summary.scalar('loss1', loss1)

  loss2 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit2, labels=labels[:, 1]))
  tf.summary.scalar('loss2', loss2)

  loss3 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit3, labels=labels[:, 2]))
  tf.summary.scalar('loss3', loss3)

  loss4 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit4, labels=labels[:, 3]))
  tf.summary.scalar('loss4', loss4)

  loss5 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit5, labels=labels[:, 4]))
  tf.summary.scalar('loss5', loss5)

  loss6 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit6, labels=labels[:, 5]))
  tf.summary.scalar('loss6', loss6)

  loss7 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logit7, labels=labels[:, 6]))
  tf.summary.scalar('loss7', loss7)

  return loss1, loss2, loss3, loss4, loss5, loss6, loss7


def train_step(loss1, loss2, loss3, loss4, loss5, loss6, loss7, learning_rate):
  # the optimizer class was elided in the original; Adam is assumed here
  optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op1 = optimizer1.minimize(loss1)

  optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op2 = optimizer2.minimize(loss2)

  optimizer3 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op3 = optimizer3.minimize(loss3)

  optimizer4 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op4 = optimizer4.minimize(loss4)

  optimizer5 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op5 = optimizer5.minimize(loss5)

  optimizer6 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op6 = optimizer6.minimize(loss6)

  optimizer7 = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op7 = optimizer7.minimize(loss7)

  return train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7
  

def pred_model(logit1, logit2, logit3, logit4, logit5, logit6, logit7, labels):
  labels = tf.convert_to_tensor(labels, tf.int32)
  labels = tf.reshape(tf.transpose(labels), [-1])
  logits = tf.concat([logit1, logit2, logit3, logit4, logit5, logit6, logit7], 0)
  prediction = tf.nn.in_top_k(logits, labels, 1)
  accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
  tf.summary.scalar('accuracy', accuracy)
  return accuracy
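
As a sanity check on the 5 * 30 * 128 flatten size used above: each 3x3 VALID convolution trims 2 pixels from each spatial dimension and each 2x2 pooling halves them (with flooring), so a 72 x 272 input shrinks as follows:

# input           72 x 272
# conv1 -> conv2  70 x 270 -> 68 x 268
# pool1           34 x 134
# conv3 -> conv4  32 x 132 -> 30 x 130
# pool2           15 x 65
# conv5 -> conv6  13 x 63 -> 11 x 61
# pool3            5 x 30  -> flattened to 5 * 30 * 128 features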

V. Model training

import os
import time
import datetime
import numpy as np
import tensorflow as tf
from input_data import OCRIter
import model

["TF_CPP_MIN_LOG_LEVEL"] = '3'

img_h = 72
img_w = 272
num_label = 7
batch_size = 32
epoch = 10000
learning_rate = 0.0001

logs_path = 'logs\\1005'
model_path = 'saved_model\\1005'

image_holder = tf.placeholder(tf.float32, [batch_size, img_h, img_w, 3])
label_holder = tf.placeholder(tf.int32, [batch_size, 7])
keep_prob = tf.placeholder(tf.float32)


def get_batch():
  data_batch = OCRIter(batch_size, img_w, img_h)  # OCRIter takes (batch_size, width, height)
  image_batch, label_batch = data_batch.iter()
  return np.array(image_batch), np.array(label_batch)


logit1, logit2, logit3, logit4, logit5, logit6, logit7 = model.cnn_inference(
  image_holder, keep_prob)

loss1, loss2, loss3, loss4, loss5, loss6, loss7 = model.calc_loss(
  logit1, logit2, logit3, logit4, logit5, logit6, logit7, label_holder)

train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7 = model.train_step(
  loss1, loss2, loss3, loss4, loss5, loss6, loss7, learning_rate)

accuracy = model.pred_model(logit1, logit2, logit3, logit4, logit5, logit6, logit7, label_holder)

input_image = tf.summary.image('input', image_holder)

summary_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

init_op = tf.global_variables_initializer()

with tf.Session() as sess:
  sess.run(init_op)

  train_writer = tf.summary.FileWriter(logs_path, sess.graph)
  saver = tf.train.Saver()

  start_time1 = time.time()
  for step in range(epoch):
    # Generate a batch of license plate images and label data
    img_batch, lbl_batch = get_batch()

    start_time2 = time.time()
    time_str = datetime.datetime.now().isoformat()

    feed_dict = {image_holder: img_batch, label_holder: lbl_batch, keep_prob: 0.6}
    _1, _2, _3, _4, _5, _6, _7, ls1, ls2, ls3, ls4, ls5, ls6, ls7, acc = sess.run(
      [train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7,
       loss1, loss2, loss3, loss4, loss5, loss6, loss7, accuracy], feed_dict)
    summary_str = sess.run(summary_op, feed_dict)
    train_writer.add_summary(summary_str, step)
    duration = time.time() - start_time2
    loss_total = ls1 + ls2 + ls3 + ls4 + ls5 + ls6 + ls7
    if step % 10 == 0:
      sec_per_batch = float(duration)
      print('%s: Step %d, loss_total = %.2f, acc = %.2f%%, sec/batch = %.2f' %
        (time_str, step, loss_total, acc * 100, sec_per_batch))
    if step % 5000 == 0 or (step + 1) == epoch:
      checkpoint_path = os.path.join(model_path, 'model.ckpt')  # checkpoint file name assumed; elided in the original
      saver.save(sess, checkpoint_path, global_step=step)
  end_time = time.time()
  print("Training over. It costs {:.2f} minutes".format((end_time - start_time1) / 60))

VI. Presentation of training results

Training parameters:
batch_size = 32
epoch = 10000
learning_rate = 0.0001
View the training process in TensorBoard.

accuracy:

[accuracy curve]

The curve converges near the end of the 10000-step run, with a final accuracy of around 94%.

loss:

[loss1, loss2, and loss7 curves]

The three plots above show the loss1, loss2, and loss7 curves respectively. The first character is a province abbreviation, which is harder to recognize than letters and digits, so loss1 settles around 0.08. The second character is a letter, and loss2 stabilizes around 0.001. The later a character appears in the plate, the larger its loss becomes; the seventh character's loss7 stabilizes around 0.6.

VII. Predicting single license plates

import os
import cv2 as cv
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
import model

["TF_CPP_MIN_LOG_LEVEL"] = '3' # Display only Error

index = {"Kyo.": 0, "Shanghai.": 1, "Tsin.": 2, "Yu": 3, "Ji.": 4, "Jin": 5, "Mon.": 6, "Liao": 7, "Gee.": 8, "Black.": 9,
       "Sue.": 10, "Zhe": 11, "Anhui": 12, "Min.": 13, "Gan": 14, "Ru": 15, "Yu.": 16, "E.": 17, "Xiang": 18, "Cantonese": 19,
       "Gui.": 20, "Joan.": 21, "Chuan.": 22, "Expensive.": 23, "Clouds.": 24, "Hide.": 25, "Shaanxi": 26, "Gan.": 27, "Green.": 28, "Ning.": 29,
       "New": 30, "0": 31, "1": 32, "2": 33, "3": 34, "4": 35, "5": 36, "6": 37, "7": 38, "8": 39,
       "9": 40, "A": 41, "B": 42, "C": 43, "D": 44, "E": 45, "F": 46, "G": 47, "H": 48, "J": 49,
       "K": 50, "L": 51, "M": 52, "N": 53, "P": 54, "Q": 55, "R": 56, "S": 57, "T": 58, "U": 59,
       "V": 60, "W": 61, "X": 62, "Y": 63, "Z": 64}

chars = ["Kyo.", "Shanghai.", "Tsin.", "Yu", "Ji.", "Jin", "Mon.", "Liao", "Gee.", "Black.",
       "Sue.", "Zhe", "Anhui", "Min.", "Gan", "Ru", "Yu.", "E.", "Xiang", "Cantonese",
       "Gui.", "Joan.", "Chuan.", "Expensive.", "Clouds.", "Hide.", "Shaanxi", "Gan.", "Green.", "Ning.",
       "New", "0", "1", "2", "3", "4", "5", "6", "7", "8",
       "9", "A", "B", "C", "D", "E", "F", "G", "H", "J",
       "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U",
       "V", "W", "X", "Y", "Z"]


def get_one_image(test):
  """ Pick one license plate image at random """
  n = len(test)
  rand_num = np.random.randint(0, n)
  img_dir = test[rand_num]
  image_show = Image.open(img_dir)
  plt.imshow(image_show)  # display the plate image
  image = cv.imread(img_dir)
  image = image.reshape(1, 72, 272, 3)  # add the batch dimension expected by the placeholder
  image = np.multiply(image, 1 / 255.0)
  return image

batch_size = 1
x = tf.placeholder(tf.float32, [batch_size, 72, 272, 3])
keep_prob = tf.placeholder(tf.float32)

test_dir = 'data\\plate\\'
test_image = []
for file in os.listdir(test_dir):
  test_image.append(test_dir + file)
test_image = list(test_image)

image_array = get_one_image(test_image)

logit1, logit2, logit3, logit4, logit5, logit6, logit7 = model.cnn_inference(x, keep_prob)

model_path = 'saved_model\\1005'

saver = tf.train.Saver()

with tf.Session() as sess:
  print("Reading checkpoint...")
  ckpt = tf.train.get_checkpoint_state(model_path)
  if ckpt and ckpt.model_checkpoint_path:
    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    saver.restore(sess, ckpt.model_checkpoint_path)
    print('Loading success, global_step is %s' % global_step)
  else:
    print('No checkpoint file found')

  pre1, pre2, pre3, pre4, pre5, pre6, pre7 = sess.run(
    [logit1, logit2, logit3, logit4, logit5, logit6, logit7],
    feed_dict={x: image_array, keep_prob: 1.0})
  prediction = np.reshape(np.array([pre1, pre2, pre3, pre4, pre5, pre6, pre7]), [-1, 65])

  max_index = np.argmax(prediction, axis=1)
  print(max_index)
  line = ''
  result = np.array([])
  for i in range(prediction.shape[0]):
    if i == 0:
      result = np.argmax(prediction[i][0:31])        # first character: provinces only
    if i == 1:
      result = np.argmax(prediction[i][41:65]) + 41  # second character: letters only
    if i > 1:
      result = np.argmax(prediction[i][31:65]) + 31  # remaining characters: digits and letters
    line += chars[result] + " "
  print('predicted: ' + line)
plt.show()

Of 20 randomly tested plates, 18 were predicted correctly and 2 incorrectly. The two failure cases show that the model's recognition of similar-looking and occluded characters still needs improvement.


VIII. Summary

The CNN model built here is relatively simple: 6 convolutional layers, 3 pooling layers, and 1 fully connected layer (split into 7 parallel output heads). Recognition accuracy could be improved by deepening the model and increasing the number of neurons per layer. The training data consists of automatically generated plates; because real plate images differ from generated ones in their noise characteristics, the recognition rate on real plates will differ as well. To use a real-plate dataset, preprocessing such as filtering, equalization, erosion, and vector quantization would be required.
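
As an illustration of the preprocessing mentioned above, here is a minimal OpenCV sketch (not part of the original project; kernel sizes and parameter values are assumptions):

import cv2 as cv

def preprocess_plate(img):
  # Gaussian filtering to suppress sensor noise
  img = cv.GaussianBlur(img, (3, 3), 0)
  # histogram equalization on the luminance channel
  yuv = cv.cvtColor(img, cv.COLOR_BGR2YUV)
  yuv[:, :, 0] = cv.equalizeHist(yuv[:, :, 0])
  img = cv.cvtColor(yuv, cv.COLOR_YUV2BGR)
  # erosion with a small kernel to thin out smear artifacts
  kernel = cv.getStructuringElement(cv.MORPH_RECT, (2, 2))
  img = cv.erode(img, kernel)
  return img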

This is the whole content of this article.