SoFunction
Updated on 2024-12-17

A TensorFlow implementation that prints the output of each layer of a network.

You can generate a .pb file with weights directly using the following code, or you can convert a ckpt checkpoint to a .pb file using TensorFlow's official freeze_graph.py script.

constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['net_loss/inference/encode/conv_output/conv_output'])
with open('net_model.pb', mode='wb') as f:
  f.write(constant_graph.SerializeToString())

In TensorFlow 1.x, the output of each layer can be obtained from a weight-bearing .pb file via the get_tensor_by_name function, as shown below.

import os
import  as ops
import argparse
import time
import math
 
import tensorflow as tf
import glob
import numpy as np
import  as plt
import cv2
 
["CUDA_VISIBLE_DEVICES"] = "-1"
 
gragh_path = './'
image_path = './'
inputtensorname = 'input_tensor:0'
tensorname = 'loss/inference/encode/resize_images/ResizeBilinear'
filepath='./net_output.txt'
HEIGHT=256
WIDTH=256
VGG_MEAN = [103.939, 116.779, 123.68]
 
with ().as_default():
  graph_def = ()
  with (gragh_path, 'rb') as fid:
    serialized_graph = ()
    graph_def.ParseFromString(serialized_graph)
 
    tf.import_graph_def(graph_def, name='')
 
    image = (image_path)
    image = (image, (WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)
    image_np = (image)
    image_np = image_np - VGG_MEAN
    image_np_expanded = np.expand_dims(image_np, axis=0)
 
    with () as sess:
      ops = tf.get_default_graph().get_operations()
      tensor_name = tensorname + ':0'
      tensor_dict = tf.get_default_graph().get_tensor_by_name(tensor_name)
      image_tensor = tf.get_default_graph().get_tensor_by_name(inputtensorname)
      output = (tensor_dict, feed_dict={image_tensor: image_np_expanded})
      
      ftxt = open(filepath,'w')
      transform = (0, 3, 1, 2)
      transform = ()
      weight_count = 0
      for i in transform:
        if weight_count % 10 == 0 and weight_count != 0:
          ('\n')
        (str(i) + ',')
        weight_count += 1
      ()

The above TensorFlow implementation of printing the output of each layer is all that I have shared with you.