import tensorflow as tf

# Creates a graph: c = a (2x3) @ b (3x2) -> a 2x2 matrix.
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session (use a context manager so it is closed automatically).
with tf.Session() as sess:
    # Ask the runtime to return the per-device partitioned graphs
    # alongside the result of this run call.
    options = tf.RunOptions(output_partition_graphs=True)
    metadata = tf.RunMetadata()
    # BUG FIX: the original ran `sum_operation`, which is defined only in the
    # later benchmark snippet; the op built in THIS graph is `c`.
    c_val = sess.run(c, options=options, run_metadata=metadata)
    print(metadata.partition_graphs)
///////////////// Simple performance test code //////////////////
import sys
import numpy as np
import tensorflow as tf
from datetime import datetime

# Build a 10000x10000 random matrix on the GPU, multiply it by its
# transpose, and reduce to a single scalar — a simple device throughput test.
shape = (10000, 10000)  # FIX: int() on integer literals was a no-op
with tf.device("/gpu:0"):
    # FIX: the original paste lost the indentation of this `with` body,
    # which is a SyntaxError; these three ops must be inside the device scope.
    random_matrix = tf.random_uniform(shape=shape, minval=0, maxval=1)
    dot_operation = tf.matmul(random_matrix, tf.transpose(random_matrix))
    sum_operation = tf.reduce_sum(dot_operation)

startTime = datetime.now()
# log_device_placement=True prints which device each op was assigned to.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as session:
    result = session.run(sum_operation)
    print(result)

print("\n" * 2)
print("Time taken:", datetime.now() - startTime)
print("\n" * 2)
Other posts in the 'Python Library > TensorFlow' category:

| Post | Comments | Date |
|---|---|---|
| LMDB 불러오기 ( LSUN 데이터 셋 ) | (0) | 2019.08.26 |
| Tensorflow Distributed learning | (0) | 2019.08.25 |
| 텐서플로 tcp 통신을 통한 분산처리 ( google colab gpu 원격 활용 ) | (0) | 2019.08.24 |
| Select TensorFlow operators to use in TensorFlow Lite | (0) | 2019.08.21 |
| TensorFlow Lite and TensorFlow operator compatibility | (0) | 2019.08.21 |