seed (string, 25-2.89k chars) | seed_api (string, 14-102 chars) | index (int64, 0-14.8k) |
---|---|---|
import tensorflow as tf
my_var = tf.Variable(tf.zeros([1,20]))
merged = tf.summary.merge_all()
| tensorflow.summary.merge_all | 7,200 |
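The row above only calls merge_all on a graph with no registered summaries; a minimal TF 1.x-style sketch of the usual pairing with a scalar summary and a FileWriter (the 'loss' tag and '/tmp/logs' directory are illustrative assumptions, not part of the row):

import tensorflow as tf

loss = tf.constant(0.5)                       # stand-in for a real loss tensor
tf.summary.scalar('loss', loss)               # register a scalar summary
merged = tf.summary.merge_all()               # merge every registered summary op

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)  # illustrative log dir
    summary_proto = sess.run(merged)
    writer.add_summary(summary_proto, global_step=0)
    writer.close()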
import tensorflow as tf
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
| tensorflow.is_finite | 7,201 |
import tensorflow as tf
buffers = tf.random_normal((sequence_length, 1, config.d_proj))
transitions = tf.constant(
| tensorflow.constant | 7,202 |
import tensorflow as tf
a = tf.print(tf.reduce_sum(unorm_w))
with tf.control_dependencies([a]):
final_loss = tf.reduce_sum(loss)
return final_loss, cstr_pct
def contra_traj_lossV8(pred, tgt, horizon=12):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
| tensorflow.split | 7,203 |
from tensorflow.python.ops import nn
def __init__(self,
alpha,
beta,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusAlphaBeta"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
alpha=nn.softplus(alpha, name="softplus_alpha"),
beta=nn.softplus(beta, name="softplus_gamma"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
| tensorflow.python.ops.nn.softplus | 7,204 |
from tensorflow.python.ops import array_ops
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
| tensorflow.python.ops.array_ops.transpose | 7,205 |
import tensorflow.contrib.slim as slim
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(total_losses)
if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
tower_grads.append(grads)
self.log_printer(fcos, optimizer, global_step, tower_grads, total_loss_dict, num_gpu*cfgs.BATCH_SIZE, graph)
if __name__ == '__main__':
trainer = TrainFCOS(cfgs)
| tensorflow.contrib.slim.learning.clip_gradient_norms | 7,206 |
import tensorflow as tf
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
| tensorflow.concat | 7,207 |
import tensorflow as tf
i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)
# Finally calculate interpolated values.
x0_f = tf.to_float(x0)
x1_f = tf.to_float(x1)
y0_f = tf.to_float(y0)
y1_f = tf.to_float(y1)
z0_f = tf.to_float(z0)
| tensorflow.to_float | 7,208 |
import tensorflow as tf
# raw mask for positive > 0.5, and for negative < 0.3
# each positive examples has one label
positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold'])
fpositive_mask = tf.cast(positive_mask, tf.float32)
n_positives = tf.reduce_sum(fpositive_mask)
# negative examples are those whose max overlap is still lower than neg_threshold; note that some positives may also have a lower jaccard
# note: anchors whose gscore is 0 were either ignored during anchor encoding or have 0 overlap with all ground truth
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
fnegtive_mask = tf.cast(negtive_mask, tf.float32)
n_negtives = tf.reduce_sum(fnegtive_mask)
n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))
| tensorflow.equal | 7,209 |
import tensorflow as tf
direct_mask = tf.greater(head_idxs, dep_idxs) # [bs, slh, sld]
else:
direct_mask = tf.less(head_idxs, dep_idxs) # [bs, slh, sld]
# [bs, slh, slh]
rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
attn_mask = tf.logical_and(direct_mask, rep_mask_tile) # [bs, slh, sld]
# tensor tile
| tensorflow.expand_dims | 7,210 |
import tensorflow as tf
with tf.control_dependencies(control_inputs=(applied,)):
| tensorflow.control_dependencies | 7,211 |
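The snippet above breaks off right after opening the with block; a small hedged sketch of the typical tf.control_dependencies pattern, using an illustrative assign_add as the op that must run before anything created inside the block:

import tensorflow as tf

counter = tf.Variable(0, name='counter')
increment = tf.assign_add(counter, 1)          # op that must run first

with tf.control_dependencies([increment]):
    value_after = tf.identity(counter)         # new op inside the block, so it waits for increment

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(value_after))               # 1, because the increment ran before the read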
import tensorflow as tf
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
tpu_config=tf.contrib.tpu.TPUConfig(
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
| tensorflow.contrib.tpu.TPUConfig | 7,212 |
import tensorflow as tf
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
| tensorflow.zeros_initializer | 7,213 |
import tensorflow as tf
import numpy as np
from google.protobuf import text_format
import tensorflow as tf
import preprocessing
import datasets
NUM_TEST_IMAGES = 50000
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
import os
file_ext = os.path.splitext(model_file)[1]
with open(model_file, "rb") as f:
if file_ext == '.pbtxt':
text_format.Merge(f.read(), graph_def)
else:
| tensorflow.Graph | 7,214 |
import tensorflow as tf
"decay_rate": 0.5
}
ned_lr_decay_fn = opt.get_learning_rate_decay_fn(hparams)
ned_lr = ned_lr_decay_fn(learning_rate=1., global_step=global_step)
ned_lr_true = tf.train.natural_exp_decay(
1., global_step-hparams["start_decay_step"],
hparams["kwargs"]["decay_steps"], hparams["kwargs"]["decay_rate"])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
pc_lr_, pc_lr_true_, ned_lr_, ned_lr_true_ = sess.run(
[pc_lr, pc_lr_true, ned_lr, ned_lr_true])
self.assertEqual(pc_lr_, pc_lr_true_)
self.assertEqual(ned_lr_, ned_lr_true_)
def test_get_gradient_clip_fn(self): # pylint: disable=too-many-locals
"""Tests get_gradient_clip_fn.
"""
| tensorflow.global_variables_initializer | 7,215 |
import tensorflow as tf
# a_fc2 = tf.layers.dense(a_fc1, 128, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc2_e')
# a_fc3 = tf.layers.dense(a_fc2, 64, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc3_e')
self.q_eval = tf.layers.dense(a_fc1, self.num_a, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='q_e')
# ------------------ build target_net ------------------
with tf.variable_scope('target_net'):
a_fc1_ = tf.layers.dense(self.s_, 128, tf.nn.relu, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='agent_fc1_t')
# a_fc2_ = tf.layers.dense(a_fc1_, 128, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc2_t')
# a_fc3_ = tf.layers.dense(a_fc2_, 64, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc3_t')
self.q_next = tf.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer,
| tensorflow.variable_scope | 7,216 |
from tensorflow.python.ops import array_ops
def test_softmax_classifier(self):
with self.cached_session() as session:
features = array_ops.placeholder(dtypes.float32, [None, 3])
labels = array_ops.placeholder(dtypes.float32, [None, 2])
weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
| tensorflow.python.ops.array_ops.placeholder | 7,217 |
import tensorflow as tf
self.encode.get_shape().as_list()[1]]
tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv')
self.embedding_test_ph = tf.placeholder(tf.float32, embedding_shape, name='embedding')
self.embedding_test = tf.Variable(tf.random_normal(embedding_shape), name='test_embedding', trainable=False)
self.embedding_assign = self.embedding_test.assign(self.embedding_test_ph)
| tensorflow.placeholder | 7,218 |
import tensorflow as tf
:param inputdata:
:param is_training:
:param name:
:param scale:
:return:
"""
return tf.layers.batch_normalization(inputs=inputdata, training=is_training, name=name, scale=scale)
@staticmethod
def layergn(inputdata, name, group_size=32, esp=1e-5):
"""
:param inputdata:
| tensorflow.layers.batch_normalization | 7,219 |
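When tf.layers.batch_normalization is used with training=True as above, its moving-average updates are placed in the UPDATE_OPS collection and must be run together with the train op; a minimal sketch of that standard TF 1.x pattern (the placeholder, loss, and optimizer are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16])
h = tf.layers.batch_normalization(x, training=True)
loss = tf.reduce_mean(tf.square(h))                      # illustrative loss

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # moving mean/variance updates
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)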
import tensorflow as tf
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
| tensorflow.nn.seq2seq.embedding_rnn_seq2seq | 7,220 |
from tensorflow.python.framework import ops
for config_name, config in test_configs.items():
num_layers = config["num_layers"]
num_units = config["num_units"]
batch_size = config["batch_size"]
seq_length = config["seq_length"]
with ops.Graph().as_default(), ops.device("/device:GPU:0"):
inputs = seq_length * [
array_ops.zeros([batch_size, num_units], dtypes.float32)
]
cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units) # pylint: disable=cell-var-from-loop
| tensorflow.python.framework.ops.device | 7,221 |
import tensorflow as tf
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat(0, [shape[:1] // parts, shape[1:]])
stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
# Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
| tensorflow.slice | 7,222 |
import tensorflow as tf
v_shape = [1]
if v_shape == [1]:
v = tf.tile(v, [self._num_datashards])
sharded_features[k] = self._data_parallelism(tf.identity,
tf.split(
v, self._num_datashards,
0))
return sharded_features
| tensorflow.split | 7,223 |
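For context on the tf.split call above, a small sketch of the (value, num_or_size_splits, axis) signature with concrete shapes (the input tensor is illustrative):

import tensorflow as tf

v = tf.reshape(tf.range(12), [6, 2])      # shape [6, 2]
halves = tf.split(v, 2, axis=0)           # two tensors of shape [3, 2]
uneven = tf.split(v, [1, 2, 3], axis=0)   # shapes [1, 2], [2, 2], [3, 2]

with tf.Session() as sess:
    print([t.shape.as_list() for t in halves])  # [[3, 2], [3, 2]]
    print(sess.run(uneven[0]))                  # [[0 1]]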
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
targets = tensor_signature.create_placeholders_from_signatures(
self._targets_info)
| tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_placeholders_from_signatures | 7,224 |
import tensorflow as tf
scales_to_logits_reversed = (
outputs_to_scales_to_logits_reversed[output])
logits_reversed = _resize_bilinear(
tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]),
tf.shape(images)[1:3],
scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits_reversed), 4))
for output in sorted(outputs_to_predictions):
predictions = outputs_to_predictions[output]
# Compute average prediction across different scales and flipped images.
predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
outputs_to_predictions[output] = tf.argmax(predictions, 3, output_type=tf.dtypes.int32)
outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions)
return outputs_to_predictions
def predict_labels(images, model_options):
"""Predicts segmentation labels.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
| tensorflow.argmax | 7,225 |
import tensorflow as tf
scene_data = df.get_data()
saver = tf.train.Saver(tf.global_variables())
| tensorflow.global_variables | 7,226 |
import tensorflow as tf
initializer=tf.constant_initializer(float('nan')))
self.strides = [1, d_h, d_w, 1]
self.padding = padding
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,2])
t = tf.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2])
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,data_format='NHWC',
strides=self.strides, padding=self.padding),
self.b,data_format='NHWC',name=name)
def get_variables(self):
| tensorflow.assign | 7,227 |
import tensorflow as tf
flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
class_prediction = tf.contrib.layers.fully_connected(
flattened_inputs, self._num_classes)
box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': tf.reshape(
class_prediction, [-1, 1, self._num_classes]),
'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
}
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
| tensorflow.reshape | 7,228 |
import tensorflow as tf
env.seed(1)
STATE_DIM = env.observation_space.shape[0] # 24
ACTION_DIM = env.action_space.shape[0] # 4
ACTION_BOUND = env.action_space.high # [1, 1, 1, 1]
# all placeholder for tf
with tf.name_scope('S'):
S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')
with tf.name_scope('R'):
R = tf.placeholder(tf.float32, [None, 1], name='r')
with tf.name_scope('S_'):
S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')
| tensorflow.name_scope | 7,229 |
import tensorflow as tf
# multiply by the embedding matrix.
# embed is the outputs of the hidden layer (embedding layer), it is a
# row vector with 'embedding_size' values.
with tf.variable_scope(name):
embeddings = tf.get_variable(
name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
# Construct the variables for the NCE loss (i.e. negative sampling)
nce_weights = tf.get_variable(
name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args)
nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args)
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels
# each time we evaluate the loss.
self.nce_cost = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
inputs=embed,
| tensorflow.get_variable | 7,230 |
import tensorflow as tf
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
| tensorflow.argmax | 7,231 |
import tensorflow as tf
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
| tensorflow.flags.DEFINE_string | 7,232 |
import tensorflow as tf
def get_classification_loss(
FLAGS, features, n_class, is_training):
"""Loss for downstream classification tasks."""
bsz_per_core = tf.shape(features["input_ids"])[0]
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
label = tf.reshape(features["label_ids"], [bsz_per_core])
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
| tensorflow.transpose | 7,233 |
from tensorflow.python.framework import ops
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
| tensorflow.python.framework.ops.RegisterShape | 7,234 |
import tensorflow as tf
# hardware related configuration
tf.app.flags.DEFINE_integer(
| tensorflow.app.flags.DEFINE_integer | 7,235 |
import tensorflow as tf
tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv'))
tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv'))
tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv'))
tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv'))
tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))
with tf.variable_scope("translate") as scope:
trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))
trans_z = linear(trans_h0, featsize, 'trans_z')
self.translated_z = trans_z
with tf.variable_scope("deconv") as scope:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4, s_h8, s_h16 = \
int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16)
s_w2, s_w4, s_w8, s_w16 = \
int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)
output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3),
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
| tensorflow.variable_scope | 7,236 |
import tensorflow as tf
with tf.variable_scope(name):
W = get_variable("W", shape=[size, size, in_channels, out_channels], dtype=tf.float32,
initializer=initializer, regularizer=tf.nn.l2_loss)
b = get_variable("b", shape=[1, 1, 1, out_channels], dtype=tf.float32,
initializer=tf.zeros_initializer(),trainable=bias)
if dilation:
assert(strides == [1, 1, 1, 1])
out = tf.add(tf.nn.atrous_conv2d(inp, W, rate=dilation, padding=padding), b, name='convolution')
out.set_shape([batch_size, res1, res2, out_channels])
else:
out = tf.add(tf.nn.conv2d(inp, W, strides=strides, padding=padding), b, name='convolution')
if apply_relu:
out = relu(out, alpha=alpha, name='relu')
| tensorflow.nn.atrous_conv2d | 7,237 |
from tensorflow.contrib.eager.python.examples.revnet import revnet
with tf.Graph().as_default():
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
x = tf.random_normal(
shape=(self.config.batch_size,) + self.config.input_shape)
t = tf.random_uniform(
shape=(self.config.batch_size,),
minval=0,
maxval=self.config.n_classes,
dtype=tf.int32)
global_step = tf.Variable(0., trainable=False)
model = revnet.RevNet(config=config)
_, saved_hidden = model(x)
grads, _ = model.compute_gradients(saved_hidden=saved_hidden, labels=t)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
train_op = optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(1):
sess.run(train_op)
| tensorflow.contrib.eager.python.examples.revnet.revnet.RevNet | 7,238 |
import tensorflow as tf
# Supervised Encoder Loss
supervised_encoder_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_input, logits=encoder_output_label_))
all_variables = tf.trainable_variables()
| tensorflow.nn.softmax_cross_entropy_with_logits | 7,239 |
from tensorflow.python.framework import ops
used_var /= (1. - bn_lag**(step + 1))
else:
used_mean, used_var = mean, var
cur_mean, cur_var = used_mean, used_var
# update variables
if train:
with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
with ops.colocate_with(mean):
new_mean = tf.assign_sub(
mean,
tf.check_numerics(
decay * (mean - cur_mean), "NaN in moving mean."))
with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
with ops.colocate_with(var):
new_var = tf.assign_sub(
var,
tf.check_numerics(decay * (var - cur_var),
"NaN in moving variance."))
with tf.name_scope(name, "IncrementTime", [step]):
with ops.colocate_with(step):
new_step = tf.assign_add(step, 1.)
used_var += 0. * new_mean * new_var * new_step
used_var += epsilon
return used_mean, used_var
| tensorflow.python.framework.ops.colocate_with | 7,240 |
import tensorflow as tf
self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))
self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients
self.U0_pred = self.net_U0(self.x0_tf) # N0 x q
self.U1_pred = self.net_U1(self.x1_tf) # N1 x q
self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol' : 1.0 * np.finfo(float).eps})
| tensorflow.square | 7,241 |
import tensorflow as tf
params = find_trainable_variables('model')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer())
shapes = json.load(open('model/params_shapes.json'))
| tensorflow.global_variables_initializer | 7,242 |
from tensorflow.python.framework import tensor_shape
def batch_runner_fn(self):
return _scheduled_stamp_resource_op_runner
def _move_tensors(tensors, device):
"""Moves a list of tensors to a device by concatenating/splitting them."""
# Reset the device setting to avoid weird interactions with device merging
# logic.
with ops.device(None):
if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
with ops.device(tensors[0].device):
values = array_ops.stack(tensors)
with ops.device(device):
return array_ops.unstack(values)
else:
with ops.device(tensors[0].device):
sizes = array_ops.stack(
[array_ops.shape(tensor)[0] for tensor in tensors])
values = array_ops.concat(tensors, axis=0)
| tensorflow.python.framework.tensor_shape.scalar | 7,243 |
import tensorflow as tf
input_idx, sl, rep_mask, rep_map, None) # todo: point !
)
with tf.variable_scope('output'):
if keep_unselected:
range_head = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_head])
scatter_attn = tf.cond(
tf.equal(sl_head, 0),
lambda: tf.zeros([bs, sl+1, hn], tf.float32),
lambda: tf.scatter_nd(
| tensorflow.range | 7,244 |
import tensorflow as tf
l, 15, kernel_shape=1, nl=tf.identity)
return l
with argscope(Conv2D, kernel_shape=7, nl=tf.nn.relu):
out1 = (LinearWrap(shared)
.Conv2D('conv5_1_CPM', 512, kernel_shape=1)
.Conv2D('conv5_2_CPM', 15, kernel_shape=1, nl=tf.identity)())
out2 = add_stage(2, out1)
out3 = add_stage(3, out2)
out4 = add_stage(4, out3)
out5 = add_stage(5, out4)
out6 = add_stage(6, out5)
tf.image.resize_bilinear(out6, [368, 368], name='resized_map')
def run_test(model_path, img_file):
param_dict = np.load(model_path, encoding='latin1').item()
predict_func = OfflinePredictor(PredictConfig(
inputs_desc=[InputDesc(tf.float32, (None, 368, 368, 3), 'input')],
tower_func=CPM,
session_init=DictRestore(param_dict),
input_names=['input'],
output_names=['resized_map']
))
| tensorflow.image.resize_bilinear | 7,245 |
import tensorflow as tf
def load_model_params(net,
param_dict,
sess,
ignore_missing=False):
for param_name, param_data in param_dict:
with tf.variable_scope(param_name, reuse=True):
try:
var = tf.get_variable(param_name)
sess.run(var.assign(param_data))
except ValueError:
| tensorflow.variable_scope | 7,246 |
from tensorflow.contrib.boosted_trees.proto import learner_pb2
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
| tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig | 7,247 |
import tensorflow as tf
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign
# data race and must run without TSAN.
def testParallelUpdateWithLocking(self):
with self.test_session() as sess:
zeros_t = tf.fill([1024, 1024], 0.0)
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(zeros_t)
adds = [tf.assign_add(p, ones_t, use_locking=True)
for _ in range(20)]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(target=run_add, args=(add_op,)) for add_op in adds]
for t in threads:
| tensorflow.assign_add | 7,248 |
import tensorflow as tf
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(self.preds_by_word,
[-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
self.preds_by_word.get_shape())
self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
| tensorflow.reduce_sum | 7,249 |
import tensorflow as tf
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return self.conv2d(_t)
| tensorflow.image.resize_nearest_neighbor | 7,250 |
import tensorflow as tf
logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
predictions = tf.nn.softmax(logit, name="predictions")
# calculate loss and optimizer
with tf.variable_scope("FCoptimize", reuse=None):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logit, labels=Y)
+ l2_reg_lambda * l2_loss)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# calculate accuracy
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
print ("done...")
print ("************")
path='save/'
ckpt_name = 'save/model.ckpt'
fname = 'model.tf'
dst_nodes = ['output/predictions']
saver = tf.train.Saver()
| tensorflow.argmax | 7,251 |
import tensorflow as tf
strides=stride, padding="SAME", use_bias=False)
x = x * mask_ratio
if use_bias:
bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x * update_mask
| tensorflow.constant_initializer | 7,252 |
import tensorflow as tf
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights,
)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights
)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]
)
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32
)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions
)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss
)
| tensorflow.argmax | 7,253 |
import tensorflow as tf
post_size = tf.shape(dropout_mask_phs[0])[0]
x = tf.tile(tf.reshape(x, [1, tf.shape(x)[0], tf.shape(x)[1]]), [post_size, 1, 1])
| tensorflow.shape | 7,254 |
import tensorflow as tf
class PyOpTest(tf.test.TestCase):
def testBasic(self):
def my_func(x, y):
return np.sinh(x) + np.cosh(y)
# scalar
with self.test_session():
x = tf.constant(1.0, tf.float32)
y = tf.constant(2.0, tf.float32)
z = tf.py_func(my_func, [x, y], [tf.float32])
self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))
# array
with self.test_session():
x = tf.constant([1.0, 2.0], tf.float64)
y = tf.constant([2.0, 3.0], tf.float64)
z = tf.py_func(my_func, [x, y], [tf.float64])
self.assertAllEqual(
| tensorflow.constant | 7,255 |
import tensorflow as tf
def crf_decode_layer(self, logits, crf_params, nwords):
with tf.name_scope("CRF_decode"):
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
return pred_ids
def compute_metrics(self, tags, pred_ids, num_tags, indices, nwords):
weights = tf.sequence_mask(nwords)
# metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids)
# metrics_correct_rate = correct_rate(tags, pred_ids, weights)
metrics = {
"acc": tf.metrics.accuracy(tags, pred_ids, weights),
| tensorflow.sequence_mask | 7,256 |
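The weights in compute_metrics come from tf.sequence_mask; a minimal sketch of what it produces for concrete lengths (the values are illustrative):

import tensorflow as tf

nwords = tf.constant([1, 3, 2])
mask = tf.sequence_mask(nwords)   # maxlen defaults to max(nwords) = 3
# mask evaluates to:
# [[ True, False, False],
#  [ True,  True,  True],
#  [ True,  True, False]]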
from tensorflow.python.summary import summary
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net model_fn.
Args:
| tensorflow.python.summary.summary.histogram | 7,257 |
import tensorflow as tf
valid_image,valid_label= getinputs(r'C:\JC\tfrecord\64_shuffle\validation.tfrecords')
batch_image,batch_label=get_batch(image,label,trainnum,0)
work=trainwork()
inf=work.inference(batch_image)
loss=work.softmax_loss(inf,batch_label)
opti=work.optimer(loss,learnrate)
test_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum)
test_inf=work.test_inference(test_image_batch)
test_labels=tf.one_hot(test_label_batch,classnum)
test_pre = tf.reshape(test_inf, [testnum, classnum])
correct_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)
valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)
valid_inf=work.valid_inference(valid_image_batch)
valid_labels=tf.one_hot(valid_label_batch,classnum)
#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))
valid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32))
valid_pre = tf.argmax(valid_pre, 1)
valid_true = tf.argmax(valid_labels, 1)
| tensorflow.cast | 7,258 |
import tensorflow as tf
with tf.device('/cpu:0'):
OPT = tf.train.AdamOptimizer(1e-4) # later we mainly use this optimizer's apply_gradients op
# OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC') # define the critic training procedure
GLOBAL_AC = ACnet(scope=GLOBAL_NET_SCOPE) # create the central brain GLOBAL_AC; only the structure (A and C parameters) is built
workers = []
for i in range(N_workers): # N_workers equals the number of CPUs
i_name = 'W_%i' % i # worker name
workers.append(Worker(name=i_name, globalAC=GLOBAL_AC)) # create an independent worker
COORD = tf.train.Coordinator() # multithreading coordinator
SESS.run(tf.global_variables_initializer()) # initialize all parameters
worker_threads = []
for worker in workers: # run the workers in parallel
job = lambda: worker.work() # each worker's job: call Worker.work()
t = threading.Thread(target=job) # one thread per worker
t.start() # start each worker
worker_threads.append(t) # keep track of every worker thread
COORD.join(worker_threads) # join the workers; continue only after every worker has finished
| tensorflow.global_variables_initializer | 7,259 |
import tensorflow as tf
select_targets = tf.gather_nd(targets_list[-1], gather_indcies)
select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies)
mse_loss_list.append(tf.losses.mean_squared_error(select_targets, select_heatmap,
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
scope='loss_{}'.format(len(pred_outputs) - 1),
loss_collection=None,#tf.GraphKeys.LOSSES,
# mean all elements of all pixels in all batch
reduction=tf.losses.Reduction.MEAN))
| tensorflow.cast | 7,260 |
import tensorflow as tf
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
| tensorflow.logging.info | 7,261 |
import tensorflow as tf
# 4x4
logits = self.create_base_discriminator_network(
X=X, params=params
)
else:
# Find growth index based on global step and growth frequency.
growth_index = tf.cast(
x=tf.floordiv(
x=tf.train.get_or_create_global_step(),
y=params["num_steps_until_growth"],
name="{}_global_step_floordiv".format(self.name)
),
dtype=tf.int32,
name="{}_growth_index".format(self.name)
)
| tensorflow.train.get_or_create_global_step | 7,262 |
import tensorflow as tf
pred_label = tf.argmax(distillation_loss["st_logits"], axis=-1, output_type=tf.int32)
correct = tf.equal(
tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
| tensorflow.ones_like | 7,263 |
import tensorflow as tf
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
num_dims = shape[0].size
if tt_rank.size == 1:
tt_rank = tt_rank * np.ones(num_dims - 1)
tt_rank = np.concatenate([[1], tt_rank, [1]])
tt_rank = tt_rank.astype(int)
tt_cores = [None] * num_dims
with tf.name_scope(name):
for i in range(num_dims):
curr_core_shape = (tt_rank[i], shape[0][i], shape[1][i],
tt_rank[i + 1])
tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev,
dtype=dtype)
return TensorTrain(tt_cores, shape, tt_rank)
def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
| tensorflow.name_scope | 7,264 |
from tensorflow.python.ops import variables
clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
sgd, {var0: [1]}, 2.0)
update_op = clip_opt.apply_gradients(
list(zip([grads0, grads1], [var0, var1])))
variables.global_variables_initializer().run()
return var0, var1, update_op
def _assertDenseCorrect(self, var0, var1, update_op):
# Fetch params to validate initial values
| tensorflow.python.ops.variables.global_variables_initializer | 7,265 |
import tensorflow as tf
evaluation is to happen.
Returns:
`fetches` with any `Tensor` objects replaced by numpy values.
"""
if any((tf.is_tensor(t) for t in tf.nest.flatten(fetches))):
if session:
fetches = session.run(fetches)
else:
fetches = self.evaluate(fetches)
| tensorflow.nest.flatten | 7,266 |
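The helper above relies on tf.nest.flatten to walk an arbitrary structure of fetches; a small hedged sketch of what flatten returns for a nested dict (runs eagerly in TF 2.x):

import tensorflow as tf

fetches = {'loss': tf.constant(0.1), 'metrics': [tf.constant(1), tf.constant(2)]}
flat = tf.nest.flatten(fetches)            # flat list of leaf tensors, dict keys in sorted order
print(len(flat))                           # 3
print(all(tf.is_tensor(t) for t in flat))  # True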
import tensorflow as tf
config = tf.ConfigProto(allow_soft_placement = True)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config, graph=self.graph)
self.sess.run(self.init)
| tensorflow.Session | 7,267 |
import tensorflow as tf
)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
if not tf.gfile.Exists(train_file) or not FLAGS.data_converted:
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
| tensorflow.logging.info | 7,268 |
import tensorflow as tf
def abs_loss(y_pred,labels):
return tf.reduce_mean(tf.abs(y_pred - labels))
| tensorflow.abs | 7,269 |
import tensorflow as tf
hooks=eval_hooks,
checkpoint_path=checkpoint)
tf.logging.info('Evaluation results: %s' % eval_results)
except tf.errors.NotFoundError:
# skip checkpoint if it gets deleted prior to evaluation
tf.logging.info('Checkpoint %s no longer exists ... skipping')
elif mode == 'train_and_eval':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
tf.logging.info('Starting training at step=%d.' % current_step)
train_steps_per_eval = int(
hparams.num_epochs_per_eval * train_steps_per_epoch)
# Final Evaluation if training is finished.
if current_step >= hparams.num_epochs * train_steps_per_epoch:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
while current_step < hparams.num_epochs * train_steps_per_epoch:
image_classifier.train(
input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
current_step += train_steps_per_eval
tf.logging.info('Starting evaluation at step=%d.' % current_step)
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
elif mode == 'predict':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting prediction ...')
time_hook = model_lib.SessionTimingHook()
eval_hooks.append(time_hook)
result_iter = image_classifier.predict(
| tensorflow.logging.info | 7,270 |
import tensorflow as tf
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
def get_regression_loss(
FLAGS, features, is_training):
"""Loss for downstream regression tasks."""
bsz_per_core = tf.shape(features["input_ids"])[0]
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
label = tf.reshape(features["label_ids"], [bsz_per_core])
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
run_config = xlnet.create_run_config(is_training, True, FLAGS)
xlnet_model = xlnet.XLNetModel(
xlnet_config=xlnet_config,
run_config=run_config,
input_ids=inp,
seg_ids=seg_id,
input_mask=inp_mask)
| tensorflow.transpose | 7,271 |
import tensorflow as tf
# 5. To hard-limit the fraction of GPU memory TensorFlow may use, set per_process_gpu_memory_fraction in the config
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)
# 6. Sometimes we want the code to be robust enough to decide how many GPUs are appropriate. TensorFlow has a built-in function to detect this; it is useful when we want to exploit GPU compute, and assign specific ops to GPUs, only when enough GPU memory is available
if tf.test.is_built_with_cuda(): pass
# 7. We want to assign specific ops to GPUs. The code below performs a few simple computations and assigns them to the main CPU and two secondary GPUs
with tf.device('/cpu:0'):
a = tf.constant([1.0, 3.0, 5.0], shape=[1,3])
b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
with tf.device('/gpu:0'):
c = tf.matmul(a,b)
c = tf.reshape(c, [-1])
with tf.device('/gpu:1'):
d = tf.matmul(b, a)
flat_d = tf.reshape(d, [-1])
| tensorflow.constant | 7,272 |
import tensorflow as tf
self.assertEqual(222, v1.eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
# Now try a restore with the sharded filename.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
save_path = os.path.join(self.get_temp_dir(), "sharded")
save.restore(sess, save_path + "-?????-of-?????")
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(
tf.train.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded-?????-of-00002"))
def testSaverDef(self):
| tensorflow.initialize_all_variables | 7,273 |
import tensorflow as tf
epsilon=epsilon, is_training=False)
else:
(X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)
return X
def _mark_for_monitoring(self, name, value):
tf.add_to_collection(TF_COLLECTION_MONITORED, tf.identity(value, name))
def _add_monitoring_of_values(self):
monitored_values = tf.get_collection(TF_COLLECTION_MONITORED)
monitored_values = {
value.name.split(':')[0]: value # Get rid of ':0' from name
for value in monitored_values
| tensorflow.identity | 7,274 |
import tensorflow as tf
self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S') # input Global State
self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1') # input state for agent1
self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_') # input Next Global State
self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_') # input next state for agent1
self.R = tf.placeholder(tf.float32, [None, ], name='R') # input Reward
self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a') # input Action onehot for agent1
self.done = tf.placeholder(tf.float32, [None, ], name='done') # input Done info ???
self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
self.q_target = tf.placeholder(tf.float32, [None,], name='q_tot_target')
w_initializer, b_initializer = tf.random_normal_initializer(0., 0.1), tf.constant_initializer(0.0)
# ------------------ build evaluate_net ------------------
with tf.variable_scope('eval_net'):
a_fc1 = tf.layers.dense(self.s, 128, tf.nn.relu, kernel_initializer=w_initializer,
| tensorflow.placeholder | 7,275 |
import tensorflow as tf
else:
direct_mask = tf.greater(sl_col, sl_row) # bl,bl
direct_mask_tile = tf.tile(
tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1]) # bs,bn,bl,bl
rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1]) # bs,bn,bl,bl
rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl]) # bs,bn,bl,bl
rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl
| tensorflow.expand_dims | 7,276 |
import tensorflow as tf
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
| tensorflow.device | 7,277 |
import tensorflow as tf
x = tf.nn.bias_add(x, bias)
return x * update_mask
x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id,
dilations=(1, dilation, dilation, 1))
if use_bias:
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x
def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0):
# good old t-conv. I love it!
| tensorflow.constant_initializer | 7,278 |
import tensorflow as tf
logits,
target_precision,
weights=1.0,
dual_rate_factor=0.1,
label_priors=None,
surrogate_type='xent',
lambdas_initializer=tf.constant_initializer(1.0),
reuse=None,
variables_collections=None,
trainable=True,
scope=None):
"""Computes recall at precision loss.
| tensorflow.constant_initializer | 7,279 |
import tensorflow as tf
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.global_variables_initializer()])
| tensorflow.nn.rnn_cell.GRUCell | 7,280 |
import tensorflow as tf
"""Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf 'Reducing
Overfitting In Deep Networks by Decorrelating Representation'.
Args:
xs: 4-D `tensor` [batch_size, height, width, channels], input
Returns:
a `float` decov loss
"""
with tf.name_scope(name):
x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
m = tf.reduce_mean(x, 0, True)
z = tf.expand_dims(x - m, 2)
corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
corr_frob_sqr = tf.reduce_sum(tf.square(corr))
corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
return loss
def center_loss(features, label, alpha, num_classes, name='center_loss'):
"""Center loss based on the paper "A Discriminative Feature Learning Approach
for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf)
| tensorflow.expand_dims | 7,281 |
import tensorflow as tf
fw_result = directional_attention_with_selections(
rep_tensor, rep_mask, dep_selection, head_selection,
'forward', hn, keep_unselected,
'forward_resa', keep_prob, is_train, wd, activation
)
bw_result = directional_attention_with_selections(
rep_tensor, rep_mask, dep_selection, head_selection,
'backward', hn, keep_unselected,
'backward_resa', keep_prob, is_train, wd, activation
)
return tf.concat([fw_result, bw_result], -1)
def directional_attention_with_selections(
rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None, keep_unselected=True,
scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'):
bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
org_ivec = rep_tensor.get_shape().as_list()[2]
ivec = hn or org_ivec
| tensorflow.concat | 7,282 |
import tensorflow as tf
h1 = self.input_integration(
x=x,
c1=c1,
h2=h2,
layer=layer)
# Circuit output receives recurrent input h1
c2, g2 = self.circuit_output(
h1=h1,
layer=layer,
var_scope=var_scope,
layer_idx=layer_idx)
with tf.variable_scope(
'%s/c2_bn' % var_scope,
reuse=self.scope_reuse) as scope:
c2 = tf.contrib.layers.batch_norm(
inputs=c2,
scale=True,
center=False,
fused=True,
renorm=False,
param_initializers=self.param_initializer,
updates_collections=None,
scope=scope,
reuse=self.reuse,
| tensorflow.variable_scope | 7,283 |
import tensorflow as tf
if coverage is not None: # Update coverage vector
coverage += attn_dist
else: # first step of training
coverage = attn_dist
# Calculate the context vector from attn_dist and encoder_states
# shape (batch_size, attn_size).
context_vector = tf.reduce_sum(tf.expand_dims(attn_dist, axis=-1) * encoder_states, axis=1) # [batch_size, encoder_dim]
return context_vector, attn_dist, coverage
def embedding_lookup(self, inputs):
'''
inputs: list of [batch_size], int32
'''
| tensorflow.expand_dims | 7,284 |
import tensorflow as tf
"""Initial learning rate for training.""")
tf.flags.DEFINE_float('num_epochs_per_decay', 0,
"""Steps after which learning rate decays.""")
tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
"""Learning rate decay factor.""")
tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""")
tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""")
tf.flags.DEFINE_float('gradient_clip', None, """Gradient clipping magnitude.
Disabled by default.""")
tf.flags.DEFINE_float('weight_decay', 0.00004,
"""Weight decay factor for training.""")
# Performance tuning flags.
tf.flags.DEFINE_boolean('winograd_nonfused', True,
| tensorflow.flags.DEFINE_float | 7,285 |
import tensorflow as tf
print('rnn_output: {}'.format(rnn_output.get_shape()))
# Dense NN
dnn_output = rnn_output
dnn_output_size = rnn_output_size
if do_dnn:
last_layer = rnn_output
last_layer_size = rnn_output_size
for i, layer_size in enumerate(dnn_sizes):
layer_name = 'dnn_{}'.format(i)
with tf.variable_scope(layer_name):
dnn_w = tf.get_variable('W', shape=[last_layer_size, layer_size], initializer=dnn_init, dtype=dtype)
dnn_b = tf.get_variable('b', shape=[layer_size], initializer=tf.constant_initializer(0.0), dtype=dtype)
projected = tf.nn.bias_add(tf.matmul(last_layer, dnn_w), dnn_b)
# TODO: argument nonlinearity, change bias to 0.1 if relu
if dnn_nonlin == 'tanh':
last_layer = tf.nn.tanh(projected)
elif dnn_nonlin == 'sigmoid':
last_layer = tf.nn.sigmoid(projected)
elif dnn_nonlin == 'relu':
last_layer = tf.nn.relu(projected)
else:
raise NotImplementedError()
if mode == 'train' and dnn_keep_prob < 1.0:
last_layer = tf.nn.dropout(last_layer, dnn_keep_prob)
last_layer_size = layer_size
| tensorflow.matmul | 7,286 |
import tensorflow as tf
moving_average_decay = 0.9
decay = moving_average_decay
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=False)
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu)
tf.add_to_collection('mu_sigma_bn', mu)
sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32,
initializer=tf.ones_initializer(), trainable=False)
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma)
tf.add_to_collection('mu_sigma_bn', sigma)
beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32,
initializer=tf.zeros_initializer())
gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
| tensorflow.add_to_collection | 7,287 |
from tensorflow.python.framework import ops
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
| tensorflow.python.framework.ops.RegisterShape | 7,288 |
import tensorflow as tf
input_shape = get_shape_list(input_tensor)
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
| tensorflow.variable_scope | 7,289 |
import tensorflow as tf
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(values=args, axis=1), matrix)
if not bias:
return res
bias_term = tf.get_variable("Bias", [output_size], initializer=tf.constant_initializer(bias_start))
return res + bias_term
| tensorflow.matmul | 7,290 |
import tensorflow as tf
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
| tensorflow.constant_initializer | 7,291 |
import tensorflow as tf
self._sess.run(m.vars_assign_op, feed_dict=var_feeddict)
def _make_placeholders(self):
w = self._train_params['image_size']
h = self._train_params['image_size']
in_ch = 3 # Num channels of input images
train_images_ph = tf.placeholder(tf.int32, name='train_images_ph', shape=(None, w, h, in_ch)) # Train images
pred_images_ph = tf.placeholder(tf.int32, name='pred_images_ph', shape=(None, w, h, in_ch)) # Predict images
train_classes_ph = tf.placeholder(tf.int32, name='train_classes_ph', shape=(None,)) # Train classes
pred_classes_ph = tf.placeholder(tf.int32, name='pred_classes_ph', shape=(None,)) # Predict classes
normal_arch_ph = tf.placeholder(tf.int32, name='normal_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
reduction_arch_ph = tf.placeholder(tf.int32, name='reduction_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
| tensorflow.placeholder | 7,292 |
import tensorflow as tf
'learning_rate_decay_factor': FLAGS.learning_rate_decay_factor,
'decay_steps': FLAGS.decay_steps,
'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),
'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),
})
tensors_to_log = {
'lr': 'learning_rate',
'ce_loss': 'cross_entropy_loss',
'loc_loss': 'location_loss',
'total_loss': 'total_loss',
'cls_acc': 'cls_accuracy',
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps)
print('Starting a training cycle.')
xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| tensorflow.train.LoggingTensorHook | 7,293 |
import tensorflow as tf
out = tf.nn.max_pool3d(bottom, ksize=[1,1, kernel_size, kernel_size,1], strides=[1,1,1,1,1], padding='VALID')
else:
out = tf.nn.max_pool3d(bottom, ksize=[1,1, FLAGS.pool_kernel, FLAGS.pool_kernel,1], strides=[1,1,FLAGS.pool_stride,FLAGS.pool_stride,1], padding='VALID')
shape = out.get_shape()
| tensorflow.nn.max_pool3d | 7,294 |
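The pooling above operates on 5-D NDHWC tensors; a small sketch with concrete shapes (the kernel and stride values are illustrative, not the FLAGS used in the row):

import tensorflow as tf

x = tf.zeros([2, 4, 32, 32, 8])   # [batch, depth, height, width, channels]
out = tf.nn.max_pool3d(x, ksize=[1, 1, 2, 2, 1],
                       strides=[1, 1, 2, 2, 1], padding='VALID')
print(out.shape)                  # (2, 4, 16, 16, 8)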
import tensorflow as tf
annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
# annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
pred_annotation, logits = inference(image, keep_probability)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
#debug
loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=tf.squeeze(annotation, squeeze_dims=[3]),
name="entropy")))
| tensorflow.cast | 7,295 |
import tensorflow as tf
return (images+1.)/2.
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
# print("c", w.get_shape())
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
class batch_norm(object):
def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x):
return tf.contrib.layers.batch_norm(x,
| tensorflow.constant_initializer | 7,296 |
import tensorflow as tf
'fast_rcnn_box_loss', tf.reduce_mean(fast_rcnn_box_loss),
step=global_step)
if params['include_mask']:
tf.contrib.summary.scalar(
'mask_loss', tf.reduce_mean(mask_loss), step=global_step)
tf.contrib.summary.scalar(
'learning_rate', tf.reduce_mean(learning_rate),
step=global_step)
return tf.contrib.summary.all_summary_ops()
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
global_step_t = tf.reshape(global_step, [1])
total_loss_t = tf.reshape(total_loss, [1])
| tensorflow.contrib.summary.all_summary_ops | 7,297 |
import tensorflow as tf
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
| tensorflow.gfile.Copy | 7,298 |
import tensorflow as tf
return X
def _mark_for_monitoring(self, name, value):
tf.add_to_collection(TF_COLLECTION_MONITORED, tf.identity(value, name))
def _add_monitoring_of_values(self):
monitored_values = tf.get_collection(TF_COLLECTION_MONITORED)
monitored_values = {
value.name.split(':')[0]: value # Get rid of ':0' from name
for value in monitored_values
}
| tensorflow.get_collection | 7,299 |