Dataset columns:
  seed       string, lengths 25 to 2.89k
  seed_api   string, lengths 14 to 102
  index      int64, 0 to 14.8k
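Each record below is a (seed, seed_api, index) triple: a TensorFlow code snippet, the fully qualified API the snippet exercises, and the record's position in the full dataset. As a minimal sketch of what one loaded record might look like, the dict below reproduces row 6,924 from this section; the dict layout and the names_labeled_api helper are assumptions for illustration, not part of the dataset.

# A record of this dataset, as a plain Python dict. The dict layout and the
# helper below are illustrative assumptions; the field values are copied from
# row 6,924 of this section (the seed is shown flattened, exactly as it
# appears in the export).
record = {
    "seed": 'import tensorflow as tf def dot_attention(inputs, memory, mask, '
            'hidden, keep_prob=1.0, is_train=None, scope="dot_attention"): '
            'with tf.variable_scope(scope):',
    "seed_api": "tensorflow.variable_scope",
    "index": 6924,
}

def names_labeled_api(row):
    """Check that the snippet text actually mentions the API named in seed_api."""
    leaf = row["seed_api"].rsplit(".", 1)[-1]  # e.g. "variable_scope"
    return leaf in row["seed"]

assert names_labeled_api(record)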
import tensorflow as tf enc_inp, dec_inp_fp_true, feed_previous=True) _, update_fp_false, variables_fp_false = ForwardBackward( enc_inp, dec_inp_holder_fp_false, feed_previous=False) sess.run(tf.global_variables_initializer()) # We only check consistencies between the variables existing in both # the models with True and False feed_previous. Variables created by # the loop_function in the model with True feed_previous are ignored. v_false_name_dict = {v.name.split("/", 1)[-1]: v for v in variables_fp_false} matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]]) for v in variables_fp_true] for v_true, v_false in matched_variables: sess.run(tf.assign(v_false, v_true)) # Take the symbols generated by the decoder with feed_previous=True as # the true input symbols for the decoder with feed_previous=False. dec_fp_true = sess.run(dec_op_fp_true) output_symbols_fp_true = np.argmax(dec_fp_true, axis=2) dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(), output_symbols_fp_true[:-1])) sess.run(update_fp_true) sess.run(update_fp_false, {holder: inp for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)}) for v_true, v_false in matched_variables:
tensorflow.assign
6,900
import tensorflow as tf flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor # add sequence mask for: # 1. random shuffle lm modeling---xlnet with random shuffled input # 2. left2right and right2left language modeling # 3. conditional generation def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs): if seq_type == 'seq2seq': if mask_sequence is not None: seq_shape = get_shape_list(mask_sequence, expected_rank=2) seq_len = seq_shape[1] ones = tf.ones((1, seq_len, seq_len)) a_mask = tf.matrix_band_part(ones, -1, 0) s_ex12 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 2) s_ex13 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 3) a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask # generate mask of batch x seq_len x seq_len a_mask = tf.reshape(a_mask, (-1, seq_len, seq_len)) out_mask = attention_mask * a_mask else: ones = tf.ones_like(attention_mask[:1]) mask = (tf.matrix_band_part(ones, -1, 0)) out_mask = attention_mask * mask else: out_mask = attention_mask return out_mask
tensorflow.matrix_band_part
6,901
import tensorflow as tf y_w_diff = y[:, :, 1:] - y[:, :, :-1] h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff)) w_diff = tf.abs(tf.abs(x_w_diff) - tf.abs(y_w_diff)) return h_diff + tf.transpose(w_diff) def leaky_relu(x, leak=0.2, name='leaky_relu'): with tf.variable_scope(name): f1 = 0.5 * (1 + leak) f2 = 0.5 * (1 - leak) return f1 * x + f2 * abs(x) def linear(x, shape, name, bias=False): with tf.variable_scope(name): W = weight_variable(shape) h = tf.matmul(x, W) if bias: b = bias_variable([shape[-1]]) h = h + b return h def conv2d(x, shape, name, bias=False, stride=2, padding='SAME'): with tf.variable_scope(name): W = weight_variable(shape) h = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
tensorflow.variable_scope
6,902
import tensorflow as tf anchors = anchors.astype(np.float32) grid_shape = x_shape[1:3] # print(grid_shape) grid_h, grid_w = grid_shape[0], grid_shape[1] # print(grid_h,tf.range(grid_h)) grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h)) grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2] box_xy = (box_xy + tf.cast(grid, dtype)) * stride box_wh = tf.exp(box_wh) * anchors box_x1y1 = box_xy - box_wh / 2. box_x2y2 = box_xy + box_wh / 2. box = tf.concat([box_x1y1, box_x2y2], axis=-1)
tensorflow.stack
6,903
import tensorflow as tf x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 64, (4, 4), strides=(2, 2), name="conv2", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 128, (4, 4), strides=(2, 2), name="conv3", activation=common_layers.belu, padding="SAME")
tensorflow.nn.dropout
6,904
import tensorflow as tf # function. # Test case 1, 2. x = tf.placeholder_with_default(input=1, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None)) self.assertTrue( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder_with_default(input=[1], shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None)) self.assertFalse( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return # Test case 3. x = tf.placeholder_with_default(input=1, shape=None) is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x)) self.assertTrue(self.evaluate(is_scalar)) x = tf.placeholder_with_default(input=[1], shape=None)
tensorflow.shape
6,905
import tensorflow as tf def data_format(): return "channels_first" if tfe.num_gpus() else "channels_last" def random_dataset(): batch_size = 64 images = tf.random_normal([batch_size, 784]) labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32) return tf.data.Dataset.from_tensors((images, labels)) def train(defun=False):
tensorflow.random_normal
6,906
import tensorflow as tf epoch, total_epochs, step, iteration, batch_loss, batch_acc)) train_loss += batch_loss train_acc += batch_acc if step % 30 == 0 : summary_str = sess.run(summary_op, feed_dict=train_feed_dict) summary_writer.add_summary(summary=summary_str, global_step=epoch) summary_writer.flush() train_loss /= iteration # average loss train_acc /= iteration # average accuracy train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss), tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)]) # test_acc, test_loss, test_summary = Evaluate(sess) summary_writer.add_summary(summary=train_summary, global_step=epoch) # summary_writer.add_summary(summary=test_summary, global_step=epoch) summary_writer.flush() # line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f \n" % ( # epoch, total_epochs, train_loss, train_acc, test_loss, test_acc) line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f \n" % ( epoch, total_epochs, train_loss, train_acc) print(line) with open('./logs_pretrain/logs.txt', 'a') as f:
tensorflow.Summary.Value
6,907
import tensorflow as tf n_units=100, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=0.1), b_init=tf.constant_initializer(value=0.0),
tensorflow.truncated_normal_initializer
6,908
import tensorflow as tf self._SaveAndLoad("var0", 0.0, 1.0, save_path) for use_tensor in [True, False]: with self.test_session() as sess: var = tf.Variable(1.0, name="var0") save = tf.train.Saver({var.op.name: var}) var.initializer.run() if use_tensor: global_step = tf.constant(global_step_int)
tensorflow.train.Saver
6,909
import tensorflow as tf """ def test_rnn_layer(test_data_x,test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test, num_layers): with tf.Session() as sess: sess.run(tf.global_variables_initializer())
tensorflow.Session
6,910
import tensorflow as tf should be the inputs and targets, respectively. seq_lengths: An int Tensor of shape [batch_size] containing the length of each sequence in observations. """ inputs, targets = observations self.seq_lengths = seq_lengths self.max_seq_len = tf.reduce_max(seq_lengths) self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False) self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False) targets_encoded = base.encode_all(targets, self.data_encoder) self.targets_encoded_ta = base.ta_for_tensor(targets_encoded, clear_after_read=False)
tensorflow.reduce_max
6,911
import tensorflow as tf def build_ae_model(self): self.input = tf.placeholder(tf.uint8, self.batch_shape, name='input') self.target = tf.placeholder(tf.uint8, self.batch_shape, name='target')
tensorflow.placeholder
6,912
import tensorflow as tf self.logger.info("applying l2 loss") variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables) self.loss += l2_loss if self.config.decay is not None: self.var_ema = tf.train.ExponentialMovingAverage(self.config.decay) ema_op = self.var_ema.apply(tf.trainable_variables()) with tf.control_dependencies([ema_op]): self.loss = tf.identity(self.loss) self.shadow_vars = []
tensorflow.train.ExponentialMovingAverage
6,913
import tensorflow as tf tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features # add by wangxiao # define the inputs of signature def serving_input_fn(): label_ids = tf.placeholder(tf.int32, [None], name='label_ids') input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids') input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask') segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids') input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({ 'label_ids': label_ids, 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids, })() return input_fn def main(_): tf.logging.set_verbosity(tf.logging.INFO) processors = { "cola": ColaProcessor,
tensorflow.placeholder
6,914
import tensorflow as tf else: model_outputs = _model_outputs() # First check if it is in PREDICT mode. if mode == tf.estimator.ModeKeys.PREDICT: predictions = {} predictions['detections'] = model_outputs['detections'] predictions['image_info'] = features['image_info'] if params['include_mask']: predictions['mask_outputs'] = tf.nn.sigmoid(model_outputs['mask_outputs']) if params['use_tpu']: return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions) return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Set up training loss and learning rate. global_step = tf.train.get_or_create_global_step() learning_rate = learning_rates.step_learning_rate_with_linear_warmup( global_step, params['init_learning_rate'], params['warmup_learning_rate'], params['warmup_steps'], params['learning_rate_levels'], params['learning_rate_steps'])
tensorflow.contrib.tpu.TPUEstimatorSpec
6,915
from tensorflow.python.ops import control_flow_ops assert global_step loss = self._loss( self._logits(features), targets, self._get_weight_tensor(features)) logging_ops.scalar_summary("loss", loss) linear_vars = self._get_linear_vars() dnn_vars = self._get_dnn_vars() grads = gradients.gradients(loss, dnn_vars + linear_vars) dnn_grads = grads[0:len(dnn_vars)] linear_grads = grads[len(dnn_vars):] train_ops = self._get_linear_training_ops( linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars) train_step = control_flow_ops.group(*train_ops, name="combined_training_op") with ops.control_dependencies([train_step]): with ops.get_default_graph().colocate_with(global_step): return state_ops.assign_add(global_step, 1).op, loss def _run_metrics(self, predictions, targets, metrics, weights): result = {} targets = math_ops.cast(targets, predictions.dtype) for name, metric in six.iteritems(metrics or {}): if "weights" in inspect.getargspec(metric)[0]: result[name] = metric(predictions, targets, weights=weights) else: result[name] = metric(predictions, targets) return result
tensorflow.python.ops.control_flow_ops.group
6,916
import tensorflow as tf prev_c, prev_h = next_c, next_h query = anchors_w_1.gather(indices) query = tf.reshape(query, [layer_id, self.lstm_size]) query = tf.tanh(query + tf.matmul(next_h[-1], self.w_attn_2)) query = tf.matmul(query, self.v_attn) logits = tf.reshape(query, [1, layer_id]) if self.temperature is not None: logits /= self.temperature if self.tanh_constant is not None: logits = self.tanh_constant * tf.tanh(logits) index = tf.multinomial(logits, 1) index = tf.to_int32(index) index = tf.reshape(index, [1]) arc_seq = arc_seq.write(start_id + 2 * i, index) curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=index) log_prob += curr_log_prob curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
tensorflow.tanh
6,917
import tensorflow as tf u = tf.contrib.layers.conv2d_transpose(layer_input,filters,f_size,stride=stride,padding=padding) if dropout_rate: u = tf.contrib.layers.dropout(u,keep_prob=dropout_rate) u = tf.contrib.layers.batch_norm(u) u = tf.nn.relu(u) # u = tf.contrib.keras.layers.concatenate([skip_input,u]) return u
tensorflow.contrib.layers.batch_norm
6,918
import tensorflow as tf :param q_sqrt: None or R x M x M (lower triangular) :param white: bool :return: N x R or R x N x N """ logger.debug("base conditional") # compute kernel stuff num_func = tf.shape(f)[1] # R Lm = tf.cholesky(Kmm) # Compute the projection matrix A A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False) # construct the conditional mean fmean = tf.matmul(A, f, transpose_a=True) if q_sqrt is not None: if q_sqrt.get_shape().ndims == 2: LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N
tensorflow.tile
6,919
import tensorflow as tf f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), ]
tensorflow.constant
6,920
import tensorflow as tf input_shape = bottom.get_shape().as_list() input_channels = input_shape[-1] output_shape = [input_shape[0], input_shape[1]*stride, input_shape[2]*stride, output_channels] with tf.variable_scope(name) as scope: kernel = self.variable('weights', [kernel_size, kernel_size, output_channels, input_channels], initializer, regularizer=tf.contrib.layers.l2_regularizer(0.0005)) deconv = tf.nn.conv2d_transpose(bottom, kernel, output_shape, [1, stride, stride, 1], padding='SAME') biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0)) deconv_layer = tf.nn.bias_add(deconv, biases)
tensorflow.contrib.layers.l2_regularizer
6,921
import tensorflow as tf if FLAGS.no_gpu or tfe.num_gpus() <= 0: print(tfe.num_gpus()) device = "/cpu:0" else: device = "/gpu:0" print("Using device %s." % device) log_dir = os.path.join(FLAGS.dir, "summaries") tf.gfile.MakeDirs(log_dir) train_summary_writer = tf.contrib.summary.create_file_writer( os.path.join(log_dir, "train"), flush_millis=10000) test_summary_writer = tf.contrib.summary.create_file_writer( os.path.join(log_dir, "eval"), flush_millis=10000, name="eval") with tf.device(device): for epoch in range(FLAGS.num_epochs):
tensorflow.gfile.MakeDirs
6,922
import tensorflow as tf def _add_max_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train): filter_size = 3 stride = 2 if is_reduction else 1 with tf.variable_scope('max_pool_3x3_op'): X = tf.nn.max_pool(X, ksize=(1, filter_size, filter_size, 1), strides=[1, stride, stride, 1], padding='SAME') X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check return X def _add_separable_conv_3x3_op(self, *args, **kwargs): return self._add_separable_conv_op(*args, **kwargs, filter_size=3)
tensorflow.reshape
6,923
import tensorflow as tf self.predictions = tf.argmax(self.fc3, 1, name="predictions") losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y) regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.loss = tf.reduce_mean(losses) + sum(regularization_losses) # Accuracy with tf.name_scope("accuracy"): correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tensorflow.cast
6,924
import tensorflow as tf def dot_attention(inputs, memory, mask, hidden, keep_prob=1.0, is_train=None, scope="dot_attention"): with tf.variable_scope(scope):
tensorflow.variable_scope
6,925
from tensorflow.python.framework import ops
tensorflow.python.framework.ops.reset_default_graph
6,926
import tensorflow as tf eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) tf.logging.info("***** Running evaluation *****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0
tensorflow.logging.info
6,927
import tensorflow as tf else: logprob = heads[name](features).log_prob(target[name]) objectives.append(Objective(name, logprob, max, include, exclude)) objectives = [o._replace(value=tf.reduce_mean(o.value)) for o in objectives] return objectives, cstr_pct def contra_step_lossV1(pred, tgt, temp=10.0): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) soft_sign = tf.tanh((tgt1 - tgt2) * temp) loss = tf.maximum(0.0, soft_sign * ((tgt1 - tgt2) - (pred1 - pred2))) loss = tf.reduce_mean(loss) return loss def contra_step_lossV2(pred, tgt): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0)
tensorflow.split
6,928
import tensorflow as tf all_losses.append(regularization_loss) if all_losses: sum_loss = tf.add_n(all_losses) # Add the summaries out of the clone device block. if clone_loss is not None: tf.summary.scalar('clone_loss', clone_loss) # tf.summary.scalar(clone.scope + '/clone_loss', clone_loss) if regularization_loss is not None: tf.summary.scalar('regularization_loss', regularization_loss) return sum_loss def _optimize_clone(optimizer, clone, num_clones, regularization_losses, **kwargs): """Compute losses and gradients for a single clone.
tensorflow.summary.scalar
6,929
import tensorflow as tf a = 1 values = interpolated inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 0].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 0]) values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
tensorflow.reduce_max
6,930
import tensorflow as tf for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o)
tensorflow.split
6,931
import tensorflow as tf if self.config.decay is not None: self.var_ema = tf.train.ExponentialMovingAverage(self.config.decay) ema_op = self.var_ema.apply(tf.trainable_variables()) with tf.control_dependencies([ema_op]):
tensorflow.trainable_variables
6,932
from tensorflow.python.ops import control_flow_ops """Note: when `rate` is an integer, there are actually two modes: `rate` and `rate - 1`. In this case we return the larger, i.e., `rate`.""") def _mode(self): return math_ops.floor(self.rate) def _assert_valid_sample(self, x, check_integer=True): if not self.validate_args: return x dependencies = [check_ops.assert_non_negative(x)] if check_integer: dependencies += [distribution_util.assert_integer_form( x, message="x has non-integer components.")] return control_flow_ops.with_dependencies(dependencies, x)
tensorflow.python.ops.control_flow_ops.with_dependencies
6,933
import tensorflow as tf """ with tf.name_scope("cross_entropy_sequence_loss"): losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets) loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0])) losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0]) return losses def dice_loss(predictions, targets, weights=1., name='dice_loss'): with tf.name_scope(name): # predictions = tf.to_float(predictions) targets = tf.to_float(targets) intersection = 2 * tf.reduce_sum(predictions * targets) + weights union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets) loss = -(intersection / (union)) return loss def precision_recall_auc_loss(labels, logits, precision_range=(0.0, 1.0), num_anchors=20, weights=1.0, dual_rate_factor=0.1, label_priors=None, surrogate_type='xent', lambdas_initializer=tf.constant_initializer(1.0),
tensorflow.reduce_sum
6,934
import tensorflow as tf if cell_type == 'LSTM': if activation == 'linear': lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu': lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) else: #tanh by default lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif cell_type == 'GRU': if activation == 'linear': gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.identity) cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu':
tensorflow.contrib.rnn.DropoutWrapper
6,935
import tensorflow as tf import time import numpy as np import tensorflow as tf import random from tensorflow.contrib import slim from npu_bridge.estimator import npu_ops from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig tf.app.flags.DEFINE_integer('input_size', 512, '') tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '') tf.app.flags.DEFINE_integer('num_readers', 16, '') tf.app.flags.DEFINE_float('learning_rate', 0.0001, '') tf.app.flags.DEFINE_integer('max_steps', 100000, '') tf.app.flags.DEFINE_integer('loss_scale', 1024, '') tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '') tf.app.flags.DEFINE_string('gpu_list', '1', '') tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '') tf.app.flags.DEFINE_boolean('restore', False, 'whether to resotre from checkpoint') tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '') tf.app.flags.DEFINE_integer('save_summary_steps', 100, '') tf.app.flags.DEFINE_string('pretrained_model_path', None, '') tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision') tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune') tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data') tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets') import model import icdar
tensorflow.app.flags.DEFINE_integer
6,936
import tensorflow as tf # pred1, pred2 = tf.split(horizon_pred, 2, axis=0) # tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0) even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = tf.gather(horizon_pred, even) pred2 = tf.gather(horizon_pred, odd) tgt1 = tf.gather(horizon_tgt, even) tgt2 = tf.gather(horizon_tgt, odd) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small))) loss = tf.reduce_mean(loss) return loss def apply_optimizers(objectives, trainer, config): # Make sure all losses are computed and apply loss scales. processed = [] values = [ob.value for ob in objectives] for ob in objectives:
tensorflow.where
6,937
import tensorflow.contrib.layers as layers def atari_model(img_in, num_actions, scope, reuse=False): # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf with tf.variable_scope(scope, reuse=reuse): out = img_in with tf.variable_scope("convnet"): # original architecture out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def atari_learn(env,
tensorflow.contrib.layers.convolution2d
6,938
import tensorflow as tf if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") if not tf.gfile.Exists(predict_file) or not FLAGS.data_converted: file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running prediction*****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False
tensorflow.gfile.Exists
6,939
import tensorflow as tf import TensorflowUtils as utils import read_MITSceneParsingDataParis as scene_parsing import datetime import BatchDatsetReader as dataset from six.moves import xrange FLAGS = tf.flags.FLAGS tf.flags.DEFINE_integer("batch_size", "50", "batch size for training") tf.flags.DEFINE_string("logs_dir", "/scratch1/ram095/nips20/logs_mnist128/", "path to logs directory") tf.flags.DEFINE_string("data_dir", "/scratch1/ram095/nips20/paris_street", "path to dataset") tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer") tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat") tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False") tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize") MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat' MAX_ITERATION = int(1e5 + 1)
tensorflow.flags.DEFINE_string
6,940
import tensorflow as tf return (images_batch, classes_batch, dataset_init_op) def _get_drop_path_keep_prob(self, layers_ratio, step, is_train=False, **knobs): batch_size = knobs['batch_size'] drop_path_keep_prob = knobs['drop_path_keep_prob'] # Base keep prob for drop path drop_path_decay_epochs = knobs['drop_path_decay_epochs'] N = self._train_params['N'] # Only drop path during training keep_prob = tf.constant(1, dtype=tf.float32) if is_train: # Decrease keep prob deeper into network keep_prob = 1 - layers_ratio * (1 - drop_path_keep_prob) # Decrease keep prob with increasing steps steps_per_epoch = math.ceil(N / batch_size) steps_ratio = tf.minimum(((step + 1) / steps_per_epoch) / drop_path_decay_epochs, 1) keep_prob = 1 - steps_ratio * (1 - keep_prob) keep_prob = tf.cast(keep_prob, tf.float32)
tensorflow.constant
6,941
import tensorflow as tf # where m(x) is the mean_function and \mu(x) is fmean e_mean_mean = expectation(pXnew, mean_function, mean_function) # N x D x D Lit_q_mu = tf.matrix_triangular_solve(Luu, q_mu, adjoint=True) e_mean_Kuf = expectation(pXnew, mean_function, (kern, feat)) # N x D x M # einsum isn't able to infer the rank of e_mean_Kuf, hence we explicitly set the rank of the tensor: e_mean_Kuf = tf.reshape(e_mean_Kuf, [num_data, num_func, num_ind]) e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf, Lit_q_mu) # N x D x D e_related_to_mean = e_fmean_mean + tf.matrix_transpose(e_fmean_mean) + e_mean_mean if full_output_cov: fvar = ( tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) + tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) + # tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) + tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) - # tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) - fmean[:, :, None] * fmean[:, None, :] + e_related_to_mean ) else: fvar = ( (eKff - tf.trace(Li_eKuffu_Lit))[:, None] + tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) + tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
tensorflow.einsum
6,942
from tensorflow.python.framework import ops Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. data_format: A string. 'NHWC' and 'NCHW" are supported. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `value`. """ with ops.op_scope([value, bias], name, "BiasAdd") as name: value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name) ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape) ops.RegisterShape("BiasAddGrad")(common_shapes.bias_add_grad_shape) # pylint: disable=protected-access def bias_add_v1(value, bias, name=None): """Adds `bias` to `value`. This is a deprecated version of bias_add and will soon to be removed. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
tensorflow.python.framework.ops.RegisterShape
6,943
import tensorflow as tf # mse_loss = tf.multiply(params['mse_weight'] / params['num_stacks'], tf.add_n(bce_loss_list), name='mse_loss') # tf.summary.scalar('mse', mse_loss) # tf.losses.add_loss(mse_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy. loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name]) total_loss = tf.identity(loss, name='total_loss') tf.summary.scalar('loss', total_loss) if mode == tf.estimator.ModeKeys.EVAL: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions, eval_metric_ops=metrics) if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() lr_values = [params['warmup_learning_rate']] + [base_learning_rate * decay for decay in params['lr_decay_factors']] learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), [params['warmup_steps']] + [int(float(ep)*params['steps_per_epoch']) for ep in params['decay_boundaries']], lr_values) truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate') tf.summary.scalar('lr', truncated_learning_rate)
tensorflow.estimator.EstimatorSpec
6,944
import tensorflow as tf print(tgtimg_z.get_shape()) self.out = output_h4 self.out2 = truthoutput_h4 print(self.out.get_shape()) self.recon1 = tf.nn.l2_loss(tgtimg - self.out) self.recon2 = tf.nn.l2_loss(tgtimg - self.out2) self.loss = self.recon1 + self.recon2 + self.simloss if ablation_type == "None": self.loss = self.recon1 + self.recon2 + self.simloss elif ablation_type == "L2":
tensorflow.nn.l2_loss
6,945
import tensorflow as tf self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testTiedRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 cell = tf.nn.rnn_cell.OutputProjectionWrapper( tf.nn.rnn_cell.GRUCell(2), 4) dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].shape) def testEmbeddingRNNDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2
tensorflow.global_variables_initializer
6,946
from tensorflow.python.framework import ops value_tensor = array_ops.identity(count) update_op = state_ops.assign_add(count, math_ops.reduce_sum(values)) if metrics_collections: ops.add_to_collections(metrics_collections, value_tensor) if updates_collections: ops.add_to_collections(updates_collections, update_op)
tensorflow.python.framework.ops.add_to_collections
6,947
import tensorflow as tf 'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5') tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances') tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance') tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective') tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model') tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set') tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs') tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs') tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every') tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization') tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ') tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information') tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode') tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size') tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ') tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set') tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info') tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps') FLAGS = tf.app.flags.FLAGS slim = tf.contrib.slim AUTOENCODER = 'ae'
tensorflow.app.flags.DEFINE_boolean
6,948
import tensorflow as tf # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T]
tensorflow.tanh
6,949
import tensorflow as tf tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd) if self.maxnorm is not None: # Ensure maxnorm constraints are initially satisfied head_init = dense_maxnorm(head_init, self.maxnorm) rel_init = dense_maxnorm(rel_init, self.maxnorm) tail_init = dense_maxnorm(tail_init, self.maxnorm) self.head_embedding_vars = tf.Variable(head_init) self.rel_embedding_vars = tf.Variable(rel_init) self.tail_embedding_vars = tf.Variable(tail_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input) # Model output raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1) self.output, self.loss = self._create_output_and_loss(raw_output) # Optimization self.train_step = self.opt.minimize(self.loss) if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball head_constraint = self._norm_constraint_op(self.head_embedding_vars, tf.unique(self.head_input)[0],
tensorflow.nn.embedding_lookup
6,950
import tensorflow as tf with tf.name_scope(name, "click_loglikelihood"): ob_prob=tf.nn.softmax(propensity) rel_prob=tf.nn.softmax(train_output) click_prob=ob_prob*rel_prob click_prob_norm=click_prob/tf.reduce_sum(click_prob,axis=1,keep_dims=True) label_dis = labels/ tf.reduce_sum(labels, 1, keep_dims=True) entropy = tf.reduce_sum(tf.math.log(click_prob_norm)*label_dis,1) return tf.reduce_mean(entropy) def click_weighted_pairwise_loss(self, output, labels, propensity_weights, name=None): """Computes pairwise entropy loss with propensity weighting.
tensorflow.reduce_sum
6,951
import tensorflow as tf else: if activation == 'linear': cell_basic = tf.contrib.rnn.BasicRNNCell(state_size,activation=tf.identity) cell_drop=tf.contrib.rnn.DropoutWrapper(cell_basic,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
tensorflow.contrib.rnn.BasicRNNCell
6,952
import tensorflow as tf batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])), tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)), update_param_noise_threshold_expr, ] _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
tensorflow.group
6,953
import tensorflow as tf diff_vec = tail_embed - (head_embed + rel_embed) # negative dist so higher scores are better (important for pairwise loss) if self.dist == 'manhattan': raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1) elif self.dist == 'euclidean': # +eps because gradients can misbehave for small values in sqrt raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS) elif self.dist == 'sqeuclidean': raw_output = -tf.reduce_sum(tf.square(diff_vec), 1) else: raise Exception('Unknown distance type') # Model output self.output, self.loss = ranking_margin_objective(raw_output, self.margin) # Optimization with postprocessing to limit embedding vars to L2 ball self.train_step = self.opt.minimize(self.loss) unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0] self.post_step = self._norm_constraint_op(self.entity_embedding_vars, unique_ent_indices, self.maxnorm)
tensorflow.concat
6,954
import tensorflow as tf tf.summary.scalar('importance weight',tf.reduce_mean(self.weight_ph)) if ent_coef_loss is not None: tf.summary.scalar('ent_coef_loss', ent_coef_loss) tf.summary.scalar('ent_coef', self.ent_coef)
tensorflow.summary.scalar
6,955
import tensorflow as tf for i in range(n_highway): with tf.variable_scope('CNN_high_%s' % i) as scope: W_carry = tf.get_variable(
tensorflow.variable_scope
6,956
import tensorflow as tf ids_ta.size() + tf.size(encoded_ids)), encoded_ids) return i + 1, words, ids_ta _, _, ids_ta = tf.while_loop( lambda i, *_: i < num_words, _WordsToIds, loop_vars=(tf.constant(0, tf.int32), words, ids_ta), parallel_iterations=30, back_prop=False) ids = ids_ta.stack() return ids, self._TokenToString(ids)
tensorflow.constant
6,957
import tensorflow as tf def train(loss_val, var_list): optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate) grads = optimizer.compute_gradients(loss_val, var_list=var_list) if FLAGS.debug: # print(len(var_list)) for grad, var in grads: utils.add_gradient_summary(grad, var) return optimizer.apply_gradients(grads) def train_z(loss,Z): return tf.gradients(ys = loss, xs = Z) def main(argv=None): keep_probability = tf.placeholder(tf.float32, name="keep_probabilty") image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image") annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation") z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z") # pred_annotation, logits = inference(image, keep_probability,z) # tf.summary.image("input_image", image, max_outputs=2) # tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2) # tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2) # loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, # labels=tf.squeeze(annotation, squeeze_dims=[3]), # name="entropy")))
tensorflow.placeholder
6,958
import tensorflow as tf params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] # compute softplus activation z2, ldj = log_gaussianize(x2, mus, log_sigmas) z2 = tf.where(x2 > self.epsilon, z2, x2) ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj)) return z2, tf.math.reduce_sum(ldj, axis=[1,2,3]) def _inverse(self, x1, z2, **kwargs): params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True)) x2 = tf.where(z2 > self.epsilon, x2, z2) ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj)) return x2, tf.math.reduce_sum(ldj, axis=[1,2,3]) def half_gaussianize(x, log_sigmas, inverse=tf.constant(False)): if inverse: z = tf.math.exp(log_sigmas)*x ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3]) else: z = x*tf.math.exp(-log_sigmas) ldj = -tf.math.reduce_sum(log_sigmas, axis=[1,2,3]) return z, ldj class HalfGaussianize(Parameterize): """ Implementation of parameterize for a half-Gaussian prior.
tensorflow.math.reduce_sum
6,959
import tensorflow as tf def multi_box_loss(y_true, y_pred): num_batch = tf.shape(y_true)[0]
tensorflow.shape
6,960
from tensorflow.python.framework import tensor_util def testIsScalar(self): with self.test_session(): mu = 1. sigma = 2. normal = dists.Normal(mu, sigma, validate_args=True) self.assertTrue(tensor_util.constant_value(normal.is_scalar_event)) self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch)) normal = dists.Normal([mu], [sigma], validate_args=True) self.assertTrue(tensor_util.constant_value(normal.is_scalar_event)) self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
tensorflow.python.framework.tensor_util.constant_value
6,961
import tensorflow as tf input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128)) if tf.random.uniform(()) > 0.5: input_image = tf.image.flip_left_right(input_image) input_mask = tf.image.flip_left_right(input_mask) input_image, input_mask = normalize(input_image, input_mask) return input_image, input_mask def load_image_test(datapoint): input_image = tf.image.resize(datapoint['image'], (512, 512)) input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128)) input_image, input_mask = normalize(input_image, input_mask) return input_image, input_mask def main(_): train_examples = info.splits['train'].num_examples batch_size = 8 steps_per_epoch = train_examples // batch_size
tensorflow.image.resize
6,962
import tensorflow as tf # specific to FeUdal self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim)) self.ri = tf.placeholder(tf.float32, (None,)) self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim)) def build_perception(self): self._obs = tf.expand_dims(self.obs, -1) # !
tensorflow.placeholder
6,963
import tensorflow as tf def baseline_loss(rewards, weights, average_across_timesteps=False, average_across_batch=True): """ :param rewards: tensor of shape (batch_size, time_steps) :param weights: tensor of shape (batch_size, time_steps) """ batch_size = tf.shape(rewards)[0] cost = rewards ** 2 cost = tf.reduce_sum(cost * weights, axis=1) if average_across_timesteps: total_size = tf.reduce_sum(weights, axis=1) total_size += 1e-12 # just to avoid division by 0 for all-0 weights cost /= total_size cost = tf.reduce_sum(cost) if average_across_batch: cost /= tf.to_float(batch_size) return cost
tensorflow.reduce_sum
6,964
from tensorflow.python.client import graph_util def _calc_conv_flops(graph, node): """Calculates the compute resources needed for Conv2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined()
tensorflow.python.client.graph_util.tensor_shape_from_node_def_name
6,965
import tensorflow as tf logits[None], num_samples=self._sample_batch_size)) mask = tf.cast(mask, tf.float32)[:, None] relabelled_tasks = mask * orig_tasks + (1 - mask) * relabelled_tasks states_and_tasks = self._task_distribution.combine(states, relabelled_tasks) next_states_and_tasks = self._task_distribution.combine( next_states, relabelled_tasks) new_observation = tf.concat( [states_and_tasks[:, None], next_states_and_tasks[:, None]], axis=1) assert new_observation.shape == experience.observation.shape experience = experience.replace(observation=new_observation) return experience
tensorflow.concat
6,966
import tensorflow as tf if not phase_train: if FLAGS.forward_only: all_logits = tf.concat(all_logits, 0) fetches = [all_logits] + enqueue_ops else: all_top_1_ops = tf.reduce_sum(all_top_1_ops) all_top_5_ops = tf.reduce_sum(all_top_5_ops) fetches = [all_top_1_ops, all_top_5_ops] + enqueue_ops return (enqueue_ops, fetches) extra_nccl_ops = [] apply_gradient_devices, gradient_state = ( self.variable_mgr.preprocess_device_grads(device_grads)) training_ops = [] for d, device in enumerate(apply_gradient_devices): with tf.device(device): total_loss = tf.reduce_mean(losses) avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state) gradient_clip = FLAGS.gradient_clip learning_rate = self.model_conf.get_learning_rate() if self.dataset and FLAGS.num_epochs_per_decay > 0: num_batches_per_epoch = ( self.dataset.num_examples_per_epoch() / self.batch_size) decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay) # Decay the learning rate exponentially based on the number of steps. learning_rate = tf.train.exponential_decay( FLAGS.learning_rate, global_step, decay_steps, FLAGS.learning_rate_decay_factor, staircase=True)
tensorflow.device
6,967
import tensorflow as tf # random flip on a batch of images def batch_random_flip(input_): """Simultaneous horizontal random flip.""" if isinstance(input_, (float, int)): return input_ shape = input_.get_shape().as_list() batch_size = shape[0] height = shape[1] width = shape[2] channels = shape[3] res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_) res = [elem[0, :, :, :] for elem in res] res = [tf.image.random_flip_left_right(elem) for elem in res] res = [tf.reshape(elem, [1, height, width, channels]) for elem in res] res = tf.concat(axis=0, values=res) return res # build a one hot representation corresponding to the integer tensor # the one-hot dimension is appended to the integer tensor shape def as_one_hot(input_, n_indices):
tensorflow.split
6,968
import tensorflow as tf shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_std = tf.exp(alpha_logstd) # Compute epsilon from {n_samples} standard Gaussian
tensorflow.exp
6,969
import tensorflow as tf predictions: A Tensor of size [batch_size, 4]. labels: A Tensor of size [batch_size, 4]. params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. Returns: A Tensor of size [batch_size], denoting the error between the quaternions. """ assertions = [] assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)), ['The l2 norm of each prediction quaternion vector should be 1.'])) assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), ['The l2 norm of each label quaternion vector should be 1.'])) with tf.name_scope(name): with tf.control_dependencies(assertions): product = tf.multiply(predictions, labels) internal_dot_products = tf.reduce_sum(product, [1]) logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) return logcost def log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'): """A helper function to compute the mean error between batches of quaternions.
tensorflow.square
6,970
import tensorflow as tf self.assertAllEqual([[3, 5]], result) self.assertAllEqual([1], batch_size) def test_two(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): batch_size = tf.shape(a)[0] return a + b, tf.tile([batch_size], [batch_size]) output0 = f(tf.constant([1]), tf.constant([2])) output1 = f(tf.constant([2]), tf.constant([3])) tp = pool.ThreadPool(2)
tensorflow.shape
6,971
import tensorflow as tf epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=0) ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6) ratio = tf.clip_by_value(ratio, 0, 10) surr1 = batch['advantage'] * ratio
tensorflow.clip_by_value
6,972
import tensorflow as tf or the images are smaller than the crop dimensions. """ if not image_list: raise ValueError('Empty image_list.') # Compute the rank assertions. rank_assertions = [] for i in range(len(image_list)): image_rank = tf.rank(image_list[i]) rank_assert = tf.Assert( tf.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank]) rank_assertions.append(rank_assert) image_shape = control_flow_ops.with_dependencies( [rank_assertions[0]], tf.shape(image_list[0])) image_height = image_shape[0] image_width = image_shape[1]
tensorflow.equal
6,973
import tensorflow as tf c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout) q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout) c_emb = tf.concat([c_emb, ch_emb], axis=2) q_emb = tf.concat([q_emb, qh_emb], axis=2) c_emb = highway(c_emb, size = d, scope = "highway", dropout = self.dropout, reuse = None) q_emb = highway(q_emb, size = d, scope = "highway", dropout = self.dropout, reuse = True) with tf.variable_scope("Embedding_Encoder_Layer"): c = residual_block(c_emb, num_blocks = 1, num_conv_layers = 4, kernel_size = 7, mask = self.c_mask, num_filters = d, num_heads = nh, seq_len = self.c_len, scope = "Encoder_Residual_Block",
tensorflow.variable_scope
6,974
import tensorflow as tf self._batch_size = batch_size self._dropout_keep_prob = dropout_keep_prob self._out_vocab_size = out_vocab_size self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len], name='x') self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size], name='y') # The bidirectional rnn code requires seq_lens as int64 self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens') self.example_weights = tf.placeholder(tf.float32, [batch_size], name='example_weights') embeddings = c2v.GetEmbeddings(self.x) self._inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, max_sequence_len, embeddings)] # Need to prepare a mask to zero out the padding symbols. # Make a batch_size x max_sequence_len matrix where each # row contains the length repeated max_sequence_len times. lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1) lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len]) # Make a matrix where each row contains [0, 1, ..., max_sequence_len] r = tf.range(0, max_sequence_len, 1) range_row = tf.expand_dims(r, 0) range_tiled = tf.tile(range_row, [batch_size, 1])
tensorflow.squeeze
6,975
import tensorflow as tf } features = { key: tf.expand_dims(tensor, -1) for key, tensor in feature_placeholders.items() } features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2]) return tf.estimator.export.ServingInputReceiver(features, feature_placeholders) # Create custom estimator's train and evaluate function def train_and_evaluate(output_dir, use_keras):
tensorflow.squeeze
6,976
import tensorflow as tf mem_name = "mems" mems = mems.get(mem_name, None) inp_k = tf.transpose(features["input_k"], [1, 0]) inp_q = tf.transpose(features["input_q"], [1, 0]) seg_id = tf.transpose(features["seg_id"], [1, 0]) inp_mask = None perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0]) if FLAGS.num_predict is not None: # [num_predict x tgt_len x bsz] target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0]) else: target_mapping = None # target for LM loss
tensorflow.transpose
6,977
import tensorflow as tf metrics = [] metrics.append(tf.summary.histogram('point_distance', dists)) metrics.append(tf.summary.scalar('training/trajectory_length', tf.reduce_sum(dists))) self.blur_ph = tf.placeholder(dtype=tf.float32) metrics.append(tf.summary.scalar('training/blur_sigma', self.blur_ph)) pred = self.embedding_test[1:-1]*2 - self.embedding_test[0:-2] pred_error = l2(pred - self.embedding_test[2:]) mean_dist, mean_pred_error = tf.reduce_mean(dists), tf.reduce_mean(pred_error) improvement = (mean_dist-mean_pred_error)/mean_dist pairwise_improvement = tf.nn.relu(dists[1:] - pred_error) pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype) self.pairwise_improvement_bool = pairwise_improvement_bool metrics.append(tf.summary.scalar('training/avg_dist', mean_dist)) metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error)) metrics.append(tf.summary.scalar('training/improvement', improvement))
tensorflow.reduce_mean
6,978
import tensorflow as tf if self.options.add_first_word_prob_for_phrase: # add prob of the first word to each phrase attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words, self.phrase_starts, vocab_dist, attn_dist) # match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize] batch_nums = tf.range(0, limit=batch_size) # shape (batch_size) batch_nums = tf.expand_dims(batch_nums, axis=1) # shape (batch_size, 1) batch_nums = tf.tile(batch_nums, [1, passage_length]) # shape (batch_size, passage_length) step_nums = tf.range(0, limit=passage_length) # [passage_length] step_nums = tf.expand_dims(step_nums, axis=0) # shape (1, passage_length)
tensorflow.range
6,979
import tensorflow as tf tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch") tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial leanring rate")
tensorflow.app.flags.DEFINE_float
6,980
import tensorflow as tf self.kernel = self.gaussian_kernel(size,mean,std) self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
tensorflow.tile
6,981
from tensorflow.python.ops import init_ops padding='SAME', data_format=None, rate=1, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, use_spectral_norm=False, is_training=False,
tensorflow.python.ops.init_ops.zeros_initializer
6,982
import tensorflow as tf tf.summary.histogram(v.name[:-2] + '_hist', v) tf.summary.histogram(v.name[:-2] + '_grad_hist', g) with tf.control_dependencies([train_op]), tf.name_scope('ema'): ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step) train_op = ema.apply(tf.trainable_variables()) return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
tensorflow.trainable_variables
6,983
from tensorflow.python.ops import check_ops

                     self.event_ndims):
        ndims = tensor_util.constant_value(ndims)
        sample_ndims = (ndims - self._batch_ndims_static -
                        self._event_ndims_static)
        if sample_ndims < 0:
          raise ValueError(
              "expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
              (self._batch_ndims_static, self._event_ndims_static, ndims))
        return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
      else:
        with ops.name_scope(name="sample_ndims"):
          sample_ndims = ndims - self.batch_ndims - self.event_ndims
          if self.validate_args:
            sample_ndims = control_flow_ops.with_dependencies(
                [check_ops.assert_non_negative(sample_ndims)], sample_ndims)
        return sample_ndims

  def get_dims(self, x, name="get_dims"):
    """Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.

    Example:

    ```python
    x = ... # Tensor with shape [4, 3, 2, 1]
    sample_dims, batch_dims, event_dims = _DistributionShape(
        batch_ndims=2, event_ndims=1).get_dims(x)
    # sample_dims == [0]
    # batch_dims == [1, 2]
tensorflow.python.ops.check_ops.assert_non_negative
6,984
import tensorflow as tf

  seq_length = sequence_shape[1]
  width = sequence_shape[2]

  flat_offsets = tf.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.reshape(positions + flat_offsets, [-1])
  flat_sequence_tensor = tf.reshape(sequence_tensor,
                                    [batch_size * seq_length, width])
  output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
  return output_tensor


def input_fn_builder(features, seq_length, max_predictions_per_seq):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  all_input_ids = []
tensorflow.gather
6,985
from tensorflow.python.training import server_lib

    cs = server_lib.ClusterSpec({
        "worker": ["localhost:%s" % port1],
        "ps": ["localhost:%s" % port2]
    })
    worker = server_lib.Server(cs, job_name="worker", start=True)
    ps = server_lib.Server(cs, job_name="ps", start=True)

    return worker, ps

  @contextlib.contextmanager
  def _maybeWithDevice(self, device):
tensorflow.python.training.server_lib.Server
6,986
import tensorflow as tf "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if task_name != "sts-b": probabilities = tf.nn.softmax(logits, axis=-1) predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) else: probabilities = logits
tensorflow.matmul
6,987
import tensorflow as tf

  def compute_gradients(self, loss, var_list=None, *args, **kwargs):
    if var_list is None:
      var_list = (
          tf.trainable_variables() +
          tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    replaced_list = var_list

    if self._scale != 1.0:
      loss = tf.scalar_mul(self._scale, loss)
    gradvar = self._optimizer.compute_gradients(loss, replaced_list, *args, **kwargs)

    final_gradvar = []
    for orig_var, (grad, var) in zip(var_list, gradvar):
      if var is not orig_var:
        grad = tf.cast(grad, orig_var.dtype)
      if self._scale != 1.0:
tensorflow.scalar_mul
6,988
from tensorflow.contrib.eager.python.examples.spinn import data

    vocab = data.load_vocabulary(self._temp_data_dir)
tensorflow.contrib.eager.python.examples.spinn.data.load_vocabulary
6,989
import tensorflow as tf

    self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())
    self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())
    self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())
    self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())
    self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())

  def test_points_mask_iou(self):
    masks1 = tf.constant([[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [1, 0, 1, 0, 1],
                          [0, 1, 0, 1, 0]], dtype=tf.int32)
    masks2 = tf.constant([[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [1, 0, 1, 0, 1]], dtype=tf.int32)
    iou = isu.points_mask_iou(masks1=masks1, masks2=masks2)
    expected_iou = tf.constant([[0, 0, 0],
                                [0, 1, 0.6],
                                [0, 0.6, 1.0],
                                [0, 0.4, 0]], dtype=tf.float32)
    self.assertAllClose(iou.numpy(), expected_iou.numpy())

  def test_points_mask_pairwise_iou(self):
    masks1 = tf.constant([[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [1, 0, 1, 0, 1],
                          [0, 1, 0, 1, 0]], dtype=tf.int32)
    masks2 = tf.constant([[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [1, 1, 1, 1, 0],
                          [1, 0, 1, 1, 1]], dtype=tf.int32)
tensorflow.constant
6,990
import tensorflow as tf

            # Circuit output receives recurrent input h1
            c2, g2 = self.circuit_output(
                h1=h1,
                layer=layer,
                var_scope=var_scope,
                layer_idx=layer_idx)

            with tf.variable_scope(
                    '%s/c2_bn' % var_scope,
                    reuse=self.scope_reuse) as scope:
                c2 = tf.contrib.layers.batch_norm(
                    inputs=c2,
                    scale=True,
                    center=False,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)
tensorflow.contrib.layers.batch_norm
6,991
import tensorflow as tf

from collections import deque


def sample(logits):
    noise = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
tensorflow.shape
6,992
import tensorflow as tf

        int.
      num_bits: Number of bits in the representation.
      base: Base of the representation.

    Returns:
      Integer representation of this number.
    """
    x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))
    x_labels = []
    for i in range(num_bits):
      x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i))
    res = sum(x_labels)
    return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1]))

  def int_to_bit(self, x_int, num_bits, base=2):
    """Turn x_int representing numbers into a bitwise (lower-endian) tensor.

    Args:
      x_int: Tensor containing integer to be converted into base notation.
tensorflow.to_int32
6,993
from tensorflow.contrib.framework import deprecated

        label_name=label_name,
        weight_column_name=weight_column_name)


@deprecated(
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
tensorflow.contrib.framework.deprecated
6,994
import tensorflow as tf

                                            FLAGS.max_seq_length, tokenizer)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d", len(predict_examples))
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    if FLAGS.use_tpu:
      # Warning: According to tpu_estimator.py Prediction on TPU is an
tensorflow.logging.info
6,995
import tensorflow as tf

    # If the input is a string, then convert each string to the
    # equivalent float value.
    if tf_input_dtype == tf.string:
        in0 = tf.strings.to_number(in0, tf.int32)
        in1 = tf.strings.to_number(in1, tf.int32)

    add = tf.add(in0, in1, "ADD")
    sub = tf.subtract(in0, in1, "SUB")

    # Cast or convert result to the output dtype.
    if tf_output0_dtype == tf.string:
        cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
tensorflow.subtract
6,996
import tensorflow as tf

print('\nB-B=')
print(sess.run(B - B))

print('\nB*I=')
BI = tf.matmul(B, identity_matrix)
print(sess.run(BI))

print('\ntranspose(C)=')
print(sess.run(tf.transpose(C)))

print('\ntranspose(D)=')
print(sess.run(tf.transpose(D)))

print('\ninverse(D)=')
print(sess.run(tf.matrix_inverse(D)))

print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))

print('\ncholesky(D):')
print(sess.run(tf.cholesky(identity_matrix)))

print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))
tensorflow.transpose
6,997
import tensorflow as tf

            return ret
        else:
            return tf.nn.bias_add(ret*self.gamma, self.beta, data_format=self.data_format)
tensorflow.nn.bias_add
6,998
import tensorflow as tf

            used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
            cur_mean, cur_var = used_mean, used_var

            if bn_lag > 0.:
                used_mean -= (1. - bn_lag) * (used_mean - tf.stop_gradient(mean))
                used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))
                used_mean /= (1. - bn_lag**(step + 1))
tensorflow.stop_gradient
6,999