Columns:
  seed      string (lengths 25 to 2.89k) — a TensorFlow code snippet
  seed_api  string (lengths 14 to 102) — the fully qualified API used by the seed
  index     int64 (0 to 14.8k)
import tensorflow as tf

    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer,
                               regularizer=regularizer, trainable=True)

def fc_layer(self, bottom, in_size, out_size, name):
    with tf.variable_scope(name):
        weights, biases = self.get_fc_var(in_size, out_size, name)

        x = tf.reshape(bottom, [-1, in_size])
        fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
tensorflow.variable_scope
5,700
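Aside (illustrative, not a dataset row): a minimal sketch of the variable-sharing pattern that tf.variable_scope enables in the seed above. It assumes a TF1-style graph API, which TF 2.x still exposes as tensorflow.compat.v1; the helper name `dense` is hypothetical.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def dense(x, units, name):
    # Variables created under the same scope name are shared on reuse.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        w = tf.get_variable('w', [int(x.shape[-1]), units])
        b = tf.get_variable('b', [units], initializer=tf.zeros_initializer())
        return tf.matmul(x, w) + b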
from tensorflow.contrib import layers

                  key=lambda x: x.key) if self._linear_feature_columns else None

  def _get_dnn_feature_columns(self):
    return sorted(set(
        self._dnn_feature_columns)) if self._dnn_feature_columns else None

  def _dnn_logits(self, features):
    net = layers.input_from_feature_columns(
        features,
        self._get_dnn_feature_columns(),
        weight_collections=[self._dnn_weight_collection])
    for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
      net = layers.legacy_fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._dnn_activation_fn,
          weight_collections=[self._dnn_weight_collection],
          bias_collections=[self._dnn_weight_collection],
          name="hiddenlayer_%d" % layer_id)
      self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id)
    logit = layers.legacy_fully_connected(
        net,
        self._num_label_columns(),
        weight_collections=[self._dnn_weight_collection],
tensorflow.contrib.layers.legacy_fully_connected
5,701
import tensorflow as tf

    with tf.variable_scope(self.mask_scope):
      for var, var_name_n_prune_ratio in zip(self.maskable_vars, self.var_names_n_prune_ratios):
        # obtain the dynamic pruning ratio
        assert var.name == var_name_n_prune_ratio[0], \
            'unmatched variable names: %s vs. %s' % (var.name, var_name_n_prune_ratio[0])
        prune_ratio = self.__calc_prune_ratio_dyn(var_name_n_prune_ratio[1])

        # create a mask and non-masked backup for each variable
        name = var.name.replace(':0', '_mask')
        mask = tf.get_variable(name, initializer=tf.ones(var.shape), trainable=False)
        name = var.name.replace(':0', '_var_bkup')
        var_bkup = tf.get_variable(name, initializer=var.initialized_value(), trainable=False)

        # create update operations
        var_bkup_update_op = var_bkup.assign(tf.where(mask > 0.5, var, var_bkup))
        with tf.control_dependencies([var_bkup_update_op]):
          mask_thres = tf.contrib.distributions.percentile(tf.abs(var_bkup), prune_ratio * 100)
          mask_update_op = mask.assign(tf.cast(tf.abs(var_bkup) > mask_thres, tf.float32))
        with tf.control_dependencies([mask_update_op]):
          prune_op = var.assign(var_bkup * mask)

        # record pruning masks & operations
        masks += [mask]
        prune_ops += [prune_op]

    return masks, tf.group(prune_ops)

  def __calc_prune_ratio_dyn(self, prune_ratio_fnl):
tensorflow.where
5,702
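Aside (illustrative, not a dataset row): a minimal sketch of tf.where's elementwise-select form used by the seed above, runnable in current TensorFlow:

import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0])
# Elementwise select: keep positive entries, zero out the rest.
masked = tf.where(x > 0, x, tf.zeros_like(x))  # -> [1.0, 0.0, 3.0]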
import tensorflow as tf

        additional_loss = vat_loss + ent_loss
    elif FLAGS.method == 'baseline':
        additional_loss = 0
    else:
        raise NotImplementedError
    loss = nll_loss + additional_loss

    opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
    tvars = tf.trainable_variables()
    grads_and_vars = opt.compute_gradients(loss, tvars)
    train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
    return loss, train_op, global_step, ul_u_updated

def build_eval_graph(x, y, ul_x, ul_u):
    losses = {}
tensorflow.trainable_variables
5,703
import tensorflow as tf """Computes mean and std for batch then apply batch_normalization on batch. Returns ------- A tuple length of 3, (normalized_tensor, mean, variance). """ mean, var = tf.nn.moments( x, reduction_axes, shift=None, name=None, keep_dims=False) if sorted(reduction_axes) == range(ndim(x))[:-1]: normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) else: # need broadcasting target_shape = [] for axis in range(get_ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(tf.shape(x)[axis])
tensorflow.nn.batch_normalization
5,704
import tensorflow as tf

    name = 'spatial_mean' + str(self.counts['spatial_mean'])
    self.counts['spatial_mean'] += 1
    axes = [1, 2] if self.data_format == 'NHWC' else [2, 3]
    self.top_layer = tf.reduce_mean(
        self.top_layer, axes, keep_dims=keep_dims, name=name)
    return self.top_layer
tensorflow.reduce_mean
5,705
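Aside (illustrative, not a dataset row): the seed computes a spatial mean over the height/width axes; a minimal TF 2.x sketch of the same idea (keep_dims is the TF1 spelling of keepdims):

import tensorflow as tf

images = tf.random.normal([8, 32, 32, 64])          # NHWC batch
spatial_mean = tf.reduce_mean(images, axis=[1, 2])  # -> shape [8, 64]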
import tensorflow as tf

def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
tensorflow.FixedLenFeature
5,706
import tensorflow as tf

    num_features = features.get_shape()[1]
    centers = tf.get_variable(
        'centers', [num_classes, num_features],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0),
        trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alpha) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers

def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
    """Adds a similarity loss term, the correlation between two representations.

    Args:
      source_samples: a tensor of shape [num_samples, num_features]
      target_samples: a tensor of shape [num_samples, num_features]
tensorflow.nn.l2_loss
5,707
import tensorflow as tf

    Returns:
      the triplet loss.
    """
    with tf.name_scope(name):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
    return loss

def decov_loss(xs, name='decov_loss'):
tensorflow.subtract
5,708
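Aside (illustrative, not a dataset row): a self-contained TF 2.x sketch of the triplet-loss pattern in the seed above; the function name and default margin are assumptions:

import tensorflow as tf

def triplet_loss(anchor, positive, negative, alpha=0.2):
    # Squared L2 distances between anchor/positive and anchor/negative pairs.
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=1)
    # Hinge: push negatives at least `alpha` farther away than positives.
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + alpha, 0.0))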
import tensorflow as tf

        return pred_strings

    def id2word(self, word_ids, name=None):
        mapping_strings = self.load_word_data()
        reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
            mapping_strings, name=name
        )
tensorflow.contrib.lookup.index_to_string_table_from_tensor
5,709
import tensorflow as tf

        X = tf.reshape(X, (-1, in_w // 2, in_h // 2, out_ch))  # Sanity shape check
        return X

    def _add_batch_norm(self, X, in_ch, decay=0.9, epsilon=1e-5, offset=None, scale=None,
                        is_train=False, no_moving_average=False):
        with tf.variable_scope('batch_norm'):
            if offset is None:
                offset = self._make_var('offset', (in_ch,), init_constant=0)
            if scale is None:
                scale = self._make_var('scale', (in_ch,), init_constant=1)
tensorflow.variable_scope
5,710
import tensorflow as tf

        self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)

        with tf.variable_scope("attention"):
            self.w_attn_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size])
tensorflow.variable_scope
5,711
import tensorflow as tf
import model_interpreter as interpreter
import network_utils as nut
import math
from tensorflow.contrib.tensorboard.plugins import projector
from Bunch import Bunch

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
                           'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001, 'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tensorflow.app.flags.DEFINE_string
5,712
import tensorflow as tf

        self.c_maxlen = tf.reduce_max(self.c_len)
        self.q_maxlen = tf.reduce_max(self.q_len)
        self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen])
        self.q = tf.slice(self.q, [0, 0], [N, self.q_maxlen])
        self.c_mask = tf.slice(self.c_mask, [0, 0], [N, self.c_maxlen])
        self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen])
tensorflow.slice
5,713
import tensorflow as tf

    rpn_score_loss_t = tf.reshape(rpn_score_loss, [1])
    rpn_box_loss_t = tf.reshape(rpn_box_loss, [1])
    total_fast_rcnn_loss_t = tf.reshape(total_fast_rcnn_loss, [1])
    fast_rcnn_class_loss_t = tf.reshape(fast_rcnn_class_loss, [1])
    fast_rcnn_box_loss_t = tf.reshape(fast_rcnn_box_loss, [1])
    mask_loss_t = tf.reshape(mask_loss, [1])
tensorflow.reshape
5,714
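Aside (illustrative, not a dataset row): the seed reshapes scalar losses to rank-1 tensors, typically so they can be concatenated or summarized; a minimal TF 2.x sketch with assumed loss values:

import tensorflow as tf

loss_a = tf.constant(0.3)
loss_b = tf.constant(0.7)
# Reshape scalars to [1] so they can be concatenated into one vector.
stacked = tf.concat([tf.reshape(loss_a, [1]), tf.reshape(loss_b, [1])], axis=0)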
import tensorflow as tf

    if reuse:
        tf.get_variable_scope().reuse_variables()
    with tf.name_scope('Encoder'):
        e_dense_1 = tf.nn.relu(dense(x, input_dim, n_l1, 'e_dense_1'))
tensorflow.name_scope
5,715
import tensorflow as tf

    def _unsparsify(x):
        if not isinstance(x, tf.IndexedSlices):
            return x
        assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape"
        indices = x.indices
        while indices.shape.ndims < x.values.shape.ndims:
            indices = tf.expand_dims(indices, -1)
        return tf.scatter_nd(indices, x.values, x.dense_shape)

    # partial derivatives to xs (usually the params of the neural net)
    d_xs_new = dv[len(checkpoints_other):]
    for j in range(len(xs)):
tensorflow.expand_dims
5,716
import tensorflow as tf

      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)
tensorflow.nn.bias_add
5,717
from tensorflow.python.training import training as train

      * `gradients` is empty.
  """
  loss = ops.convert_to_tensor(loss)
  contrib_framework.assert_scalar(loss)
  if global_step is None:
    global_step = train.get_global_step()
  else:
    train.assert_global_step(global_step)
  with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
    # Update ops take UPDATE_OPS collection if not provided.
    if update_ops is None:
      update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are ran before computing loss.
    if update_ops:
tensorflow.python.training.training.assert_global_step
5,718
import tensorflow as tf

  @property
  def hparams(self):
    return self._hparams

  @property
  def has_input(self):
    if self._problem_hparams:
      return "inputs" in self._problem_hparams.input_modality
    else:
      return True

  def call(self, features):
    tf.get_variable_scope().set_initializer(
        optimize.get_variable_initializer(self.hparams))
    with self._eager_var_store.as_default():
      self._fill_problem_hparams_features(features)
      sharded_features = self._shard_features(features)
      sharded_logits, losses = self.model_fn_sharded(sharded_features)
      if isinstance(sharded_logits, dict):
        concat_logits = {}
        for k, v in sharded_logits.iteritems():
          concat_logits[k] = tf.concat(v, 0)
        return concat_logits, losses
      else:
tensorflow.get_variable_scope
5,719
import tensorflow as tf

    sl_unhead = tf.shape(rep_unhead_tensor)[1]

    attn_result = tf.cond(
        tf.equal(sl_head, 0),
        lambda: tf.zeros([bs, 0, hn], tf.float32),
        lambda: self_attention_for_selected_head(
tensorflow.equal
5,720
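Aside (illustrative, not a dataset row): the seed guards an attention call with tf.cond on a tf.equal predicate; a minimal self-contained sketch of that control-flow pattern, with assumed shapes:

import tensorflow as tf

n = tf.constant(0)
# Both branches of tf.cond must return tensors of the same dtype; here an
# empty tensor is produced when the count is zero.
result = tf.cond(tf.equal(n, 0),
                 lambda: tf.zeros([0, 4], tf.float32),
                 lambda: tf.ones([3, 4], tf.float32))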
import tensorflow as tf

    accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))

    # --------------------------------------------
    # step4: Hooray, now train the neural network
    # --------------------------------------------
    # Note that log_device_placement can be turned ON but will cause console spam.

    # Initializing the variables
    init = tf.initialize_all_variables()

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    best_accuracy = 0.0

    # sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
    if (FLAG == 'train'):  # If it is the training mode
        with tf.Session() as sess:
            # tf.initialize_all_variables().run()
            sess.run(init)  # .run()
            f.write("---Save model \n")
tensorflow.train.Saver
5,721
import tensorflow as tf

    candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts)  # [num_words, max_span_width]
    candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1))  # [num_words, max_span_width]
    candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices))  # [num_words, max_span_width]
    flattened_candidate_mask = tf.reshape(candidate_mask, [-1])  # [num_words * max_span_width]
    candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask)  # [num_candidates]
    candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask)  # [num_candidates]
    candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask)  # [num_candidates]

    candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids)  # [num_candidates]

    candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts, candidate_ends)  # [num_candidates, emb]
    candidate_mention_scores = self.get_mention_scores(candidate_span_emb)  # [k, 1]
tensorflow.reshape
5,722
import tensorflow as tf

      self.assertEqual((2, 2), res[0].shape)

      # Test that previous-feeding model ignores inputs after the first.
      dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
      with tf.variable_scope("other"):
        d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
            enc_inp, dec_inp2, cell, num_encoder_symbols=2,
            num_decoder_symbols=5, embedding_size=2,
            feed_previous=tf.constant(True))
      sess.run([tf.global_variables_initializer()])
      tf.get_variable_scope().reuse_variables()
      d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols=2,
          num_decoder_symbols=5, embedding_size=2, feed_previous=True)
      d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
          enc_inp, dec_inp2, cell, num_encoder_symbols=2,
tensorflow.constant
5,723
import tensorflow as tf

def q_explained_variance(qpred, q):
    _, vary = tf.nn.moments(q, axes=[0, 1])
tensorflow.nn.moments
5,724
import tensorflow as tf

                     kernel_size = 5,
                     mask = self.c_mask,
                     num_filters = d,
                     num_heads = nh,
                     seq_len = self.c_len,
                     scope = "Model_Encoder",
                     bias = False,
                     reuse = True if i > 0 else None,
                     dropout = self.dropout)
                )

        with tf.variable_scope("Output_Layer"):
            start_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[2]], axis = -1), 1, bias = False, name = "start_pointer"), -1)
            end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]], axis = -1), 1, bias = False, name = "end_pointer"), -1)
            self.logits = [mask_logits(start_logits, mask = self.c_mask),
                           mask_logits(end_logits, mask = self.c_mask)]

            logits1, logits2 = [l for l in self.logits]

            outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
                              tf.expand_dims(tf.nn.softmax(logits2), axis=1))
            outer = tf.matrix_band_part(outer, 0, config.ans_limit)
            self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
            self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
tensorflow.concat
5,725
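Aside (illustrative, not a dataset row): a minimal TF 2.x sketch of tf.concat as used in the seed above; shapes must agree on every axis except the concatenation axis:

import tensorflow as tf

a = tf.ones([2, 3])
b = tf.zeros([2, 5])
# Concatenate along the last axis: result has shape [2, 8].
c = tf.concat([a, b], axis=-1)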
import tensorflow as tf

    tf.train.import_meta_graph(metagraph)
    for model in models.values():
        model.import_ops()
    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
    with sv.managed_session(config=config_proto) as session:
        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
tensorflow.ConfigProto
5,726
from tensorflow.contrib.learn.python.learn.graph_actions import infer

      random_seed.set_random_seed(self._config.tf_random_seed)
      contrib_framework.create_global_step(g)
      features, _ = input_fn()
      feed_dict = feed_fn() if feed_fn is not None else None
      predictions = self._get_predict_ops(features)
      if not isinstance(predictions, dict):
        predictions = {'predictions': predictions}
      # TODO(ipolosukhin): Support batching
      return infer(checkpoint_path, predictions, feed_dict=feed_dict)

class Estimator(BaseEstimator):
  """Estimator class is the basic TensorFlow model trainer/evaluator.

  Parameters:
    model_fn: Model function, takes features and targets tensors or dicts of
tensorflow.contrib.learn.python.learn.graph_actions.infer
5,727
from tensorflow.python.ops import array_ops

      `False`, an exception will be raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.beta / (self.alpha - 1.)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 1., mean,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype), self.alpha,
              message="mean not defined for components of self.alpha <= 1"),
      ], mean)

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `alpha > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    var = (math_ops.square(self.beta) /
           (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
tensorflow.python.ops.array_ops.ones
5,728
import tensorflow as tf

constraint_tf = {}
for key in constraint.keys():
    low = constraint[key][0]
    high = constraint[key][1]
    constraint_tf[key] = (tf.constant(low, dtype=tf.float64),
                          tf.constant(high, dtype=tf.float64))

print("N.B.: using direct data entry")
likelihood = sum_pdf(data, nsig, sigmean, sigwidth, nbkg, m0, argpar,
                     constraint_tf['mes'][0], constraint_tf['mes'][1])
nll = tf.neg(tf.reduce_sum(tf.log(likelihood)), name="nll")

variables = tf.all_variables()
grads = tf.gradients(nll, variables)

# ### build constraint inequalities
inequalities = []
for key, (lower, upper) in constraint_tf.iteritems():
    if key != 'mes':
        inequalities.append(vdict[key] - lower)
        inequalities.append(upper - vdict[key])

# ### build bounds instead of inequalities (only for L-BFGS-B, TNC and SLSQP)
# N.B.: order important! Also supply variables to be sure the orders match.
tensorflow.all_variables
5,729
import tensorflow as tf

argpar = tf.Variable(argpar_num, name="argpar", dtype=tf.float64)
m0 = tf.constant(m0_num, name="m0", dtype=tf.float64)

vdict['argpar'] = argpar

# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
def argus_pdf(m, m0, c, p=0.5):
    t = m / m0
    u = 1 - t * t
    argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u)
    return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")

# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
# RooRealVar nbkg("nbkg","#background events",800,0.,10000) ;
nsig = tf.Variable(200, name="nsig", dtype=tf.float64)
nbkg = tf.Variable(800, name="nbkg", dtype=tf.float64)
vdict['nsig'] = nsig
vdict['nbkg'] = nbkg
tensorflow.zeros_like
5,730
import tensorflow as tf

        # Item memories a query
        self._cur_item = self.item_memory(self.input_items)
        self._cur_item_negative = self.item_memory(self.input_items_negative)

    def _construct_placeholders(self):
        self.input_users = tf.placeholder(tf.int32, [None], 'UserID')
        self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')
        self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')

        # Add our placeholders
        add_to_collection(GraphKeys.PLACEHOLDER, [self.input_users,
tensorflow.placeholder
5,731
import tensorflow as tf

    weights = np.random.normal(scale=np.sqrt(1.0/shape[0]), size=shape).astype('f')
    return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)

def init_conv_weights_snn(shape, name=None):
    weights = np.random.normal(scale=np.sqrt(1.0/(shape[0]*shape[1]*shape[2])), size=shape).astype('f')
    return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)

def batched_matrix_vector_multiply(vector, matrix):
    """ computes x^T A in mini-batches. """
    vector_batch_as_matricies = tf.expand_dims(vector, [1])
    mult_result = tf.matmul(vector_batch_as_matricies, matrix)
    squeezed_result = tf.squeeze(mult_result, [1])
    return squeezed_result

def euclidean_loss_layer(a, b, multiplier=100.0, use_l1=False, eps=0.01):
    """ Math: out = (action - mlp_out)'*precision*(action-mlp_out) = (u-uhat)'*A*(u-uhat)"""
    multiplier = tf.constant(multiplier, dtype='float')  # for bc #10000
    uP = a*multiplier - b*multiplier
    if use_l1:
        return tf.reduce_mean(eps*tf.square(uP) + tf.abs(uP))
    return tf.reduce_mean(tf.square(uP))
tensorflow.squeeze
5,732
import tensorflow as tf

    if FLAGS.auto_recover:
        hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator))
tensorflow.data.experimental.CheckpointInputPipelineHook
5,733
import tensorflow as tf

    # placeholder for end of episode mask
    # this value is 1 if the next state corresponds to the end of an episode, in
    # which case there is no Q-value at the next state; at the end of an episode,
    # only the current state reward contributes to the target, not the next
    # state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
    done_mask_ph = tf.placeholder(tf.float32, [None])

    # casting to float on GPU ensures lower data transfer times.
    obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0
    obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0

    # Here, you should fill in your own code to compute the Bellman error. This requires
    # evaluating the current and next Q-values and constructing the corresponding error.
    # TensorFlow will differentiate this error for you, you just need to pass it to the
    # optimizer. See assignment text for details.
    # Your code should produce one scalar-valued tensor: total_error
tensorflow.cast
5,734
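Aside (illustrative, not a dataset row): a minimal TF 2.x sketch of the cast-then-scale idiom in the seed above; integer image tensors must be cast before float division:

import tensorflow as tf

pixels = tf.constant([[0, 128, 255]], dtype=tf.uint8)
# Cast to float32 and scale to [0, 1].
scaled = tf.cast(pixels, tf.float32) / 255.0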
from tensorflow.python.framework import constant_op

                              ['batch2-FC1-F1', 'batch2-FC1-F2']]),
        constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
tensorflow.python.framework.constant_op.constant
5,735
import tensorflow as tf

      [batch_size, num_labels, num_anchors]. If the third dimension is present,
      the lower bound is computed on each slice [:, :, k] independently.
    weights: Per-example loss coefficients, with shape broadcast-compatible with
      that of `labels`.
    surrogate_type: Either 'xent' or 'hinge', specifying which upper bound
      should be used for indicator functions.

  Returns:
    A `Tensor` of shape [num_labels] or [num_labels, num_anchors].
  """
  maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
  maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
  loss_on_negatives = losses_utils.weighted_surrogate_loss(
      labels, logits, surrogate_type, positive_weights=0.0) / maybe_log2
  return tf.reduce_sum(weights * loss_on_negatives, 0)
tensorflow.reduce_sum
5,736
from tensorflow.python.ops import variables

  def _setupSparse(self, is_distributed, dtype):
    with self._maybeWithDevice("/job:ps" if is_distributed else None):
      var0 = variables.Variable(
          [[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype)
      var1 = variables.Variable(
tensorflow.python.ops.variables.Variable
5,737
import tensorflow as tf

          values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
          inter = tf.reshape(values, [self.resolution,
                                      self.resolution,
                                      self.resolution])
          inter = tf.transpose(tf.reduce_max(inter, axis=a))
          im = axs[fig_obj_count, 1].matshow(inter.numpy())
          plt.colorbar(im, ax=axs[fig_obj_count, 1])

          values = sdf_values
          inter = tf.reshape(values, [self.resolution,
                                      self.resolution,
                                      self.resolution])
          inter = tf.transpose(tf.reduce_max(inter, axis=a))
          im = axs[fig_obj_count, 2].matshow(inter.numpy())
          plt.colorbar(im, ax=axs[fig_obj_count, 2])

          fig_obj_count += 1
tensorflow.reshape
5,738
import tensorflow as tf

    def build_ae_model(self):
        self.input = tf.placeholder(tf.uint8, self.batch_shape, name='input')
        self.target = tf.placeholder(tf.uint8, self.batch_shape, name='target')
        self.step = tf.Variable(0, trainable=False, name='global_step')
tensorflow.placeholder
5,739
import tensorflow as tf

      results = [resized_image]
      if masks is not None:
        resized_masks = tf.transpose(
            tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
            [2, 0, 1])
        results.append(resized_masks)
      results.append(tf.shape(resized_image))
      return results

    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
tensorflow.shape
5,740
import tensorflow as tf

                                                        hidden_sizes, model_prob=1.0 - dropout_rate)
        rnd_pred_act_dropout_mask_phs = rnd_pred_act_dropout_mask_generator.generate_dropout_mask_placeholders()
        rnd_pred_act, rnd_pred_act_reg = mlp_variational(x_ph, rnd_pred_act_dropout_mask_phs,
                                                         list(hidden_sizes) + [act_dim],
                                                         activation, output_activation, dropout_rate)
        rnd_pred_act = act_limit * rnd_pred_act

    with tf.variable_scope('rnd_targ_cri'):
        rnd_targ_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                                      list(hidden_sizes) + [1], activation, None), axis=1)

    with tf.variable_scope('rnd_pred_cri'):
        rnd_pred_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                                      list(hidden_sizes) + [1], activation, None), axis=1)
        rnd_pred_cri_in_ph = tf.concat([x_ph, a_ph], axis=-1)
        rnd_pred_cri_in_dim = rnd_pred_cri_in_ph.shape.as_list()[1]
tensorflow.variable_scope
5,741
from tensorflow.python.ops import data_flow_ops

          gpu_copy_stage_op = gpu_copy_stage.put(
              [host_images, host_labels])
          gpu_copy_stage_ops.append(gpu_copy_stage_op)
          host_images, host_labels = gpu_copy_stage.get()

      with tf.device(self.raw_devices[device_num]):
        if not use_synthetic_gpu_images:
          gpu_compute_stage = data_flow_ops.StagingArea(
              [tf.float32, tf.int32],
              shapes=[images_shape, labels_shape]
          )
          # The CPU-to-GPU copy is triggered here.
          gpu_compute_stage_op = gpu_compute_stage.put(
              [host_images, host_labels])
tensorflow.python.ops.data_flow_ops.StagingArea
5,742
import tensorflow as tf

                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
tensorflow.logging.info
5,743
import tensorflow as tf

      ValueError: if anchors or groundtruth_boxes are not of type box_list.BoxList
    """
    if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be an BoxList')
    if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be an BoxList')

    if groundtruth_labels is None:
      groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
      groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
    shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:],
                                   tf.shape(self._unmatched_cls_target))

    with tf.control_dependencies([shape_assert]):
      match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
      match = self._matcher.match(match_quality_matrix, **params)
      reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
      cls_targets = self._create_classification_targets(groundtruth_labels, match)
tensorflow.shape
5,744
import tensorflow as tf

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` of type int64.
  """
  with tf.compat.v1.name_scope(name, 'size'):
    # Note: Calling `sum` defined in this module, not the builtin.
    if isinstance(x, tf.SparseTensor):
      ones_like_x = tf.SparseTensor(
          indices=x.indices,
          values=tf.ones_like(x.values, tf.int64),
          dense_shape=x.dense_shape)
    else:
      ones_like_x = tf.ones_like(x, dtype=tf.int64)
    return sum(ones_like_x, reduce_instance_dims)

@common.log_api_use(common.ANALYZER_COLLECTION)
def count_per_key(key: common_types.TensorType,
                  key_vocabulary_filename: Optional[str] = None,
                  name: Optional[str] = None):
  """Computes the count of each element of a `Tensor`.

  Args:
    key: A Tensor or `CompositeTensor` of dtype tf.string or tf.int.
tensorflow.ones_like
5,745
import tensorflow as tf

            self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
        self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss

        denoise_gradients = tf.gradients(self.exam_loss, denoise_params)
        ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params)
        if self.hparams.max_gradient_norm > 0:
            denoise_gradients, denoise_norm = tf.clip_by_global_norm(
                denoise_gradients, self.hparams.max_gradient_norm)
            ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(
                ranking_model_gradients,
                self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)
        self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients)
tensorflow.clip_by_global_norm
5,746
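Aside (illustrative, not a dataset row): a minimal TF 2.x sketch of tf.clip_by_global_norm as used in the seed above; the gradient values are assumptions chosen so the pre-clip norm is easy to verify:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]
# Rescales all gradients jointly so their global norm is at most 5.0;
# also returns the pre-clipping global norm (sqrt(9+16+144) = 13.0 here).
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)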
import tensorflow as tf

        pred_classes_ph = tf.placeholder(tf.int32, name='pred_classes_ph', shape=(None,))

        # Predict classes
        normal_arch_ph = tf.placeholder(tf.int32, name='normal_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
tensorflow.placeholder
5,747
import tensorflow as tf

    self.saver_train = tf.train.Saver(self.vars)

  def __build_eval(self):
    """Build the evaluation graph."""
    with tf.Graph().as_default():
      # create a TF session for the current graph
      config = tf.ConfigProto()
      if FLAGS.enbl_multi_gpu:
        config.gpu_options.visible_device_list = str(mgw.local_rank())  # pylint: disable=no-member
      else:
        config.gpu_options.visible_device_list = '0'  # pylint: disable=no-member
      self.sess_eval = tf.Session(config=config)
tensorflow.ConfigProto
5,748
from tensorflow.python.ops import math_ops

      appropriately.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  radial_diffs = math_ops.mul(predictions, labels)
  radial_diffs = math_ops.reduce_sum(radial_diffs,
                                     reduction_indices=[dim,],
                                     keep_dims=True)
  mean_distance, update_op = streaming_mean(radial_diffs, weights,
                                            None,
                                            None,
                                            name or 'mean_cosine_distance')
  mean_distance = math_ops.sub(1.0, mean_distance)
  update_op = math_ops.sub(1.0, update_op)

  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
tensorflow.python.ops.math_ops.reduce_sum
5,749
import tensorflow as tf

    cell = tf.nn.rnn_cell.MultiRNNCell(
        [get_a_cell(state_size, input_prob, state_prob,
                    input_size_x if layer == 0 else state_size)
         for layer in range(num_layers)],
        state_is_tuple=True)
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, variational_recurrent=True, dtype=tf.float32,
                                         input_size=input_size_x, output_keep_prob=output_prob)

    init_state = cell.zero_state(batch_size, dtype=tf.float32)

    """Build dynamic graph"""
    rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,
                                                 initial_state=init_state)

    """Add prediction layer"""
    with tf.variable_scope('softmax'):
        W = tf.get_variable('W', [state_size, input_size_y])
        b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))
    rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
    predictions = tf.matmul(rnn_outputs, W) + b
    # batch_size * num_steps; when you define a placeholder in TensorFlow, the shape
    # of the input during the session should be the same as the shape of the placeholder.
    yy = tf.reshape(y, [-1, input_size_y])

    "Mean squared error loss"
tensorflow.variable_scope
5,750
import tensorflow as tf

    best_prior_per_target_index = tf.math.argmax(ious, axis=0)

    targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64')

    best_target_per_prior_index = tf.tensor_scatter_nd_update(
        best_target_per_prior_index,
        tf.expand_dims(best_prior_per_target_index, 1), targets)
    # 2.0 is used to make sure every target has a prior assigned
    best_target_per_prior = tf.tensor_scatter_nd_update(
        best_target_per_prior,
        tf.expand_dims(best_prior_per_target_index, 1),
        tf.ones_like(best_prior_per_target_index, dtype=tf.float32) * 2.0)
    # size: num_priors
    labels = tf.gather(gt_labels, best_target_per_prior_index)
    labels = tf.where(tf.less(best_target_per_prior, iou_threshold),
                      tf.constant(0, dtype='int64'), labels)
tensorflow.ones_like
5,751
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils

class DNNLinearCombinedClassifierBenchmark(test.Benchmark):

  def _assertSingleClassMetrics(self, metrics):
    estimator_test_utils.assert_in_range(0.9, 1.0, 'auc', metrics)
    estimator_test_utils.assert_in_range(0.9, 1.0,
                                         'accuracy/threshold_0.500000_mean',
tensorflow.contrib.learn.python.learn.estimators.estimator_test_utils.assert_in_range
5,752
import tensorflow as tf
import math
from tensorflow.contrib.tensorboard.plugins import projector
from Bunch import Bunch

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
tensorflow.app.flags.DEFINE_string
5,753
import tensorflow as tf

    # Restore a different "v1" from shard 1 of the saved files.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v1 = tf.Variable(222)
      save = tf.train.Saver({"v1": v1}, sharded=True)
      tf.initialize_all_variables().run()
      self.assertEqual(222, v1.eval())
      save.restore(sess, save_path + "-00001-of-00002")
      self.assertEqual(20, v1.eval())
tensorflow.train.Saver
5,754
import tensorflow as tf

        with tf.variable_scope(name):
            W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init,
                                dtype=LayersConfig.tf_dtype, **W_init_args)
            b = tf.get_variable(name='b', shape=(n_units), initializer=b_init,
                                dtype=LayersConfig.tf_dtype, **b_init_args)
            # self.outputs = act(tf.matmul(self.inputs, W) + b)

            LayersConfig.set_keep[name] = tf.placeholder(tf.float32)
            W_dropcon = tf.nn.dropout(W, LayersConfig.set_keep[name])
            self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b)

        # self.all_layers = list(layer.all_layers)
        # self.all_params = list(layer.all_params)
        # self.all_drop = dict(layer.all_drop)
        self.all_drop.update({LayersConfig.set_keep[name]: keep})
tensorflow.nn.dropout
5,755
import tensorflow as tf target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v1 = tf.Variable(222) save = tf.train.Saver({"v1": v1}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(222, v1.eval()) save.restore(sess, save_path + "-00001-of-00002") self.assertEqual(20, v1.eval())
tensorflow.initialize_all_variables
5,756
import tensorflow as tf

    zt = p_zt.sample(seed=self.random_seed)
    p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out)
    xt = p_xt_given_zt.sample(seed=self.random_seed)
    new_state = VRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded)
    return new_state, tf.to_float(xt)

# pylint: disable=invalid-name
# pylint thinks this is a top-level constant.
tensorflow.to_float
5,757
import tensorflow as tf

                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 14*14
        self.deconv_2 = self.deconv_bn_relu(self.deconv_1, name='deconv_2', kernel_size=3, output_channels=512,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 28*28
        self.deconv_3 = self.deconv_bn_relu(self.deconv_2, name='deconv_3', kernel_size=3, output_channels=256,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 56*56
        self.deconv_4 = self.deconv_bn_relu(self.deconv_3, name='deconv_4', kernel_size=3, output_channels=128,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 112*112
        self.deconv_5 = self.deconv_bn_relu(self.deconv_4, name='deconv_5', kernel_size=3, output_channels=64,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 224*224
        # self.final_layer = self.conv_layer(bottom=self.deconv_5, kernal_size=1, in_channels=64,
        #                                    out_channels=3, stride=1, name='final_layer')
        self.final_layer = self.conv_bn_relu(bottom=self.deconv_5, name='final_layer', kernel_size=1,
                                             output_channels=3,
                                             initializer=tf.contrib.layers.variance_scaling_initializer(),
                                             bn=False, training=self.is_training, relu=False)
tensorflow.contrib.layers.variance_scaling_initializer
5,758
import tensorflow as tf

    Reshapes fvar to the correct shape, specified by `full_cov` and `full_output_cov`.

    :param fvar: has shape N x P (full_cov = False) or P x N x N (full_cov = True).
    :return:
    1. full_cov: True and full_output_cov: True
       fvar N x P x N x P
    2. full_cov: True and full_output_cov: False
       fvar P x N x N
    3. full_cov: False and full_output_cov: True
       fvar N x P x P
    4. full_cov: False and full_output_cov: False
       fvar N x P
    """
    if full_cov and full_output_cov:
        fvar = tf.matrix_diag(tf.transpose(fvar))  # N x N x P x P
        fvar = tf.transpose(fvar, [0, 2, 1, 3])  # N x P x N x P
    if not full_cov and full_output_cov:
        fvar = tf.matrix_diag(fvar)  # N x P x P
    if full_cov and not full_output_cov:
        pass  # P x N x N
    if not full_cov and not full_output_cov:
        pass  # N x P
    return fvar
tensorflow.transpose
5,759
import tensorflow as tf

          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                    masked_lm_weights, next_sentence_example_loss,
                    next_sentence_log_probs, next_sentence_labels):
        """Computes the loss and accuracy of the model."""
        masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
            masked_lm_log_probs, axis=-1, output_type=tf.int32)
        masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
        masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
        masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
        masked_lm_accuracy = tf.metrics.accuracy(
            labels=masked_lm_ids,
            predictions=masked_lm_predictions,
            weights=masked_lm_weights)
        masked_lm_mean_loss = tf.metrics.mean(
            values=masked_lm_example_loss, weights=masked_lm_weights)

        next_sentence_log_probs = tf.reshape(
            next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
        next_sentence_predictions = tf.argmax(
            next_sentence_log_probs, axis=-1, output_type=tf.int32)
        next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
        next_sentence_accuracy = tf.metrics.accuracy(
            labels=next_sentence_labels, predictions=next_sentence_predictions)
tensorflow.reshape
5,760
from tensorflow.python.ops.control_flow_ops import with_dependencies

    """Model function."""
    assert labels is None, labels
    (all_scores, model_predictions, losses, training_op, init_op,
     is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
                                   self._training_initial_clusters,
                                   self._num_clusters, self._random_seed,
                                   self._covariance_type,
                                   self._params)
    incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses)
    training_op = with_dependencies([training_op, incr_step], loss)
    training_hooks = [_InitializeClustersHook(
        init_op, is_initialized, config.is_chief)]
    predictions = {
        GMM.ALL_SCORES: all_scores[0],
        GMM.ASSIGNMENTS: model_predictions[0][0],
    }
    eval_metric_ops = {
        GMM.SCORES: _streaming_sum(loss),
    }
    return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                   eval_metric_ops=eval_metric_ops,
                                   loss=loss, train_op=training_op,
tensorflow.python.ops.control_flow_ops.with_dependencies
5,761
import tensorflow as tf

    dl_dir = os.path.join(data_dir, 'download')
    logging.info(
        'No dataset directory provided. '
        'Downloading and generating dataset for %s inside data directory %s '
        'For large datasets it is better to prepare datasets manually!',
        dataset_name, data_dir)
    if dataset_name.startswith('t2t_'):
      # Download and run dataset generator for T2T problem.
      data_dir = os.path.join(data_dir, dataset_name)
      tf.io.gfile.makedirs(data_dir)
      tf.io.gfile.makedirs(dl_dir)
      t2t_problems().problem(dataset_name[len('t2t_'):]).generate_data(
          data_dir, dl_dir)
    else:
      # Download and prepare TFDS dataset.
      tfds_builder = tfds.builder(dataset_name)
      tfds_builder.download_and_prepare(download_dir=dl_dir)
  else:
tensorflow.io.gfile.makedirs
5,762
import tensorflow as tf

    Tuple (action logits, value).
  """
  if not isinstance(action_space, gym.spaces.Discrete):
    raise ValueError("Expecting discrete action space.")

  obs_shape = common_layers.shape_list(observations)
  (frame_height, frame_width) = obs_shape[2:4]

  # TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup
  # when possible and do this properly.
  if hparams.policy_problem_name == "dummy_policy_problem_ttt":
    tf.logging.info("Using DummyPolicyProblemTTT for the policy.")
    policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()
  else:
    tf.logging.info("Using DummyPolicyProblem for the policy.")
    policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)

  trainer_lib.add_problem_hparams(hparams, policy_problem)
  hparams.force_full_predict = True
  model = registry.model(hparams.policy_network)(
      hparams, tf.estimator.ModeKeys.TRAIN
  )
tensorflow.logging.info
5,763
import tensorflow as tf

    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'tukey_scale'):
    return _tukey_parameters(x, reduce_instance_dims, output_dtype)[1]
tensorflow.compat.v1.name_scope
5,764
import tensorflow as tf

      A tuple of possible batch sizes
    """
    for device in device_lib.list_local_devices():
      if tf.DeviceSpec.from_string(device.name).device_type == "GPU":
        if "K20" in device.physical_device_desc:
          return (16,)
        if "P100" in device.physical_device_desc:
          return (16, 32, 64)

      if tf.DeviceSpec.from_string(device.name).device_type == "TPU":
        return (32,)

    return (16, 32)

  def _force_device_sync(self):
    """Shamelessly copied from `resnet50_test.py`."""
    tf.constant(1.).cpu()
tensorflow.DeviceSpec.from_string
5,765
import tensorflow as tf

  def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
    """Construct EstimatorSpec for EVAL mode."""
    hparams = self.hparams

    if not hasattr(hparams, "problem_instances"):
      raise NotImplementedError(_no_problem_err("estimator_spec_eval"))

    problem = hparams.problem_instances[0]
    if common_layers.is_on_tpu():
      eval_metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams)
      _remove_summaries()
      if isinstance(logits, dict):
        # For TPU, logits dict will be passed as keyword arguments to
        # eval_metrics_fn. Here we add the labels to those arguments.
        logits.update({"labels": labels})
        return tf.contrib.tpu.TPUEstimatorSpec(
            tf.estimator.ModeKeys.EVAL,
            eval_metrics=(eval_metrics_fn, logits),
            loss=loss)
      else:
        return tf.contrib.tpu.TPUEstimatorSpec(
            tf.estimator.ModeKeys.EVAL,
            eval_metrics=(eval_metrics_fn, [logits, labels]),
            loss=loss)
    else:
      eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams)
      eval_metrics = {}
      for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
        eval_metrics[metric_name] = metric_fn(logits, features)
tensorflow.contrib.tpu.TPUEstimatorSpec
5,766
import tensorflow as tf

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels,
                                                                name='cross-entropy')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope + '/loss', loss)
    return loss

def accuracy(logits, labels):
    '''Evaluate the quality of the logits at predicting the label'''
    # for summary
    with tf.name_scope('accuracy') as scope:
        correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct) * 100.0
        tf.summary.scalar(scope + 'accuracy', accuracy)
    return accuracy

def num_correct_prediction(logits, labels):
    '''Evaluate the quality of the logits at predicting the label'''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
tensorflow.arg_max
5,767
import tensorflow as tf

            self.a_ = self._build_net(S_, scope='target_net', trainable=False)

        self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
        self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')

    def _build_net(self, s, scope, trainable):
tensorflow.get_collection
5,768
import tensorflow as tf

    # add mask for glabels and cls_pred here
    glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes),
                              tf.stop_gradient(final_mask))
    cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
    location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
    gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))

    predictions = {
        'classes': tf.argmax(cls_pred, axis=-1),
tensorflow.stop_gradient
5,769
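Aside (illustrative, not a dataset row): a minimal TF 2.x sketch of what tf.stop_gradient does in the seed above, verified via GradientTape:

import tensorflow as tf

x = tf.Variable(2.0)
with tf.GradientTape() as tape:
    # The stopped factor is treated as a constant during differentiation,
    # so y behaves like x * 2.0 for gradient purposes.
    y = x * tf.stop_gradient(x)
grad = tape.gradient(y, x)  # -> 2.0, not 2*x = 4.0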
import tensorflow as tf

    # [batch, 1, n_h_mixer]
    hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)

    # Second layer
    w_final = tf.abs(tf.matmul(state, hyper_w_final))
    w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1])  # reshape into batch of matrices
    b_final_reshaped = tf.reshape(hyper_b_final, [-1, 1, 1])

    # [batch, 1, 1]
    y = tf.matmul(hidden, w_final_reshaped) + b_final_reshaped

    q_tot = tf.reshape(y, [-1, 1])

    return q_tot

class QMix():
    def __init__(self, env, num_s, num_a, lr=0.0001, gamma=0.99, replace_target_iter=5000,
                 memory_size=200000, batch_size=256, epsilon=1, epsilon_decay=0.0001):
        self.n_agents = 2
        self.env = env
        self.name = "qmix"
tensorflow.reshape
5,770
import tensorflow as tf

    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))
    grl = fc(grl, 100, True, None, activation=relu, name='fc1')
    logits = fc(grl, 1, True, None, activation=None, name='fc2')

    domain_predictions = tf.sigmoid(logits)
    domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)

    domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))

    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
        tag_loss = 'losses/domain_loss'
        barrier = tf.no_op(tag_loss)

    return domain_loss
tensorflow.round
5,771
import tensorflow as tf

        self.loss_GABA = self.lambda_l2 * squared_loss(self.images_fake_A, self.image_real_A) + \
            binary_cross_entropy_loss(labels=tf.ones_like(self.D_B_fake), logits=self.D_B_fake)
        self.loss_GBAB = self.lambda_l2 * squared_loss(self.images_fake_B_, self.image_real_B) + \
            binary_cross_entropy_loss(labels=tf.ones_like(self.D_A_fake), logits=self.D_A_fake)
        self.generator_loss = self.loss_GABA + self.loss_GBAB

        self.D_B_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_B_real), self.D_B_real)
        self.D_B_loss_fake = binary_cross_entropy_loss(tf.zeros_like(self.D_B_fake), self.D_B_fake)
        self.D_B_loss = (self.D_B_loss_real + self.D_B_loss_fake) / 2.0

        self.D_A_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_A_real), self.D_A_real)
        self.D_A_loss_fake = binary_cross_entropy_loss(tf.zeros_like(self.D_A_fake), self.D_A_fake)
tensorflow.ones_like
5,772
import tensorflow as tf

    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor

# add sequence mask for:
# 1. random shuffle lm modeling---xlnet with random shuffled input
# 2. left2right and right2left language modeling
# 3. conditional generation
def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs):
    if seq_type == 'seq2seq':
        if mask_sequence is not None:
            seq_shape = get_shape_list(mask_sequence, expected_rank=2)
            seq_len = seq_shape[1]
            ones = tf.ones((1, seq_len, seq_len))
            a_mask = tf.matrix_band_part(ones, -1, 0)
            s_ex12 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 2)
            s_ex13 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 3)
            a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask
            # generate mask of batch x seq_len x seq_len
            a_mask = tf.reshape(a_mask, (-1, seq_len, seq_len))
            out_mask = attention_mask * a_mask
        else:
            ones = tf.ones_like(attention_mask[:1])
            mask = (tf.matrix_band_part(ones, -1, 0))
            out_mask = attention_mask * mask
    else:
        out_mask = attention_mask
tensorflow.ones
5,773
import tensorflow as tf

        if use_bias:
            b = tf.get_variable('b', [out_channel], initializer=b_init)

        if split == 1:
            conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)
        else:
            inputs = tf.split(inputdata, split, channel_axis)
            kernels = tf.split(w, split, 3)
            outputs = [tf.nn.conv2d(i, k, strides, padding, data_format=data_format)
                       for i, k in zip(inputs, kernels)]
            conv = tf.concat(outputs, channel_axis)

        ret = tf.identity(tf.nn.bias_add(conv, b, data_format=data_format)
                          if use_bias else conv, name=name)

        return ret

    @staticmethod
    def depthwise_conv(input_tensor, kernel_size, name, depth_multiplier=1,
                       padding='SAME', stride=1):
tensorflow.concat
5,774
import tensorflow as tf

                                            scale=True,
                                            is_training=(mode == 'train'),
                                            updates_collections=None,
                                            scope=(name + 'batch_norm'))

    def build_model(self):
        features = self.features
        captions = self.captions
        batch_size = tf.shape(features)[0]

        captions_in = captions[:, :self.T]
        captions_out = captions[:, 1:]
        mask = tf.to_float(tf.not_equal(captions_out, self._null))

        # batch normalize feature vectors
tensorflow.shape
5,775
import tensorflow as tf

        depthwise_filter = tf.get_variable(
            name='depthwise_filter_w', shape=depthwise_filter_shape,
            initializer=w_init
        )

        result = tf.nn.depthwise_conv2d(
            input=input_tensor,
            filter=depthwise_filter,
            strides=[1, stride, stride, 1],
            padding=padding,
tensorflow.nn.depthwise_conv2d
5,776
import tensorflow as tf

        self.adv = tf.placeholder(tf.float32, [None], name="adv")
        self.r = tf.placeholder(tf.float32, [None], name="r")

        log_prob_tf = tf.nn.log_softmax(pi.logits)
        prob_tf = tf.nn.softmax(pi.logits)

        # the "policy gradients" loss: its derivative is precisely the policy gradient
        # notice that self.ac is a placeholder that is provided externally.
tensorflow.nn.softmax
5,777
import tensorflow as tf

        return (input_var - mean) / tf.sqrt(var + self.epsilon)

class BatchNorm(object):
    def __init__(self, name, dims, axis=1, epsilon=1e-3, momentum=0.999, center=True, scale=True):
        self.momentum = momentum
        self.epsilon = epsilon
        self.axis = axis
        self.center = center
        self.scale = scale
        with tf.variable_scope(name) as scope:
            with tf.variable_scope('bn'):
                self.gamma = tf.get_variable('gamma', [dims], initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta', [dims], initializer=tf.constant_initializer(0.0))
                self.moving_mean = tf.get_variable('moving_mean', [dims],
                                                   initializer=tf.constant_initializer(0.0), trainable=False)
                self.moving_variance = tf.get_variable('moving_variance', [dims],
                                                       initializer=tf.constant_initializer(1.0), trainable=False)
            self.scope = scope

    def __call__(self, input_var, is_training, **xargs):
        with tf.variable_scope(self.scope):
            return tf.layers.batch_normalization(
                input_var,
                axis=self.axis,
                momentum=self.momentum,
                epsilon=self.epsilon,
                center=self.center,
                scale=self.scale,
tensorflow.constant_initializer
5,778
import tensorflow as tf

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
tensorflow.flags.DEFINE_string
5,779
import tensorflow as tf

        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2) + self.b2
        return out

    def test_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2) + self.b2
        return out

    def valid_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2) + self.b2
        return out

    def softmax_loss(self, predicts, labels):
        predicts = tf.nn.softmax(predicts)
        labels = tf.one_hot(labels, classnum)
        loss = -tf.reduce_sum(labels * tf.log(predicts))
        return loss

    def optimer(self, loss, lr=0.001):
tensorflow.cast
5,780
import tensorflow as tf

def contra_traj_lossV8(pred, tgt, horizon=12):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
    horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)

    pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])

    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2

    geq = tf.cast(tgt_dif > 0, tf.bool)
    tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
tensorflow.reshape
5,781
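Aside (illustrative, not a dataset row): the seed builds all pairwise differences by reshaping one copy to a column and one to a row so broadcasting fills the matrix; a minimal TF 2.x sketch of that trick:

import tensorflow as tf

v = tf.constant([1.0, 2.0, 4.0])
col = tf.reshape(v, [-1, 1])   # shape [3, 1]
row = tf.reshape(v, [1, -1])   # shape [1, 3]
# Broadcasting yields the full matrix of pairwise differences v[i] - v[j].
pairwise = col - row           # shape [3, 3]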
import tensorflow as tf

    total_accuracy1, total_accuracy5 = (0.0, 0.0)
    num_processed_images = 0
    num_remaining_images = 5000
    top1 = 0

    with tf.compat.v1.Session() as sess:
        sess_graph = tf.compat.v1.Session(graph=graph, config=config)

        while num_remaining_images >= batch_size:
            # Reads and preprocess data
            np_images, np_labels = sess.run([images[0], labels[0]])
tensorflow.compat.v1.Session
5,782
import tensorflow as tf

    return x2, ildj

def exponentiate(x, log_lambdas, inverse=tf.constant(False)):
    if not inverse:
tensorflow.constant
5,783
import tensorflow as tf

    size = tf.concat(0, [shape[:1] // parts, shape[1:]])
    stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])
    start = stride * idx
    return tf.slice(data, start, size)

outputs_all = []
for i in range(len(model.outputs)):
    outputs_all.append([])

# Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('tower_%d' % i) as scope:
            inputs = []
            # Slice each input into a piece for processing on this GPU
            for x in model.inputs:
                input_shape = tuple(x.get_shape().as_list())[1:]
                slice_n = Lambda(get_slice, output_shape=input_shape,
                                 arguments={'idx': i, 'parts': gpu_count})(x)
                inputs.append(slice_n)

            outputs = model(inputs)

            if not isinstance(outputs, list):
tensorflow.name_scope
5,784
import tensorflow as tf

  return host_call_fn, [global_step_tensor] + other_tensors

def two_stream_loss(FLAGS, features, labels, mems, is_training):
  """Pretraining loss with two-stream attention Transformer-XL."""

  #### Unpack input
  mem_name = "mems"
  mems = mems.get(mem_name, None)

  inp_k = tf.transpose(features["input_k"], [1, 0])
  inp_q = tf.transpose(features["input_q"], [1, 0])

  seg_id = tf.transpose(features["seg_id"], [1, 0])

  inp_mask = None
  perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0])

  if FLAGS.num_predict is not None:
    # [num_predict x tgt_len x bsz]
    target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0])
  else:
tensorflow.transpose
5,785
import tensorflow as tf

        clip_value = clipping_kwargs["value"]
        clipped_grads_and_vars = [(tf.clip_by_norm(g, clip_value), v)
                                  for (g, v) in zip(grads, variables)]
    elif clipping_method == "clip_by_value":
        clip_value = clipping_kwargs["value"]
        clipped_grads_and_vars = [(tf.clip_by_value(g, -clip_value, clip_value), v)
                                  for (g, v) in grads_and_vars if g is not None]
    elif not clipping_method:
        grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]
        clipped_grads_and_vars = grads_and_vars_not_none
tensorflow.clip_by_value
5,786
import tensorflow as tf

    print(observ_shape)
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    print(observ_dtype)
    action_shape = self._parse_shape(self._batch_env.action_space)
    print(action_shape)
    action_dtype = self._parse_dtype(self._batch_env.action_space)
    print(action_dtype)
    with tf.variable_scope('env_temporary'):
      self._observ = tf.Variable(
          lambda: tf.zeros(batch_dims + observ_shape, observ_dtype),
          name='observ', trainable=False)
      self._action = tf.Variable(
          lambda: tf.zeros(batch_dims + action_shape, action_dtype),
          name='action', trainable=False)
      self._reward = tf.Variable(
          lambda: tf.zeros(batch_dims, tf.float32),
          name='reward', trainable=False)
      self._done = tf.Variable(
          lambda: tf.cast(tf.ones(batch_dims), tf.bool),
          name='done', trainable=False)

  def __getattr__(self, name):
    """Forward unimplemented attributes to one of the original environments.
tensorflow.zeros
5,787
import tensorflow as tf

tf.flags.DEFINE_string('train_dir', None,
                       """Path to session checkpoints.""")
tf.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval',
                       """Directory where to write eval event logs.""")
tf.flags.DEFINE_string('pretrain_dir', None,
                       """Path to pretrained session checkpoints.""")
tf.flags.DEFINE_string('result_storage', None,
                       """Specifies storage option for benchmark results.
tensorflow.flags.DEFINE_string
5,788
import tensorflow as tf

    model_adv.build_graph()

    # Open session and restore checkpoint
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    tf.train.start_queue_runners(sess)
    sess.run(tf.global_variables_initializer())
tensorflow.ConfigProto
5,789
import tensorflow as tf

    source_vocab_size = self._encoders["inputs"].vocab_size
    p.input_modality = {
        "inputs": (registry.Modalities.SYMBOL, source_vocab_size)
    }
    p.target_modality = (registry.Modalities.CLASS_LABEL, self.num_classes)

  def example_reading_spec(self):
    data_fields = {
        "inputs": tf.VarLenFeature(tf.int64),
        "targets": tf.FixedLenFeature([1], tf.int64),
    }
    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)

def txt_line_iterator(txt_path):
  """Iterate through lines of file."""
  with tf.gfile.Open(txt_path) as f:
tensorflow.FixedLenFeature
5,790
import tensorflow as tf

    zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32,
                   parallel_iterations=1, back_prop=False)
    print("Hyper Encoder")

    z_hats, _ = entropy_bottleneck(zs, False)
    print("Quantize hyperprior")

    def loop_hyper_deocder(z):
        z = tf.expand_dims(z, 0)
        loc, scale = hyper_decoder(z)
        return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])

    locs, scales = tf.map_fn(loop_hyper_deocder, z_hats, dtype=(tf.float32, tf.float32),
                             parallel_iterations=1, back_prop=False)
    lower_bound = 1e-9  # TODO
    scales = tf.maximum(scales, lower_bound)
    print("Hyper Decoder")

    z_strings, z_min_v, z_max_v = entropy_bottleneck.compress(zs)
    z_shape = tf.shape(zs)[:]
tensorflow.squeeze
5,791
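Aside (illustrative, not a dataset row): a minimal TF 2.x sketch of tf.squeeze as used in the seed above; passing an explicit axis list only drops the named size-1 dimensions:

import tensorflow as tf

x = tf.zeros([1, 32, 1, 8])
y = tf.squeeze(x, [0])  # drop only axis 0 -> shape [32, 1, 8]
z = tf.squeeze(x)       # drop all size-1 axes -> shape [32, 8]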
import tensorflow as tf

        if dropout > 0:
            out = tf.layers.dropout(out, rate=dropout, training=training)

        if sum(dim[2]) > 2:
            out = deconv2d(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out" % scope,
                           training=training, ema=ema, init=init)
        else:
            out = conv2d(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out" % scope,
                         training=training, ema=ema, init=init)
        h_stack1, h_stack2 = tf.split(out, 2, 3)
        sigmoid_out = tf.sigmoid(h_stack2)
        out = (h_stack1 * sigmoid_out)

        out_shp = out.get_shape().as_list()
        if out_shp[1:-1] < in_shp[1:-1]:
            x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1],
                               strides=[1, dim[2][0], dim[2][1], 1], padding='SAME')
        elif out_shp[1:-1] > in_shp[1:-1]:
            warnings.warn(
                "The height and width of the output are larger than the input. "
                "There will be no residual connection.")
tensorflow.sigmoid
5,792
import tensorflow as tf "Mean squared error loss" loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))
tensorflow.reshape
5,793
import tensorflow as tf
import ma_gym
import random
import datetime
import numpy as np
import tensorflow as tf

def get_variable(name, shape):
    return tf.get_variable(name, shape, tf.float32,
                           tf.initializers.truncated_normal(0, 0.01))

def Qmix_mixer(agent_qs, state, state_dim, n_agents, n_h_mixer):
    """
    Args:
        agent_qs: shape [batch, n_agents]
        state: shape [batch, state_dim]
        state_dim: integer
        n_agents: integer
tensorflow.initializers.truncated_normal
5,794
import tensorflow as tf

            tf.reshape(detections['rotations_3d'][i], [3, 3]), 1)
        for i in range(num_boxes)], axis=0)
    rotations_y = tf.reshape(rotations_y, [-1, 1])
    labeled_boxes = tf.concat([sample['translations_3d'], sample['sizes_3d'], rotations_y],
                              axis=1)
tensorflow.concat
5,795
import tensorflow as tf

try:
    import cPickle
except:
    import _pickle as cPickle

def relu(x, name, alpha):
    if alpha > 0:
        return tf.maximum(alpha * x, x, name=name)
    else:
        return tf.nn.relu(x, name=name)

def get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None):
    with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape=shape, dtype=dtype, initializer=initializer,
                              regularizer=regularizer, trainable=trainable,
                              collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES])
    return var

def conv(inp, name, size, out_channels, strides=[1, 1, 1, 1], dilation=None, padding='SAME',
         apply_relu=True, alpha=0.0, bias=True,
         initializer=tf.contrib.layers.xavier_initializer_conv2d()):
    batch_size = inp.get_shape().as_list()[0]
    res1 = inp.get_shape().as_list()[1]
    res2 = inp.get_shape().as_list()[1]
tensorflow.device
5,796
import tensorflow as tf

        self.c2q = tf.matmul(S_, q)
        self.q2c = tf.matmul(tf.matmul(S_, S_T), c)
tensorflow.matmul
5,797
import tensorflow as tf

    lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
    warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
    sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                                   zip(warmup_model.parameters(), model.parameters())])
    shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
                     for n in range(FLAGS.warmup.n_shadow_models)]
    sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                                           zip(model.parameters(), lazy_model.parameters())])
    sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in
                                         zip(lazy_model.parameters(), model.parameters())])

    virt_env = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs,
                          opt_model=FLAGS.slbo.opt_model)
tensorflow.assign
5,798
import tensorflow as tf

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
tensorflow.metrics.accuracy
5,799