seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, 0 to 14.8k
import tensorflow as tf self.max_p_len = config.max_p_len self.max_q_len = config.max_q_len self.max_a_len = config.max_a_len # the vocab self.vocab = vocab # session info sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = False self.sess = tf.Session(config=sess_config) self._build_graph() # save info self.saver = tf.train.Saver() # initialize the model self.sess.run(tf.global_variables_initializer()) def _build_graph(self):
tensorflow.Session
7,800
import tensorflow as tf def build_cnet(self, state_in, name, reuse=False, batch_size=64): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_c = tf.nn.rnn_cell.DropoutWrapper(lstm_c, output_keep_prob=self.keep_prob) state_init_c = lstm_c.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_cin = tf.expand_dims(layer_c2, axis=1) out_c, state_final_c = tf.nn.dynamic_rnn(cell=lstm_c, inputs=lstm_cin, initial_state=state_init_c) cell_out_c = tf.reshape(out_c, [-1, 256]) vf = tf.layers.dense(cell_out_c, 1, kernel_regularizer=reg) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return vf, params, state_init_c, state_final_c # Update the network def train(self, rollout): start = time() self.sess.run([self.pi_new_params, self.vf_new_params]) for _ in range(self.EPOCHS): np.random.shuffle(rollout) for s, a, r, adv in rollout: self.sess.run(self.data_iter.initializer, feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv}) state_a, state_c = self.sess.run([self.pi_state_init, self.vf_state_init])
tensorflow.get_collection
7,801
import tensorflow as tf probs: matrix of probabilities for the minibatch eps: value to clip the probabilities at class_weights: vector of relative weights to be assigned to each class sumd: dimensions along which to sum the x-ent matrix Returns: cross entropy loss for each example in the minibatch """ adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps) xent_mat = -y * tf.log(adjusted_probs) if class_weights is not None: xent_mat *= class_weights return tf.reduce_sum(xent_mat, sumd) def _SafeNegEntropy(probs, batch_size, eps=0.0001): """Computes negative entropy in a way that will not overflow.""" adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps) entropy = tf.mul(probs, tf.log(adjusted_probs)) return tf.reduce_sum(entropy) / batch_size
tensorflow.reduce_sum
7,802
import tensorflow as tf a = np.array([1, 2, 3], dtype=np.float32) tf_v = tf.Variable(5, dtype=tf.float32) sess.run(tf.global_variables_initializer()) print(f'a * tf_v = {sess.run(a * tf_v)}') weights = tf.constant([[1.0, -2], [-3, 4]]); regular_l1 = tf.contrib.layers.l1_regularizer(0.5)(weights) regular_l2 = tf.contrib.layers.l2_regularizer(0.5)(weights) print(f'\nregular_l1={sess.run(regular_l1)} regular_l2={sess.run(regular_l2)}') val_val = sess.run(val) print('\nval=' + str(val_val)) print(f'\nargmax_0={val_val.argmax(0)} argmax_1={val_val.argmax(1)}') print('\ntf.argmax(val, 0)=' + str(sess.run(tf.argmax(val, 0)))) print('tf.argmax(val, 1)=' + str(sess.run(tf.argmax(val, 1))))
tensorflow.contrib.layers.l1_regularizer
7,803
import tensorflow as tf labels.set_shape([cfg.FLAGS.batch_size, 1]) bbox_targets.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4]) bbox_inside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4]) bbox_outside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4]) self._proposal_targets['rois'] = rois self._proposal_targets['labels'] = tf.to_int32(labels, name="to_int32") self._proposal_targets['bbox_targets'] = bbox_targets self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights self._score_summaries.update(self._proposal_targets) #self._score_summaries.update(self._anchor_targets)
tensorflow.to_int32
7,804
import tensorflow as tf vocab_filename: The name of the vocabulary. unfiltered_vocabulary_size: A tf.int64 tensor containing the unfiltered vocab size. filtered_vocabulary_size: A tf.int64 tensor containing the filtered vocab size. """ if not common.IS_ANNOTATIONS_PB_AVAILABLE: return from tensorflow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top message_type = annotations_pb2.VocabularyMetadata.DESCRIPTOR.full_name unfiltered_vocabulary_size = tf.expand_dims(unfiltered_vocabulary_size, 0) filtered_vocabulary_size = tf.expand_dims(filtered_vocabulary_size, 0) file_name = tf.convert_to_tensor([vocab_filename]) descriptor_source = descriptor_pb2.FileDescriptorSet() annotations_pb2.VocabularyMetadata.DESCRIPTOR.file.CopyToProto( descriptor_source.file.add()) descriptor_source_str = b'bytes://' + descriptor_source.SerializeToString() message_proto = tf_utils._encode_proto( # pylint: disable=protected-access { 'unfiltered_vocabulary_size': unfiltered_vocabulary_size, 'filtered_vocabulary_size': filtered_vocabulary_size, 'file_name': file_name, }, message_type, descriptor_source=descriptor_source_str) assert message_proto.shape == [1] message_proto = message_proto[0]
tensorflow.convert_to_tensor
7,805
import tensorflow as tf # Define a convolution layer: variable scope name, input x, kernel W, stride, bias; activation defaults to relu def conv2d(self,name, x, W, stride, bias): with tf.variable_scope(name) as scope: conv = tf.nn.conv2d(x, W, [1, stride, stride, 1], padding='SAME') pre_activation = tf.nn.bias_add(conv, bias) output = tf.nn.relu(pre_activation, name=scope.name)
tensorflow.nn.conv2d
7,806
import tensorflow as tf if init_constant is not None: initializer = tf.constant_initializer(init_constant, dtype=tf.float32) else: initializer = tf.contrib.keras.initializers.he_normal() # Ensure that name is unique by shape too name += '-shape-{}'.format('x'.join([str(x) for x in shape])) var = tf.get_variable(name, shape=shape, dtype=dtype, initializer=initializer, trainable=trainable) # Add L2 regularization node for trainable var if trainable and not no_reg: l2_loss = tf.nn.l2_loss(var) tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, l2_loss) return var class TimedRepeatCondition(): def __init__(self, every_secs=60): self._every_secs = every_secs self._last_trigger_time = datetime.now() def check(self) -> bool: if (datetime.now() - self._last_trigger_time).total_seconds() >= self._every_secs: self._last_trigger_time = datetime.now() return True
tensorflow.add_to_collection
7,807
import tensorflow as tf # Define a weight variable whose variable scope is name and whose shape is shape def weight_variable(self,name, shape): with tf.variable_scope(name) as scope: weights = tf.get_variable('weights', shape=shape,
tensorflow.variable_scope
7,808
import tensorflow as tf self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train) self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2) self.conv2 = tf.layers.conv2d(self.pool1, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train) self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2) self.conv3 = tf.layers.conv2d(self.pool2, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
tensorflow.layers.dropout
7,809
import tensorflow as tf
tensorflow.Session
7,810
from tensorflow.python.framework import ops cur_mean, cur_var = used_mean, used_var # update variables if train: with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): with ops.colocate_with(mean): new_mean = tf.assign_sub( mean, tf.check_numerics( decay * (mean - cur_mean), "NaN in moving mean."))
tensorflow.python.framework.ops.colocate_with
7,811
import tensorflow as tf def input_layer(self): # data = np.loadtxt(self.params['vocab'], dtype=np.unicode, encoding=None) data = self.params["vocab_data"] mapping_strings = tf.Variable(data) vocab_words = tf.contrib.lookup.index_table_from_tensor( mapping_strings, num_oov_buckets=1
tensorflow.Variable
7,812
import tensorflow as tf v0_2 = tf.Variable(543.21) save = tf.train.Saver({"v0": v0_2}) tf.initialize_all_variables().run() self.assertAllClose(543.21, v0_2.eval()) save.restore(sess, save_path) self.assertAllClose(123.45, v0_2.eval()) def testVariables(self): save_path = os.path.join(self.get_temp_dir(), "variables") with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(1.0) twos = tf.Variable([2.0, 2.0, 2.0]) init = tf.initialize_all_variables() save = tf.train.Saver(tf.all_variables()) init.run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(0.0) twos = tf.Variable([0.0, 0.0, 0.0]) # Saver with no arg, defaults to 'all variables'. save = tf.train.Saver() save.restore(sess, save_path) self.assertAllClose(1.0, one.eval()) self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
tensorflow.all_variables
7,813
import tensorflow as tf tf.summary.scalar("after_loss", model.after_loss) if hparams.predict_linear: tf.summary.scalar("linear_loss", model.linear_loss) for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) tf.summary.scalar("regularization_loss", model.regularization_loss) tf.summary.scalar("stop_token_loss", model.stop_token_loss) tf.summary.scalar("loss", model.loss) tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [tf.norm(grad) for grad in model.gradients]
tensorflow.summary.scalar
7,814
import tensorflow as tf 1.0 / model_options.decoder_output_stride) decoder_width = scale_dimension(model_options.crop_size[1], 1.0 / model_options.decoder_output_stride) else: decoder_height = scale_dimension(tf.shape(images)[1], 1.0 / model_options.decoder_output_stride) decoder_width = scale_dimension(tf.shape(images)[2], 1.0 / model_options.decoder_output_stride) features = refine_by_decoder( features, end_points, decoder_height=decoder_height,
tensorflow.shape
7,815
import tensorflow as tf def kd(x, y): x_prob = tf.nn.softmax(x) print(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape()) return -tf.reduce_sum(x_prob * y, axis=-1) # higher the better def mse(x, y): x = x - tf.reduce_mean(x, axis=-1, keepdims=True) y = y - tf.reduce_mean(y, axis=-1, keepdims=True) return tf.reduce_sum((x-y)**2, axis=-1) # lower the better def kd_distance(x, y, dist_type): if dist_type == "person": return correlation(x,y) elif dist_type == "kd": return kd(x, y)
tensorflow.reduce_mean
7,816
import tensorflow as tf weights = np.reshape(tmp[0:q**2+q], (q+1,q)) self.IRK_alpha = weights[0:-1,:] self.IRK_beta = weights[-1:,:] self.IRK_times = tmp[q**2+q:] # tf placeholders and graph self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1])) self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1])) self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1])) self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1])) self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.U0_pred = self.net_U0(self.x0_tf) # N0 x q self.U1_pred = self.net_U1(self.x1_tf) # N1 x q self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \ tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred)) self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, method = 'L-BFGS-B', options = {'maxiter': 50000, 'maxfun': 50000, 'maxcor': 50, 'maxls': 50,
tensorflow.placeholder
7,817
import tensorflow as tf use_hvd=True): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" # batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter.
tensorflow.FixedLenFeature
7,818
import tensorflow as tf if config.PRED_DEBUG: pred_indices_ = tf.squeeze(pred_indices)
tensorflow.squeeze
7,819
import tensorflow as tf tf.range(start=0, limit=proposals_shape[0]), 1) return tf.reshape(ones_mat * multiplier, [-1])
tensorflow.reshape
7,820
import tensorflow as tf
tensorflow.executing_eagerly
7,821
import tensorflow as tf last_layer = tf.nn.relu(projected) else: raise NotImplementedError() if mode == 'train' and dnn_keep_prob < 1.0: last_layer = tf.nn.dropout(last_layer, dnn_keep_prob) last_layer_size = layer_size print('{}: {}'.format(layer_name, last_layer.get_shape())) export_feat_tensors[layer_name] = last_layer dnn_output = last_layer dnn_output_size = last_layer_size # Logistic regression with tf.variable_scope('logit') as scope: logit_w = tf.get_variable('W', shape=[dnn_output_size, 1], initializer=tf.truncated_normal_initializer(stddev=1.0 / dnn_output_size, dtype=dtype), dtype=dtype) logit_b = tf.get_variable('b', shape=[1], initializer=tf.constant_initializer(0.0), dtype=dtype) logits = tf.squeeze(tf.nn.bias_add(tf.matmul(dnn_output, logit_w), logit_b), squeeze_dims=[1]) prediction = tf.nn.sigmoid(logits) prediction_inspect = tf.reshape(prediction, [batch_size, rnn_nunroll]) prediction_final = tf.squeeze(tf.slice(prediction_inspect, [0, rnn_nunroll - 1], [-1, 1]), squeeze_dims=[1]) print('logit: {}'.format(logits.get_shape())) # Compute loss if mode != 'gen': neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets) if target_weight_strategy == 'rect': avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods)
tensorflow.variable_scope
7,822
import tensorflow as tf s_w2, s_w4, s_w8, s_w16 = \ int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16) output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') scope.reuse_variables() truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
tensorflow.concat
7,823
import tensorflow as tf util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell,
tensorflow.get_collection_ref
7,824
import tensorflow as tf # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") if not tf.gfile.Exists(predict_file) or not FLAGS.data_converted: file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running prediction*****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder)
tensorflow.logging.info
7,825
import tensorflow as tf if not forCnn: scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas: return output, scores return output def self_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) def cond(batch, output, i): return tf.less(i, tf.shape(batch)[1]) def body(batch, output, i):
tensorflow.shape
7,826
from tensorflow.python.framework import ops `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: A list of ints that has length >= 4. The size of the window for each dimension of the input tensor. strides: A list of ints that has length >= 4. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. """ with ops.op_scope([value], name, "AvgPool") as name: value = ops.convert_to_tensor(value, name="input") return gen_nn_ops._avg_pool(value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None): """Performs the max pooling on the input. Args: value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
tensorflow.python.framework.ops.op_scope
7,827
import tensorflow as tf test = ContextAEReach() elif args.experiment_type == "push": test = ContextAEPush() elif args.experiment_type == "pushreal": test = ContextAEPushReal() elif args.experiment_type == "sweep": test = ContextAESweep() test.build(tfinput, args.ablation_type) config = tf.ConfigProto() config.gpu_options.allow_growth=True sess = tf.Session(config=config) learning_rate = tf.placeholder(tf.float32, shape=[]) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(test.loss) sess.run(tf.global_variables_initializer()) allloss = [] validloss = [] itr = 0 saver = tf.train.Saver() n = vdata.shape[1] nlen = vdata.shape[0] ntrain = int(0.8*n) nvalid = n - ntrain validdata = vdata[:, ntrain:] traindata = vdata[:, :ntrain] while True: choicesrc = np.random.choice(ntrain, batch_size)
tensorflow.train.AdamOptimizer
7,828
from tensorflow.python.ops import array_ops return math_ops.igammac(math_ops.floor(x + 1), self.rate) def _log_normalization(self): return self.rate def _log_unnormalized_prob(self, x): x = self._assert_valid_sample(x, check_integer=True) return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1) def _mean(self): return array_ops.identity(self.rate) def _variance(self): return array_ops.identity(self.rate) @distribution_util.AppendDocstring( """Note: when `rate` is an integer, there are actually two modes: `rate` and `rate - 1`. In this case we return the larger, i.e., `rate`.""") def _mode(self): return math_ops.floor(self.rate) def _assert_valid_sample(self, x, check_integer=True): if not self.validate_args: return x dependencies = [check_ops.assert_non_negative(x)] if check_integer:
tensorflow.python.ops.array_ops.identity
7,829
import tensorflow as tf 'A flag to override the data format used in the model. channels_first ' 'provides a performance boost on GPU but is not always compatible ' 'with CPU. If left unspecified, the data format will be chosen ' 'automatically based on whether TensorFlow was built for CPU or GPU.') # optimizer related configuration tf.app.flags.DEFINE_integer( 'tf_random_seed', 20180417, 'Random seed for TensorFlow initializers.') tf.app.flags.DEFINE_float( 'weight_decay', 1e-5, 'The weight decay on the model weights.') tf.app.flags.DEFINE_float( 'mse_weight', 1., 'The weight decay on the model weights.') tf.app.flags.DEFINE_float( 'momentum', 0.9, 'The momentum for the MomentumOptimizer and RMSPropOptimizer.') tf.app.flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')#1e-3 tf.app.flags.DEFINE_float( 'end_learning_rate', 0.000001, 'The minimal end learning rate used by a polynomial decay learning rate.') tf.app.flags.DEFINE_float( 'warmup_learning_rate', 0.00001, 'The start warm-up learning rate to avoid NAN.') tf.app.flags.DEFINE_integer(
tensorflow.app.flags.DEFINE_float
7,830
import tensorflow as tf b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer) c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c) return c, h def _word_embedding(self, inputs, reuse=False): with tf.variable_scope('word_embedding', reuse=reuse): w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer) x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M) return x def _project_features(self, features): with tf.variable_scope('project_features'):
tensorflow.get_variable
7,831
import tensorflow as tf # Bellman training error if double_q: q_max = gather_2d(q_target,tf.argmax(q_online_tp1,axis=1,output_type=tf.int32)) else: q_max = tf.reduce_max(q_target,axis=1) target = rew_t_ph + gamma * q_max * (1.0 - done_mask_ph) q_t_act = gather_2d(q_online_t,act_t_ph) total_error = tf.reduce_mean(huber_loss(target - q_t_act)) ###### # construct optimization op (with gradient clipping) learning_rate = tf.placeholder(tf.float32, (), name="learning_rate") optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs) train_fn = minimize_and_clip(optimizer, total_error, var_list=q_func_vars, clip_val=grad_norm_clipping) # update_target_fn will be called periodically to copy Q network to target Q network update_target_fn = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_fn.append(var_target.assign(var)) update_target_fn = tf.group(*update_target_fn)
tensorflow.placeholder
7,832
import tensorflow as tf indices_batch = np.random.randint( batch_size, size=num_elements, dtype=np.int64) indices_value = np.arange(num_elements, dtype=np.int64) indices = np.asarray( sorted(zip(indices_batch, indices_value)), dtype=np.int64) values = ["feature_value_for_embedding_lookup"] * num_elements shape = np.asarray([batch_size, num_elements], dtype=np.int64) with tf.Session() as sess: with tf.device("/cpu:0"): indices = tf.Variable(indices) values = tf.Variable(values) shape = tf.Variable(shape) st = tf.SparseTensor(indices, values, shape) st_handles = add_many_sparse_to_tensors_map(st) st_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=st_handles.op, sparse_handles=st_handles) st_roundtrip_op = st_roundtrip.values.op st_serialized = tf.serialize_many_sparse(st)
tensorflow.Variable
7,833
import tensorflow as tf shape=[num_examples, seq_length], dtype=tf.int32), "masked_lm_positions": tf.constant( all_masked_lm_positions, shape=[num_examples, max_predictions_per_seq],
tensorflow.constant
7,834
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten # Block 3 conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2)
tensorflow.keras.layers.Conv2D
7,835
import tensorflow as tf lambda_max=None)) for support in supports: self._supports.append(self._build_sparse_matrix(support)) @staticmethod def _build_sparse_matrix(L): L = L.tocoo() indices = np.column_stack((L.row, L.col)) L = tf.SparseTensor(indices, L.data, L.shape) return tf.sparse_reorder(L) @property def output_size(self): output_size = self._num_nodes * self._num_units if self._num_proj is not None: output_size = self._num_nodes * self._num_proj return output_size
tensorflow.sparse_reorder
7,836
import tensorflow as tf name: The name scope of this layer. Returns: float logits Tensor. """ input_shape = get_shape_list(input_tensor) num_attention_heads= input_shape[2] with tf.variable_scope(name): w = tf.get_variable( name="kernel", shape=[num_attention_heads * head_size, hidden_size], initializer=initializer) w = tf.reshape(w, [num_attention_heads, head_size, hidden_size]) b = tf.get_variable( name="bias", shape=[hidden_size], initializer=tf.zeros_initializer) ret = tf.einsum("BFND,NDH->BFH", input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def dense_layer_2d(input_tensor,
tensorflow.reshape
7,837
import tensorflow as tf attn_result = tf.cond( tf.equal(sl_head, 0), lambda: tf.zeros([bs, 0, hn], tf.float32), lambda: self_attention_for_selected_head( head_selection, head_org_idx, sl_head, rep_head_mask, dep_selection, dep_org_idx, sl_dep, rep_dep_mask, rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec ) ) if keep_unselected: input_idx = tf.tile(tf.expand_dims(tf.range(sl), 0), [bs, 1]) pooling_result = tf.cond( tf.equal(sl_unhead, 0), lambda: tf.zeros([bs, 0, hn], tf.float32), lambda: mean_pooling_for_unselected_head( unhead_org_idx, sl_unhead, rep_unhead_mask, input_idx, sl, rep_mask, rep_map, None) # todo: point ! ) with tf.variable_scope('output'): if keep_unselected: range_head = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_head]) scatter_attn = tf.cond( tf.equal(sl_head, 0), lambda: tf.zeros([bs, sl+1, hn], tf.float32), lambda: tf.scatter_nd(
tensorflow.equal
7,838
import tensorflow as tf def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :], ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1
tensorflow.reduce_sum
7,839
import tensorflow as tf """Inject a deterministic latent based on the target frame.""" del filters hparams = self.hparams final_filters = common_layers.shape_list(layer)[-1] filters = hparams.hidden_size kernel = (4, 4) if hparams.mode == tf.estimator.ModeKeys.PREDICT: layer_shape = common_layers.shape_list(layer) if hparams.full_latent_tower: rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) else: rand = tf.random_uniform(layer_shape[:-3] + [ 1, 1, hparams.bottleneck_bits]) d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 z = tf.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 # Embed. x = tf.layers.dense( features["cur_target_frame"], filters, name="latent_embed", bias_initializer=tf.random_normal_initializer(stddev=0.01)) x = common_attention.add_timing_signal_nd(x) if hparams.full_latent_tower: for i in range(hparams.num_compress_steps): with tf.variable_scope("latent_downstride%d" % i): x = common_layers.make_even_size(x) if i < hparams.filter_double_steps: filters *= 2
tensorflow.layers.dense
7,840
import tensorflow as tf import tensorflow as tf import numpy as np import random import math class Simulator(): def __init__(self, type) -> None: if type == 'D': # deuteranope self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0.494207, 0, 1.24827], [0, 0, 1]]) elif type == 'P': # protanope self.color_matrix = tf.convert_to_tensor([[0, 2.02344, -2.52581], [0, 1, 0], [0, 0, 1]]) elif type == 'T': # tritanope self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0, 1, 0], [-0.395913, 0.801109, 0]]) else: raise ValueError("ERROR: invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')") self.rgb2lms = tf.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]]) def simulate_image(self, image): # passes an image through the color-blindness simulator inverted_rgb2lms = tf.linalg.inv(self.rgb2lms) product1 = tf.matmul(inverted_rgb2lms, self.color_matrix) product2 = tf.matmul(product1, self.rgb2lms) original_image_shape = image.shape
tensorflow.convert_to_tensor
7,841
import tensorflow as tf strides=stride, padding="SAME", use_bias=False) x = x * mask_ratio if use_bias: bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0)) x = tf.nn.bias_add(x, bias) return x * update_mask if padding == "REFLECT":
tensorflow.nn.bias_add
7,842
import tensorflow as tf # Compute loss if mode != 'gen': neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets) if target_weight_strategy == 'rect': avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods) else: neg_log_lhoods = tf.multiply(neg_log_lhoods, target_weights) # be careful to have at least one weight be nonzero # should we be taking the mean elem-wise by batch? i think this is a big bug avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights) neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll]) # Train op if mode == 'train': lr = tf.Variable(0.0, trainable=False) self._lr = lr self._lr_summary = tf.summary.scalar('learning_rate', self._lr) tvars = tf.trainable_variables() grads = tf.gradients(avg_neg_log_lhood, tvars) if grad_clip > 0.0: grads, _ = tf.clip_by_global_norm(grads, grad_clip) if opt == 'sgd': optimizer = tf.train.GradientDescentOptimizer(lr) else: raise NotImplementedError()
tensorflow.Variable
7,843
import tensorflow as tf self.names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f', 'avg_norm_k', 'avg_norm_g', 'avg_norm_k_dot_g', 'avg_norm_adj'] self.train_model = train_model self.step_model = step_model self.step = step_model.step self.proba_step = step_model.proba_step self.initial_state = step_model.initial_state tf.global_variables_initializer().run(session=self.sess) self.summary = tf.summary.merge_all() def _train_step(self, obs, actions, rewards, dones, mus, states, masks, steps, writer=None): """ applies a training step to the model :param obs: ([float]) The input observations
tensorflow.global_variables_initializer
7,844
import tensorflow as tf name: A string used as the name for this variable scope. Returns: (tf.Tensor) A single value tensor containing the loss. """ # loss = None with tf.name_scope(name, "click_loglikelihood"): ob_prob=tf.nn.softmax(propensity) rel_prob=tf.nn.softmax(train_output) click_prob=ob_prob*rel_prob click_prob_norm=click_prob/tf.reduce_sum(click_prob,axis=1,keep_dims=True) label_dis = labels/ tf.reduce_sum(labels, 1, keep_dims=True) entropy = tf.reduce_sum(tf.math.log(click_prob_norm)*label_dis,1) return tf.reduce_mean(entropy) def click_weighted_pairwise_loss(self, output, labels, propensity_weights, name=None): """Computes pairwise entropy loss with propensity weighting. Args: output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is the ranking score of the corresponding example. labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a relevant example. propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. name: A string used as the name for this variable scope. Returns: (tf.Tensor) A single value tensor containing the loss.
tensorflow.reduce_mean
7,845
import tensorflow as tf return merge_states(tf.transpose(x, [0, 2, 1, 3])) def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), pad='VALID', train=False): with tf.variable_scope(scope): #x = [-1,n_ctx,512] nx = shape_list(x)[-1]
tensorflow.variable_scope
7,846
import tensorflow as tf cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1]) log_p = tf.reduce_sum(self.log_pi * self.ac, [1]) worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p worker_loss = -tf.reduce_sum(worker_loss, axis=0) Am = self.r - self.manager_vf manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am)) Aw = (self.r + self.alpha * self.ri) - self.worker_vf worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw)) entropy = -tf.reduce_sum(self.pi * self.log_pi) beta = tf.train.polynomial_decay(beta_start, self.global_step, end_learning_rate=beta_end, decay_steps=decay_steps, power=1) # worker_loss = tf.Print(worker_loss,[manager_loss,worker_loss,manager_vf_loss,worker_vf_loss,entropy]) self.loss = worker_loss + manager_loss + \ worker_vf_loss + manager_vf_loss - \ entropy * beta
tensorflow.reduce_sum
7,847
import tensorflow as tf while len(initial_output.get_shape().as_list()) < 4: initial_output = tf.expand_dims(initial_output, 2) batch_size = common_layers.shape_list(initial_output)[0] else: batch_size = common_layers.shape_list(features["inputs"])[0] initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64) # Hack: foldl complains when the output shape is less specified than the # input shape, so we confuse it about the input shape. initial_output = tf.slice(initial_output, [0, 0, 0, 0], common_layers.shape_list(initial_output))
tensorflow.zeros
7,848
import tensorflow as tf dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), })
tensorflow.constant
7,849
import tensorflow as tf 'train_epochs', None, 'The number of epochs to use for training.') tf.app.flags.DEFINE_integer( 'batch_size', 12, 'Batch size for training and evaluation.') tf.app.flags.DEFINE_string( 'data_format', 'channels_first', # 'channels_first' or 'channels_last' 'A flag to override the data format used in the model. channels_first ' 'provides a performance boost on GPU but is not always compatible ' 'with CPU. If left unspecified, the data format will be chosen '
tensorflow.app.flags.DEFINE_string
7,850
import tensorflow as tf strides=[1, 1, 1, 1, 1], symmetric_weights=self.symmetric_gate_weights, dilations=self.hgru_dilations[layer_idx]) with tf.variable_scope( '%s/g1_bn' % var_scope, reuse=self.scope_reuse) as scope: g1_intermediate = tf.contrib.layers.batch_norm( inputs=g1_intermediate + gain_bias, scale=True, center=False, fused=True, renorm=False,
tensorflow.contrib.layers.batch_norm
7,851
import tensorflow as tf location_pred = tf.transpose(location_pred, [0, 2, 3, 1]) bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4])) cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']]) location_pred = tf.reshape(location_pred, [-1, 4]) glabels = tf.reshape(glabels, [-1]) gscores = tf.reshape(gscores, [-1]) gtargets = tf.reshape(gtargets, [-1, 4]) # raw mask for positive > 0.5, and for negetive < 0.3 # each positive examples has one label
tensorflow.reshape
7,852
import tensorflow as tf # Autoencoder loss autoencoder_loss = tf.reduce_mean(tf.square(x_target - decoder_output)) # Gaussian Discriminator Loss dc_g_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_real), logits=d_g_real)) dc_g_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_g_fake), logits=d_g_fake)) dc_g_loss = dc_g_loss_fake + dc_g_loss_real # Categorical Discrimminator Loss dc_c_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real), logits=d_c_real)) dc_c_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake), logits=d_c_fake))
tensorflow.zeros_like
7,853
import tensorflow as tf # finally project down if needed if use_proj: embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn # reshape back to (batch_size, tokens, dim) if use_highway or use_proj: shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0) embedding = tf.reshape(embedding, shp) # at last assign attributes for remainder of the model self.embedding = embedding def _build_word_embeddings(self): projection_dim = self.options['lstm']['projection_dim']
tensorflow.reshape
7,854
from tensorflow.contrib import layers pass def inference_graph(self, data): with ops.device(self.device_assigner): # Compute activations for the neural network. nn_activations = layers.fully_connected(data, self.params.layer_size) for _ in range(1, self.params.num_layers): # pylint: disable=W0106 nn_activations = layers.fully_connected(nn_activations,
tensorflow.contrib.layers.fully_connected
7,855
import tensorflow as tf def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618): try: # TruncatedNormalMoments requires scipy.stats. # Give up early if we are unable to import it. import scipy.stats # pylint: disable=g-import-not-at-top,unused-variable tf.set_random_seed(seed) with self.test_session(use_gpu=self._use_gpu): samples = random_ops.parameterized_truncated_normal(shape, mean, stddev, minval, maxval).eval()
tensorflow.set_random_seed
7,856
import tensorflow as tf means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True) scalar_prod = tf.matmul( tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
tensorflow.transpose
7,857
import tensorflow as tf print((sess.run(custom_polynomial(tf, 11)))) alpha = 0.1 val = tf.constant([[2, 3], [1, 4]], dtype=tf.float32) l1 = tf.contrib.layers.l1_regularizer(alpha)(val) l2 = tf.contrib.layers.l2_regularizer(alpha)(val) A = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]]
tensorflow.contrib.layers.l1_regularizer
7,858
import tensorflow as tf total_loss = tf.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits def get_qa_outputs(FLAGS, features, is_training): """Loss for downstream span-extraction QA tasks such as SQuAD.""" inp = tf.transpose(features["input_ids"], [1, 0]) seg_id = tf.transpose(features["segment_ids"], [1, 0]) inp_mask = tf.transpose(features["input_mask"], [1, 0]) cls_index = tf.reshape(features["cls_index"], [-1]) seq_len = tf.shape(inp)[0] xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) output = xlnet_model.get_sequence_output() initializer = xlnet_model.get_initializer()
tensorflow.shape
7,859
import tensorflow as tf with tf.variable_scope(scope_name): dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous) net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_name) optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) update_op = optimizer.minimize( tf.nn.seq2seq.sequence_loss(dec_op, targets, weights), var_list=net_variables) return dec_op, update_op, net_variables dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward( enc_inp, dec_inp_fp_true, feed_previous=True)
tensorflow.nn.seq2seq.sequence_loss
7,860
import tensorflow as tf ######################### print("Hello yes I am in build_act without noise") print(f"Obs space: {ob_space}") print(f"policy.obs_ph: {policy.obs_ph}") print(f"policy.processed_obs: {policy.processed_obs}") print(f"Obs_phs space: {obs_phs}") #assert 5 == 1 ####################### for var in tf.all_variables(): print(var) batch_size = tf.shape(policy.obs_ph)[0] n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
tensorflow.all_variables
7,861
import tensorflow as tf "lr": learning_rate }, every_n_iter=1 ), tf.train.CheckpointSaverHook( checkpoint_dir=params.output, save_secs=params.save_checkpoint_secs or None, save_steps=params.save_checkpoint_steps or None, saver=tf.train.Saver( max_to_keep=params.keep_checkpoint_max, sharded=False ) ) ] config = session_config(params)
tensorflow.train.Saver
7,862
import tensorflow as tf # Build a graph with 2 parameter nodes on different devices. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(10, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(20, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True) tf.initialize_all_variables().run() val = save.save(sess, save_path) self.assertEqual(save_path + "-?????-of-00002", val) meta_graph_filename = save._MetaGraphFilename(val) self.assertEqual(save_path + ".meta", meta_graph_filename) # Restore a different "v0" from shard 0 of the saved files. with tf.Session( target="",
tensorflow.initialize_all_variables
7,863
import tensorflow as tf shifted_sum_x2, shift, name="normalize_moments") return mean, variance def build_moving_stats(): return ( tf.identity(self._moving_mean), tf.identity(self._moving_variance), ) mean, variance = utils.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats,
tensorflow.identity
7,864
import tensorflow as tf validation_dataset_reader = dataset.BatchDatset(valid_records, image_options) sess = tf.Session() print("Setting up Saver...") saver = tf.train.Saver() # create two summary writers to show training loss and validation loss in the same graph # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir train_writer = tf.summary.FileWriter(osp.join(FLAGS.logs_dir , 'train'), sess.graph)
tensorflow.train.Saver
7,865
import tensorflow as tf in_size: size of the hidden state vectors mats: list of hidden state vectors """ pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size]) pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size]) # Make a prediction on every word. def GetWordPred(o_): logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias) return tf.nn.softmax(logits) #self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats]) #self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape()) #self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word) self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats]) self.preds_by_instance = tf.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])]) self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_instance)
tensorflow.nn.softmax
7,866
import tensorflow as tf def contra_traj_lossV6(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1]) tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = tf.cast(tgt_dif > 0, tf.bool) tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = tf.where(geq, pred_dif, -pred_dif) loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32) final_loss = tf.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV7(pred, tgt, horizon=12, temp=100): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
tensorflow.where
7,867
import tensorflow as tf y0 = tf.to_int32(tf.floor(y)) y1 = y0 + 1 z0 = tf.to_int32(tf.floor(z)) z1 = z0 + 1 x0_clip = tf.clip_by_value(x0, zero, max_x) x1_clip = tf.clip_by_value(x1, zero, max_x) y0_clip = tf.clip_by_value(y0, zero, max_y) y1_clip = tf.clip_by_value(y1, zero, max_y) z0_clip = tf.clip_by_value(z0, zero, max_z) z1_clip = tf.clip_by_value(z1, zero, max_z) dim3 = width dim2 = width * height dim1 = width * height * depth
tensorflow.clip_by_value
7,868
import tensorflow as tf # Compute average prediction across different scales and flipped images. predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
tensorflow.concat
7,869
import tensorflow as tf else: out = tf.pow(subsamp_sum, 1/pnorm) return out def mpool(inpOp, kH, kW, dH, dW, padding, name): with tf.variable_scope(name): maxpool = tf.nn.max_pool(inpOp, ksize=[1, kH, kW, 1], strides=[1, dH, dW, 1], padding=padding) return maxpool def apool(inpOp, kH, kW, dH, dW, padding, name):
tensorflow.nn.max_pool
7,870
import tensorflow as tf paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) return output def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2])
tensorflow.shape
7,871
import tensorflow as tf Args: boxlist1: Nx4 floatbox boxlist2: Mx4 Returns: a tensor with shape [N, M] representing pairwise intersections """ x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1) x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1) all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) return intersect_heights * intersect_widths @under_name_scope() def pairwise_iou(boxlist1, boxlist2): """Computes pairwise intersection-over-union between box collections.
tensorflow.maximum
7,872
import tensorflow as tf if norm == 'I': X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001) elif norm == 'B': X = tf.layers.batch_normalization(X, reuse=reuse, training=True) elif norm == 'G': X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if nonlin: X = tf.nn.leaky_relu(X, 0.2) return X
tensorflow.contrib.layers.group_norm
7,873
from tensorflow.python.ops import math_ops trainable=False) return moving_averages.assign_moving_average( moving_average_variable, value, decay, zero_debias=False) # quicker adaptation at the beginning if global_step is not None: n = math_ops.cast(global_step, dtypes.float32) decay = math_ops.minimum(decay, n / (n + 1.)) # update averages mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay) variance = sq_mean - math_ops.square(mean) std = math_ops.sqrt(math_ops.maximum(epsilon, variance)) max_norms = math_ops.exp(mean + std_factor * std) return max_norms, mean def adaptive_clipping_fn(std_factor=2., decay=0.95, static_max_norm=None, global_step=None, report_summary=False, epsilon=1e-8, name=None):
tensorflow.python.ops.math_ops.square
7,874
import tensorflow as tf value=input_, filter=weights, output_shape=[batch_size, height * 2, width * 2, channels], strides=[1, 2, 2, 1], padding="SAME", name="unsqueeze_2x2") else: res = tf.nn.conv2d( input=input_, filter=weights, strides=[1, 2, 2, 1], padding="SAME", name="squeeze_2x2")
tensorflow.nn.conv2d
7,875
import tensorflow as tf if extra_inputs is None: extra_inputs = tuple() last_loss = f_loss(*(tuple(inputs) + extra_inputs)) start_time = time.time() dataset = BatchDataset(inputs, self._batch_size, extra_inputs=extra_inputs) sess = tf.compat.v1.get_default_session() for epoch in range(self._max_epochs): if self._verbose: logger.log('Epoch {}'.format(epoch)) progbar = pyprind.ProgBar(len(inputs[0])) for batch in dataset.iterate(update=True): sess.run(self._train_op, dict(list(zip(self._input_vars, batch)))) if self._verbose:
tensorflow.compat.v1.get_default_session
7,876
import tensorflow as tf train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss,
tensorflow.metrics.accuracy
7,877
import tensorflow as tf padded_tensor_dict[fields.InputDataFields.groundtruth_boxes] .shape.as_list(), [3, 4]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_classes] .shape.as_list(), [3, 3]) def test_clip_boxes_and_classes(self): input_tensor_dict = { fields.InputDataFields.groundtruth_boxes: tf.placeholder(tf.float32, [None, 4]), fields.InputDataFields.groundtruth_classes: tf.placeholder(tf.int32, [None, 3]), fields.InputDataFields.num_groundtruth_boxes: tf.placeholder(tf.int32, []) } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3,
tensorflow.placeholder
7,878
import tensorflow as tf import gin import numpy as np import random import tensorflow as tf FLAGS = flags.FLAGS # augmentation functions # augment def random_crop_and_resize(images, ratio=0.8): b, h, w, c = images.get_shape().as_list() ch, cw = map(lambda x: int(x * ratio), (h, w)) crop = tf.random_crop(images, size=[b, ch, cw, 3]) crop = tf.image.resize(crop, [h, w]) return crop def random_apply(fn, image, prob=1.): b, *_ = image.get_shape().as_list() chance = tf.less(tf.random_uniform([b], 0, 1.0), prob) return tf.where(chance, fn(image), tf.identity(image)) def color_distortion(image, s=1.0): lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image x = tf.image.random_brightness(x, max_delta=0.8*s) x = tf.image.random_contrast(x, lower=lower, upper=upper) x = tf.image.random_saturation(x, lower=lower, upper=upper) x = tf.image.random_hue(x, max_delta=0.2*s) x = tf.clip_by_value(x, 0, 1)
tensorflow.image.resize
7,879
import tensorflow as tf name="dense2", ) outputs = H * T + inputs * (1.0 - T) return outputs def conv1d_banks(inputs, K=16, is_training=True, scope="conv1d_banks"): with tf.variable_scope(scope): outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding="SAME") for k in range(2, K + 1): with tf.variable_scope("num_{}".format(k)): output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME") outputs = tf.concat((outputs, output), -1) outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training)) return outputs
tensorflow.layers.conv1d
7,880
import tensorflow as tf logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10) flat_observations = tf.reshape(observations, [ tf.shape(observations)[0], tf.shape(observations)[1], functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)]) with tf.variable_scope("network_parameters"): with tf.variable_scope("policy"): x = flat_observations for size in config.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) mean = tf.layers.dense( x, action_space.shape[0], activation=tf.tanh, kernel_initializer=mean_weights_initializer)
tensorflow.variable_scope
7,881
import tensorflow as tf correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True) cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight cost = tf.where(cost > 0, cost, 0, name='value')
tensorflow.square
7,882
import tensorflow as tf prediction_dict['class_predictions_with_background'], batch_cls_targets, weights=weights) loss_dict = { 'localization_loss': tf.reduce_sum(location_losses), 'classification_loss': tf.reduce_sum(cls_losses), } return loss_dict def restore_map(self, from_detection_checkpoint=True):
tensorflow.reduce_sum
7,883
import tensorflow as tf def _add_image_summary(self, image, boxes): # add back mean ''' tf.stack() concatenates tensors into a single tensor; tf.unstack() splits a tensor back into a list ''' image += cfg.FLAGS2["pixel_means"] # bgr to rgb (opencv uses bgr) channels = tf.unstack(image, axis=-1) image = tf.stack([channels[2], channels[1], channels[0]], axis=-1) # dims for normalization width = tf.to_float(tf.shape(image)[2]) height = tf.to_float(tf.shape(image)[1]) # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y2, x2] cols = tf.unstack(boxes, axis=1) boxes = tf.stack([cols[1] / height, cols[0] / width, cols[3] / height, cols[2] / width], axis=1) # add batch dimension (assume batch_size==1) #assert image.get_shape()[0] == 1 boxes = tf.expand_dims(boxes, dim=0) image = tf.image.draw_bounding_boxes(image, boxes) # draw the gt_truth boxes on the image
tensorflow.shape
7,884
import tensorflow as tf assert len(segment_ids) == max_seq_length if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("id: %s" % (example.unique_id)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in input_tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tensorflow.logging.info
7,885
import tensorflow as tf gstack = tf.concat([self.prev_g, cut_g], axis=1) self.last_c_g = gstack[:, 1:] # print self.last_c_g gsum = tf.reduce_sum(gstack, axis=1) phi = tf.get_variable("phi", (self.g_dim, self.k)) w = tf.matmul(gsum, phi) w = tf.expand_dims(w, [2])
tensorflow.reduce_sum
7,886
import tensorflow as tf tf.add_to_collection(name, op) self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
tensorflow.get_collection_ref
7,887
import tensorflow as tf print('Conv layer {0} -> {1}'.format(bottom.get_shape().as_list(),conv_layer.get_shape().as_list())) return conv_layer def batch_norm_layer(self, name, input_tensor,training): with tf.variable_scope(name) as scope: return tf.contrib.layers.batch_norm(input_tensor,scope=scope,is_training=training,decay=0.99) def deconv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer, stride = 1, bn=False, training=False, relu=True): input_shape = bottom.get_shape().as_list() input_channels = input_shape[-1] output_shape = [input_shape[0], input_shape[1]*stride, input_shape[2]*stride, output_channels] with tf.variable_scope(name) as scope: kernel = self.variable('weights', [kernel_size, kernel_size, output_channels, input_channels], initializer, regularizer=tf.contrib.layers.l2_regularizer(0.0005)) deconv = tf.nn.conv2d_transpose(bottom, kernel, output_shape, [1, stride, stride, 1], padding='SAME') biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0)) deconv_layer = tf.nn.bias_add(deconv, biases) if bn: deconv_layer = self.batch_norm_layer('batch_norm_layer',deconv_layer,training) if relu: deconv_layer = tf.nn.relu(deconv_layer, name=scope.name) print('Deconv layer {0} -> {1}'.format(bottom.get_shape().as_list(),deconv_layer.get_shape().as_list())) return deconv_layer
tensorflow.variable_scope
7,888
import tensorflow as tf s_h, s_w = self.output_height, self.output_width s_h0, s_h1, s_h2, s_h3 = \ int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3) s_w0, s_w1, s_w2, s_w3 = \ int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3) def decode(z, skip_h3, skip_h2, skip_h1, skip_h0): z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin')) h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob) h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3), [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3)) h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3), [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2)) h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3), [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))
tensorflow.nn.dropout
7,889
import tensorflow as tf w_z1_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * (z - z0_f) * x1_valid * y0_valid * z0_valid), 1) w_z1_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) * (z - z0_f) * x0_valid * y0_valid * z0_valid), 1) output = tf.add_n([ w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1, w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1, w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1, w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1 ]) return output
tensorflow.add_n
7,890
import tensorflow as tf def affine(inpOp, nIn, nOut, name, weight_decay=0.0): with tf.variable_scope(name): l2_regularizer = lambda t: l2_loss(t, weight=weight_decay) weights = tf.get_variable("weights", [nIn, nOut], initializer=tf.truncated_normal_initializer(stddev=1e-1), regularizer=l2_regularizer, dtype=inpOp.dtype) biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype) affine1 = tf.nn.relu_layer(inpOp, weights, biases) return affine1
tensorflow.truncated_normal_initializer
7,891
import tensorflow as tf filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32), loss_class_mask) filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b) filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class]) loss_class = tf.keras.losses.sparse_categorical_crossentropy( y_true=filter_class_true, y_pred=filter_class_pred) loss_class = tf.reduce_mean(loss_class)
tensorflow.keras.losses.sparse_categorical_crossentropy
7,892
import tensorflow as tf def main(_): data_dir = os.path.join(FLAGS.dir, "data") train_data = load_dataset( data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size) eval_data = load_dataset( data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size) model = RNNColorbot( rnn_cell_sizes=FLAGS.rnn_cell_sizes, label_dimension=3, keep_prob=FLAGS.keep_probability) optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) if FLAGS.no_gpu or tfe.num_gpus() <= 0: print(tfe.num_gpus()) device = "/cpu:0" else: device = "/gpu:0" print("Using device %s." % device) log_dir = os.path.join(FLAGS.dir, "summaries") tf.gfile.MakeDirs(log_dir)
tensorflow.train.AdamOptimizer
7,893
import tensorflow as tf optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
tensorflow.train.MomentumOptimizer
7,894
import tensorflow as tf # initializer=tf.random_uniform_initializer(-0.003, 0.003)) initializer=tf.contrib.layers.xavier_initializer()) # initializer=tf.truncated_normal_initializer(stddev=0.1)) b = tf.get_variable('b', [self.num_classes], initializer=tf.constant_initializer(0.1)) logits = tf.matmul(last_outputs, W) + b self.embed_inputs = embed_inputs return logits def loss(self, logits, forward_only=None): cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self.y, tf.float32)) mean_cost = tf.reduce_mean(cost) y_pred = tf.argmax(logits, 1) correct_pred = tf.equal(y_pred, tf.argmax(self.y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) if forward_only: str_summary_type = 'eval' loss_summ = tf.summary.scalar("{0}_loss".format(str_summary_type), mean_cost) acc_summ = tf.summary.scalar("{0}_accuracy".format(str_summary_type), accuracy) merged = tf.summary.merge([loss_summ, acc_summ]) return mean_cost, accuracy, y_pred, merged else: return mean_cost, accuracy, y_pred
tensorflow.argmax
7,895
import tensorflow as tf tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
tensorflow.flags.DEFINE_string
7,896
import tensorflow as tf use_one_hot_embeddings): def model_fn(features, labels, mode, params): tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"]
tensorflow.logging.info
7,897
import tensorflow as tf negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if sample_size is None: max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32)) else: max_num_pos = int(positive_fraction * sample_size) sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)
tensorflow.cast
7,898
import tensorflow as tf # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # print("----y_conv-----") # print(y_conv) # exit() # Exchange dim 1 and dim 0 # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36] feature_mat = tf.transpose(feature_mat, [1, 0, 2]) # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9] print("----feature_mat-----") print(feature_mat) # exit() # Temporarily crush the feature_mat's dimensions
tensorflow.transpose
7,899