Columns:
seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, 0 to 14.8k
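Each record below pairs a seed code snippet with the fully qualified seed_api it exercises and a running index. As a minimal sketch of how rows with this schema could be read back, assuming a JSON Lines export with exactly these three fields; the file name data.jsonl and the helper iter_rows are illustrative placeholders, not part of the source:

import json

def iter_rows(path):
    # Yield one dict per record with the three columns described above.
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)            # one JSON object per line
            yield {
                "seed": row["seed"],          # code snippet, 25 to 2.89k chars
                "seed_api": row["seed_api"],  # fully qualified API name
                "index": int(row["index"]),   # int64 row id, 0 to 14.8k
            }

if __name__ == "__main__":
    for record in iter_rows("data.jsonl"):
        print(record["index"], record["seed_api"])

The records follow, one seed snippet, its seed_api, and its index per row.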
import tensorflow as tf

def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
        print("querry_size mismatch")
        query = tf.concat(values=[query, query], axis=1)
    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
tensorflow.array_ops.transpose
6,700
import tensorflow as tf

mask = tf.equal(mask, tf.ones_like(mask))
tensorflow.ones_like
6,701
import tensorflow as tf 'kappa_%s' % layer, tf.constant(1.)) setattr( self, 'omega_%s' % layer, tf.constant(1.)) if self.adapation: setattr( self, 'eta_%s' % layer, tf.get_variable( name='%s_eta' % self.layer_name, dtype=self.dtype, initializer=tf.random_uniform( [self.timesteps], dtype=tf.float32))) if self.lesion_omega: setattr( self, 'omega_%s' % layer, tf.constant(0.)) if self.lesion_kappa: setattr( self, 'kappa_%s' % layer, tf.constant(0.)) if self.reuse:
tensorflow.random_uniform
6,702
import tensorflow as tf

self.prediction = tf.nn.sigmoid(logits)
self.loss = loss_fn(labels=self.labels, logits=logits)
train_op = train_fn(self.loss, learning_rate=self.learning_rate, learner=self.learner)
initializer = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(graph=graph, config=config)
self.sess.run(initializer)
loop = trange(self.num_epochs, disable=not self.verbose)
for _ in loop:
tensorflow.ConfigProto
6,703
import tensorflow as tf block_conv_input = bottom input_filter = bottom.get_shape().as_list()[-1] block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a") block_norm_1 = tf.layers.batch_normalization(inputs=block_conv_1, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True) block_relu_1 = tf.nn.relu(block_norm_1) block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b") block_norm_2 = tf.layers.batch_normalization(inputs=block_conv_2, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True) block_relu_2 = tf.nn.relu(block_norm_2) block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c") block_res = tf.add(block_conv_input, block_conv_3) relu = tf.nn.relu(block_res) return relu def avg_pool(self, bottom, kernal_size = 2, stride = 2, name = "avg"): return tf.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='VALID', name=name) def max_pool(self, bottom, kernal_size = 2, stride = 2, name = "max"): return tf.nn.max_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='SAME', name=name) def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name):
tensorflow.nn.relu
6,704
import tensorflow as tf

if 'all' in model_scope:
    lnorm_table = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(
            tf.constant(config.global_norm_key, dtype=tf.int64),
            tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0)
    rnorm_table = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(
            tf.constant(config.global_norm_key, dtype=tf.int64),
            tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1)
else:
tensorflow.constant
6,705
from tensorflow.contrib.layers.python.layers import utils update_variance_op = moving_averages.assign_moving_average( variable=self._moving_variance, value=variance, decay=self._decay_rate, name="update_moving_variance").op return update_mean_op, update_variance_op def build_no_ops(): return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_variance_op) def _build_update_ops_second_moment(self, mean, second_moment, is_training): """Builds the moving average update ops when using the moving second moment. Args:
tensorflow.contrib.layers.python.layers.utils.smart_cond
6,706
import tensorflow as tf

        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range),
    )
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer()
    )
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
tensorflow.matmul
6,707
import tensorflow as tf

with tf.Session("", graph=tf.Graph()) as sess:
    with sess.graph.device("/gpu:0"):
        v0_2 = tf.Variable(543.21)
        save = tf.train.Saver({"v0": v0_2})
tensorflow.Variable
6,708
from tensorflow.python.framework import ops # whether we should use the first or last index in case of ties. min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity)) indices_at_minval = math_ops.equal( math_ops.abs(sensitivities - sensitivity), min_val) indices_at_minval = math_ops.to_int64(indices_at_minval) indices_at_minval = math_ops.cumsum(indices_at_minval) tf_index = math_ops.argmax(indices_at_minval, 0) tf_index = math_ops.cast(tf_index, dtypes.int32) # Now, we have the implicit threshold, so compute the specificity: return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon, name) specificity = compute_specificity_at_sensitivity('value') with ops.control_dependencies( [tp_update_op, fn_update_op, tn_update_op, fp_update_op]): update_op = compute_specificity_at_sensitivity('update_op') if metrics_collections: ops.add_to_collections(metrics_collections, specificity) if updates_collections: ops.add_to_collections(updates_collections, update_op) return specificity, update_op def streaming_sensitivity_at_specificity( predictions, labels, specificity, weights=None, num_thresholds=200,
tensorflow.python.framework.ops.control_dependencies
6,709
import tensorflow as tf

            Shape [batch_size, embeddings_dim]
    Return:
        pull away term loss
    """
    with tf.name_scope(name):
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
        batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
        pt_loss = (tf.reduce_sum(similarity) - batch_size) / \
                  (batch_size * (batch_size - 1))
        return pt_loss


def log_sum_exp(x):
    """numerically stable log_sum_exp implementation that prevents overflow."""
tensorflow.shape
6,710
import tensorflow as tf

def f1():
    input_shape = input_tensor.get_shape().as_list()
    noise_shape = tf.constant(value=[input_shape[0], 1, 1, input_shape[3]])
    return tf.nn.dropout(input_tensor, keep_prob, noise_shape, seed=seed, name="spatial_dropout")
tensorflow.constant
6,711
import tensorflow as tf

        tf.floormod(
            tf.floordiv(tf.to_int32(x_l), tf.to_int32(base)**i),
            tf.to_int32(base)))
    res = tf.concat(x_labels, axis=-1)
tensorflow.to_int32
6,712
import tensorflow as tf

query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
tensorflow.shape
6,713
import tensorflow as tf

def test_one(self):
    with self.test_session() as session:
        @dynamic_batching.batch_fn
        def f(a, b):
            batch_size = tf.shape(a)[0]
            return a + b, tf.tile([batch_size], [batch_size])

        output = f(tf.constant([[1, 3]]), tf.constant([2]))
        tf.train.start_queue_runners()
        result, batch_size = session.run(output)
        self.assertAllEqual([[3, 5]], result)
tensorflow.constant
6,714
import tensorflow as tf

def test_do_not_pad_dynamic_images(self):
    input_tensor_dict = {
        fields.InputDataFields.image:
            tf.placeholder(tf.float32, [None, None, 3]),
    }
    padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
        tensor_dict=input_tensor_dict,
tensorflow.placeholder
6,715
import tensorflow as tf assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in assert (len(filter_dims) == 3) # height, width and num_channels out assert (len(stride_dims) == 2) # stride height and width num_channels_in = input_dims[-1] filter_h, filter_w, num_channels_out = filter_dims stride_h, stride_w = stride_dims with tf.variable_scope(scope): conv_weight = tf.Variable( tf.truncated_normal([filter_h, filter_w, num_channels_in, num_channels_out], stddev=0.1, dtype=tf.float32)) conv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32)) map = tf.nn.conv2d(input, conv_weight, strides=[1, stride_h, stride_w, 1], padding=padding, dilations=dilation) if bias is True: map = tf.nn.bias_add(map, conv_bias) if non_linear_fn is not None: activation = non_linear_fn(map) else: activation = map # print(activation.get_shape().as_list()) return activation def batch_norm_conv(x, b_train, scope): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): n_out = x.get_shape().as_list()[-1]
tensorflow.nn.bias_add
6,716
import tensorflow as tf inputs=conv1_bn, pool_size=[2, 2], strides=2, name='pool1' ) conv2 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=[5,5], padding='same', use_bias=False, name='conv2' ) conv2_bn = tf.nn.relu(tf.layers.batch_normalization(conv2, training=train)) pool2 = tf.layers.max_pooling2d( inputs=conv2_bn, pool_size=[2, 2], strides=2, name='pool2' ) v = tf.reshape(pool2, [-1, 4096]) fc1 = tf.layers.dense( inputs=v, units=1024, activation=tf.nn.relu, use_bias=True, name='fc1'
tensorflow.layers.max_pooling2d
6,717
import tensorflow as tf

outputs = []
for i in xrange(200):
    outputs.append(f(tf.fill([1, 5], i), tf.fill([1, 5], i)))
tensorflow.fill
6,718
import tensorflow as tf

# How to set placements on multiple devices.
# Here, assume we have three devies CPU:0, GPU:0, and GPU:1
if tf.test.is_built_with_cuda():
    with tf.device('/cpu:0'):
        a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
        b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
    with tf.device('/gpu:1'):
        c = tf.matmul(a, b)
        c = tf.reshape(c, [-1])
    with tf.device('/gpu:2'):
        d = tf.matmul(b, a)
        flat_d = tf.reshape(d, [-1])
    combined = tf.mul(c, flat_d)
tensorflow.device
6,719
import tensorflow as tf Returns: Variable tensor """ with tf.variable_scope(scope) as sc: kernel_d, kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value
tensorflow.variable_scope
6,720
import tensorflow as tf loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) loss = tf.reduce_mean(loss) return loss def contra_step_lossV3(pred, tgt, margin=1.0): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin) loss = tf.reduce_mean(loss) return loss def contra_step_lossV4(pred, tgt): # 50*50 # Step-wise contrastive loss even = [2 * i for i in range(25)]
tensorflow.where
6,721
from tensorflow.contrib.layers.python.layers import utils return update_mean_op, update_second_moment_op def build_no_ops(): return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_second_moment_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op)
tensorflow.contrib.layers.python.layers.utils.smart_cond
6,722
import tensorflow as tf # Register tf dataset handles if self.datasets: self.dataset_handles = {} for n, i in self.dataset_iterators.items(): self.dataset_handles[n] = self.sess.run(i.string_handle()) self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()]) def train(self, iterations, validation_interval=100, output_dir=None, save_interval=None, checkpoint_path=None, keep_checkpoints=1): assert 'training' in self.datasets, 'Training dataset is required.' if output_dir is not None: train_writer = tf.summary.FileWriter(output_dir) if not hasattr(self, 'saver'):
tensorflow.local_variables_initializer
6,723
from tensorflow.contrib import slim

):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
        with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
            with slim.arg_scope(
                    [slim.separable_conv2d],
                    weights_regularizer=depthwise_regularizer) as sc:
tensorflow.contrib.slim.arg_scope
6,724
import tensorflow as tf "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class Config(object): """Configuration."""
tensorflow.flags.DEFINE_string
6,725
import tensorflow as tf

# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tensorflow.ConfigProto
6,726
import tensorflow as tf accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) test_pre = tf.argmax(test_pre, 1) test_true = tf.argmax(test_labels, 1) valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum) valid_inf=work.valid_inference(valid_image_batch) valid_labels=tf.one_hot(valid_label_batch,classnum) #train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy) valid_pre = tf.reshape(valid_inf, [validnum, classnum]) valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1)) valid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32)) valid_pre = tf.argmax(valid_pre, 1) valid_true = tf.argmax(valid_labels, 1) target_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo', 'class ym', 'class shq', 'class shj', 'class no', 'class yh', 'class fb'] init = tf.initialize_all_variables() config=tf.ConfigProto() config.gpu_options.allow_growth=True #init=tf.initialize_all_variables()
tensorflow.argmax
6,727
import tensorflow as tf tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=validation_input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False, batch_size=FLAGS.eval_batch_size, use_hvd=FLAGS.use_hvd) if FLAGS.auto_recover: hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator)) train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks) eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) if __name__ == "__main__": # flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
tensorflow.estimator.train_and_evaluate
6,728
import tensorflow as tf

errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tensorflow.reduce_mean
6,729
import tensorflow as tf

    return xs, s


def conv_to_fc(x):
    nh = np.prod([v.value for v in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, nh])
    return x


def discount_with_dones(rewards, dones, gamma):
    discounted = []
tensorflow.reshape
6,730
import tensorflow as tf # Build first layer with bigger tensor. base_conv_tensors = [ first_base_conv_layer( inputs=tf.zeros( shape=[1] + conv_block[0][0:2] + [num_in_channels], dtype=tf.float32 ) ) ] # Now build the rest of the base conv block layers, store in list. base_conv_tensors.extend( [ base_conv_layer_block[i]( inputs=tf.zeros( shape=[1] + conv_block[i][0:3], dtype=tf.float32 ) ) for i in range(1, len(conv_block)) ] ) print_obj( "\nbuild_discriminator_base_conv_layer_block", "base_conv_tensors", base_conv_tensors ) return base_conv_tensors
tensorflow.zeros
6,731
from tensorflow.python.ops import array_ops The created variable. """ # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES collections = list(collections or []) collections += [ops.GraphKeys.LOCAL_VARIABLES] return variables.Variable( initial_value=array_ops.zeros(shape, dtype=dtype), name=name, trainable=False, collections=collections, validate_shape=validate_shape)
tensorflow.python.ops.array_ops.zeros
6,732
import tensorflow as tf c, h = self._get_initial_lstm(features=features) x = self._word_embedding(inputs=captions_in) features_proj = self._project_features(features=features) loss = 0.0 alpha_list = [] lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.H) for t in range(self.T): context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0)) alpha_list.append(alpha) if self.selector:
tensorflow.contrib.rnn.BasicLSTMCell
6,733
import tensorflow as tf def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), }
tensorflow.FixedLenFeature
6,734
import tensorflow as tf config.left_right_group_map[category][0], config.left_right_group_map[category][1], config.left_right_group_map[category][2]], tf.int64, stateful=True) with tf.control_dependencies([save_image_op]): pred_x, pred_y = pred_x * 1., pred_y * 1. return pred_x, pred_y
tensorflow.control_dependencies
6,735
import tensorflow as tf with tf.variable_scope('word_embedding', reuse=reuse): w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer) x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M) return x def _project_features(self, features): with tf.variable_scope('project_features'): w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer) features_flat = tf.reshape(features, [-1, self.D]) features_proj = tf.matmul(features_flat, w) features_proj = tf.reshape(features_proj, [-1, self.L, self.D]) return features_proj def _attention_layer(self, features, features_proj, h, reuse=False): with tf.variable_scope('attention_layer', reuse=reuse): w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer) b = tf.get_variable('b', [self.D], initializer=self.const_initializer) w_att = tf.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer) h_att = tf.nn.relu(features_proj + tf.expand_dims(tf.matmul(h, w), 1) + b) # (N, L, D) out_att = tf.reshape(tf.matmul(tf.reshape(h_att, [-1, self.D]), w_att), [-1, self.L]) # (N, L) alpha = tf.nn.softmax(out_att) context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context') #(N, D) return context, alpha def _selector(self, context, h, reuse=False): with tf.variable_scope('selector', reuse=reuse): w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
tensorflow.variable_scope
6,736
import tensorflow as tf pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False) pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False) if pop_mean not in tf.moving_average_variables(): tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean) tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var) def func1(): # execute at training time batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1)) update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean)) update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var)) with tf.control_dependencies([update_mean, update_var]): return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon) def func2(): # execute at test time return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon) return tf.cond(train, func1, func2) def average_gradients(tower_grads):
tensorflow.assign_sub
6,737
import tensorflow as tf metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics]))) metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics} return metrics def _checkpoint_var_search(self, checkpoint_path): reader = tf.train.NewCheckpointReader(checkpoint_path) saved_shapes = reader.get_variable_to_shape_map() model_names = tf.model_variables() # Used by tf.slim layers if not len(tf.model_variables()): model_names = tf.global_variables() # Fallback when slim is not used model_names = set([v.name.split(':')[0] for v in model_names]) checkpoint_names = set(saved_shapes.keys()) found_names = model_names & checkpoint_names missing_names = model_names - checkpoint_names shape_conflicts = set() restored = []
tensorflow.model_variables
6,738
import tensorflow as tf reuse = True if i > 0 else None, dropout = self.dropout) ) with tf.variable_scope("Output_Layer"): start_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[2]],axis = -1),1, bias = False, name = "start_pointer"),-1) end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]],axis = -1),1, bias = False, name = "end_pointer"), -1) self.logits = [mask_logits(start_logits, mask = self.c_mask), mask_logits(end_logits, mask = self.c_mask)] logits1, logits2 = [l for l in self.logits]
tensorflow.concat
6,739
import tensorflow as tf y_true=heatmap_true * true_weight[:, i]) losses.append(loss) return tf.reduce_mean(loss) class JointsMSELoss(object): def __init__(self): self.mse = tf.losses.MeanSquaredError() def __call__(self, y_pred, target, target_weight): batch_size = y_pred.shape[0] num_of_joints = y_pred.shape[-1] pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints)) heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1) gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints)) heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1) loss = 0.0 for i in range(num_of_joints): heatmap_pred = tf.squeeze(heatmap_pred_list[i]) heatmap_gt = tf.squeeze(heatmap_gt_list[i]) loss += 0.5 * self.mse(y_true=heatmap_pred * target_weight[:, i], y_pred=heatmap_gt * target_weight[:, i]) return loss / num_of_joints
tensorflow.reshape
6,740
import tensorflow as tf return_dict["end_top_index"] = end_top_index # an additional layer to predict answerability with tf.variable_scope("answer_class"): # get the representation of CLS cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32) cls_feature = tf.einsum("lbh,bl->bh", output, cls_index) # get the representation of START start_p = tf.nn.softmax(start_logits_masked, axis=-1, name="softmax_start") start_feature = tf.einsum("lbh,bl->bh", output, start_p) # note(zhiliny): no dependency on end_feature so that we can obtain # one single `cls_logits` for each sample ans_feature = tf.concat([start_feature, cls_feature], -1) ans_feature = tf.layers.dense( ans_feature,
tensorflow.nn.softmax
6,741
import tensorflow as tf

@layer
def embedding_layer(tensor, vocab_size=None, embedding_dim=None, embedding_matrix=None, **opts):
    if embedding_matrix is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        embedding_matrix = tf.get_variable("embedding_matrix", initializer=initializer(shape=(vocab_size, embedding_dim)))

    out = tf.nn.embedding_lookup(embedding_matrix, tensor)
    return out
tensorflow.contrib.layers.xavier_initializer
6,742
import tensorflow as tf if bias is True: map = tf.nn.bias_add(map, conv_bias) if non_linear_fn is not None: activation = non_linear_fn(map) else: activation = map # print(activation.get_shape().as_list()) return activation def batch_norm_conv(x, b_train, scope): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): n_out = x.get_shape().as_list()[-1] beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out])) gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out])) batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments') ema = tf.train.ExponentialMovingAverage(decay=0.9) def mean_var_with_update(): ema_apply_op = ema.apply([batch_mean, batch_var]) with tf.control_dependencies([ema_apply_op]): return tf.identity(batch_mean), tf.identity(batch_var)
tensorflow.variable_scope
6,743
import tensorflow as tf

self.value_estimate = tf.layers.dense(self.state, 1, kernel_initializer=w_init, name='v')  # estimated value for state

# loss and optimizer
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
tensorflow.squared_difference
6,744
import tensorflow as tf q1_in_ph = tf.concat([x, a], axis=-1) q1_in_dim = q1_in_ph.shape.as_list()[1] q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders() q1, q1_reg = mlp_variational(q1_in_ph, q1_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) q1 = tf.squeeze(q1, axis=2) with tf.variable_scope('q1', reuse=True): q1_pi, q1_pi_reg = mlp_variational(tf.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) q1_pi = tf.squeeze(q1_pi, axis=2) with tf.variable_scope('q2'): q2_in_ph = tf.concat([x, a], axis=-1) q2_in_dim = q2_in_ph.shape.as_list()[1] q2_dropout_mask_generator = DropoutMaskGenerator(q2_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) q2_dropout_mask_phs = q2_dropout_mask_generator.generate_dropout_mask_placeholders() q2, q2_reg = mlp_variational(q2_in_ph, q2_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) q2 = tf.squeeze(q2, axis=2) else:
tensorflow.squeeze
6,745
import tensorflow as tf sampled_actions_tiled = self._actor( next_relabelled_obs, step_type=(), network_state=())[0].sample() critic_input = (next_relabelled_obs, sampled_actions_tiled) q_vals, _ = self._critic(critic_input, training=False) q_vals_vec = tf.reshape(q_vals, (batch_size, num_tasks)) rewards, dones = self._task_distribution.evaluate(states_tiled, actions_tiled,
tensorflow.reshape
6,746
import tensorflow as tf

if len(from_shape) == 4:
    broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
tensorflow.ones
6,747
import tensorflow as tf

tf.set_random_seed(np.random.randint(1234))

with tf.Graph().as_default() as g:
    with tf.device("/cpu:0"):
        images, labels = inputs(batch_size=FLAGS.batch_size,
tensorflow.device
6,748
import tensorflow as tf # self.learning_rate -- you can get this from self.optimizer_spec.lr_schedule.value(t) # (this is needed by the optimizer to choose the learning rate) # 3.d: periodically update the target network by calling # self.session.run(self.update_target_fn) # you should update every target_update_freq steps, and you may find the # variable self.num_param_updates useful for this (it was initialized to 0) obs_batch, act_batch, rew_batch, obs_tp1_batch, done_mask = self.replay_buffer.sample(self.batch_size) if not self.model_initialized: initialize_interdependent_variables(self.session, tf.global_variables(), {self.obs_t_ph : obs_batch, self.obs_tp1_ph : obs_tp1_batch}) self.model_initialized = True self.session.run(self.train_fn, {self.obs_t_ph: obs_batch, self.act_t_ph: act_batch, self.rew_t_ph: rew_batch, self.obs_tp1_ph: obs_tp1_batch, self.done_mask_ph: done_mask, self.learning_rate : self.optimizer_spec.lr_schedule.value(self.t)}, options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
tensorflow.global_variables
6,749
import tensorflow as tf with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE): weights = tf.get_variable('weights', [prev_node, output_node], initializer=tf.truncated_normal_initializer(stddev=0.1)) self.nnweights.append(weights) biases = tf.get_variable('biases', [output_node], initializer=tf.constant_initializer(0.0)) layer_out = tf.matmul(prev_x, weights) + biases # Output of Network y = layer_out # Global step with tf.variable_scope('training_step', reuse=tf.AUTO_REUSE): global_step = tf.get_variable("global_step", [], dtype=tf.int32, initializer=tf.constant_initializer(0),
tensorflow.matmul
6,750
import tensorflow as tf for i in range(hparams.num_compress_steps): with tf.variable_scope("latent_downstride%d" % i): x = common_layers.make_even_size(x) if i < hparams.filter_double_steps: filters *= 2 x = common_attention.add_timing_signal_nd(x) x = tf.layers.conv2d(x, filters, kernel, activation=common_layers.belu, strides=(2, 2), padding="SAME") x = common_layers.layer_norm(x) else: x = common_layers.double_discriminator(x)
tensorflow.layers.conv2d
6,751
import tensorflow as tf del self.sess gc.collect() class TfGraphTestCase: def setup_method(self): tf.reset_default_graph() self.graph = tf.Graph() for c in self.graph.collections: self.graph.clear_collection(c) self.graph_manager = self.graph.as_default() self.graph_manager.__enter__() self.sess = tf.Session(graph=self.graph) self.sess_manager = self.sess.as_default()
tensorflow.Graph
6,752
import tensorflow as tf weights_regularizer=None, scope=scope + '_depthwise') return slim.conv2d( outputs, filters, 1, weights_initializer=tf.truncated_normal_initializer( stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope=scope + '_pointwise')
tensorflow.truncated_normal_initializer
6,753
import tensorflow as tf

weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(
    reshaped_train_labels, pad_removed_train_output, list_weights)
tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train'])
tensorflow.summary.scalar
6,754
import tensorflow as tf

smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
                          tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
tensorflow.multiply
6,755
import tensorflow as tf # forward-only cannot be enabled with eval at the same time. tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking') tf.flags.DEFINE_boolean('forward_only', False, """whether use forward-only or training for benchmarking""") tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device') tf.flags.DEFINE_integer('num_batches', 100, 'number of batches to run, excluding warmup') tf.flags.DEFINE_integer('num_warmup_batches', None, 'number of batches to run before timing') tf.flags.DEFINE_integer('autotune_threshold', None,
tensorflow.flags.DEFINE_integer
6,756
import tensorflow as tf

mask3 = tf.constant([[1, 1], [1, 1]], dtype=tf.float32)
mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32)
mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])
scores = tf.constant([[0.05, 1.0, 0.2],
tensorflow.constant
6,757
import tensorflow as tf # print('VNET Out:', X.get_shape().as_list()) # if self.args.mode == 'adapt': return X, X_EARLY, X_MIDDLE, X_LATE, prediction # else: # return X, prediction def D_4(self, X, reuse): def discrim_conv(name, X, out_channels, filtersize, stride=1, norm='', nonlin=True, init_stddev=-1): with tf.variable_scope(name) as scope: if init_stddev <= 0.0: init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32) else: init = tf.truncated_normal_initializer(stddev=init_stddev) X = tf.layers.conv2d(X, out_channels, kernel_size=filtersize, strides=(stride, stride), padding="valid", kernel_initializer=init) if norm == 'I': X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001) elif norm == 'B': X = tf.layers.batch_normalization(X, reuse=reuse, training=True) elif norm == 'G': X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
tensorflow.contrib.layers.variance_scaling_initializer
6,758
import tensorflow as tf # sum over hidden units num = tf.reduce_sum(tf.square(num), axis=2) denom = tf.reduce_sum(tf.square(denom), axis=2) bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num)) nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20), 1.0 * tf.ones_like(num), 1.0 * tf.zeros_like(num)), axis=1) # sum mean over each batch by time steps Omega = tf.square(bounded - 1.0) Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems)) out = tf.gradients(Omega, self.W_rec) out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads") out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad") return out, test def sussillo_reg(self): states = self.states reg = 0 for state in states: dJr = tf.matmul(tf.nn.relu(state), tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)) reg += tf.reduce_sum(tf.square(dJr))
tensorflow.verify_tensor_all_finite
6,759
import tensorflow as tf print('\nD=') print(sess.run(D)) print('\nA+B=') print(sess.run(A + B)) print('\nB-B=') print(sess.run(B - B)) print('\nB*I=') BI = tf.matmul(B, identity_matrix) print(sess.run(BI)) print('\ntranspose(C)=') print(sess.run(tf.transpose(C))) print('\ntranspose(D)=') print(sess.run(tf.transpose(D))) print('\ninverse(D)=')
tensorflow.matmul
6,760
import tensorflow as tf return inputs + shortcut return inputs # Three types of downsampling methods described by paper def downsampling(inputs, downsampling_type, name, optional_shortcut=False, shortcut=None): # k-maxpooling if downsampling_type=='k-maxpool': k = math.ceil(int(inputs.get_shape()[1]) / 2) pool = tf.nn.top_k(tf.transpose(inputs, [0,2,1]), k=k, name=name, sorted=False)[0] pool = tf.transpose(pool, [0,2,1]) # Linear elif downsampling_type=='linear': pool = tf.layers.conv1d(inputs=inputs, filters=inputs.get_shape()[2], kernel_size=3, strides=2, padding='same', use_bias=False) # Maxpooling else: pool = tf.layers.max_pooling1d(inputs=inputs, pool_size=3, strides=2, padding='same', name=name) if optional_shortcut:
tensorflow.transpose
6,761
import tensorflow as tf rightmost_transposed_ndims = tf.convert_to_tensor( value=rightmost_transposed_ndims, dtype=np.int32, name='rightmost_transposed_ndims') rightmost_transposed_ndims_ = tf.get_static_value( rightmost_transposed_ndims) with tf.control_dependencies(_maybe_validate_rightmost_transposed_ndims( rightmost_transposed_ndims, validate_args)): rightmost_transposed_ndims = tf.identity(rightmost_transposed_ndims) perm = tf.range( start=rightmost_transposed_ndims - 1, limit=-1, delta=-1, name='perm') else: # perm is not None: perm = tf.convert_to_tensor(value=perm, dtype=np.int32, name='perm')
tensorflow.identity
6,762
import tensorflow as tf patch_size = self.get_random_patch_size() print('Patch size:',patch_size) window = [1,patch_size,patch_size,1] print('Window:',window) n_row,n_col,n_channel = x.shape n_patch = n_row*n_col // (patch_size**2) patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=window,strides=window,rates=[1, 1, 1, 1],padding='VALID') patches = tf.reshape(patches,[n_patch,patch_size,patch_size,n_channel]) patches = tf.random.shuffle(patches) rows = tf.split(patches,n_col//patch_size,axis=0) rows = [tf.concat(tf.unstack(x),axis=1) for x in rows] x_aug = tf.concat(rows,axis=0) x_aug = tf.convert_to_tensor(x_aug) return tf.concat([x, x_aug],axis=2) def gaussian_blur(self,x): #create random gaussian blur filter mean = 0 std = tf.random.uniform(shape=[],minval=5,maxval=10,dtype=tf.float32) # std [5-10] size = tf.random.uniform(shape=[],minval=3,maxval=7,dtype=tf.int32) # size [7-15]
tensorflow.concat
6,763
import tensorflow as tf return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1]
tensorflow.reshape
6,764
import tensorflow as tf

padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
tensorflow.pad
6,765
import tensorflow as tf return np.sinh(x) + np.cosh(y) # scalar with self.test_session(): x = tf.constant(1.0, tf.float32) y = tf.constant(2.0, tf.float32) z = tf.py_func(my_func, [x, y], [tf.float32]) self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32)) # array with self.test_session(): x = tf.constant([1.0, 2.0], tf.float64) y = tf.constant([2.0, 3.0], tf.float64) z = tf.py_func(my_func, [x, y], [tf.float64]) self.assertAllEqual( z[0].eval(), my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64)) # a bit exotic type (complex64) with self.test_session(): x = tf.constant(1+2j, tf.complex64) y = tf.constant(3+4j, tf.complex64)
tensorflow.constant
6,766
import tensorflow as tf end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) else: # during inference, compute the end logits based on beam search start_top_log_probs, start_top_index = tf.nn.top_k( start_log_probs, k=FLAGS.start_n_top) start_index = tf.one_hot(start_top_index, depth=seq_len, axis=-1, dtype=tf.float32) start_features = tf.einsum("lbh,bkl->bkh", output, start_index) end_input = tf.tile(output[:, :, None], [1, 1, FLAGS.start_n_top, 1]) start_features = tf.tile(start_features[None], [seq_len, 1, 1, 1]) end_input = tf.concat([end_input, start_features], axis=-1) end_logits = tf.layers.dense( end_input, xlnet_config.d_model, kernel_initializer=initializer, activation=tf.tanh, name="dense_0") end_logits = tf.contrib.layers.layer_norm(end_logits, begin_norm_axis=-1) end_logits = tf.layers.dense( end_logits, 1, kernel_initializer=initializer, name="dense_1") end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top])
tensorflow.concat
6,767
import tensorflow as tf

with tf.variable_scope(scope):
    outputs = tf.layers.dense(inputs, units=num_units[0], activation=tf.nn.relu, name="dense1")
    outputs = tf.layers.dropout(
        outputs,
        rate=dropout_rate,
        training=is_training,
        name="dropout1"
tensorflow.layers.dropout
6,768
import tensorflow as tf nce_b_init_args = {} super(Word2vecEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) self.inputs = inputs # Look up embeddings for inputs. # Note: a row of 'embeddings' is the vector representation of a word. # for the sake of speed, it is better to slice the embedding matrix # instead of transfering a word id to one-hot-format vector and then # multiply by the embedding matrix. # embed is the outputs of the hidden layer (embedding layer), it is a # row vector with 'embedding_size' values. with tf.variable_scope(name): embeddings = tf.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) embed = tf.nn.embedding_lookup(embeddings, self.inputs) # Construct the variables for the NCE loss (i.e. negative sampling) nce_weights = tf.get_variable( name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args) nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels # each time we evaluate the loss. self.nce_cost = tf.reduce_mean( tf.nn.nce_loss( weights=nce_weights, biases=nce_biases,
tensorflow.get_variable
6,769
import tensorflow as tf # row vector with 'embedding_size' values. with tf.variable_scope(name): embeddings = tf.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) embed = tf.nn.embedding_lookup(embeddings, self.inputs) # Construct the variables for the NCE loss (i.e. negative sampling) nce_weights = tf.get_variable( name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args) nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels
tensorflow.get_variable
6,770
import tensorflow as tf

self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len], name='x')
self.y = tf.placeholder(tf.float32, [batch_size, max_sequence_len, out_vocab_size], name='y')
tensorflow.placeholder
6,771
import tensorflow as tf

else:
    context_vector = tf.concat(attns, axis=1)
tensorflow.concat
6,772
import tensorflow as tf def main(_): tf.reset_default_graph() # Import data cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir) with tf.variable_scope('inputs'): # Create the model x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels]) # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes]) # Whether model is training train = tf.placeholder(tf.bool, []) # Build the graph for the deep net y_conv, img_summary = deepnn(x, train) # Define your loss function - softmax_cross_entropy with tf.variable_scope('x_entropy'): cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
tensorflow.placeholder
6,773
import tensorflow as tf facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tensorflow.random_normal
6,774
import tensorflow as tf encode_params, decode_params = stage.get_params(state) with self.session(server_graph) as sess: encode_params, decode_params, state = self.evaluate_tf_py_list( [encode_params, decode_params, state], sess) client_test_data = [] for x in input_values: client_graph = tf.Graph() with client_graph.as_default(): encoded_x, state_update_tensors = stage.encode(x, encode_params) with self.session(client_graph): encoded_x, state_update_tensors = self.evaluate( [encoded_x, state_update_tensors]) client_test_data.append(
tensorflow.Graph
6,775
import tensorflow as tf

import tensorflow as tf
from collections import deque

def sample(logits):
    noise = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(noise)), 1)

def cat_entropy(logits):
    a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
    ea0 = tf.exp(a0)
    z0 = tf.reduce_sum(ea0, 1, keepdims=True)
    p0 = ea0 / z0
    return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)

def cat_entropy_softmax(p0):
    return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis=1)

def ortho_init(scale=1.0):
tensorflow.exp
6,776
from tensorflow.python.ops import variable_scope attn_dists = [] p_gens = [] vocab_scores = [] sampled_words = [] self.encoder_features = encoder_features with variable_scope.variable_scope("attention_decoder"): # Get the weight vectors v and W_c (W_c is for coverage) v = variable_scope.get_variable("v", [options.attention_vec_size]) v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0) w_c = None if options.use_coverage: with variable_scope.variable_scope("coverage"): w_c = variable_scope.get_variable("w_c", [options.attention_vec_size]) w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)
tensorflow.python.ops.variable_scope.get_variable
6,777
import tensorflow as tf candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width] candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width] flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width] candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates] candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates] candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates] candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
tensorflow.reshape
6,778
import tensorflow as tf

# Note that this strips spaces from the end of the input as well.
# We assume no inputs rely on the existence of trailing whitespace.
txt = tf.strings.strip(txt)
return txt
tensorflow.strings.strip
6,779
import tensorflow as tf g_decoder_output_dim = input_feature_dim g_decoder_layer2_dim = 72 g_decoder_layer1_dim = 84 d_layer_1_dim = input_feature_dim d_layer_2_dim = 64 d_layer_3_dim = 32 d_layer_4_dim = 16 num_block_layers = 3 dense_layer_depth = 16 def lstm_network(input, scope='lstm_network'): with tf.variable_scope(scope): # tf.nn.rnn_cell lstm_cell1 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer1, forget_bias=1.0) lstm_cell2 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer2, forget_bias=1.0) lstm_cells = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True) # tf.nn.rnn_cell # lstm_cell1 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer1, forget_bias=1.0) # lstm_cell2 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer2, forget_bias=1.0) #lstm_cells = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True) # initial_state = lstm_cells.zero_state(batch_size, tf.float32)
tensorflow.variable_scope
6,780
import tensorflow as tf

    # (Internally uses a RandomShuffleQueue.)
    # We run this in two threads to avoid being a bottleneck.
    images, sparse_labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=8,
        capacity=1000 + 3 * batch_size,
        # Ensures a minimum amount of shuffling of examples.
        min_after_dequeue=1000)

    return images, sparse_labels


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv_scale(x, W):
    return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID')
tensorflow.truncated_normal
6,781
import tensorflow as tf predictions=masked_lm_predictions, weights=masked_lm_weights, ) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights ) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]] ) next_sentence_predictions = tf.argmax( next_sentence_log_probs, axis=-1, output_type=tf.int32 ) next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = tf.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions ) next_sentence_mean_loss = tf.metrics.mean( values=next_sentence_example_loss ) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, }
tensorflow.metrics.accuracy
6,782
import tensorflow as tf use_compression, use_fp16, clip, cos_decay, use_lamb, previous_train_steps, post_train_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"]
tensorflow.logging.info
6,783
import tensorflow as tf

        tf.constant(config.local_norm_lvalues, dtype=tf.int64)), 0)
    rnorm_table = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(
            tf.constant(config.local_norm_key, dtype=tf.int64),
            tf.constant(config.local_norm_rvalues, dtype=tf.int64)), 1)
tensorflow.constant
6,784
import tensorflow as tf sess_config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options) # Set up a RunConfig to only save checkpoints once per training cycle. run_config = tf.estimator.RunConfig().replace( save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace( save_checkpoints_steps=None).replace(
tensorflow.estimator.RunConfig
6,785
import tensorflow as tf soft_placement = True util.auto_parallel(metagraph, m) with tf.Graph().as_default(): tf.train.import_meta_graph(metagraph) for model in models.values(): model.import_ops() sv = tf.train.Supervisor(logdir=FLAGS.save_path)
tensorflow.train.import_meta_graph
6,786
import tensorflow as tf clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs) for source, target in clean_pairs: if source and target: lang1_resfile.write(source) lang1_resfile.write("\n") lang2_resfile.write(target) lang2_resfile.write("\n") else: lang1_filename, lang2_filename = dataset[1] lang1_filepath = os.path.join(tmp_dir, lang1_filename) lang2_filepath = os.path.join(tmp_dir, lang2_filename) is_sgm = ( lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm")) if not (tf.gfile.Exists(lang1_filepath) and tf.gfile.Exists(lang2_filepath)): # For .tar.gz and .tgz files, we read compressed. mode = "r:gz" if compressed_filepath.endswith("gz") else "r" with tarfile.open(compressed_filepath, mode) as corpus_tar: corpus_tar.extractall(tmp_dir) if lang1_filepath.endswith(".gz"): new_filepath = lang1_filepath.strip(".gz") generator_utils.gunzip_file(lang1_filepath, new_filepath) lang1_filepath = new_filepath if lang2_filepath.endswith(".gz"): new_filepath = lang2_filepath.strip(".gz") generator_utils.gunzip_file(lang2_filepath, new_filepath) lang2_filepath = new_filepath
tensorflow.gfile.Exists
6,787
import tensorflow as tf def testScaleGradients(self): p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() task = p.cls(p) task.CreateVariable( 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0))) var_a = task.theta.a var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a))) has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads) FLAGS.enable_check_numerics = False with self.session(): tf.global_variables_initializer().run() self.assertFalse(has_nan_or_inf.eval()) self.assertEqual(1.0, grad_scale.eval()) # The final gradient must be finite.
tensorflow.ones_like
6,788
import tensorflow as tf for size in config.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1)[..., 0] mean = tf.check_numerics(mean, "mean") logstd = tf.check_numerics(logstd, "logstd") value = tf.check_numerics(value, "value") policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd)) return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) def clip_logits(logits, config): logits_clip = getattr(config, "logits_clip", 0.) if logits_clip > 0: min_logit = tf.reduce_min(logits) return tf.minimum(logits - min_logit, logits_clip) else:
tensorflow.clip_by_value
6,789
import tensorflow as tf self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train) self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2) self.conv3 = tf.layers.conv2d(self.pool2, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
tensorflow.layers.conv2d
6,790
import tensorflow as tf body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN)) cls_pred, location_pred = xdet_body_v3.xdet_head(body_cls_output, body_regress_output, params['num_classes'], num_anchors_list[0], (mode == tf.estimator.ModeKeys.TRAIN), data_format=params['data_format']) if params['data_format'] == 'channels_first': cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1]) location_pred = tf.transpose(location_pred, [0, 2, 3, 1]) bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4])) cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']]) location_pred = tf.reshape(location_pred, [-1, 4])
tensorflow.transpose
6,791
import tensorflow as tf

def build_ae_model(self):
    self.input = tf.placeholder(tf.uint8, self.batch_shape, name='input')
    self.target = tf.placeholder(tf.uint8, self.batch_shape, name='target')
    self.step = tf.Variable(0, trainable=False, name='global_step')

    root = self._image_to_tensor(self.input)
    target = self._image_to_tensor(self.target)
tensorflow.Variable
6,792
import tensorflow as tf return None def vocabulary_size_by_name(self, vocab_filename: str) -> int: """Like vocabulary_file_by_name, but returns the size of vocabulary.""" vocab_size_from_annotations = self._vocabulary_size_from_annotations( vocab_filename) if vocab_size_from_annotations is not None: return vocab_size_from_annotations vocab_path = self.vocabulary_file_by_name(vocab_filename) if not vocab_path: raise ValueError( 'Could not compute vocabulary size for {}, does not exist'.format( vocab_filename)) elif vocab_path.endswith('tfrecord.gz'): dataset = tf.data.TFRecordDataset(vocab_path, compression_type='GZIP') def reduce_fn(accum, elem): return tf.size(elem, out_type=tf.int64, name='vocabulary_size') + accum return _get_tensor_value( dataset.batch(tf.int32.max).reduce( tf.constant(0, tf.int64), reduce_fn)) else: with tf.io.gfile.GFile(vocab_path, 'rb') as f: return sum(1 for _ in f) def vocabulary_by_name(self, vocab_filename: str) -> List[bytes]: """Like vocabulary_file_by_name but returns a list.""" vocab_path = self.vocabulary_file_by_name(vocab_filename)
tensorflow.data.TFRecordDataset
6,793
import tensorflow as tf

#
# c = tf.constant('haHa')
# print(sess.run(c))
#
# sess.close()

identity_matrix = tf.diag([1.0, 3.0, 1.0])
A = tf.truncated_normal([2, 3])
B = tf.fill([2, 3], 5.0)
C = tf.random_uniform([3, 2], maxval=100)
D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))

sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# print(sess.run(tf.random_normal(mean=10, shape=[10])))
# A = tf.Variable(tf.random_normal(shape=[1, 1]))
# sess.run(tf.global_variables_initializer())
# print(sess.run(A))

print('\nI=')
print(sess.run(identity_matrix))
print('\nA=')
tensorflow.Session
6,794
import tensorflow as tf speaker_ids = speaker_ids[word_offset: word_offset + num_words] gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words) gold_starts = gold_starts[gold_spans] - word_offset gold_ends = gold_ends[gold_spans] - word_offset cluster_ids = cluster_ids[gold_spans] return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels): same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates] same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates] same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates] candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates] candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates] return candidate_labels def get_dropout(self, dropout_rate, is_training): return 1 - (tf.to_float(is_training) * dropout_rate) def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c): k = util.shape(top_span_emb, 0) top_span_range = tf.range(k) # [k] antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]
tensorflow.logical_and
6,795
import tensorflow as tf

assert len(shape) == 4
self.gamma = safe_get("gamma", [shape[-1]],
                      initializer=tf.random_normal_initializer(1., 0.02))
gamma = tf.reshape(self.gamma, [1, 1, 1, -1])
tensorflow.random_normal_initializer
6,796
import tensorflow as tf

end = timer()
timings.append(end - start)

tf.logging.set_verbosity(tf.logging.INFO)
print("Timing total: %f s, average: %f s, minimum: %f s" % (
    np.sum(timings), np.mean(timings), np.min(timings)))
tensorflow.logging.set_verbosity
6,797
import tensorflow as tf

def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02),
           b_init=tf.constant_initializer(0), pad='VALID', train=False):
tensorflow.random_normal_initializer
6,798
import tensorflow as tf

logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256 * amp_factor])

forward_cell = tf.nn.rnn_cell.LSTMCell(128)
backward_cell = tf.nn.rnn_cell.LSTMCell(128)
encoder_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
    forward_cell, backward_cell,
    logits,
tensorflow.nn.bidirectional_dynamic_rnn
6,799