Columns: seed (string, lengths 25 to 2.89k), seed_api (string, lengths 14 to 102), index (int64, 0 to 14.8k)
import tensorflow as tf enqueue_after_list: control dependency from ops. Returns: an op that should be used as control dependency before starting next step. """ self.sync_queue_counter += 1 num_workers = self.cluster.num_tasks('worker') with tf.device(self.sync_queue_devices[ self.sync_queue_counter % len(self.sync_queue_devices)]): sync_queues = [ tf.FIFOQueue(num_workers, [tf.bool], shapes=[[]], shared_name='%s%s' % (name_prefix, i)) for i in range(num_workers)] queue_ops = [] # For each other worker, add an entry in a queue, signaling that it can # finish this step. token = tf.constant(False) with tf.control_dependencies(enqueue_after_list): for i, q in enumerate(sync_queues): if i == self.task_index:
tensorflow.FIFOQueue
700
import tensorflow as tf mdl = p.Instantiate() input_batch = mdl.input_generator.GetPreprocessedInputBatch() dec_out_dict = mdl.Decode(input_batch) tf.global_variables_initializer().run() dec_out = sess.run(dec_out_dict) print('dec_out', dec_out)
tensorflow.global_variables_initializer
701
import tensorflow as tf span_width = 1 + span_ends - span_starts # [k] if self.config["use_features"]: span_width_index = span_width - 1 # [k] span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb] span_width_emb = tf.nn.dropout(span_width_emb, self.dropout) span_emb_list.append(span_width_emb) if self.config["model_heads"]: span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width] span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width] span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb] with tf.variable_scope("head_scores"): self.head_scores = util.projection(context_outputs, 1) # [num_words, 1] span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1] span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1] span_head_scores += tf.log(span_mask) # [k, max_span_width, 1] span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1] span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]
tensorflow.expand_dims
702
from tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections bias_regularizer=biases_regularizer, activity_regularizer=None, use_spectral_norm=use_spectral_norm, is_training=is_training, trainable=trainable, name=sc.name, dtype=inputs.dtype.base_dtype, _scope=sc, _reuse=reuse) outputs = layer.apply(inputs) # Add variables to collections. _add_variable_to_collections(layer.kernel, variables_collections, 'weights') if layer.use_bias: _add_variable_to_collections(layer.bias, variables_collections, 'biases') if normalizer_fn is not None: normalizer_params = normalizer_params or {} outputs = normalizer_fn(outputs, **normalizer_params) if activation_fn is not None: outputs = activation_fn(outputs) return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
tensorflow.contrib.layers.python.layers.layers._add_variable_to_collections
703
import tensorflow as tf (distance_kernel + '_a_initializer'), None), b_initializer=distance_kernel_kwargs.get( (distance_kernel + '_b_initializer'), None), name=distance_kernel_kwargs.get((distance_kernel + '_name'), 'MatchingSigmoid')) return compute_l2_sigmoid_matching_distances if distance_kernel == common.DISTANCE_KERNEL_EXPECTED_LIKELIHOOD: def compute_gaussian_likelihoods(lhs, rhs): """Computes sample likelihoods.""" num_lhs_samples = lhs.shape.as_list()[-2] - 2 num_rhs_samples = rhs.shape.as_list()[-2] - 2 lhs_means, lhs_stddevs, lhs_samples = tf.split( lhs, [1, 1, num_lhs_samples], axis=-2) rhs_means, rhs_stddevs, rhs_samples = tf.split( rhs, [1, 1, num_rhs_samples], axis=-2) rhs_likelihoods = distance_utils.compute_gaussian_likelihoods( lhs_means, lhs_stddevs, rhs_samples, min_stddev=distance_kernel_kwargs.get( distance_kernel + '_min_stddev', None), max_squared_mahalanobis_distance=distance_kernel_kwargs.get( distance_kernel + '_max_squared_mahalanobis_distance', None), smoothing=distance_kernel_kwargs.get(distance_kernel + '_smoothing', None))
tensorflow.split
704
from tensorflow.python.framework import tensor_util @ops.RegisterShape("UnsortedSegmentSum") def _UnsortedSegmentSumShape(op): """Shape function for UnsortedSegmentSum.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() mid = segment_ids_shape.ndims if mid is None: return [tensor_shape.unknown_shape()] else: num_segments = tensor_util.ConstantValue(op.inputs[2]) return [tensor_shape.TensorShape([num_segments]).concatenate( data_shape[mid:])] @ops.RegisterShape("LinSpace") def _LinspaceShape(op): num = tensor_util.ConstantValue(op.inputs[2]) return [tensor_shape.vector(num)]
tensorflow.python.framework.tensor_util.ConstantValue
705
from tensorflow.python.ops import array_ops with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope: fn = _sparse_false_negative_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn)) var = contrib_variables.local_variable( array_ops.zeros([], dtype=dtypes.float64), name=scope) return var, state_ops.assign_add(var, batch_total_fn, name='update') def streaming_mean_absolute_error(predictions, labels, weights=None, metrics_collections=None, updates_collections=None,
tensorflow.python.ops.array_ops.zeros
706
import tensorflow as tf target_type = tf.float32 else: input_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs') target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')
tensorflow.placeholder
707
import tensorflow as tf img = features['image'] flat = tf.cast(tf.reshape(img, [-1]), tf.int64) tgt = tf.expand_dims(targets, axis=0) flat_with_target = tf.concat([flat, tgt], axis=0)
tensorflow.expand_dims
708
import tensorflow as tf rl_advantage = rl_reward - rl_baseline rl_empirical_loss = -tf.stop_gradient(rl_advantage) * log_prob rl_entropy_loss = -rl_entropy_regularization * rl_entropy enable_rl_optimizer = tf.cast( tf.greater_equal(target_global_step, FLAGS.first_pretrain_steps), tf.float32) rl_learning_rate = FLAGS.rl_learning_rate * enable_rl_optimizer rl_learning_rate = tf.train.piecewise_constant( target_global_step, [800,], [rl_learning_rate, rl_learning_rate * 0.1]) optimizer = tf.train.AdamOptimizer(rl_learning_rate) target_train_op = optimizer.minimize( rl_empirical_loss, target_global_step, var_list=tf.trainable_variables(rl_scope.name))
tensorflow.train.piecewise_constant
709
import tensorflow as tf log_x = tf.math.log(x) ldj = log_x log_y = log_x*scales + mus ldj += log_sigmas z = tf.math.exp(log_y) return z, ldj else: scales = tf.math.exp(-log_sigmas) log_x = tf.math.log(x) ldj = -log_x log_y = (log_x - mus)*scales ldj -= log_sigmas z = tf.math.exp(log_y) return z, ldj
tensorflow.math.exp
710
import tensorflow as tf # loss2 = tf.reduce_mean(tf.square((image - logits)*mask2)) # loss = loss1 + loss2 + loss0 # loss = tf.reduce_mean(tf.squared_difference(logits ,annotation )) loss_summary = tf.summary.scalar("entropy", loss) grads = train_z(loss,z) trainable_var = tf.trainable_variables() if FLAGS.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) train_op = train(loss, trainable_var) print("Setting up summary op...")
tensorflow.trainable_variables
711
import tensorflow as tf regression = y_pred regression_target = y_true[:, :, :-1] anchor_state = y_true[:, :, -1] # find the positive samples indices = tf.where(keras.backend.equal(anchor_state, 1)) regression = tf.gather_nd(regression, indices) regression_target = tf.gather_nd(regression_target, indices) # compute the smooth L1 loss # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma # |x| - 0.5 / sigma / sigma otherwise regression_diff = regression - regression_target regression_diff = keras.backend.abs(regression_diff)
tensorflow.gather_nd
712
import tensorflow as tf testnum: test_num, validnum: valid_num } with tf.Session(config=config) as sess: sess.run(init) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) tf.train.Saver().restore(sess,path) #test test_acc_avg = 0.0 test_true_total=np.array([]) test_pre_total=np.array([])
tensorflow.train.start_queue_runners
713
import tensorflow as tf conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length], stddev=0.1, dtype=tf.float32)) conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME') pool = tf.nn.avg_pool(conv, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) else: pool = tf.nn.avg_pool(input_data, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) pool = tf.squeeze(pool, axis=[1, 2]) return pool
tensorflow.nn.avg_pool
714
import tensorflow as tf output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
tensorflow.nn.log_softmax
715
import tensorflow as tf from tvm.contrib import graph_runtime from tvm.relay.testing.config import ctx_list import keras import tensorflow as tf from tensorflow import keras as tf_keras # prevent Keras from using up all gpu memory if tf.executing_eagerly(): gpus = tf.config.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) else: from keras.backend.tensorflow_backend import set_session config = tf.ConfigProto()
tensorflow.executing_eagerly
716
import tensorflow as tf self.q_mask = tf.cast(self.q, tf.bool) self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1) self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1) if opt: # we have to hardcode the max batch size here! use the batch size from the generator as this will be used for PG N, CL = config.batch_size if not self.demo else config.batch_size, config.char_limit self.c_maxlen = tf.reduce_max(self.c_len) self.q_maxlen = tf.reduce_max(self.q_len) self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen]) self.q = tf.slice(self.q, [0, 0], [N, self.q_maxlen]) self.c_mask = tf.slice(self.c_mask, [0, 0], [N, self.c_maxlen]) self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen]) self.ch = tf.slice(self.ch, [0, 0, 0], [N, self.c_maxlen, CL]) self.qh = tf.slice(self.qh, [0, 0, 0], [N, self.q_maxlen, CL]) self.y1 = tf.argmax(tf.slice(self.y1, [0, 0], [N, self.c_maxlen]),axis=-1) self.y2 = tf.argmax(tf.slice(self.y2, [0, 0], [N, self.c_maxlen]),axis=-1) else: self.c_maxlen, self.q_maxlen = config.para_limit, config.ques_limit self.ch_len = tf.reshape(tf.reduce_sum( tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1]) self.qh_len = tf.reshape(tf.reduce_sum( tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1]) self.forward() total_params()
tensorflow.slice
717
import tensorflow as tf 'member/age': tf.io.FixedLenFeature([], tf.int64), 'member/height': tf.io.VarLenFeature(tf.float32), 'member/prefer_prods': tf.io.VarLenFeature(tf.int64)} features = tf.io.parse_single_example(example_proto, features) images = tf.image.decode_png(features['member/encoded'], channels=3) # Note: the PNG originally has 4 channels, but the processing below fails with 4 channels, so the previous line reduces it to 3. images = tf.image.random_brightness(images, 0.1) images = tf.image.random_saturation(images, 0.7, 1.3) images = tf.image.random_contrast(images, 0.6, 1.5) images = tf.image.random_flip_left_right(images) return features, images
tensorflow.image.random_saturation
718
import tensorflow as tf # In[3]: tf.reset_default_graph() sess = tf.InteractiveSession() size_layers = 128 learning_rate = 1e-3 num_layers = 2 model = Model(num_layers, size_layers, learning_rate) sess.run(tf.global_variables_initializer()) # In[4]: paths, lengths, texts, raw_texts = [], [], [], [] text_files = [f for f in os.listdir("mel") if f.endswith(".npy")] for fpath in text_files: with open("%s/%s" % (path, fpath.replace("npy", "txt"))) as fopen: text = fopen.read() paths.append(fpath.replace(".npy", ""))
tensorflow.global_variables_initializer
719
import tensorflow as tf # This creates a cycle of length 136. return tf.mod((num * 13), 137) num = tf.reshape(tf.mod(seed, 136) + 1, (1,)) result = num for _ in range(num_elements - 1):
tensorflow.mod
720
import tensorflow as tf def _add_dynamic_cell(self, cell_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train=False): b = CELL_NUM_BLOCKS # Downsample inputs to have same dimensions as blocks with tf.variable_scope('layer_-1_calibrate'): layers[-1] = (self._calibrate(*layers[-1], w, h, block_ch, is_train=is_train), w, h, block_ch) with tf.variable_scope('layer_-2_calibrate'): layers[-2] = (self._calibrate(*layers[-2], w, h, block_ch, is_train=is_train), w, h, block_ch) cell_inputs = [layers[-2][0] if len(layers) > 1 else layers[-1][0], layers[-1][0]] blocks = [] for bi in range(b): with tf.variable_scope('block_{}'.format(bi)): idx1 = cell_arch[bi][0]
tensorflow.variable_scope
721
import tensorflow as tf self._cell = tf.contrib.cudnn_rnn.CudnnLSTM( num_layers=config.num_layers, num_units=config.hidden_size, input_size=config.hidden_size, dropout=1 - config.keep_prob if is_training else 0) params_size_t = self._cell.params_size() self._rnn_params = tf.get_variable( "lstm_params", initializer=tf.random_uniform( [params_size_t], -config.init_scale, config.init_scale), validate_shape=False) c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = tf.transpose(outputs, [1, 0, 2]) outputs = tf.reshape(outputs, [-1, config.hidden_size]) return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): #if config.rnn_mode == BASIC: # return tf.contrib.rnn.BasicLSTMCell( # config.hidden_size, forget_bias=0.0, state_is_tuple=True, # reuse=not is_training)
tensorflow.zeros
722
from tensorflow.python.ops import math_ops self._num_clusters, self._random_seed, self._covariance_type, self._params) incr_step = state_ops.assign_add(training_util.get_global_step(), 1) loss = math_ops.reduce_sum(losses) training_op = with_dependencies([training_op, incr_step], loss) training_hooks = [_InitializeClustersHook( init_op, is_initialized, config.is_chief)]
tensorflow.python.ops.math_ops.reduce_sum
723
from tensorflow.python.ops import array_ops next_size = _next_array_size(new_size) next_shape = array_ops.pack([next_size] + fixed_shape) new_value = array_ops.zeros(next_shape, dtype=values.dtype) old_value = array.value() assign_op = state_ops.assign(array, new_value, validate_shape=False) with ops.control_dependencies([assign_op]): copy_op = array[:size].assign(old_value[:size]) # return value needs to be the same dtype as no_op() for cond with ops.control_dependencies([copy_op]): return control_flow_ops.no_op() new_size = size + batch_size array_size = array_ops.shape_internal(array, optimize=False)[0] maybe_reallocate_op = control_flow_ops.cond( new_size > array_size, reallocate, control_flow_ops.no_op) with ops.control_dependencies([maybe_reallocate_op]): append_values_op = array[size:new_size].assign(batch_values) with ops.control_dependencies([append_values_op]): update_op = size.assign(new_size) if metrics_collections: ops.add_to_collections(metrics_collections, value) if updates_collections:
tensorflow.python.ops.array_ops.shape_internal
724
from tensorflow.python.ops import state_ops if self._dnn_model else []) with ops.control_dependencies(linear_train_step + dnn_train_step): with ops.get_default_graph().colocate_with(global_step): return state_ops.assign_add(global_step, 1).op, loss def _get_eval_ops(self, features, targets, metrics=None): raise NotImplementedError
tensorflow.python.ops.state_ops.assign_add
725
import tensorflow as tf # Net loss check_shape([loss_policy, loss_q, entropy], [[]] * 3) loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy tf.summary.scalar('entropy_loss', entropy) tf.summary.scalar('policy_gradient_loss', loss_policy) tf.summary.scalar('value_function_loss', loss_q) tf.summary.scalar('loss', loss) norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None if self.trust_region: # [n_envs * n_steps, n_act] grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs, phi_i)
tensorflow.summary.scalar
726
import tensorflow as tf #accuracy1 = tf.reduce_sum( # tf.nn.in_top_k(tf.cast(tf.Variable(predictions2), tf.float32), # tf.cast((tf.constant(np_labels), 1), tf.float32))) accuracy1 = tf.reduce_sum( input_tensor=tf.cast(tf.nn.in_top_k(predictions=tf.constant(predictions1), targets=tf.constant(np_labels), k=1), tf.float32)) accuracy5 = tf.reduce_sum( input_tensor=tf.cast(tf.nn.in_top_k(predictions=tf.constant(predictions1), targets=tf.constant(np_labels), k=5), tf.float32)) np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) ##print(labels) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \
tensorflow.constant
727
import tensorflow as tf adversarial_sample = attacks.tgsm.tgsm(models, images, hps, RCE_train, y=None, eps=eps/10, epochs=10, clip_min=-0.5, clip_max=0.5) elif method=='jsma': print('Attacking method is jsma') if target_labels==None: print('Target label is the argmin label') model_target_y = models(hps, images, FLAGS.RCE_train, logits=False) target_y64 = tf.argmin(model_target_y,axis=1) else: target_y64=target_labels target_y = tf.cast(target_y64, tf.int32) adversarial_sample = attacks.jsma.jsma(models, images, hps, RCE_train, target_y,epochs=epoch_jsma, eps=eps, clip_min=-0.5, clip_max=0.5, pair=False, min_proba=0.0) elif method=='smda':
tensorflow.argmin
728
from tensorflow.python.ops import array_ops array_ops.reshape(centered_bias, [-1])) return centered_bias def _centered_bias_step(self, targets, features): centered_bias = ops.get_collection(self._centered_bias_weight_collection) batch_size = array_ops.shape(targets)[0] logits = array_ops.reshape( array_ops.tile(centered_bias[0], [batch_size]), [batch_size, self._target_column.num_label_columns]) loss = self._target_column.loss(logits, targets, features) # Learn central bias by an optimizer. 0.1 is a conservative lr for a single # variable. return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)
tensorflow.python.ops.array_ops.tile
729
import tensorflow as tf self.train_summary = tf.summary.merge_all(key='train') self.eval_summary = tf.summary.merge_all(key='eval') self.saver = tf.train.Saver(tf.global_variables()) def separate_gradient_update(self): denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model") ranking_model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "ranking_model") self.weighs_propen=denoise_params if self.hparams.l2_loss > 0: for p in denoise_params: # self.weighs_propen=p # p=tf.Print(p,[p],message="show the weights") self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p)) for p in ranking_model_params: self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p) self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss denoise_gradients = tf.gradients(self.exam_loss, denoise_params) ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params) if self.hparams.max_gradient_norm > 0: denoise_gradients, denoise_norm = tf.clip_by_global_norm(denoise_gradients, self.hparams.max_gradient_norm) ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(ranking_model_gradients, self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight) self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients)
tensorflow.abs
730
import tensorflow as tf for bi in range(b): idx1 = cell_arch[bi][0] idx2 = cell_arch[bi][2] block_use = tf.one_hot(idx1, ni, dtype=tf.int32) + tf.one_hot(idx2, ni, dtype=tf.int32) block_uses.append(block_use) block_uses = tf.add_n(block_uses) unused_indices = tf.reshape(tf.cast(tf.where(tf.equal(block_uses, 0)), tf.int32), [-1]) num_out_blocks = tf.size(unused_indices) # Select only unused blocks with tf.variable_scope('select'): stacked_blocks = tf.stack(cell_inputs + blocks) out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0) out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4)) # Combine to constant channels with tf.variable_scope('combine'): W = self._make_var('W', (ni, block_ch * block_ch)) W = tf.gather(W, unused_indices, axis=0) W = tf.reshape(W, (1, 1, num_out_blocks * block_ch, block_ch)) X = tf.reshape(out_blocks, (-1, w, h, num_out_blocks * block_ch)) X = tf.nn.relu(X) X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME') X = self._add_batch_norm(X, block_ch, is_train=is_train)
tensorflow.gather
731
from tensorflow.python.framework import ops Returns: A `Tensor` the same size and type as `x` with absolute values. """ with ops.op_scope([x], name, "Abs") as name: x = ops.convert_to_tensor(x, name="x") if x.dtype == types.complex64: return gen_math_ops.complex_abs(x, name=name) return gen_math_ops._abs(x, name=name)
tensorflow.python.framework.ops.convert_to_tensor
732
import tensorflow as tf image = tf.image.draw_bounding_boxes(image, boxes) # draw the gt_truth boxes on the image return tf.summary.image('ground_truth', image) def _add_act_summary(self, tensor): tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor) tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction', tf.nn.zero_fraction(tensor)) def _add_score_summary(self, key, tensor): tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor) def _add_train_summary(self, var): tf.summary.histogram('TRAIN/' + var.op.name, var) # Custom Layers # def _reshape_layer(self, bottom, num_dim, name): input_shape = tf.shape(bottom) with tf.variable_scope(name): # change the channel to the caffe format # 18 channels [,18,none,none], holding the scores: the first 9 are foreground scores, the last 9 are background scores # second pass: [1,2,none,none] to_caffe = tf.transpose(bottom, [0, 3, 1, 2]) # then force it to have channel 2 #[1,2,none.none], separating the foreground and background scores of the 9 anchors # second pass: [1,18,none,none]
tensorflow.summary.histogram
733
import tensorflow as tf for t in self.train_data['failures']: tfail = self.train_data['failures'][t] trisk = self.train_data['atrisk'][t] d = len(tfail) dr = len(trisk) logL += -cumsum_y_pred[tfail[-1]] + (0 if tfail[0] == 0 else cumsum_y_pred[tfail[0]-1]) if self.train_data['ties'] == 'breslow': s = cumsum_hazard_ratio[trisk[-1]] logL += tf.log(s) * d elif self.train_data['ties'] == 'efron': s = cumsum_hazard_ratio[trisk[-1]] r = cumsum_hazard_ratio[tfail[-1]] - (0 if tfail[0] == 0 else cumsum_hazard_ratio[tfail[0]-1]) for j in range(d): logL += tf.log(s - j * r / d) else: raise NotImplementedError('tie breaking method not recognized') # negative average log-likelihood observations = tf.reduce_sum(y_true) return logL / observations def _Metrics_CI(self, label_true, y_pred): """ Compute the concordance-index value. Parameters: label_true: dict, like {'e': event, 't': time}, Observation and Time in survival analyze. y_pred: np.array, predictive proportional risk of network.
tensorflow.log
734
import tensorflow as tf class CheckpointStateTest(tf.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAbsPath(self): save_dir = self._TestDir("abs_paths") abs_path = os.path.join(save_dir, "model-0") ckpt = tf.train.generate_checkpoint_state_proto(save_dir, abs_path) self.assertEqual(ckpt.model_checkpoint_path, abs_path) self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path)) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path) def testRelPath(self): train_dir = "train" model = os.path.join(train_dir, "model-0") # model_checkpoint_path should have no "train" directory part. new_rel_path = "model-0" ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model) self.assertEqual(ckpt.model_checkpoint_path, new_rel_path) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
tensorflow.train.generate_checkpoint_state_proto
735
import tensorflow as tf direct_mask_un = tf.greater(unhead_idxs, undep_idxs) # [bs, sluh, sld] else: direct_mask_un = tf.less(unhead_idxs, undep_idxs) # [bs, sluh, sld] # [bs, sluh, sld] rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_unhead_mask, 2)) pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un) # [bs, sluh, sld] # data for pooling pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1]) # bs,sluh,sld,hn
tensorflow.expand_dims
736
import tensorflow as tf log_device_placement=True)) self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1])) self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))
tensorflow.placeholder
737
import tensorflow as tf data = np.loadtxt(self.params["words"], dtype=np.unicode, encoding=None) mapping_strings = tf.Variable(data.reshape((-1,))) return mapping_strings def tag2id(self, labels, name=None): mapping_strings = self.load_tag_data() vocab_tags = tf.contrib.lookup.index_table_from_tensor( mapping_strings, name=name ) tags = vocab_tags.lookup(labels) return tags
tensorflow.contrib.lookup.index_table_from_tensor
738
from tensorflow.python.ops import script_ops with g.as_default(): c = tf.constant([1.], tf.float32) _ = tf.py_func(lambda x: x + 1, [c], [tf.float32]) self.assertTrue(script_ops._py_funcs.size() < 100) def testError(self):
tensorflow.python.ops.script_ops._py_funcs.size
739
import tensorflow as tf create_model(config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings, task_name) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
tensorflow.train.Scaffold
740
import tensorflow as tf with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): with ops.colocate_with(var): new_var = tf.assign_sub( var, tf.check_numerics(decay * (var - cur_var), "NaN in moving variance.")) with tf.name_scope(name, "IncrementTime", [step]): with ops.colocate_with(step): new_step = tf.assign_add(step, 1.) res += 0. * new_mean * new_var * new_step return res
tensorflow.name_scope
741
import tensorflow as tf return out def affine(inpOp, nIn, nOut, name, weight_decay=0.0): with tf.variable_scope(name): l2_regularizer = lambda t: l2_loss(t, weight=weight_decay) weights = tf.get_variable("weights", [nIn, nOut],
tensorflow.variable_scope
742
import tensorflow as tf if not do_rnn: assert rnn_nunroll == 1 if cnn_rnn_zack: assert audio_context_len == 1 assert zack_hack > 0 and zack_hack % 2 == 0 export_feat_tensors = {} # Input tensors feats_audio_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll + zack_hack, audio_context_len, audio_nbands, audio_nchannels], name='feats_audio') feats_other_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll, nfeats], name='feats_other') print('feats_audio: {}'.format(feats_audio_nunroll.get_shape())) print('feats_other: {}'.format(feats_other_nunroll.get_shape())) if mode != 'gen': targets_nunroll = tf.placeholder(dtype, shape=[batch_size, rnn_nunroll]) # TODO: tf.ones acts as an overridable placeholder but this is still awkward target_weights_nunroll = tf.ones([batch_size, rnn_nunroll], dtype) # Reshape input tensors to remove nunroll dim; will briefly restore later during RNN if necessary if cnn_rnn_zack: feats_audio = tf.reshape(feats_audio_nunroll, shape=[batch_size, rnn_nunroll + zack_hack, audio_nbands, audio_nchannels])
tensorflow.placeholder
743
import tensorflow as tf FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.') tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.') tf.app.flags.DEFINE_string('train_data_path', '', 'Filepattern for training data.') tf.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data') tf.app.flags.DEFINE_string('train_dir', '', 'Directory to keep training outputs.') tf.app.flags.DEFINE_string('eval_dir', '', 'Directory to keep eval outputs.') tf.app.flags.DEFINE_integer('eval_batch_count', 10, 'Number of batches to eval.') tf.app.flags.DEFINE_bool('eval_once', False, 'Whether evaluate the model only once.') tf.app.flags.DEFINE_string('log_root', '', 'Directory to keep the checkpoints. Should be a ' 'parent directory of FLAGS.train_dir/eval_dir.') tf.app.flags.DEFINE_integer('num_gpus', 0,
tensorflow.app.flags.DEFINE_string
744
import tensorflow as tf with self.test_session(): x = tf.constant([b"hello", b"hi"], tf.string) y, = tf.py_func(read_fixed_length_numpy_strings, [], [tf.string]) z, = tf.py_func(read_and_return_strings, [x, y], [tf.string]) self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])
tensorflow.py_func
745
import tensorflow as tf # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( cifar10.MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) # Group all updates into a single train op. train_op = tf.group(apply_gradient_op, variables_averages_op) # Create a saver. saver = tf.train.Saver(tf.global_variables()) # Build the summary operation from the last tower summaries. summary_op = tf.summary.merge(summaries) # Build an initialization operation to run below. init = tf.global_variables_initializer() # Start running operations on the Graph. allow_soft_placement must be set to
tensorflow.global_variables
746
import tensorflow as tf def test_batcher_closed(self): with tf.Graph().as_default(): @dynamic_batching.batch_fn def f(a): return a f(tf.constant([1])) # Intentionally using tf.Session() instead of self.test_session() to have # control over closing the session. test_session() is a cached session. with tf.Session(): coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) time.sleep(_SLEEP_TIME) coord.request_stop() # Calls close operation. coord.join() # Session closed. def test_minimum_batch_size(self): with self.test_session() as session: @dynamic_batching.batch_fn_with_options( minimum_batch_size=2, timeout_ms=1000)
tensorflow.train.Coordinator
747
import tensorflow as tf JPEG_OPT = {'fancy_upscaling': True, 'dct_method': 'INTEGER_ACCURATE'} def uint8_resize_bicubic(image, shape): ret = tf.image.resize_bicubic([image], shape) return tf.cast(tf.clip_by_value(ret, 0, 255), tf.uint8)[0] def resize_shortest_edge(image, image_shape, size): shape = tf.cast(image_shape, tf.float32) w_greater = tf.greater(image_shape[0], image_shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return uint8_resize_bicubic(image, shape) def center_crop(image, size):
tensorflow.greater
748
from tensorflow.python.framework import op_def_registry as _op_def_registry result = _op_def_lib.apply_op("TestStringOutput", input=input, name=name) return _TestStringOutputOutput._make(result) _ops.RegisterShape("TestStringOutput")(None) def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "GraphDefVersion"
tensorflow.python.framework.op_def_registry.register_op_list
749
import tensorflow as tf grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0)
tensorflow.reduce_mean
750
import tensorflow as tf + self.b_out else: new_output = tf.matmul(tf.nn.relu(new_state), self.W_out * self.output_Connectivity, transpose_b=True, name="3") + self.b_out return new_output def compute_predictions(self): rnn_inputs = tf.unstack(self.x, axis=1) state = self.init_state rnn_outputs = [] rnn_states = [] for rnn_input in rnn_inputs: state = self.rnn_step(rnn_input, state) output = self.rnn_output(state) rnn_outputs.append(output) rnn_states.append(state) return tf.transpose(rnn_outputs, [1, 0, 2]), rnn_states
tensorflow.unstack
751
import tensorflow as tf def build_batch_stats(): """Builds the batch statistics calculation ops.""" # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. # Copy for better stability. shift = tf.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics( input_batch, reduction_indices, keep_dims=True, shift=shift,
tensorflow.add
752
from tensorflow.python.lib.io import file_io validation_steps=ceil(val_dataset_size/batch_size), initial_epoch=initial_epoch) model_name = "vgg19BNReLUmodel.h5" model.save(model_name) with file_io.FileIO(model_name, mode='rb') as input_f: with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f: output_f.write(input_f.read())
tensorflow.python.lib.io.file_io.FileIO
753
import tensorflow as tf for i in range(num_gpu): with tf.device('/gpu:%d' % i):
tensorflow.device
754
from tensorflow.python.framework import ops _run_metrics(predictions, labels, proba_metrics, self.get_weight_tensor(features))) return result class _BinarySvmTargetColumn(_MultiClassTargetColumn): """_TargetColumn for binary classification using SVMs.""" def __init__(self, label_name, weight_column_name): def loss_fn(logits, target): check_shape_op = control_flow_ops.Assert( math_ops.less_equal(array_ops.rank(target), 2), ["target's shape should be either [batch_size, 1] or [batch_size]"]) with ops.control_dependencies([check_shape_op]): target = array_ops.reshape( target, shape=[array_ops.shape(target)[0], 1]) return loss_ops.hinge_loss(logits, target) super(_BinarySvmTargetColumn, self).__init__( loss_fn=loss_fn, n_classes=2, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False): if proba: raise ValueError(
tensorflow.python.framework.ops.control_dependencies
755
import tensorflow as tf # Placeholders self.sess = tf.Session(config=config) self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0] self.a_bound = (env.action_space.high - env.action_space.low) / 2 self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action') self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state') self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage') self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
tensorflow.placeholder
756
import tensorflow as tf The forward pass scales to a standard log normal with mu=0, sigma=1 by computing: exp(Z) = (X / exp(mu))^(1/sigma) """ params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] # compute softplus activation z2, ldj = log_gaussianize(x2, mus, log_sigmas) z2 = tf.where(x2 > self.epsilon, z2, x2) ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj)) return z2, tf.math.reduce_sum(ldj, axis=[1,2,3]) def _inverse(self, x1, z2, **kwargs): params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True)) x2 = tf.where(z2 > self.epsilon, x2, z2) ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj)) return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])
tensorflow.math.reduce_sum
757
import tensorflow as tf # Fully connected layer. Note that the '+' operation automatically # broadcasts the biases. fc = tf.nn.bias_add(tf.matmul(x, weights), biases) return fc def get_conv_filter(self, name): return tf.constant(self.data_dict[name][0], name="filter") def get_bias(self, name): return tf.constant(self.data_dict[name][1], name="biases") def get_fc_weight(self, name): return tf.constant(self.data_dict[name][0], name="weights") def vgg19_pretrained_last_fc(rgb_input, model_path): return Vgg19(model_path).build(rgb_input)
tensorflow.constant
758
import tensorflow as tf scope.reuse_variables() tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv')) tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv')) tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv')) tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv')) tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin')) tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin')) with tf.variable_scope("translate") as scope: trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0')) trans_z = linear(trans_h0, featsize, 'trans_z') self.translated_z = trans_z with tf.variable_scope("deconv") as scope: s_h, s_w = self.output_height, self.output_width s_h2, s_h4, s_h8, s_h16 = \ int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16) s_w2, s_w4, s_w8, s_w16 = \ int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)
tensorflow.concat
759
import tensorflow as tf # ------------------------------------------------------ # step3: Let's get serious and build the neural network # ------------------------------------------------------ # [none, 128, 9] X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs]) # [none, 6] Y = tf.placeholder(tf.float32, [None, config.n_classes])
tensorflow.placeholder
760
import tensorflow as tf rho_max_init = tf.log(tf.exp(sigma_max) - 1.0) rho_min_init = tf.log(tf.exp(sigma_min) - 1.0) std_init = tf.random_uniform_initializer(rho_min_init, rho_max_init)
tensorflow.random_uniform_initializer
761
import tensorflow as tf im = tf.concat([a, b, c], axis=3) im = tf.transpose(im, [0, 2, 3, 1]) im = (im + 1.0) * 128 im = tf.clip_by_value(im, 0, 255) im = tf.cast(im, tf.uint8, name='viz') tf.summary.image(name, im, max_outputs=50) # use the initializers from torch with argscope([Conv2D, Deconv2D], use_bias=False, W_init=tf.random_normal_initializer(stddev=0.02)), \ argscope([Conv2D, Deconv2D, InstanceNorm], data_format='NCHW'), \ argscope(LeakyReLU, alpha=0.2): with tf.variable_scope('gen'): with tf.variable_scope('B'): AB = self.generator(A) with tf.variable_scope('A'): BA = self.generator(B) ABA = self.generator(AB) with tf.variable_scope('B'): BAB = self.generator(BA) viz3('A_recon', A, AB, ABA) viz3('B_recon', B, BA, BAB)
tensorflow.variable_scope
762
import tensorflow as tf return loss def _tensor_to_image(self, net): with tf.name_scope('to_image'): if FLAGS.new_blur: net = net[..., :self.batch_shape[-1]] net = tf.nn.relu(net) net = tf.cast(net <= 1, net.dtype) * net * 255 net = tf.cast(net, tf.uint8) return net def _image_to_tensor(self, image): with tf.name_scope('args_transform'): net = tf.cast(image, tf.float32) / 255. if FLAGS.new_blur: net = _blur_expand(net) FLAGS.blur = 0. return net def _init_optimizer(self): self.loss_total = tf.add_n(self.losses, 'loss_total') self.optimizer = self.optimizer_constructor(learning_rate=FLAGS.learning_rate) self._train = self.optimizer.minimize(self.loss_total, global_step=self.step) # MAIN
tensorflow.cast
763
import tensorflow as tf # PUT YOUR CODE HERE # ######################## ######################## logits = self.__forward_pass(x, reuse) l2_out = tf.nn.l2_normalize(logits, dim=1) ######################## return l2_out
tensorflow.nn.l2_normalize
764
from tensorflow.python.ops import variables yield def _setupDense(self, is_distributed, dtype): with self._maybeWithDevice("/job:ps" if is_distributed else None): var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype) var1 = variables.Variable([4.0, 5.0], dtype=dtype) with self._maybeWithDevice("/job:worker" if is_distributed else None): grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)
tensorflow.python.ops.variables.Variable
765
import tensorflow as tf tf.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn]) ) range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead]) scatter_pooling = tf.cond( tf.equal(sl_unhead, 0), lambda: tf.zeros([bs, sl+1, hn], tf.float32), lambda: tf.scatter_nd( tf.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn]) ) self_attn_input = rep_map context_features = tf.add(scatter_attn[:, :-1], scatter_pooling[:, :-1], 'context_features') output_mask = rep_mask else: self_attn_input = rep_head_tensor
tensorflow.stack
766
import tensorflow as tf if projection_dim < lstm_dim: # are projecting down output lstm_cell = tf.nn.rnn_cell.LSTMCell( lstm_dim, num_proj=projection_dim, cell_clip=cell_clip, proj_clip=proj_clip) else: lstm_cell = tf.nn.rnn_cell.LSTMCell( lstm_dim, cell_clip=cell_clip, proj_clip=proj_clip) if use_skip_connections: # ResidualWrapper adds inputs to outputs
tensorflow.nn.rnn_cell.LSTMCell
767
import tensorflow as tf 'Gets %d.' % kernel_size) atrous_rates = [1] with slim.arg_scope( [slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=tf.truncated_normal_initializer(stddev=0.01), reuse=reuse): with tf.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]): branch_logits = [] for i, rate in enumerate(atrous_rates): scope = scope_suffix
tensorflow.truncated_normal_initializer
768
import tensorflow as tf output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu ) if FLAGS.use_tpu: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn, ) else: output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold=None ) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn( masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels, ):
tensorflow.estimator.EstimatorSpec
769
import tensorflow as tf channel_axis = 3 if data_format == 'channels_last' else 1 in_channel = in_shape[channel_axis] assert in_channel is not None, "[Deconv2D] Input cannot have unknown channel!" padding = padding.upper() if w_init is None: w_init = tf.contrib.layers.variance_scaling_initializer() if b_init is None: b_init = tf.constant_initializer() ret = tf.layers.conv2d_transpose(inputs=inputdata, filters=out_channel, kernel_size=kernel_size, strides=stride, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=w_init, bias_initializer=b_init, trainable=trainable,
tensorflow.constant_initializer
770
import tensorflow as tf x, 64, (4, 4), strides=(2, 2), name="conv2", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 128, (4, 4), strides=(2, 2), name="conv3", activation=common_layers.belu, padding="SAME") flat_x = tf.layers.flatten(x) flat_x = tf.nn.dropout(flat_x, rate=dropout) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1") logits = tf.layers.dense( x, self.hparams.problem.num_actions, name="dense2" ) logits = tf.expand_dims(logits, axis=1) logits = clip_logits(logits, self.hparams)
tensorflow.nn.dropout
771
import tensorflow as tf def observation_input(ob_space, batch_size=None, name='Ob'): ''' Build observation input with encoding depending on the observation space type Params: ob_space: observation space (should be one of gym.spaces) batch_size: batch size for input (default is None, so that resulting input placeholder can take tensors with any batch size) name: tensorflow variable name for input placeholder returns: tuple (input_placeholder, processed_input_tensor) ''' if isinstance(ob_space, Discrete): input_x = tf.placeholder(shape=(batch_size,), dtype=tf.int32, name=name) processed_x = tf.to_float(tf.one_hot(input_x, ob_space.n)) return input_x, processed_x elif isinstance(ob_space, Box): input_shape = (batch_size,) + ob_space.shape input_x = tf.placeholder(shape=input_shape, dtype=ob_space.dtype, name=name) processed_x = tf.to_float(input_x) return input_x, processed_x else: raise NotImplementedError
tensorflow.one_hot
772
import tensorflow as tf with tf.name_scope(name): sigmas = [ 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6 ] gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas)) loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel) loss_value = tf.maximum(1e-4, loss_value) * weight assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value]) with tf.control_dependencies([assert_op]): tag = 'MMD_Loss' barrier = tf.no_op(tag) return loss_value def dann_loss(source_samples, target_samples, weight, name='dann_loss'):
tensorflow.is_finite
773
import tensorflow as tf def __call__(self,input_var) : mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True) return (input_var - mean) / tf.sqrt(var+self.epsilon) class BatchNorm(object): def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) : self.momentum = momentum self.epsilon = epsilon self.axis = axis self.center=center self.scale=scale with tf.variable_scope(name) as scope: with tf.variable_scope('bn') : self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0)) self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False) self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False) self.scope = scope def __call__(self,input_var,is_training,**xargs) : with tf.variable_scope(self.scope) : return tf.layers.batch_normalization(
tensorflow.variable_scope
774
import tensorflow as tf # for each direction, we'll store tensors for each layer self.lstm_outputs = {'forward': [], 'backward': []} self.lstm_state_sizes = {'forward': [], 'backward': []} self.lstm_init_states = {'forward': [], 'backward': []} self.lstm_final_states = {'forward': [], 'backward': []} update_ops = [] for direction in ['forward', 'backward']: if direction == 'forward': layer_input = self.embedding else: layer_input = tf.reverse_sequence( self.embedding, sequence_lengths, seq_axis=1, batch_axis=0 ) for i in range(n_lstm_layers): if projection_dim < lstm_dim: # are projecting down output lstm_cell = tf.nn.rnn_cell.LSTMCell( lstm_dim, num_proj=projection_dim,
tensorflow.reverse_sequence
775
import tensorflow as tf def build_training_graph(x, y, ul_x, ul_u, lr, mom): global_step = tf.get_variable( name="global_step", shape=[], dtype=tf.float32, initializer=tf.constant_initializer(0.0), trainable=False, ) logit = vat.forward(x) nll_loss = L.ce_loss(logit, y) with tf.variable_scope(tf.get_variable_scope(), reuse=True): if FLAGS.method == 'vat': ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False) vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit) additional_loss = vat_loss elif FLAGS.method == 'vatent': ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False) vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit) ent_loss = L.entropy_y_x(ul_logit) additional_loss = vat_loss + ent_loss elif FLAGS.method == 'baseline':
tensorflow.get_variable_scope
776
import tensorflow as tf self.log_file = filename f = open(self.log_file, 'w') f.close() def weight_variable(shape): return tf.get_variable('W', shape, initializer=tf.random_normal_initializer(0., 0.02)) def bias_variable(shape): return tf.get_variable('b', shape, initializer=tf.constant_initializer(0.))
tensorflow.random_normal_initializer
777
import tensorflow as tf class JointsMSELoss(object): def __init__(self): self.mse = tf.losses.MeanSquaredError()
tensorflow.losses.MeanSquaredError
778
import tensorflow as tf def build(self, input_shape): self.theta = self.add_weight(shape=(1, input_shape[1]), initializer='zeros', trainable=True, name='theta') self.theta_exp = tf.clip_by_value(K.exp(self.theta), 1e-3, 1e4) super().build(input_shape) def call(self, x): return tf.identity(x) def compute_output_shape(self, input_shape): return input_shape class SliceLayer(Layer): def __init__(self, index, **kwargs): self.index = index
tensorflow.identity
779
import tensorflow as tf if nn_type == 'mlp': with tf.variable_scope('pi'): pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation) with tf.variable_scope('q1'): q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q2'): q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q1', reuse=True): q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) elif nn_type == 'mlp_dropout': with tf.variable_scope('pi'): pi = act_limit * mlp_dropout(x, list(hidden_sizes)+[act_dim], activation, output_activation) with tf.variable_scope('q'): q = tf.squeeze(mlp_dropout(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) with tf.variable_scope('q', reuse=True): q_pi = tf.squeeze(mlp_dropout(tf.concat([x,pi], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) elif nn_type == 'mlp_variational': with tf.variable_scope('pi'): pi_in_dim = x.shape.as_list()[1] pi_dropout_mask_generator = DropoutMaskGenerator(pi_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
tensorflow.variable_scope
780
import tensorflow as tf def LSGAN_losses(real, fake): d_real = tf.reduce_mean(tf.squared_difference(real, 1), name='d_real') d_fake = tf.reduce_mean(tf.square(fake), name='d_fake') d_loss = tf.multiply(d_real + d_fake, 0.5, name='d_loss')
tensorflow.square
781
import tensorflow as tf N_ch -- number of channels, if any (for the feature vector only) Returns: one_hot -- one hot matrix encoding """ # Create a tensor flow constant equal to the number of classes C = tf.constant(N_classes, name="C") one_hot_matrix = tf.one_hot(vect-1, C, axis=0) #axis=0 means it is mapping to column vectors if N_ch != 0: one_hot_matrix= tf.expand_dims(one_hot_matrix, 1) # Create tensor flow session sess = tf.Session()
tensorflow.one_hot
782
import tensorflow as tf blank_index: The index of blank which will be set to def_val (or None) def_val: The value associated with the default event shift: Necessary shift to convert to representation Returns: decoded: The decoded sequence [seq_length] """ # Infer predictions using argmax decoded = tf.cast(tf.argmax(inputs, axis=-1), tf.int32) # Adjust event vals according to representation decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded) # Set default vals decoded = tf.where(tf.equal(decoded, 0), def_val, decoded) return decoded, None @tf.function def ctc_decode(inputs, batch_size, seq_length, blank_index, def_val, shift, beam_width=10): """Perform ctc decoding""" # Decode uses time major
tensorflow.not_equal
783
import tensorflow as tf s_h2, s_h4, s_h8, s_h16 = \ int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16) s_w2, s_w4, s_w8, s_w16 = \ int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16) output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') scope.reuse_variables() truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
tensorflow.concat
784
import tensorflow as tf while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running prediction*****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder(
tensorflow.logging.info
785
import tensorflow as tf backbone = xdet_body_v3.xdet_resnet_v3(params['resnet_size'], params['data_format']) body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN)) cls_pred, location_pred = xdet_body_v3.xdet_head(body_cls_output, body_regress_output, params['num_classes'], num_anchors_list[0], (mode == tf.estimator.ModeKeys.TRAIN), data_format=params['data_format']) if params['data_format'] == 'channels_first': cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1]) location_pred = tf.transpose(location_pred, [0, 2, 3, 1]) bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4])) cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']]) location_pred = tf.reshape(location_pred, [-1, 4]) glabels = tf.reshape(glabels, [-1]) gscores = tf.reshape(gscores, [-1]) gtargets = tf.reshape(gtargets, [-1, 4]) # raw mask for positive > 0.5, and for negetive < 0.3 # each positive examples has one label positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold']) fpositive_mask = tf.cast(positive_mask, tf.float32) n_positives = tf.reduce_sum(fpositive_mask) # negtive examples are those max_overlap is still lower than neg_threshold, note that some positive may also has lower jaccard # note those gscores is 0 is either be ignored during anchors encode or anchors have 0 overlap with all ground truth #negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.) negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.) #negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.) #negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
tensorflow.reshape
786
import tensorflow as tf learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), [int(_) for _ in params['decay_boundaries']], lr_values) truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype)) # Create a tensor named learning_rate for logging purposes. tf.identity(truncated_learning_rate, name='learning_rate') tf.summary.scalar('learning_rate', truncated_learning_rate) optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate, momentum=params['momentum']) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(loss, global_step) else: train_op = None cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes']) metrics = {'cls_accuracy': cls_accuracy} # Create a tensor named train_accuracy for logging purposes. tf.identity(cls_accuracy[1], name='cls_accuracy') tf.summary.scalar('cls_accuracy', cls_accuracy[1])
tensorflow.get_collection
787
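The UPDATE_OPS pattern shown in the row above is the standard TF 1.x recipe for keeping batch-norm moving statistics in sync with training. A minimal, self-contained sketch; layer sizes and hyperparameters are illustrative assumptions, not values from the snippet:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32])        # illustrative feature input
labels = tf.placeholder(tf.float32, [None, 10])   # illustrative one-hot labels

h = tf.layers.dense(x, 64, activation=tf.nn.relu)
h = tf.layers.batch_normalization(h, training=True)   # registers its update ops in UPDATE_OPS
logits = tf.layers.dense(h, 10)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)

# The batch-norm moving averages are only refreshed if the UPDATE_OPS collection
# is wired in as a dependency of the train op, exactly as in the snippet above.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss, global_step=global_step)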
import tensorflow as tf else: # Take body outputs for the last position only, and targets too. last_position_body_output = tf.expand_dims( body_output[:, -1, :, :], axis=[1])
tensorflow.expand_dims
788
import tensorflow as tf logits = tf.matmul(last_outputs, W) + b self.embed_inputs = embed_inputs return logits def loss(self, logits, forward_only=None): cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self.y, tf.float32)) mean_cost = tf.reduce_mean(cost) y_pred = tf.argmax(logits, 1) correct_pred = tf.equal(y_pred, tf.argmax(self.y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) if forward_only: str_summary_type = 'eval' loss_summ = tf.summary.scalar("{0}_loss".format(str_summary_type), mean_cost) acc_summ = tf.summary.scalar("{0}_accuracy".format(str_summary_type), accuracy) merged = tf.summary.merge([loss_summ, acc_summ]) return mean_cost, accuracy, y_pred, merged else: return mean_cost, accuracy, y_pred
tensorflow.cast
789
from tensorflow.contrib.layers.python.layers import initializers @add_arg_scope def convolution(inputs, num_outputs, kernel_size, stride=1, padding='SAME', data_format=None, rate=1, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, use_spectral_norm=False, is_training=False, scope=None, conv_dims=None): """Adds an N-D convolution followed by an optional batch_norm layer.
tensorflow.contrib.layers.python.layers.initializers.xavier_initializer
790
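For reference, the initializer highlighted in that row is most often passed to the tf.contrib.layers convolution wrappers. A small sketch, assuming a toy NHWC input; the shapes and layer sizes are illustrative only:

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers

images = tf.placeholder(tf.float32, [None, 28, 28, 3])  # illustrative NHWC input

# Xavier/Glorot initialization keeps activation variance roughly constant across layers.
net = tf.contrib.layers.conv2d(
    images,
    num_outputs=16,
    kernel_size=3,
    weights_initializer=initializers.xavier_initializer(),
    biases_initializer=tf.zeros_initializer())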
import tensorflow as tf coord.request_stop() coord.join(threads) def predict_time(loop=100): feed_dict={ testnum:1 } with tf.Session(config=config) as sess: sess.run(init) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) tf.train.Saver().restore(sess,path) total=0.0 for i in range(loop): a = datetime.now() accuracy_np = sess.run([accuracy],feed_dict=feed_dict) b = datetime.now() c = (b - a).microseconds total+=c print('predict_time(ms): ',total/(loop*1000))
tensorflow.train.start_queue_runners
791
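The Coordinator / start_queue_runners pairing in that row is the classic queue-based input-pipeline idiom from TF 1.x. A minimal sketch under the assumption that some queue-producing op is already in the graph (string_input_producer and the file names below are placeholders):

import tensorflow as tf

# Any queue-backed input op works here; string_input_producer is only an example.
filename_queue = tf.train.string_input_producer(["a.txt", "b.txt"], num_epochs=1)

init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init)
    coord = tf.train.Coordinator()
    # Start the background threads that keep the queue filled.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            print(sess.run(filename_queue.dequeue()))
    except tf.errors.OutOfRangeError:
        pass
    finally:
        coord.request_stop()
        coord.join(threads)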
import tensorflow as tf # move back to TF pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32) # padding pred_scores = padding(pred_scores, n_cands) true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands) true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True) assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1)) kacc = []
tensorflow.reshape
792
import tensorflow as tf tf.add_to_collection('mu_sigma_bn', sigma) beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32, initializer=tf.zeros_initializer()) gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
tensorflow.zeros_initializer
793
import tensorflow as tf raise NotImplementedError if full_cov: # TODO(VD): ``full_cov`` True would return a ``fvar`` of shape N x N x D x D, # encoding the covariance between input datapoints as well. # This is not implemented as this feature is only used for plotting purposes. raise NotImplementedError pXnew = Gaussian(Xnew_mu, Xnew_var) num_data = tf.shape(Xnew_mu)[0] # number of new inputs (N) num_ind = tf.shape(q_mu)[0] # number of inducing points (M) num_func = tf.shape(q_mu)[1] # output dimension (D) q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0) # D x M x M eKuf = tf.transpose(expectation(pXnew, (kern, feat))) # M x N (psi1) if Luu is None: Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level) # M x M Luu = tf.cholesky(Kuu) # M x M if not white: q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True) Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1]) # remove line once issue 216 is fixed q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True) Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)
tensorflow.matrix_band_part
794
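For context on the highlighted call: tf.matrix_band_part(q_sqrt, -1, 0) simply zeroes everything above the main diagonal. A tiny sketch on made-up values:

import tensorflow as tf

a = tf.constant([[1., 2., 3.],
                 [4., 5., 6.],
                 [7., 8., 9.]])

# num_lower=-1 keeps the full lower triangle, num_upper=0 zeroes the strict upper triangle.
lower = tf.matrix_band_part(a, -1, 0)

with tf.Session() as sess:
    print(sess.run(lower))
    # [[1. 0. 0.]
    #  [4. 5. 0.]
    #  [7. 8. 9.]]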
import tensorflow as tf lambdas, lambdas_variable = _create_dual_variable( 'lambdas', shape=[1, num_labels, num_anchors], dtype=logits.dtype, initializer=lambdas_initializer, collections=variables_collections, trainable=trainable, dual_rate_factor=dual_rate_factor) # Create biases with shape [1, num_labels, num_anchors]. biases = tf.contrib.framework.model_variable( name='biases', shape=[1, num_labels, num_anchors], dtype=logits.dtype, initializer=tf.zeros_initializer(), collections=variables_collections, trainable=trainable) # Maybe create label_priors. label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections) label_priors = tf.reshape(label_priors, [1, num_labels, 1]) # Expand logits, labels, and weights to shape [batch_size, num_labels, 1]. logits = tf.expand_dims(logits, 2) labels = tf.expand_dims(labels, 2) weights = tf.expand_dims(weights, 2) # Calculate weighted loss and other outputs. The log(2.0) term corrects for
tensorflow.zeros_initializer
795
import tensorflow as tf self.test_loss = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D_val['logits'], targets, 1))) self.error_rate = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D['class_logits'], targets, 1))) if gpu_idx == 0: update = tf.assign(num_error_rate, num_error_rate + 1.) with tf.control_dependencies([update]): tc = tf.maximum(.01, 1. / num_error_rate) update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate) with tf.control_dependencies([update]): self.d_loss_class = tf.identity(self.d_loss_class) self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits( logits=self.end_points_D['D_on_G_logits'], labels=tf.zeros_like(self.end_points_D['D_on_G_logits'])) self.d_loss_class = tf.reduce_mean(self.d_loss_class) self.d_loss_real = tf.reduce_mean(self.d_loss_real) self.d_loss_fake = tf.reduce_mean(self.d_loss_fake) if is_fm_loss: global_pool_head = self.end_points_D['global_pool'] real_data_features = tf.slice(global_pool_head, [0, 0], [batch_size_train, num_classes]) fake_data_features = tf.slice(global_pool_head, [batch_size_train, 0],
tensorflow.identity
796
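The tf.identity call inside the control_dependencies block above is the usual trick for forcing the moving-average updates to run whenever the wrapped tensor is evaluated. A stripped-down sketch of the same idiom; the variable names and decay constant are illustrative, not taken from the snippet:

import tensorflow as tf

error_rate = tf.placeholder(tf.float32, [])   # illustrative per-step error rate
avg_error_rate = tf.get_variable("avg_error_rate", [], dtype=tf.float32,
                                 initializer=tf.zeros_initializer(), trainable=False)

# Exponential moving average of the error rate, updated as a side effect.
update = tf.assign(avg_error_rate, 0.99 * avg_error_rate + 0.01 * error_rate)
with tf.control_dependencies([update]):
    # tf.identity returns a new tensor that carries the control dependency,
    # so fetching `tracked_error_rate` also runs the assign above.
    tracked_error_rate = tf.identity(error_rate)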
import tensorflow as tf per_example_loss = tf.square(logits - labels) loss = tf.reduce_mean(per_example_loss)
tensorflow.reduce_mean
797
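The two lines in that row are just a mean-squared-error loss; spelled out on concrete numbers (the values are chosen only for illustration):

import tensorflow as tf

logits = tf.constant([1.0, 2.0, 3.0])
labels = tf.constant([1.5, 2.0, 2.0])

per_example_loss = tf.square(logits - labels)   # [0.25, 0.0, 1.0]
loss = tf.reduce_mean(per_example_loss)         # (0.25 + 0.0 + 1.0) / 3 ~= 0.4167

with tf.Session() as sess:
    print(sess.run(loss))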
import tensorflow as tf * tf.add_n([tf.nn.l2_loss(var) for var in trainable_vars if loss_filter(var)]) accuracy = tf.reduce_mean( tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1)), tf.float32)) metrics = {'accuracy': accuracy}
tensorflow.argmax
798
from tensorflow.python.framework import ops ops.RegisterShape("Relu")(common_shapes.unchanged_shape) ops.RegisterShape("Relu6")(common_shapes.unchanged_shape) ops.RegisterShape("Elu")(common_shapes.unchanged_shape) ops.RegisterShape("Softplus")(common_shapes.unchanged_shape) ops.RegisterShape("Softsign")(common_shapes.unchanged_shape) @ops.RegisterShape("ReluGrad") @ops.RegisterShape("Relu6Grad") @ops.RegisterShape("EluGrad") @ops.RegisterShape("SoftplusGrad") @ops.RegisterShape("SoftsignGrad") def _BinaryElementwiseShape(op): """Returns same shape as both inputs to op. Args:
tensorflow.python.framework.ops.RegisterShape
799