Dataset columns:
  seed      string (length 25 to 2.89k)
  seed_api  string (length 14 to 102)
  index     int64 (0 to 14.8k)
import tensorflow as tf

summary = tf.Summary()
summary.value.add(tag='eval/Accuracy@1', simple_value=precision_at_1)
summary.value.add(tag='eval/Recall@5', simple_value=recall_at_5)
summary_writer.add_summary(summary, global_step)
log_fn('Precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
       (precision_at_1, recall_at_5, total_eval_count))

def _benchmark_cnn(self):
    """Run cnn in benchmark mode. When forward_only on, it forwards CNN."""
    (enqueue_ops, fetches) = self._build_model()
    main_fetch_group = tf.group(*fetches)
    execution_barrier = None
    if self.job_name and not FLAGS.cross_replica_sync:
        execution_barrier = self.add_sync_queues_and_barrier(
            'execution_barrier_', [])
    global_step = tf.contrib.framework.get_global_step()
    with tf.device(self.global_step_device):
        with tf.control_dependencies([main_fetch_group]):
            inc_global_step = global_step.assign_add(1)
tensorflow.group
13,800
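For reference, a minimal standalone sketch of the tensorflow.group API named above (TF 1.x graph mode assumed; variable and op names are illustrative):

import tensorflow as tf

v = tf.Variable(0)
inc = tf.assign_add(v, 1)
reset = tf.assign(v, 0)
step = tf.group(inc, reset)  # a single op that runs both assignments when executed

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(step)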
import tensorflow as tf

# Make sure update_ops are computed before total_loss.
if update_ops:
    with tf.control_dependencies(update_ops):
        barrier = tf.no_op(name='update_barrier')
    self.d_losses[-1] = control_flow_ops.with_dependencies([barrier],
                                                           self.d_losses[-1])
    self.g_losses[-1] = control_flow_ops.with_dependencies([barrier],
                                                           self.g_losses[-1])
tensorflow.no_op
13,801
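A minimal sketch of how tensorflow.no_op is typically used as a barrier, assuming TF 1.x (names are illustrative):

import tensorflow as tf

v = tf.Variable(1.0)
update = tf.assign_add(v, 1.0)
with tf.control_dependencies([update]):
    # The barrier does nothing itself, but anything that depends on it
    # can only run after `update` has run.
    barrier = tf.no_op(name='update_barrier')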
import tensorflow as tf

test_dir = self._TestDir("saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
    # Creates a graph.
    v0 = tf.Variable(10.0, name="v0")
    v1 = tf.Variable(11.0, name="v1")
    # Creates 2 savers.
    saver0 = tf.train.Saver({"v0": v0}, name="saver0")
    saver1 = tf.train.Saver({"v1": v1}, name="saver1")
    tf.add_to_collection("savers", saver0)
tensorflow.Variable
13,802
import tensorflow as tf

if is_training:
    # I.e., 0.1 dropout
    output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
tensorflow.nn.dropout
13,803
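A minimal sketch of tensorflow.nn.dropout under the TF 1.x signature used above (TF 2.x replaced keep_prob with rate):

import tensorflow as tf

x = tf.ones([2, 4])
# keep_prob=0.9 keeps ~90% of activations and scales the survivors by 1/0.9.
y = tf.nn.dropout(x, keep_prob=0.9)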
import tensorflow as tf loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta) self.models += [noisy_decode] return loss def _tensor_to_image(self, net): with tf.name_scope('to_image'): if FLAGS.new_blur: net = net[..., :self.batch_shape[-1]] net = tf.nn.relu(net) net = tf.cast(net <= 1, net.dtype) * net * 255 net = tf.cast(net, tf.uint8) return net def _image_to_tensor(self, image): with tf.name_scope('args_transform'): net = tf.cast(image, tf.float32) / 255. if FLAGS.new_blur: net = _blur_expand(net) FLAGS.blur = 0. return net
tensorflow.cast
13,804
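A minimal sketch of tensorflow.cast behaviour (values are illustrative):

import tensorflow as tf

x = tf.constant([1.7, -0.3, 0.0])
i = tf.cast(x, tf.int32)  # [1, 0, 0]: float-to-int casts truncate toward zero
b = tf.cast(x, tf.bool)   # [True, True, False]: nonzero values become True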
from tensorflow.python.ops import array_ops

def _resize_image(image, height, width):
    image = array_ops.expand_dims(image, 0)
tensorflow.python.ops.array_ops.expand_dims
13,805
import tensorflow as tf Returns: *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled. """ negative_idx = tf.logical_not(labels) positive_idx = tf.logical_and(labels, indicator) negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if sample_size is None: max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32)) else: max_num_pos = int(positive_fraction * sample_size) sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) if sample_size is None: negative_positive_ratio = (1 - positive_fraction) / positive_fraction max_num_neg = tf.cast(negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32), dtype=tf.int32) else: max_num_neg = sample_size - num_sampled_pos sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg) return tf.logical_or(sampled_pos_idx, sampled_neg_idx) def batch_sample_balanced_positive_negative(indicators, sample_size, labels,
tensorflow.cast
13,806
import tensorflow as tf epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out]) hyp_params = tf.get_variable('hyp_params_layer'+str(h), shape=[2], initializer=tf.random_normal_initializer()) l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1]) epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2 # Compute A_{h+1} A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1]) # Compute z_{h}A_{h+1} Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5) Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.) KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2. # Output layer else: F = tf.squeeze(tf.layers.dense(Z, n_out), [2]) return F, KL
tensorflow.sin
13,807
import tensorflow as tf tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = ""
tensorflow.train.init_from_checkpoint
13,808
import tensorflow as tf log_y = log_x*scales + mus ldj += log_sigmas z = tf.math.exp(log_y) return z, ldj else: scales = tf.math.exp(-log_sigmas) log_x = tf.math.log(x) ldj = -log_x log_y = (log_x - mus)*scales ldj -= log_sigmas z = tf.math.exp(log_y) return z, ldj class LogGaussianize(Parameterize): """ Implementation of Parameterize for a log-Gaussian prior. """ def __init__(self, input_shape=None, epsilon=1.0E-3, name='log_gaussianize', *args, **kwargs): super().__init__(*args, num_parameters=2, input_shape=input_shape, name=name, **kwargs) self.epsilon = epsilon
tensorflow.math.exp
13,809
import tensorflow as tf

test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)
tensorflow.argmax
13,810
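A minimal sketch of tensorflow.argmax as used above to turn logits into class ids (values are illustrative):

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.4]])
pred = tf.argmax(logits, 1)  # int64 tensor [1, 0]: index of the max along axis 1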
import tensorflow as tf query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all if mask is not None: mask = tf.equal(mask, tf.ones_like(mask)) key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Activation if softmax_stag:
tensorflow.layers.dense
13,811
import tensorflow as tf for g, v in grads_and_vars: tf.summary.histogram(v.name[:-2] + '_hist', v) tf.summary.histogram(v.name[:-2] + '_grad_hist', g) with tf.control_dependencies([train_op]), tf.name_scope('ema'): ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step) train_op = ema.apply(tf.trainable_variables())
tensorflow.name_scope
13,812
import tensorflow as tf context, beta = self._selector(context, h, reuse=(t!=0)) beta_list.append(beta) with tf.variable_scope('lstm', reuse=(t!=0)): _, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x, context]), state=[c, h]) logits = self._decode_lstm(x, h, context, reuse=(t!=0)) sampled_word = tf.argmax(logits, 1) sampled_word_list.append(sampled_word) alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L) betas = tf.transpose(tf.squeeze(beta_list), (1, 0)) # (N, T) sampled_captions = tf.transpose(tf.stack(sampled_word_list), (1, 0)) # (N, max_len) return alphas, betas, sampled_captions
tensorflow.argmax
13,813
import tensorflow as tf grads = average_gradients(tower_grads) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) summary_op = tf.summary.merge_all() # save moving average variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) # batch norm updates with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]): train_op = tf.no_op(name='train_op') saver = tf.train.Saver(tf.global_variables()) summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())
tensorflow.trainable_variables
13,814
import tensorflow as tf logits=logits, labels=tf.nn.softmax(logits))) entropy += curr_ent prev_layers.append(anchors.read(tf.reduce_sum(index))) inputs = prev_layers[-1] for i in range(2): # op_1, op_2 next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) prev_c, prev_h = next_c, next_h logits = tf.matmul(next_h[-1], self.w_soft) + self.b_soft if self.temperature is not None: logits /= self.temperature if self.tanh_constant is not None: op_tanh = self.tanh_constant / self.op_tanh_reduce logits = op_tanh * tf.tanh(logits) if use_bias: logits += self.b_soft_no_learn
tensorflow.matmul
13,815
import tensorflow as tf biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0)) deconv_layer = tf.nn.bias_add(deconv, biases) if bn: deconv_layer = self.batch_norm_layer('batch_norm_layer',deconv_layer,training) if relu: deconv_layer = tf.nn.relu(deconv_layer, name=scope.name) print('Deconv layer {0} -> {1}'.format(bottom.get_shape().as_list(),deconv_layer.get_shape().as_list())) return deconv_layer def variable(self, name, shape, initializer,regularizer=None): with tf.device('/cpu:0'): return tf.get_variable(name, shape, initializer=initializer, regularizer=regularizer, trainable=True) def fc_layer(self, bottom, in_size, out_size, name): with tf.variable_scope(name): weights, biases = self.get_fc_var(in_size, out_size, name) x = tf.reshape(bottom, [-1, in_size]) fc = tf.nn.bias_add(tf.matmul(x, weights), biases) tf.summary.histogram('weight', weights) tf.summary.histogram('bias', biases)
tensorflow.get_variable
13,816
import tensorflow as tf """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files))
tensorflow.FixedLenFeature
13,817
from tensorflow.contrib.learn.python.learn.datasets import base

local_file = base.maybe_download(TEST_IMAGES, train_dir,
tensorflow.contrib.learn.python.learn.datasets.base.maybe_download
13,818
import tensorflow as tf "question") self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1") self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2") self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size) self.c_mask = tf.cast(self.c, tf.bool) # index 0 is padding symbol N x self.max_p_num, max_p_len self.q_mask = tf.cast(self.q, tf.bool) self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
tensorflow.placeholder
13,819
import tensorflow as tf segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures(
tensorflow.logging.info
13,820
from tensorflow.contrib.learn.python.learn.estimators import run_config run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. config._cluster_spec = server_lib.ClusterSpec({})
tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig
13,821
import tensorflow as tf def loss(self, logits, forward_only=None): cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self.y, tf.float32)) mean_cost = tf.reduce_mean(cost) y_pred = tf.argmax(logits, 1) correct_pred = tf.equal(y_pred, tf.argmax(self.y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) if forward_only: str_summary_type = 'eval'
tensorflow.argmax
13,822
import tensorflow as tf try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with tf.Session() as sess: input0_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_INPUT0:0") input1_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_INPUT1:0") output0_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_OUTPUT0:0")
tensorflow.get_default_graph
13,823
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib GMM.SCORES: _streaming_sum(loss), } return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions, eval_metric_ops=eval_metric_ops,
tensorflow.contrib.learn.python.learn.estimators.model_fn.ModelFnOps
13,824
import tensorflow as tf gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32) with tf.variable_scope("bw_{}".format(layer)): inputs_bw = tf.reverse_sequence( outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0) out_bw, _ = tf.nn.dynamic_rnn( gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32) out_bw = tf.reverse_sequence( out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
tensorflow.nn.dynamic_rnn
13,825
import tensorflow as tf global_step=global_step) update_ops.append(grad_updates) update_op = tf.group(*update_ops) train_op = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op') else: clones_losses = [] regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) for clone in clones: with tf.name_scope(clone.scope): clone_loss = _gather_clone_loss(clone, len(clones), regularization_losses) if clone_loss is not None: clones_losses.append(clone_loss) # Only use regularization_losses for the first clone regularization_losses = None if clones_losses: total_loss = tf.add_n(clones_losses, name='total_loss') # Add the summaries from the first clone. These contain the summaries
tensorflow.name_scope
13,826
import tensorflow as tf if params['use_ohkm']: base_learning_rate = 1. * base_learning_rate for pred_ind in list(range(len(pred_outputs) - 1)): mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1) num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2 gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1] gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1]) gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1)) select_targets = tf.gather_nd(targets_list[-1], gather_indcies) select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies) mse_loss_list.append(tf.losses.mean_squared_error(select_targets, select_heatmap, weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(len(pred_outputs) - 1), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN)) else: for pred_ind in list(range(len(pred_outputs))):
tensorflow.range
13,827
import tensorflow as tf for path in paths: spectrograms.append(np.load("spectrogram/" + path + ".npy")) if spectrograms[-1].shape[0] > max_x: max_x = spectrograms[-1].shape[0] return spectrograms, max_x # In[4]: tf.reset_default_graph() sess = tf.InteractiveSession() model = Model() sess.run(tf.global_variables_initializer()) # In[5]: for e in range(30): pbar = tqdm(range(0, len(text_files), batch_size), desc="minibatch loop") total_cost, total_acc = 0, 0
tensorflow.InteractiveSession
13,828
import tensorflow as tf loss=work.softmax_loss(inf,batch_label) opti=work.optimer(loss,learnrate) test_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum) test_inf=work.test_inference(test_image_batch) test_labels=tf.one_hot(test_label_batch,classnum) test_pre = tf.reshape(test_inf, [testnum, classnum]) correct_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1)) accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) test_pre = tf.argmax(test_pre, 1) test_true = tf.argmax(test_labels, 1) valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum) valid_inf=work.valid_inference(valid_image_batch) valid_labels=tf.one_hot(valid_label_batch,classnum) #train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy) valid_pre = tf.reshape(valid_inf, [validnum, classnum]) valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))
tensorflow.argmax
13,829
import tensorflow as tf

)
encoded_history = reduce_max(conv3, [1, 2])

with tf.name_scope("Decoder"):
    second_to_last_user_utterance = encoded_utterances[:, history_length - 3, 0, :]
    last_system_utterance = encoded_utterances[:, history_length - 2, 0, :]
    last_user_utterance = encoded_utterances[:, history_length - 1, 0, :]
tensorflow.name_scope
13,830
import tensorflow as tf

else:
    if num_channels_in != output_length:
        conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length],
                                                      stddev=0.1, dtype=tf.float32))
        conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME')
tensorflow.truncated_normal
13,831
import tensorflow as tf Returns ------- tf.Tensor A biased tensor with the same shape as the input tensor. """ if init is None: init = tf.zeros([tensor.get_shape()[-1].value]) with tf.name_scope(name, tensor.op.name, [tensor]): b = tf.Variable(init, name='b') return tf.nn.bias_add(tensor, b) def dropout(tensor, dropout_prob, training=True, training_only=True): """Random dropout. This implementation supports "always-on" dropout (training_only=False), which
tensorflow.Variable
13,832
import tensorflow as tf upsampled_features = tf.image.resize_bilinear( image_features, [self._mask_height, self._mask_width], align_corners=True) upsampled_features = slim.conv2d( upsampled_features, num_outputs=self._mask_prediction_conv_depth, kernel_size=[2, 2]) mask_predictions = slim.conv2d(upsampled_features, num_outputs=self.num_classes, activation_fn=None, kernel_size=[3, 3]) instance_masks = tf.expand_dims(tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor') predictions_dict[MASK_PREDICTIONS] = instance_masks return predictions_dict class ConvolutionalBoxPredictor(BoxPredictor): """Convolutional Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and
tensorflow.transpose
13,833
import tensorflow as tf from tensorflow.python.training import moving_averages from collections import defaultdict # TODO(rbharath): What does this line do? py_all = all # TODO(rbharath): REMOVE GLOBAL VARS! BREAKS DEEPCHEM STYLE! _UID_PREFIXES = defaultdict(int) # This dictionary holds a mapping {graph: learning_phase}. # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). _GRAPH_LEARNING_PHASES = {} def _to_tensor(x, dtype): x = tf.convert_to_tensor(x) if x.dtype != dtype: x = tf.cast(x, dtype) return x def learning_phase(): """Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. """ graph = tf.get_default_graph()
tensorflow.convert_to_tensor
13,834
import tensorflow as tf

input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))

if tf.random.uniform(()) > 0.5:
    input_image = tf.image.flip_left_right(input_image)
tensorflow.random.uniform
13,835
import tensorflow as tf prev_c, prev_h, anchors, anchors_w_1, arc_seq, tf.constant([0.0], dtype=tf.float32, name="entropy"), tf.constant([0.0], dtype=tf.float32, name="log_prob"), ] loop_outputs = tf.while_loop(_condition, _body, loop_vars, parallel_iterations=1) arc_seq = loop_outputs[-3].stack() arc_seq = tf.reshape(arc_seq, [-1]) entropy = tf.reduce_sum(loop_outputs[-2]) log_prob = tf.reduce_sum(loop_outputs[-1]) last_c = loop_outputs[-7]
tensorflow.while_loop
13,836
import tensorflow as tf

tgt_mask = tf.cast(tgt_mask, tf.float32)
lm_loss = tf.cast(lm_loss, tf.float32)

total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask)
monitor_dict["total_loss"] = total_loss
tensorflow.reduce_sum
13,837
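A minimal sketch of tensorflow.reduce_sum, which the snippet above uses to build a masked mean (values are illustrative):

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
total = tf.reduce_sum(x)            # 10.0, sums over all elements
per_row = tf.reduce_sum(x, axis=1)  # [3., 7.]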
import tensorflow as tf of the input tensor t after padding, assuming length <= t.shape[0]. Returns: padded_t: the padded tensor, whose first dimension is length. If the length is an integer, the first dimension of padded_t is set to length statically. """ t_rank = tf.rank(t) t_shape = tf.shape(t) t_d0 = t_shape[0] pad_d0 = tf.expand_dims(length - t_d0, 0) pad_shape = tf.cond( tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0), lambda: tf.expand_dims(length - t_d0, 0)) padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0) if not _is_tensor(length): padded_t = _set_dim_0(padded_t, length) return padded_t
tensorflow.expand_dims
13,838
from tensorflow.python.ops import array_ops b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False) bg_row=tf.shape(b_grads[0])[0] bg_col=tf.shape(b_grads[0])[1] b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col)) if adj_b: b_grads = [array_ops.transpose(b_g) for b_g in b_grads] for t in range(numTensors): rows = a_indices[t][:, 0] cols = a_indices[t][:, 1] parts_a = array_ops.gather(grad[t], rows if not adj_a else cols) parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows) a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)) return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads] return tuple(return_val)
tensorflow.python.ops.array_ops.gather
13,839
import tensorflow as tf max_chars_total=self.max_chars_for_vocab) def generate_data(self, data_dir, tmp_dir, task_id=-1): """Generates training/dev data. Args: data_dir: a string tmp_dir: a string task_id: an optional integer Returns: shard or shards for which data was generated. """ tf.logging.info("generate_data task_id=%s" % task_id) encoder = self.get_or_create_vocab(data_dir, tmp_dir) assert task_id >= 0 and task_id < self.num_generate_tasks if task_id < self.num_train_shards: out_file = self.training_filepaths( data_dir, self.num_train_shards, shuffled=False)[task_id] else: out_file = self.dev_filepaths( data_dir, self.num_dev_shards, shuffled=False)[task_id - self.num_train_shards] generator_utils.generate_files( self.example_generator(encoder, tmp_dir, task_id), [out_file])
tensorflow.logging.info
13,840
import tensorflow as tf logits = tf.nn.softmax(softmax_mask(outputs, mask)) outputs = tf.matmul(logits, memory) res = tf.concat([inputs, outputs], axis=2) with tf.variable_scope("gate"): dim = res.get_shape().as_list()[-1] d_res = dropout(res, keep_prob=keep_prob, is_train=is_train) gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False)) return res * gate def dense(inputs, hidden, use_bias=True, scope="dense"): with tf.variable_scope(scope): shape = tf.shape(inputs) dim = inputs.get_shape().as_list()[-1] out_shape = [shape[idx] for idx in range( len(inputs.get_shape().as_list()) - 1)] + [hidden] flat_inputs = tf.reshape(inputs, [-1, dim]) W = tf.get_variable("W", [dim, hidden]) res = tf.matmul(flat_inputs, W) if use_bias: b = tf.get_variable( "b", [hidden], initializer=tf.constant_initializer(0.)) res = tf.nn.bias_add(res, b) res = tf.reshape(res, out_shape) return res
tensorflow.shape
13,841
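A minimal sketch of the static-vs-dynamic shape distinction behind tensorflow.shape in the snippet above (TF 1.x assumed):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
static = x.get_shape().as_list()  # [None, 128], known when the graph is built
dynamic = tf.shape(x)             # int32 tensor, resolved only at run time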
import tensorflow as tf if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) if not forCnn: scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale
tensorflow.concat
13,842
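A minimal sketch of tensorflow.concat; all dimensions except the concat axis must match (shapes are illustrative):

import tensorflow as tf

a = tf.ones([2, 3])
b = tf.zeros([2, 5])
c = tf.concat([a, b], axis=1)  # shape (2, 8)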
import tensorflow as tf

inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))

smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
tensorflow.abs
13,843
import tensorflow as tf return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2]) return tf.nn.bias_add( tf.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), self.b,data_format='NHWC',name=name)
tensorflow.shape
13,844
import tensorflow as tf features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2]) return tf.estimator.export.ServingInputReceiver(features, feature_placeholders) # Create custom estimator's train and evaluate function def train_and_evaluate(output_dir, use_keras): if use_keras: estimator = make_keras_estimator(output_dir) else: estimator = tf.estimator.Estimator(model_fn = simple_rnn, model_dir = output_dir) train_spec = tf.estimator.TrainSpec(read_dataset('train.csv', tf.estimator.ModeKeys.TRAIN, 512), max_steps = 1000) exporter = tf.estimator.LatestExporter('exporter', serving_input_fn) eval_spec = tf.estimator.EvalSpec(read_dataset('valid.csv', tf.estimator.ModeKeys.EVAL,
tensorflow.estimator.Estimator
13,845
import tensorflow as tf Returns: A tensor with the log loss. """ with tf.name_scope(name): predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = tf.to_float(predictions) labels = tf.to_float(labels) losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply( (1 - labels), tf.log(1 - predictions + eps)) return tf.losses.compute_weighted_loss(losses, weights) def kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'): """Define a kappa loss, Its a continuous differentiable approximation of discrete kappa loss. Args: predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network .
tensorflow.losses.compute_weighted_loss
13,846
import tensorflow as tf input_ = tf.concat([input_, context], axis=1) input_size = input_.get_shape()[1].value initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None with tf.variable_scope(tf.get_variable_scope(), initializer=initializer): try: output, new_state = get_cell(input_size)(input_, state) except ValueError: # auto_reuse doesn't work with LSTM cells output, new_state = get_cell(input_size, reuse=True)(input_, state) if decoder.skip_update and decoder.pred_edits and symbol is not None: is_del = tf.equal(symbol, utils.DEL_ID) new_state = tf.where(is_del, state, new_state) if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state: output = new_state return output, new_state def update_pos(pos, symbol, max_pos=None): if not decoder.pred_edits: return pos
tensorflow.equal
13,847
import tensorflow as tf with tf.name_scope(name, "click_weighted_pairwise_loss",[output]): sliced_output = tf.unstack(output, axis=1) sliced_label = tf.unstack(labels, axis=1) sliced_propensity = tf.unstack(propensity_weights, axis=1) for i in range(len(sliced_output)): for j in range(i+1, len(sliced_output)): cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j]) cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j] cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j])) if loss == None: loss = cur_label_weight * cur_pair_loss * cur_propensity loss += cur_label_weight * cur_pair_loss * cur_propensity batch_size = tf.shape(labels[0])[0] return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1)
tensorflow.exp
13,848
from tensorflow.python.framework import constant_op

    """
    # The last 10 bits of 359 and 1024+359 are identical.
    # As a result, all the crosses collide.
    t1 = constant_op.constant([[359], [359 + 1024]])
    t2 = constant_op.constant([list(range(10)), list(range(10))])
    cross = sparse_feature_cross_op.sparse_feature_cross(
tensorflow.python.framework.constant_op.constant
13,849
import tensorflow as tf

    model_dir=FLAGS.output_dir,
    save_checkpoints_steps=FLAGS.save_checkpoints_steps,
    tpu_config=tf.contrib.tpu.TPUConfig(
        iterations_per_loop=FLAGS.iterations_per_loop,
tensorflow.contrib.tpu.TPUConfig
13,850
import tensorflow as tf else: return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = tf.matmul(x, wx) + tf.matmul(h, wh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u)
tensorflow.split
13,851
from tensorflow.python.ops import common_shapes Args: op: Input operation. Returns: Shape of both inputs to `op`. """ return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] ops.RegisterShape("L2Loss")(common_shapes.scalar_shape) ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4)) @ops.RegisterShape("LRNGrad") def _LRNGradShape(op): """Shape function for LRNGrad op.""" in_grads_shape = op.inputs[0].get_shape().with_rank(4) in_image_shape = op.inputs[1].get_shape().with_rank(4) out_image_shape = op.inputs[2].get_shape().with_rank(4) return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]
tensorflow.python.ops.common_shapes.unchanged_shape_with_rank
13,852
import tensorflow as tf """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, probabilities, logits, predictions) = \ create_model(config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings, task_name) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint:
tensorflow.cast
13,853
import tensorflow as tf

with self.test_session() as session:
    @dynamic_batching.batch_fn
    def f(a, b):
        return a + b

    output0 = f(tf.constant([1]), tf.constant([2]))
    output1 = f(tf.constant([[2]]), tf.constant([3]))

    tp = pool.ThreadPool(2)
    f0 = tp.apply_async(session.run, [output0])
    f1 = tp.apply_async(session.run, [output1])
tensorflow.constant
13,854
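A minimal sketch of tensorflow.constant (values are illustrative):

import tensorflow as tf

scalar = tf.constant(3.0)
matrix = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)
filled = tf.constant(7, shape=[2, 3])  # the scalar value is expanded to fill the given shape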
import tensorflow as tf

# first task or maxstep, update the model
if not test and (TASK_NUM == 0 or TASK_NUM % maxstep == 0):
    logger.info(f"task_num={TASK_NUM}, sync_model_to_lazymodel")
    tf.get_default_session().run(sync_model_to_lazymodel)

if test:
tensorflow.get_default_session
13,855
import tensorflow as tf return self.fc8 def avg_pool(self, bottom, name): return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) def max_pool(self, bottom, name): return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) def conv_layer(self, bottom, name): with tf.variable_scope(name): filt = self.get_conv_filter(name) conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
tensorflow.nn.max_pool
13,856
import tensorflow as tf value = tf.layers.dense(x, 1, name="value") return {"target_policy": logits, "target_value": value} @registry.register_model class DenseBitwiseCategoricalPolicy(PolicyBase): """Dense network with bitwise input and categorical output.""" def body(self, features): observations = features["inputs"] flat_x = tf.layers.flatten(observations) with tf.variable_scope("dense_bitwise"): flat_x = discretization.int_to_bit_embed(flat_x, 8, 32) x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu) logits = tf.layers.dense(x, self.hparams.problem.num_actions) value = tf.layers.dense(x, 1)[..., 0]
tensorflow.layers.flatten
13,857
import tensorflow as tf dtype=tf.float32) (nms_masks1, nms_scores1, nms_classes1) = isu.instance_non_maximum_suppression_2d_scores( masks, scores, 3, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=True) nms_masks_expected1 = tf.stack([mask0, mask5, mask4]) nms_scores_expected1 = tf.constant([1.0, 0.8, 0.7], dtype=tf.float32) nms_classes_expected1 = tf.constant([1, 2, 2], dtype=tf.int32) (nms_masks2, nms_scores2, nms_classes2) = isu.instance_non_maximum_suppression_2d_scores( masks, scores, 3, min_score_thresh=0.65,
tensorflow.stack
13,858
import tensorflow as tf def build_placeholders(self): # standard for all policies self.obs = tf.placeholder(tf.float32, [None, self.obs_space]) # ! self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space)) # ! self.obs_space = env.observation_space.shape self.r = tf.placeholder(tf.float32, (None,1)) self.ac = tf.placeholder(tf.float32, (None, self.act_space)) self.adv = tf.placeholder(tf.float32, [None]) # unused # specific to FeUdal self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim))
tensorflow.placeholder
13,859
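A minimal end-to-end sketch of tensorflow.placeholder feeding, assuming TF 1.x graph mode (names are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4], name='x')
y = x * 2.0

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1., 2., 3., 4.]]}))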
import tensorflow as tf from tensorflow.python.platform import googletest class IntegratedGradientsTest(googletest.TestCase): """ To run: "python -m saliency.integrated_gradients_test" from the PAIR-code/saliency directory. """ def testIntegratedGradientsGetMask(self): with tf.Graph().as_default() as graph: x = tf.placeholder(shape=[None, 3], dtype=tf.float32) y = 5 * x[:, 0] + x[:, 0] * x[:, 1] + tf.sin(x[:, 2]) with tf.Session() as sess: # Calculate the value of `y` at the baseline. x_baseline_val = np.array([[0.5, 0.8, 1.0]], dtype=np.float) y_baseline_val = sess.run(y, feed_dict={x: x_baseline_val}) # Calculate the value of `y` at the input. x_input_val = np.array([[1.0, 2.0, 3.0]], dtype=np.float) y_input_val = sess.run(y, feed_dict={x: x_input_val})
tensorflow.Graph
13,860
import tensorflow as tf for direction in ['forward', 'backward']: if direction == 'forward': layer_input = self.embedding else: layer_input = tf.reverse_sequence( self.embedding, sequence_lengths, seq_axis=1, batch_axis=0 ) for i in range(n_lstm_layers): if projection_dim < lstm_dim: # are projecting down output lstm_cell = tf.nn.rnn_cell.LSTMCell( lstm_dim, num_proj=projection_dim, cell_clip=cell_clip, proj_clip=proj_clip) else: lstm_cell = tf.nn.rnn_cell.LSTMCell( lstm_dim, cell_clip=cell_clip, proj_clip=proj_clip) if use_skip_connections: # ResidualWrapper adds inputs to outputs if i == 0: # don't add skip connection from token embedding to # 1st layer output pass
tensorflow.nn.rnn_cell.LSTMCell
13,861
import tensorflow as tf output = tf.nn.relu(pre_activation, name=scope.name) return output # 定义一个池化层,默认为max_pooling def max_pool_2x2(self,name, x): with tf.variable_scope(name) as scope: maxpool = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME') return maxpool # 创建DQN def creat_network(self):
tensorflow.variable_scope
13,862
import tensorflow as tf with tf.name_scope('pooling_for_un_head'): undep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1]) # [bs, sluh, sld] unhead_idxs = tf.tile(tf.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep]) # [bs, sluh, sld] if direction is None:
tensorflow.expand_dims
13,863
import tensorflow as tf return crop def random_apply(fn, image, prob=1.): b, *_ = image.get_shape().as_list() chance = tf.less(tf.random_uniform([b], 0, 1.0), prob) return tf.where(chance, fn(image), tf.identity(image)) def color_distortion(image, s=1.0): lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image x = tf.image.random_brightness(x, max_delta=0.8*s) x = tf.image.random_contrast(x, lower=lower, upper=upper) x = tf.image.random_saturation(x, lower=lower, upper=upper) x = tf.image.random_hue(x, max_delta=0.2*s) x = tf.clip_by_value(x, 0, 1) return x def color_drop(image): image = tf.image.rgb_to_grayscale(image) image = tf.tile(image, [1, 1, 1, 3]) return image # pylint: disable=not-callable @gin.configurable(blacklist=["kwargs"]) class CLGAN(modular_gan.ModularGAN):
tensorflow.image.random_hue
13,864
import tensorflow as tf @dynamic_batching.batch_fn_with_options(maximum_batch_size=2) def f(a, b): batch_size = tf.shape(a)[0] return a + b, tf.tile([batch_size], [batch_size]) outputs = [ f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), ] tf.train.start_queue_runners() results = session.run(outputs) for value, batch_size in results:
tensorflow.constant
13,865
import tensorflow as tf def _GetFakeDistribution(self): class FakeDistribution(tfd.Distribution): """Fake Distribution for testing _set_sample_static_shape.""" def __init__(self, batch_shape=None, event_shape=None): self._static_batch_shape = tf.TensorShape(batch_shape) self._static_event_shape = tf.TensorShape(event_shape) super(FakeDistribution, self).__init__( dtype=tf.float32, reparameterization_type=tfd.NOT_REPARAMETERIZED, validate_args=True, allow_nan_stats=True,
tensorflow.TensorShape
13,866
import tensorflow as tf facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
tensorflow.shape
13,867
import tensorflow as tf

hparams.n_samples = sample_size
tf.reset_default_graph()
with tf.Graph().as_default():
    energy_fn, _, _ = l2hmc.get_scg_energy_fn()
tensorflow.Graph
13,868
import tensorflow as tf if bias: b = bias_variable([shape[-1]]) h = h + b return h def deconv2d(x, shape, output_shape, name, bias=False, stride=2, padding='SAME'): with tf.variable_scope(name): W = weight_variable(shape) h = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-2]]) h = h + b return h def conv3d(x, shape, name, bias=False, stride=2, padding='SAME'): with tf.variable_scope(name): W = weight_variable(shape) h = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-1]]) h = h + b return h def deconv3d(x, shape, output_shape, name, bias=False, stride=2, padding='SAME'): with tf.variable_scope(name): W = weight_variable(shape) h = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, stride, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-2]]) h = h + b
tensorflow.variable_scope
13,869
import tensorflow as tf

v = variable_scope.get_variable("v", [options.attention_vec_size])
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
tensorflow.expand_dims
13,870
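A minimal sketch of tensorflow.expand_dims, which the snippet above applies twice to add two leading axes (shapes are illustrative):

import tensorflow as tf

x = tf.constant([1, 2, 3])   # shape (3,)
row = tf.expand_dims(x, 0)   # shape (1, 3)
col = tf.expand_dims(x, -1)  # shape (3, 1)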
import tensorflow as tf

for classes, sdfs, poses in [(predicted_classes, predicted_sdfs, predicted_poses)]:
    for i in range(classes.shape[0]):
        sdf = tf.expand_dims(sdfs[i], -1)
        sdf = sdf * -1.0  # inside positive, outside zero
        samples_object = centernet_utils.transform_pointcloud(
            tf.reshape(samples_world, [1, 1, -1, 3]),
tensorflow.expand_dims
13,871
import tensorflow as tf 'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'), 'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'), 'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'), 'degrees': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), } placeholders.update({ 'adj_mats_%d,%d,%d' % (i, j, k): tf.sparse_placeholder(tf.float32)
tensorflow.placeholder_with_default
13,872
import tensorflow as tf """ # Reshape input to (batch_size, num_nodes, input_dim) output_size = self._num_units batch_size = inputs.get_shape()[0].value inputs = tf.reshape(inputs, [batch_size, self._num_nodes, -1]) input_size = inputs.get_shape()[2].value dtype = inputs.dtype x = inputs x0 = tf.transpose(x, perm=[1, 2,0]) # (num_nodes, total_arg_size, batch_size) x0 = tf.reshape(x0, shape=[self._num_nodes, input_size * batch_size]) x = tf.expand_dims(x0, axis=0) scope = tf.get_variable_scope() with tf.variable_scope(scope): if self._max_diffusion_step == 0: pass else: for support in self._supports: x1 = tf.sparse_tensor_dense_matmul(support, x0)
tensorflow.reshape
13,873
from tensorflow.python.ops import array_ops labels_sizes = set_ops.set_size(labels) return math_ops.minimum(labels_sizes, k, name=scope) # For dense Tensor, calculate scalar count based on last dimension, and # tile across labels shape. labels_shape = array_ops.shape(labels) labels_size = labels_shape[-1] num_relevant_scalar = math_ops.minimum(labels_size, k) return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
tensorflow.python.ops.array_ops.shape
13,874
from tensorflow.python.framework import tensor_shape

    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  @distribution_util.AppendDocstring(_poisson_sample_note)
tensorflow.python.framework.tensor_shape.scalar
13,875
import tensorflow as tf

with tf.device('/gpu:2'):
    d = tf.matmul(b, a)
    flat_d = tf.reshape(d, [-1])
tensorflow.reshape
13,876
import tensorflow as tf w = tf.get_variable(name='weights', trainable=is_pretrain, shape=[kernel_size[0],kernel_size[1],in_channels,out_channels], initializer=tf.contrib.layers.xavier_initializer()) b = tf.get_variable(name='bias', trainable=is_pretrain, shape=[out_channels], initializer=tf.constant_initializer(0.0)) x = tf.nn.conv2d(x,w,strides,padding='SAME',name='conv') x = tf.nn.bias_add(x,b,name='bias_add') x = tf.nn.relu(x,name='relu') return x def pool(layer_name, x, kernel_size=[1,2,2,1], strides=[1,2,2,1], is_max_pool=True): '''
tensorflow.nn.conv2d
13,877
import tensorflow as tf batch_size, nf = x.get_shape().as_list() h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1') activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel)) big = tf.eye(batch_size) big = tf.expand_dims(big, 1) abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2) mask = 1. - big masked = tf.exp(-abs_dif) * mask
tensorflow.expand_dims
13,878
import tensorflow as tf

# Create connected layers: fc1, fc2
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
                                    normalizer_fn=tf.contrib.layers.batch_norm,
tensorflow.contrib.framework.arg_scope
13,879
from tensorflow.contrib.framework import deprecated_arg_values

        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key].reshape(-1)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(self, x=None, input_fn=None, batch_size=None,
                    as_iterable=True):
tensorflow.contrib.framework.deprecated_arg_values
13,880
import tensorflow as tf def __call__(self, global_step): warmup_lr = self._params.warmup_learning_rate warmup_steps = self._params.warmup_steps init_lr = self._params.init_learning_rate lr_levels = self._params.learning_rate_levels lr_steps = self._params.learning_rate_steps linear_warmup = ( warmup_lr + tf.cast(global_step, dtype=tf.float32) / warmup_steps * (init_lr - warmup_lr)) learning_rate = tf.where(global_step < warmup_steps, linear_warmup, init_lr) for next_learning_rate, start_step in zip(lr_levels, lr_steps): learning_rate = tf.where(global_step >= start_step, next_learning_rate, learning_rate) return learning_rate
tensorflow.cast
13,881
import tensorflow as tf

lstm_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
tensorflow.lite.experimental.nn.dynamic_rnn
13,882
import tensorflow as tf

upper_pad = tf.maximum(0, image_height - cutout_center_height - length // 2)
left_pad = tf.maximum(0, cutout_center_width - length // 2)
right_pad = tf.maximum(0, image_width - cutout_center_width - length // 2)

cutout_shape = [image_height - (lower_pad + upper_pad),
                image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
    tf.zeros(cutout_shape, dtype=images.dtype),
    padding_dims, constant_values=1)
patch = tf.ones_like(images, dtype=images.dtype) * replace
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, num_channels])
images = tf.where(
    tf.equal(mask, 0),
tensorflow.zeros
13,883
import tensorflow as tf

if cfgs.ADD_BOX_IN_TENSORBOARD:
    detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
        img_batch=tf.expand_dims(img[0, :, :, :], axis=0),
        boxes=outputs[0],
        scores=outputs[1],
tensorflow.expand_dims
13,884
import tensorflow as tf dummy_scores = tf.zeros([k, 1]) # [k, 1] for i in range(self.config["coref_depth"]): with tf.variable_scope("coref_layer", reuse=(i > 0)): top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb] top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb) # [k, c] top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1] top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb] attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb] with tf.variable_scope("f"): f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb] top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb] top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1] top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c] top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c] same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c] non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1] pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c] dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1] top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1] loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k] loss = tf.reduce_sum(loss) # []
tensorflow.concat
13,885
import tensorflow as tf train_X_stds = np.std(train_X_reshaped, axis=0, keepdims=True) def standardization(x): x_reshaped = x.reshape([x.shape[0], -1]) result = (x_reshaped - train_X_means) / (train_X_stds + 1e-9) return result.reshape(x.shape) normalized_test_X = standardization(test_X) with tf.Session() as sess, tf.summary.FileWriter( "./tf_logs/fashion_minst_multi_task_learning/" + str(datetime.now().timestamp()), graph=tf.get_default_graph()) as f: sess.run(tf.global_variables_initializer()) # similar logic as mnist's next_batch() epoch = 0 index_in_epoch = 0 while epoch < n_epoch: for _ in range(m // n_batch_size + 1): start = index_in_epoch if start + n_batch_size > m: epoch += 1
tensorflow.get_default_graph
13,886
import tensorflow as tf # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else:
tensorflow.tensordot
13,887
import tensorflow as tf if linear_loss is not None: values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss", simple_value=linear_loss)) test_summary = tf.Summary(value=values) summary_writer.add_summary(test_summary, step) def time_string(): return datetime.now().strftime("%Y-%m-%d %H:%M") def model_train_mode(args, feeder, hparams, global_step): with tf.variable_scope("Tacotron_model", reuse=tf.AUTO_REUSE) as scope: model = create_model("Tacotron", hparams) model.initialize(feeder.inputs, feeder.input_lengths, feeder.speaker_embeddings, feeder.mel_targets, feeder.token_targets, targets_lengths=feeder.targets_lengths, global_step=global_step, is_training=True, split_infos=feeder.split_infos) model.add_loss() model.add_optimizer(global_step) stats = add_train_stats(model, hparams) return model, stats
tensorflow.variable_scope
13,888
import tensorflow as tf def testGraphExtension(self): self._testGraphExtensionSave() self._testGraphExtensionRestore() def testStrippedOpListDef(self): with self.test_session(): # Creates a graph. v0 = tf.Variable(0.0) var = tf.Variable(10.0) tf.add(v0, var) @function.Defun(x=tf.float32) def minus_one(x): return x - 1 minus_one(tf.identity(v0)) save = tf.train.Saver({"v0": v0})
tensorflow.Variable
13,889
import tensorflow as tf model_target_y = models(hps, images, FLAGS.RCE_train, logits=False) target_y64 = tf.argmin(model_target_y,axis=1) else: target_y64=target_labels target_y = tf.cast(target_y64, tf.int32) adversarial_sample = attacks.jsma.jsma(models, images, hps, RCE_train, target_y,epochs=epoch_jsma, eps=eps, clip_min=-0.5, clip_max=0.5, pair=False, min_proba=0.0) elif method=='smda': print('Attacking method is smda') if target_labels==None: print('Target label is the argmin label') model_target_y = models(hps, images, FLAGS.RCE_train, logits=False) target_y64 = tf.argmin(model_target_y,axis=1) else: target_y64=target_labels target_y = tf.cast(target_y64, tf.int32) adversarial_sample = attacks.smda.smda(models, images, hps, RCE_train, target_y, epochs=epoch_jsma, eps=eps, clip_min=-0.5, clip_max=0.5, min_proba=0.0) else: print('Not recognized method') adversarial_sample = None return adversarial_sample def tSNE_visual(hps,num_batch):
tensorflow.argmin
13,890
import tensorflow as tf tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas
tensorflow.nn.softmax
13,891
import tensorflow as tf tf.app.flags.DEFINE_integer('seed', 1, "initial random seed") tf.app.flags.DEFINE_bool('validation', False, "") tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch") tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch") tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch") tf.app.flags.DEFINE_integer('eval_freq', 5, "") tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training") tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay") tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch") tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial leanring rate") tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate") tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start") tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}") if FLAGS.dataset == 'cifar10': from cifar10 import inputs, unlabeled_inputs elif FLAGS.dataset == 'svhn': from svhn import inputs, unlabeled_inputs else: raise NotImplementedError NUM_EVAL_EXAMPLES = 5000
tensorflow.app.flags.DEFINE_string
13,892
import tensorflow as tf
tensorflow.get_logger
13,893
import tensorflow as tf tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
tensorflow.logging.info
13,894
from tensorflow.python.ops import math_ops

    math_ops.equal(
        array_ops.constant(0.0, dtype=dtypes.float64), denominator),
    lambda: array_ops.constant(0.0, dtype=dtypes.float64),
    lambda: math_ops.div(numerator, denominator),
    name=name)
tensorflow.python.ops.math_ops.div
13,895
import tensorflow as tf

Omega = tf.square(bounded - 1.0)
Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems))
tensorflow.reduce_sum
13,896
import tensorflow as tf def _create_model(self, train_triples): # Count unique items to determine embedding matrix sizes entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2])) rel_cnt = len(set(train_triples[:,1])) init_sd = 1.0 / np.sqrt(self.embedding_size) # Embedding variables entity_var_shape = [entity_cnt, self.embedding_size] rel_var_shape = [rel_cnt, self.embedding_size] entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd) rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd) # Ensure maxnorm constraints are initially satisfied entity_init = dense_maxnorm(entity_init, self.maxnorm) self.entity_embedding_vars = tf.Variable(entity_init) self.rel_embedding_vars = tf.Variable(rel_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input) tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) # Relationship vector acts as a translation in entity embedding space
tensorflow.truncated_normal
13,897
import tensorflow as tf aux_logits = self._add_fully_connected(X, (ch,), K, no_reg=True) return aux_logits def _compute_predictions(self, logits, classes): probs = tf.nn.softmax(logits) preds = tf.argmax(logits, axis=1, output_type=tf.int32) corrects = tf.equal(preds, classes) return (probs, corrects) def _compute_loss(self, logits, aux_logits_list, classes, **knobs): reg_decay = knobs['reg_decay'] aux_loss_mul = knobs['aux_loss_mul'] # Multiplier for auxiliary loss
tensorflow.equal
13,898
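A minimal sketch of the tensorflow.equal accuracy pattern used above (values are illustrative):

import tensorflow as tf

preds = tf.constant([1, 2, 3])
labels = tf.constant([1, 0, 3])
correct = tf.equal(preds, labels)                        # [True, False, True]
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))  # ~0.667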
import tensorflow as tf

    return [tf.assign(self.g, 1 / std), tf.assign(self.b, -1. * mu / std)]

require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init, _init, lambda: [self.g, self.b])

with tf.control_dependencies(init_ops):
    w = tf.expand_dims(self.g, axis=0) * tf.nn.l2_normalize(self.v, axis=0)
    return tf.matmul(input_var, w) + self.b

def get_variables(self):
    # TODO: self.v should be l2-normalized or not? / currently not.
    return {'v': self.v, 'b': self.b, 'g': self.g}
tensorflow.expand_dims
13,899