Dataset schema (one row per seed):
  seed      string, 25 to 2.89k characters   (code snippet in which the API is used)
  seed_api  string, 14 to 102 characters     (fully qualified TensorFlow API name)
  index     int64, 0 to 14.8k                (row index)
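Each row below pairs a code snippet (seed) with the fully qualified TensorFlow API it exercises (seed_api) and a running row index. As a rough, hypothetical sketch of how rows with this schema could be consumed -- the dataset's actual name and storage format are not stated here, so the file name below is made up:

  # Hypothetical sketch: iterate rows of the (seed, seed_api, index) schema
  # stored as JSON lines; "tf_api_seeds.jsonl" is a placeholder file name.
  import json

  with open("tf_api_seeds.jsonl") as f:
      for line in f:
          row = json.loads(line)
          print(row["index"], row["seed_api"], len(row["seed"]), "chars of code")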
import tensorflow as tf

# Output bias:
self.b_out = tf.get_variable('b_out', [N_out], initializer=b_out_initializer,
                             trainable=self.b_out_train)

# ------------------------------------------------
# Non-trainable variables:
# Overall connectivity and Dale's law matrices
# ------------------------------------------------

# Recurrent Dale's law weight matrix:
self.Dale_rec = tf.get_variable('Dale_rec', [N_rec, N_rec],
                                initializer=tf.constant_initializer(self.dale_rec),
                                trainable=False)

# Output Dale's law weight matrix:
self.Dale_out = tf.get_variable('Dale_out', [N_rec, N_rec],
                                initializer=tf.constant_initializer(self.dale_out),
                                trainable=False)

# Connectivity weight matrices:
self.input_Connectivity = tf.get_variable('input_Connectivity', [N_rec, N_in],
                                          initializer=tf.constant_initializer(
                                              self.input_connectivity_mask),
tensorflow.constant_initializer
7,900
import tensorflow as tf

        mode = session.run(
            global_mode(),
            feed_dict={tf.global_mode(): tf.estimator.ModeKeys.PREDICT})
        # mode == tf.estimator.ModeKeys.PREDICT
    """
    mode = tf.get_collection_ref(_GLOBAL_MODE_KEY)
    if len(mode) < 1:
        # mode_tensor = tf.placeholder(tf.string, name="global_mode")
        mode_tensor = tf.placeholder_with_default(
            input=tf.estimator.ModeKeys.TRAIN,
tensorflow.get_collection_ref
7,901
import tensorflow as tf

        class_predictions_with_background,
        tf.stack([combined_feature_map_shape[0],
tensorflow.stack
7,902
import tensorflow as tf

optimizer = tf.train.AdamOptimizer(rl_learning_rate)
tensorflow.train.AdamOptimizer
7,903
import tensorflow as tf

  writer.close()


def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
tensorflow.FixedLenFeature
7,904
import tensorflow as tf self._train_meta = list(np.array(self._metadata)[train_indices]) self._test_meta = list(np.array(self._metadata)[test_indices]) self.test_steps = len(self._test_meta) // hparams.wavenet_batch_size if hparams.wavenet_test_size is None: assert hparams.wavenet_test_batches == self.test_steps #Get conditioning status self.local_condition, self.global_condition = self._check_conditions() with tf.device('/cpu:0'): # Create placeholders for inputs and targets. Don't specify batch size because we want # to be able to feed different batch sizes at eval time. if is_scalar_input(hparams.input_type): input_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs') target_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets') target_type = tf.float32 else: input_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs') target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets') target_type = tf.int32
tensorflow.device
7,905
import tensorflow as tf return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes) return tf.nn.seq2seq.model_with_buckets( enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq, softmax_loss_function=SampledLoss) # Now we construct the copy model. batch_size = 8 inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)] with tf.variable_scope("root"): _, losses = SampleGRUSeq2Seq(inp, out, weights) updates = [] params = tf.global_variables() optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) for i in range(len(buckets)): full_grads = tf.gradients(losses[i], params) grads, _ = tf.clip_by_global_norm(full_grads, 30.0) update = optimizer.apply_gradients(zip(grads, params)) updates.append(update) sess.run([tf.global_variables_initializer()]) steps = 6 for _ in range(steps): bucket = random.choice(np.arange(len(buckets))) length = buckets[bucket][0] i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
tensorflow.global_variables
7,906
import tensorflow as tf y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a') if encoder.position_bias and input_length is not None and time is not None: src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1]) trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps]) src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps]) # - 1 pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2)) pos_feats = tf.log(1 + pos_feats) y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a') if encoder.attn_filters: filter_shape = [encoder.attn_filter_length * 2 + 1, 1, 1, encoder.attn_filters] filter_ = get_variable('filter', filter_shape) prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1])) conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME') conv = tf.squeeze(conv, axis=2) y += dense(conv, encoder.attn_size, use_bias=False, name='C_a') v = get_variable('v_a', [encoder.attn_size]) return tf.reduce_sum(v * tf.tanh(y), axis=2) def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs): with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)): if context is not None and encoder.use_context: state = tf.concat([state, context], axis=1) e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)
tensorflow.nn.conv2d
7,907
import tensorflow as tf

        scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
        x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])
tensorflow.reshape
7,908
import tensorflow as tf

        # (weight/bias or contractive)
        self.regularizers += list(chain.from_iterable(
            [self.custom_regularizers[r]
             for r in self._opts["regularizers"].keys()
             if len(self.custom_regularizers[r]) > 0]))

        self.update_ops += tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        # ac_train_regularizers = tf.get_collection(get_ac_collection_name("train"))
        # ac_validation_regularizers = tf.get_collection(get_ac_collection_name("validation"))
        # ac_test_regularizers = tf.get_collection(get_ac_collection_name("test"))
tensorflow.get_collection
7,909
import tensorflow as tf self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape) self.assertEqual((2, 2), res[0][0].h.shape) self.assertEqual((2, 2), res[0][1].c.shape) self.assertEqual((2, 2), res[0][1].h.shape) def testEmbeddingAttentionDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 cell = tf.nn.rnn_cell.GRUCell(2) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec, mem = tf.nn.seq2seq.embedding_attention_decoder( dec_inp, enc_state, attn_states, cell, num_symbols=4, embedding_size=2, output_size=3) sess.run([tf.global_variables_initializer()]) res = sess.run(dec)
tensorflow.nn.rnn_cell.GRUCell
7,910
import tensorflow as tf

# Also, we can limit the size of GPU memory used, with the following option
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)
tensorflow.Session
7,911
import tensorflow as tf indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2) # shape (batch_size, passage_length, 3) indices = tf.reshape(indices, [-1, 3]) #[batch_size * passage_length, 3] indices = tf.cast(indices, tf.int64) shape = [batch_size, passage_length, extended_vsize] shape = tf.cast(shape, tf.int64) attn_dist = tf.reshape(attn_dist, shape=[-1]) # [batch_size*passage_length] one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape) # [batch_size, passage_length, extended_vsize] if passage_mask is not None: passage_mask = tf.expand_dims(passage_mask, axis=-1) one_hot_spare_rep = one_hot_spare_rep * passage_mask one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1) # [batch_size, extended_vsize] vocab_dist = tf.add(vocab_dist, one_hot_spare_rep) if self.options.add_first_word_prob_for_phrase: vocab_dist = tf.nn.softmax(vocab_dist) # normalize return vocab_dist # [batch_size, extended_vsize] def linear(args, output_size, bias=True, bias_start=0.0, scope=None): if args is None or (isinstance(args, (list, tuple)) and not args): raise ValueError("`args` must be specified") if not isinstance(args, (list, tuple)): args = [args]
tensorflow.sparse_reduce_sum
7,912
import tensorflow as tf

    self.fetch_datasets()
    if FLAGS.model == AUTOENCODER:
      self.build_ae_model()
    elif FLAGS.model == PREDICTIVE:
      self.build_predictive_model()
    else:
      self.build_denoising_model()
    self._init_optimizer()

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      self._on_training_start(sess)

      try:
        for current_epoch in range(FLAGS.max_epochs):
          start = time.time()
          full_set_blur = len(self.train_set) < 50000
          ds = self._get_blurred_dataset() if full_set_blur else self.train_set

          if FLAGS.model == AUTOENCODER:
tensorflow.global_variables_initializer
7,913
import tensorflow as tf def SampledLoss(labels, inputs): labels = tf.reshape(labels, [-1, 1]) return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes) return tf.nn.seq2seq.model_with_buckets( enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq, softmax_loss_function=SampledLoss) # Now we construct the copy model. batch_size = 8 inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)] with tf.variable_scope("root"): _, losses = SampleGRUSeq2Seq(inp, out, weights) updates = [] params = tf.global_variables() optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) for i in range(len(buckets)): full_grads = tf.gradients(losses[i], params) grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
tensorflow.placeholder
7,914
import tensorflow as tf

        output = state[:, -cell_output_size:]

        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_1'):
                output, state = update(state, input_)
        elif decoder.update_first:
            output, state = update(state, input_, None, ids)
        elif decoder.generate_first:
            output, state = tf.cond(tf.equal(time, 0),
                                    lambda: (output, state),
                                    lambda: update(state, input_, context, ids))

        context, new_weights = look(time, output, input_, pos=pos,
                                    prev_weights=prev_weights, context=context)

        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_2'):
tensorflow.equal
7,915
import tensorflow as tf

    # dimension. These Tensors are implicitly concatenated to
    # [params['batch_size']].
    global_step_t = tf.reshape(global_step, [1])
    total_loss_t = tf.reshape(total_loss, [1])
tensorflow.reshape
7,916
import tensorflow as tf

    c_hot_flat = tf.reshape(
        c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size])
    h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means)
    h1 = tf.transpose(h1, perm=[1, 0, 2])
    h1 = tf.reshape(h1, shape=h1_shape)
    h1_shape[0] = self.hparams.batch_size
tensorflow.transpose
7,917
import tensorflow as tf

    with tf.name_scope(name, "click_loglikelihood"):
        ob_prob = tf.nn.softmax(propensity)
        rel_prob = tf.nn.softmax(train_output)
        click_prob = ob_prob * rel_prob
        click_prob_norm = click_prob / tf.reduce_sum(click_prob, axis=1, keep_dims=True)
        label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True)
        entropy = tf.reduce_sum(tf.math.log(click_prob_norm) * label_dis, 1)
    return tf.reduce_mean(entropy)
tensorflow.reduce_sum
7,918
import tensorflow as tf tf.shape(images)[1:3], scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype) outputs_to_predictions[output].append( tf.expand_dims(tf.nn.softmax(logits_reversed), 4)) for output in sorted(outputs_to_predictions): predictions = outputs_to_predictions[output] # Compute average prediction across different scales and flipped images. predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4) outputs_to_predictions[output] = tf.argmax(predictions, 3, output_type=tf.dtypes.int32) outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions) return outputs_to_predictions def predict_labels(images, model_options):
tensorflow.concat
7,919
import tensorflow as tf

    # NOTE: this is RELU specific
    def dOmega_dWrec(self):

        # states in shape timesteps, batch, n_rec
        states = self.states
        dxt_list = tf.gradients(self.error, states)

        # dxt_list[0] = tf.Print(dxt_list[0], [dxt_list[0]], "dxt 0: ")

        test = tf.gradients(states[0], states[-1])

        dxt = tf.stack(dxt_list)
        xt = tf.stack(states)

        num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt,
                                                    tf.transpose(
                                                        tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)),
                                                    axes=1) * \
              tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt))
        denom = dxt

        # sum over hidden units
        num = tf.reduce_sum(tf.square(num), axis=2)
        denom = tf.reduce_sum(tf.square(denom), axis=2)
tensorflow.stack
7,920
import tensorflow as tf return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch."""
tensorflow.zeros_initializer
7,921
import tensorflow as tf `session.run`) with these TensorFlow objects as the input. """ # Split the fetches to two structures. py_fetches, tf_fetches = [], [] placeholder_empty_tuple = () assert isinstance(fetches, list), 'fetches should be a list.' for fetch in fetches: if isinstance(fetch, dict): d_py, d_tf = py_utils.split_dict_py_tf(fetch) py_fetches.append(d_py) tf_fetches.append(d_tf) elif tf.is_tensor(fetch): py_fetches.append(None) tf_fetches.append(fetch) else: py_fetches.append(fetch) # This empty tuple is here as a marker to retain the value from # py_fetches, while keeping the list length same for simplicity of # reconstruction. This is effectively None, but self.evaluate does not # accept None as an input argument. tf_fetches.append(placeholder_empty_tuple)
tensorflow.is_tensor
7,922
import tensorflow as tf img_batch=img, boxes=outputs[0], scores=outputs[1], labels=outputs[2], method=1, is_csl=True) tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img) loss_dict = outputs[-1] total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu) if i == num_gpu - 1: regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses()) total_losses = total_losses + tf.add_n(regularization_losses) tf.get_variable_scope().reuse_variables() grads = optimizer.compute_gradients(total_losses) if cfgs.GRADIENT_CLIPPING_BY_NORM is not None: grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM) tower_grads.append(grads) self.log_printer(r3det_dcl, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph) if __name__ == '__main__': trainer = TrainR3DetDCL(cfgs) trainer.main()
tensorflow.add_n
7,923
import tensorflow as tf

        with tf.name_scope('AccumGradOptimizer'):
            ops = []
            for s, gv in zip(slots, grads_and_vars):
                g, v = gv
                ops.append(s.assign_add(g))
            update_counter = tf.assign_add(counter, 1, name='update_counter')
            update_slot_op = tf.group(update_counter, *ops, name='update_slot')

            def update_grad():
                update_op = self._opt.apply_gradients(slots_and_vars)
                with tf.control_dependencies([update_op]):
                    clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
                return tf.group(*clear_ops, name='update_grad')

            pred = tf.equal(tf.mod(counter, self._niter), 0)
            with tf.control_dependencies([update_slot_op]):
                if name is None:
                    name = 'cond_update_grad'
                op = tf.cond(pred, update_grad, tf.no_op, name=name).op
        return op


if __name__ == '__main__':
tensorflow.zeros_like
7,924
import tensorflow as tf

                    tf.summary.histogram('advantage', adv)
                    tf.summary.histogram('action_probability', self.mu_ph)

                    if tf_util.is_image(self.observation_space):
                        tf.summary.image('observation', train_model.obs_ph)
                    else:
                        tf.summary.histogram('observation', train_model.obs_ph)

                trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
                                                    epsilon=self.rprop_epsilon)
                _opt_op = trainer.apply_gradients(grads)
tensorflow.summary.histogram
7,925
from tensorflow.contrib.learn.python.learn.datasets import base base.shrink_csv(train_path, 1000) base.shrink_csv(test_path, 1000) train_path = train_path.replace('train.csv', 'train_small.csv') test_path = test_path.replace('test.csv', 'test_small.csv') else: module_path = os.path.dirname(__file__) train_path = os.path.join(module_path, 'data', 'text_train.csv') test_path = os.path.join(module_path, 'data', 'text_test.csv') train = base.load_csv_without_header( train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) test = base.load_csv_without_header( test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) return base.Datasets(train=train, validation=None, test=test)
tensorflow.contrib.learn.python.learn.datasets.base.Datasets
7,926
import tensorflow as tf

    #                             tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
    #                             name='rand_select_negtive')

    # include both selected negtive and all positive examples
    final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
    total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))

    # add mask for glabels and cls_pred here
    glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
    cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
tensorflow.cast
7,927
import tensorflow as tf Z = tf.layers.dense(Z, n_basis//2) # Define variational parameters alpha_mean = tf.get_variable('alpha_mean_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_std = tf.exp(alpha_logstd) # Compute epsilon from {n_samples} standard Gaussian # epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out]) epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out]) hyp_params = tf.get_variable('hyp_params_layer'+str(h), shape=[2], initializer=tf.random_normal_initializer()) l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1]) epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2 # Compute A_{h+1} A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1]) # Compute z_{h}A_{h+1} Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5) Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.) KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2. # Output layer else:
tensorflow.random_normal_initializer
7,928
import tensorflow as tf

def get_xavier_weights(filter_shape, poolsize=(2, 2), name=None):
    fan_in = np.prod(filter_shape[1:])
    fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) // np.prod(poolsize))

    low = -4*np.sqrt(6.0/(fan_in + fan_out))  # use 4 for sigmoid, 1 for tanh activation
    high = 4*np.sqrt(6.0/(fan_in + fan_out))
    weights = np.random.uniform(low=low, high=high, size=filter_shape)
    return safe_get(name, filter_shape, initializer=tf.constant_initializer(weights))


def get_he_weights(filter_shape, name=None):
    fan_in = np.prod(filter_shape[1:])

    stddev = np.sqrt(2.6/fan_in)
    weights = stddev * np.random.randn(filter_shape[0], filter_shape[1], filter_shape[2], filter_shape[3])
    return safe_get(name, filter_shape, initializer=tf.constant_initializer(weights))
tensorflow.constant_initializer
7,929
import tensorflow as tf sample["targets"].append(text_encoder.EOS_ID) sample["dist_targets"] = vocab.encode(sample["dist_targets"]) sample["dist_targets"].append(text_encoder.EOS_ID) yield sample def generate_samples(self, data_dir, tmp_dir, dataset_split): data_path = self.source_data_files(dataset_split) assert tf.gfile.Exists(data_path) return text_problems.text2text_distill_iterator(data_path + "inputs", data_path + "gold", data_path + "prediction")
tensorflow.gfile.Exists
7,930
import tensorflow as tf itms = input_training_masks_split[i] total_loss, model_loss = tower_loss(iis, isms, igms, itms, reuse_variables) batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)) reuse_variables = True grads = opt.compute_gradients(total_loss) tower_grads.append(grads) grads = average_gradients(tower_grads) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) summary_op = tf.summary.merge_all() # save moving average variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) # batch norm updates with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]): train_op = tf.no_op(name='train_op') saver = tf.train.Saver(tf.global_variables()) summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())
tensorflow.summary.merge_all
7,931
import tensorflow as tf

def minibatch_discrimination(x, n_kernels, dim_per_kernel, name):
    with tf.variable_scope(name):
        batch_size, nf = x.get_shape().as_list()
        h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1')
        activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel))

        big = tf.eye(batch_size)
        big = tf.expand_dims(big, 1)

        abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
        mask = 1. - big
        masked = tf.exp(-abs_dif) * mask

        def half(tens, second):
            m, n, _ = tens.get_shape().as_list()
            return tf.slice(tens, [0, 0, second*(batch_size/2)], [m, n, batch_size/2])

        f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0))
tensorflow.transpose
7,932
import tensorflow as tf

            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            #
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
                     batch_size_placeholder, control_placeholder, embeddings, label_batch, paths, actual_issame,
                     args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                     args.use_flipped_images, args.use_fixed_image_standardization, args.warmup_steps, args.max_steps)
tensorflow.train.Coordinator
7,933
import tensorflow as tf assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in assert (len(filter_dims) == 3) # height, width and num_channels out assert (len(stride_dims) == 2) # stride height and width num_channels_in = input_dims[-1] filter_h, filter_w, num_channels_out = filter_dims stride_h, stride_w = stride_dims with tf.variable_scope(scope): conv_weight = tf.Variable( tf.truncated_normal([filter_h, filter_w, num_channels_in, num_channels_out], stddev=0.1, dtype=tf.float32)) conv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32)) map = tf.nn.conv2d(input, conv_weight, strides=[1, stride_h, stride_w, 1], padding=padding, dilations=dilation) if bias is True: map = tf.nn.bias_add(map, conv_bias) if non_linear_fn is not None: activation = non_linear_fn(map) else: activation = map
tensorflow.zeros
7,934
import tensorflow as tf

        return 196.0 * 21.0 / 4096.0
    else:
        rec = tf.cast(kw * kh, tf.float32)
        n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
        ns = tf.range(0., n_max)
        ns_pow = tf.pow(2., ns)
        ks = tf.round(ns_pow / rec)
        diffs = tf.math.abs(ks / ns_pow - 1 / rec)
        n = tf.argmin(diffs)
        k = ks[n]
        scale = k / tf.pow(2., tf.cast(n, tf.float32))
        scale *= rec
tensorflow.round
7,935
from tensorflow.python.ops import math_ops array_ops.fill(self.batch_shape(), nan, name="nan")) else: return control_flow_ops.with_dependencies([ check_ops.assert_less( array_ops.ones((), self.dtype), self.alpha, message="mean not defined for components of self.alpha <= 1"), ], mean) @distribution_util.AppendDocstring( """Variance for inverse gamma is defined only for `alpha > 2`. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`.""") def _variance(self): var = (math_ops.square(self.beta) / (math_ops.square(self.alpha - 1.) * (self.alpha - 2.))) if self.allow_nan_stats: nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) return array_ops.where( self.alpha > 2., var, array_ops.fill(self.batch_shape(), nan, name="nan")) else: return control_flow_ops.with_dependencies([ check_ops.assert_less( constant_op.constant(2., dtype=self.dtype), self.alpha, message="variance not defined for components of alpha <= 2"), ], var)
tensorflow.python.ops.math_ops.square
7,936
import tensorflow as tf

      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 3), res[0].shape)

      # Test externally provided output projection.
      w = tf.get_variable("proj_w", [2, 5])
      b = tf.get_variable("proj_b", [5])
      with tf.variable_scope("proj_seq2seq"):
        dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
            output_projection=(w, b))
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 2), res[0].shape)
tensorflow.nn.seq2seq.embedding_tied_rnn_seq2seq
7,937
import tensorflow as tf [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t
tensorflow.FixedLenFeature
7,938
import tensorflow as tf _debug(self.conv4) self.conv4 = self._residual_block('conv4_2', self.conv4, 256) _debug(self.conv4) with tf.variable_scope('conv5_x'): self.conv5 = self._residual_block('conv5_1', self.conv4, 512, pool_first=True, strides=2) _debug(self.conv5) self.conv5 = self._residual_block('conv5_2', self.conv5, 512) _debug(self.conv5) if self.test_classification: with tf.variable_scope('logits'): print('Building unit: logits') self.score = tf.reduce_mean(self.conv5, axis=[1, 2]) self.score = self._fc('logits_dense', self.score, output_dim=self.num_classes, l2_strength=self.wd) print('logits-shape: ' + str(self.score.shape.as_list())) self.feed1 = self.conv4 self.feed2 = self.conv3 self.encoder_1 = self.conv2 self.encoder_2 = self.conv3 self.encoder_3 = self.conv4 self.encoder_4 = self.conv5 print("\nEncoder RESNET is built successfully\n\n")
tensorflow.reduce_mean
7,939
import tensorflow as tf

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
tensorflow.constant
7,940
import tensorflow as tf

            false_fn=lambda: (
                tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0),
                tf.constant(1, dtype=tf.int64))
        )
        vz.insert(u, r)

    kk = tf.Variable(0, dtype=tf.int64)
    for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
        for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
            to_add = tf.cond(
                tf.greater(vz.lookup(vx_keys[i]), -1),
                true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
                false_fn=lambda: tf.constant(0, dtype=tf.int64)
            )
            kk = tf.math.add(kk, to_add)
tensorflow.size
7,941
import tensorflow as tf # visualization fig = vis.get_figure() fig.canvas.draw() self.vis_placeholder = tf.placeholder(tf.uint8, ut.fig2rgb_array(fig).shape) self.vis_summary = tf.summary.image('visualization', self.vis_placeholder) # embedding dists = l2(self.embedding_test[:-1] - self.embedding_test[1:]) self.dist = dists metrics = [] metrics.append(tf.summary.histogram('point_distance', dists)) metrics.append(tf.summary.scalar('training/trajectory_length', tf.reduce_sum(dists))) self.blur_ph = tf.placeholder(dtype=tf.float32) metrics.append(tf.summary.scalar('training/blur_sigma', self.blur_ph)) pred = self.embedding_test[1:-1]*2 - self.embedding_test[0:-2] pred_error = l2(pred - self.embedding_test[2:]) mean_dist, mean_pred_error = tf.reduce_mean(dists), tf.reduce_mean(pred_error) improvement = (mean_dist-mean_pred_error)/mean_dist pairwise_improvement = tf.nn.relu(dists[1:] - pred_error) pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype) self.pairwise_improvement_bool = pairwise_improvement_bool
tensorflow.placeholder
7,942
import tensorflow as tf if start >= self.train_data_len: start = 0 new_epoch_flag = True yield x_batch, y_batch def init_summaries(self): """ Create the summary part of the graph :return: """ with tf.variable_scope('train-summary-per-epoch'): for tag in self.scalar_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag) self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag]) for tag, shape in self.images_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag) self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10) def add_summary(self, step, summaries_dict=None, summaries_merged=None): """ Add the summaries to tensorboard :param step: :param summaries_dict: :param summaries_merged: :return: """ if summaries_dict is not None:
tensorflow.summary.scalar
7,943
import tensorflow as tf """ n_bits_x = self.hparams.n_bits_x n_bins = 2**n_bits_x x = tf.cast(x, dtype=tf.float32) if n_bits_x < 8: x = tf.floor(x / 2 ** (8 - n_bits_x))
tensorflow.cast
7,944
import tensorflow as tf

        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
tensorflow.placeholder
7,945
import tensorflow as tf

    return out


def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        out = layers.flatten(out)
        # stddev = 1/n, where n = number of inputs
        gauss_initializer = initializers.xavier_initializer(uniform=False)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(
                out,
                num_outputs=num_actions,
                activation_fn=tf.nn.relu,
                biases_initializer=None,
                weights_initializer=gauss_initializer,
                weights_regularizer=None)
        return out
tensorflow.variable_scope
7,946
import tensorflow as tf

    vdict = {}
    pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
    sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
    two = tf.constant(2, dtype=tf.float64, name="two")
    one = tf.constant(1, dtype=tf.float64, name="one")
    zero = tf.constant(0, dtype=tf.float64, name="zero")
tensorflow.constant
7,947
import tensorflow as tf activation=tf.nn.relu, # initializer=tf.truncated_normal_initializer(stddev=0.1), # initializer=tf.random_uniform_initializer(-0.003, 0.003), initializer=tf.contrib.layers.xavier_initializer(), state_is_tuple=True) if not forward_only: lstm_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_cell, output_keep_prob=self.dropout_output) # lstm_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell] * 4, state_is_tuple=True) if not forward_only: embed_inputs = tf.nn.dropout(embed_inputs, keep_prob=self.dropout_input) rnn_outputs, output_states = tf.nn.dynamic_rnn( cell=lstm_cell, inputs=embed_inputs, dtype=tf.float32, sequence_length=self.seq_len, ) ## (batch_size, seq_len, num_hidden) # rnn_outputs = tf.transpose(rnn_outputs, perm=[1,0,2]) ## (seq_len, batch_size, num_hidden) NOT NEEDED ANY MORE
tensorflow.nn.dropout
7,948
import tensorflow as tf

      # Add 0 dimension to the gradients to represent the tower.
      expanded_g = tf.expand_dims(g, 0)

      # Append on a 'tower' dimension which we will average over below.
      grads.append(expanded_g)

    # Average over the 'tower' dimension.
    grad = tf.concat(axis=0, values=grads)
    grad = tf.reduce_mean(grad, 0)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
tensorflow.concat
7,949
import tensorflow as tf loss = 0.0 def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument """Exit the loop either if reach decode_length or EOS.""" length = common_layers.shape_list(result)[1] not_overflow = length < decode_length if self._problem_hparams.stop_at_eos: def fn_not_eos(): return tf.not_equal( # Check if the last predicted element is a EOS tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID) not_eos = tf.cond( # We only check for early stoping if there is at least 1 element ( # otherwise not_eos will crash) tf.not_equal(length, 0), fn_not_eos, lambda: True, ) return tf.cond( tf.equal(batch_size, 1),
tensorflow.squeeze
7,950
import tensorflow as tf Returns: * masks: list of masks for weight sparsification * prune_op: pruning operation """ masks, prune_ops = [], [] with tf.variable_scope(self.mask_scope): for var, var_name_n_prune_ratio in zip(self.maskable_vars, self.var_names_n_prune_ratios): # obtain the dynamic pruning ratio assert var.name == var_name_n_prune_ratio[0], \ 'unmatched variable names: %s vs. %s' % (var.name, var_name_n_prune_ratio[0]) prune_ratio = self.__calc_prune_ratio_dyn(var_name_n_prune_ratio[1]) # create a mask and non-masked backup for each variable name = var.name.replace(':0', '_mask') mask = tf.get_variable(name, initializer=tf.ones(var.shape), trainable=False) name = var.name.replace(':0', '_var_bkup') var_bkup = tf.get_variable(name, initializer=var.initialized_value(), trainable=False) # create update operations var_bkup_update_op = var_bkup.assign(tf.where(mask > 0.5, var, var_bkup)) with tf.control_dependencies([var_bkup_update_op]): mask_thres = tf.contrib.distributions.percentile(tf.abs(var_bkup), prune_ratio * 100) mask_update_op = mask.assign(tf.cast(tf.abs(var_bkup) > mask_thres, tf.float32)) with tf.control_dependencies([mask_update_op]): prune_op = var.assign(var_bkup * mask) # record pruning masks & operations masks += [mask] prune_ops += [prune_op]
tensorflow.ones
7,951
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

    for _ in range(num_epochs):
      linear_regression.fit(model, dataset, optimizer)
tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.fit
7,952
import tensorflow as tf

        val_summaries.append(tf.summary.scalar(key, var))
      for key, var in self._score_summaries.items():  # self._score_summaries.update(self._anchor_targets) self._score_summaries.update(self._proposal_targets)
        self._add_score_summary(key, var)
      for var in self._act_summaries:  # add the head network and RPN layers
        self._add_act_summary(var)
      '''
      for var in tf.trainable_variables():
        self._train_summaries.append(var)
      '''
      for var in self._train_summaries:  # add tf.trainable_variables(); track how tensor distributions evolve over training iterations
        self._add_train_summary(var)

      self._summary_op = tf.summary.merge_all()  # tf.summary.merge_all() gathers all of the summary-logging ops
      if not testing:
        self._summary_op_val = tf.summary.merge(val_summaries)

    return layers_to_output

  def get_variables_to_restore(self, variables, var_keep_dic):
    raise NotImplementedError

  def fix_variables(self, sess, pretrained_model):
    raise NotImplementedError

  # Extract the head feature maps, for example for vgg16 it is conv5_3
  # only useful during testing mode
  def extract_head(self, sess, image):
    feed_dict = {self._image: image}
tensorflow.summary.merge
7,953
import tensorflow as tf

      def build_no_ops():
        return (tf.no_op(), tf.no_op())
tensorflow.no_op
7,954
import tensorflow as tf

    initial_state = get_variable(shape=[cell_state_size], name='initial_state')
    initial_state = tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
tensorflow.expand_dims
7,955
import tensorflow as tf

    indices_input = tf.reshape(indices_input, [2, -1])
    indices_input = tf.transpose(indices_input)

    res = tf.sparse_to_dense(
        indices_input, [n_elem, n_indices], 1., 0., name="flat_one_hot")
tensorflow.sparse_to_dense
7,956
import tensorflow as tf """Run cnn in benchmark mode. When forward_only on, it forwards CNN.""" (enqueue_ops, fetches) = self._build_model() main_fetch_group = tf.group(*fetches) execution_barrier = None if self.job_name and not FLAGS.cross_replica_sync: execution_barrier = self.add_sync_queues_and_barrier( 'execution_barrier_', []) global_step = tf.contrib.framework.get_global_step() with tf.device(self.global_step_device): with tf.control_dependencies([main_fetch_group]): inc_global_step = global_step.assign_add(1) fetches.append(inc_global_step) if self.job_name and FLAGS.cross_replica_sync: # Block all replicas until all replicas are ready for next step. fetches.append(self.add_sync_queues_and_barrier( 'sync_queues_step_end_', [main_fetch_group]))
tensorflow.device
7,957
import tensorflow as tf

      # Test that previous-feeding model ignores inputs after the first.
      dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
      with tf.variable_scope("other"):
        d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
            feed_previous=tf.constant(True))
      sess.run([tf.global_variables_initializer()])
      tf.get_variable_scope().reuse_variables()
      d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
          enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
tensorflow.constant
7,958
from tensorflow.python.ops import math_ops

    loss_unweighted = self._loss_fn(logits, target)
    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name=name)
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.reduce_mean(loss_weighted, name=name)
tensorflow.python.ops.math_ops.reduce_mean
7,959
import tensorflow as tf import tensorflow as tf import numpy as np class BaseModel(object): """Holds code shared between all the different model variants.""" def __init__(self, batch_size, max_sequence_len, out_vocab_size, c2v, dropout_keep_prob=0.0): self._batch_size = batch_size self._dropout_keep_prob = dropout_keep_prob self._out_vocab_size = out_vocab_size self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len], name='x') self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size], name='y') # The bidirectional rnn code requires seq_lens as int64 self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens') self.example_weights = tf.placeholder(tf.float32, [batch_size], name='example_weights') embeddings = c2v.GetEmbeddings(self.x) self._inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, max_sequence_len, embeddings)] # Need to prepare a mask to zero out the padding symbols. # Make a batch_size x max_sequence_len matrix where each # row contains the length repeated max_sequence_len times.
tensorflow.placeholder
7,960
import tensorflow as tf 'biases', [num_out_channels], self.data_type, tf.constant_initializer(0.0)) biased = tf.reshape( tf.nn.bias_add( conv, biases, data_format=self.data_format), conv.get_shape()) else: self.top_layer = conv self.top_size = num_out_channels biased = self.batch_norm(**self.batch_norm_config) if activation == 'relu': conv1 = tf.nn.relu(biased) elif activation == 'linear' or activation is None: conv1 = biased elif activation == 'tanh': conv1 = tf.nn.tanh(biased) else: raise KeyError('Invalid activation type \'%s\'' % activation) self.top_layer = conv1 self.top_size = num_out_channels return conv1 def mpool(self, k_height, k_width, d_height=2, d_width=2, mode='VALID', input_layer=None, num_channels_in=None):
tensorflow.nn.tanh
7,961
import tensorflow as tf

    with self.test_session() as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
      tf.initialize_all_variables().run()

      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
tensorflow.train.Saver
7,962
import tensorflow as tf # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Start a second session. In that session the variables # have not been initialized either. with self.test_session(graph=tf.Graph()) as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") save = tf.train.Saver([v0, v1]) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path)
tensorflow.Variable
7,963
from tensorflow.contrib.slim.python.slim import queues width = 280 with self.cached_session(): test_dataset = _create_tfrecord_dataset(dataset_dir) provider = dataset_data_provider.DatasetDataProvider(test_dataset) key, image, label = provider.get(['record_key', 'image', 'label']) image = _resize_image(image, height, width) with session.Session('') as sess: with queues.QueueRunners(sess): key, image, label = sess.run([key, image, label]) split_key = key.decode('utf-8').split(':') self.assertEqual(2, len(split_key)) self.assertEqual(test_dataset.data_sources[0], split_key[0]) self.assertTrue(split_key[1].isdigit()) self.assertListEqual([height, width, 3], list(image.shape)) self.assertListEqual([1], list(label.shape))
tensorflow.contrib.slim.python.slim.queues.QueueRunners
7,964
import tensorflow as tf

    to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
    return to_tf

  def _softmax_layer(self, bottom, name):
    if name == 'rpn_cls_prob_reshape':
      input_shape = tf.shape(bottom)
      # Using -1 in tf.reshape(): when a dimension is unknown, -1 lets Python
      # infer it from the original array and the other dimension sizes.
      # Each row holds the foreground/background scores of one anchor: first the
      # first anchor type at every location, then the second anchor type, and so on.
      bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
      reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
      return tf.reshape(reshaped_score, input_shape)  # [1,none,none,2]
    return tf.nn.softmax(bottom, name=name)

  def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
    with tf.variable_scope(name):
      rois, rpn_scores = tf.py_func(proposal_top_layer,
                                    [rpn_cls_prob, rpn_bbox_pred, self._im_info,
                                     self._feat_stride, self._anchors, self._num_anchors],
                                    [tf.float32, tf.float32])
tensorflow.nn.softmax
7,965
import tensorflow as tf

    qr = tf.train.QueueRunner(input_queue, [count_up_to])
    tf.initialize_all_variables()
    # Creates a saver.
    save = tf.train.Saver({"v0": v0})
    # Adds a set of collections.
    tf.add_to_collection("int_collection", 3)
tensorflow.train.Saver
7,966
from tensorflow.contrib.layers.python.layers import feature_column_ops def _get_linear_feature_columns(self): if not self._linear_feature_columns: return None feature_column_ops.check_feature_columns(self._linear_feature_columns) return sorted(set(self._linear_feature_columns), key=lambda x: x.key) def _get_dnn_feature_columns(self): if not self._dnn_feature_columns: return None feature_column_ops.check_feature_columns(self._dnn_feature_columns) return sorted(set(self._dnn_feature_columns), key=lambda x: x.key) def _dnn_logits(self, features, is_training): return self._dnn_model.build_model( features, self._dnn_feature_columns, is_training) def _linear_logits(self, features, is_training): return self._linear_model.build_model( features, self._linear_feature_columns, is_training)
tensorflow.contrib.layers.python.layers.feature_column_ops.check_feature_columns
7,967
import tensorflow as tf

        ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6)
        ratio = tf.clip_by_value(ratio, 0, 10)
        surr1 = batch['advantage'] * ratio
        surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
        loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
        loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
        loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
        loss = loss_pg + loss_vf + loss_entropy

        opt = tf.train.AdamOptimizer(self.LR)
        self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
tensorflow.square
7,968
import tensorflow as tf

    return loss


def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)

    loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small))

    if hard_ratio < 1.0:
        hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32)
        loss = tf.reshape(loss, [-1])
        hard_loss, _ = tf.math.top_k(loss, k=hard_num)
        return hard_loss
    return loss
tensorflow.where
7,969
import tensorflow as tf state_is_tuple=True) inp = tf.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state, attn_states, cell, output_size=4) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape) self.assertEqual((2, 2), res[0][0].h.shape)
tensorflow.global_variables_initializer
7,970
import tensorflow as tf

      model = p.cls(p)
      model._task.CreateChild('a', layers.BatchNormLayer.Params().Set(name='a', dim=1))
      model._task._train_op = tf.no_op()
      model._task.ApplyExponentialMovingAverage(model.ema)
      with tf.variable_scope('', reuse=True):
        beta = tf.get_variable('a/beta/var')
        mean = tf.get_variable('a/moving_mean/var')
        self.assertIsNotNone(model.ema.average(beta))
        self.assertIsNone(model.ema.average(mean))


class MultiTaskModelTest(tf.test.TestCase):
tensorflow.get_variable
7,971
import tensorflow as tf

    pr_trainable = calc_prune_ratio(self.trainable_vars)
    pr_maskable = calc_prune_ratio(self.maskable_vars)
    tf.summary.scalar('pr_trainable', pr_trainable)
    tf.summary.scalar('pr_maskable', pr_maskable)
tensorflow.summary.scalar
7,972
import tensorflow as tf

        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()
tensorflow.train.Scaffold
7,973
import tensorflow as tf Returns ------- act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) # Unmodified Q. q_values = q_func(observations_ph.get(), num_actions, scope="q_func") # Perturbable Q used for the actual rollout. q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") # We have to wrap this code into a function due to the way tf.cond() works. See # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for # a more detailed discussion.
tensorflow.placeholder
7,974
import tensorflow as tf self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy()) self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy()) self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy()) self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy()) self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy()) def test_instance_non_maximum_suppression_1d_scores_empty_inputs(self): masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32) scores = tf.constant([], dtype=tf.float32) classes = tf.constant([], dtype=tf.int32) (nms_masks1, nms_scores1, nms_classes1, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65,
tensorflow.constant
7,975
import tensorflow as tf

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
tensorflow.contrib.tpu.TPUEstimatorSpec
7,976
import tensorflow as tf

  # and due to the fact that the rightmost boundary is essentially ignored.
  boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001

  bucket_indices = tf_utils.assign_buckets(
      tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries))

  bucket_vocab, counts = count_per_key(tf.strings.as_string(bucket_indices))

  counts = tf_utils.reorder_histogram(bucket_vocab, counts,
                                      tf.size(boundaries) - 1)
tensorflow.cast
7,977
import tensorflow as tf pmm1 = x * (2.0 * tf.cast(m, dtype=x.dtype) + 1.0) * pmm # if, l == m + 1 return pmm1, otherwise lift to the next band. res = tf.where( tf.equal(l, m + 1), pmm1, _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1)) return res def evaluate_legendre_polynomial(degree_l: TensorLike, order_m: TensorLike, x: TensorLike) -> TensorLike: degree_l = tf.convert_to_tensor(value=degree_l) order_m = tf.convert_to_tensor(value=order_m) x = tf.convert_to_tensor(value=x) pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x) return tf.where( tf.equal(degree_l, order_m), pmm, _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm)) def _spherical_harmonics_normalization(l, m, var_type=tf.float64): l = tf.cast(l, dtype=var_type) m = tf.cast(m, dtype=var_type)
tensorflow.convert_to_tensor
7,978
import tensorflow as tf

        mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))
        image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))
        return image

    def _add_drop_path(self, X, keep_prob):
        with tf.variable_scope('drop_path'):
            batch_size = tf.shape(X)[0]
            noise_shape = (batch_size, 1, 1, 1)
            random_tensor = keep_prob + tf.random_uniform(noise_shape, dtype=tf.float32)
            binary_tensor = tf.floor(random_tensor)
            X = (X / keep_prob) * binary_tensor
        return X
tensorflow.shape
7,979
import tensorflow as tf train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec(
tensorflow.metrics.mean
7,980
import tensorflow as tf def maybe_avg(v): if ema is not None and not init: v = tf.cond(training, lambda: v, lambda: ema.average(v)) return v if init: x = tf.nn.conv2d(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2]), [1] + list(stride) + [1], pad) init_scale=.01 m_init, v_init = tf.nn.moments(x, [0,1,2]) scale_init = init_scale / tf.sqrt(v_init + 1e-10) with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]): x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters])) else: V = maybe_avg(V) g = maybe_avg(g) b = maybe_avg(b) # use weight normalization (Salimans & Kingma, 2016) W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])
tensorflow.sqrt
7,981
import tensorflow as tf tf.train.get_or_create_global_step() def model_loss(labels, chars, sequence_length): predictions = model((chars, sequence_length), training=True) loss_value = loss(labels, predictions) tf.contrib.summary.scalar("loss", loss_value) return loss_value for (batch, (labels, chars, sequence_length)) in enumerate( tfe.Iterator(train_data)): with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval): batch_model_loss = functools.partial(model_loss, labels, chars, sequence_length) optimizer.minimize( batch_model_loss, global_step=tf.train.get_global_step()) if log_interval and batch % log_interval == 0: print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss())) SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv" SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
tensorflow.contrib.summary.record_summaries_every_n_global_steps
7,982
import tensorflow as tf layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None else: layer = tf.contrib.layers.batch_norm(layer, is_training=False, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None elif norm_type == 'layer_norm': # layer_norm
tensorflow.contrib.layers.batch_norm
7,983
import tensorflow as tf

    original_tensor_shape = tf.shape(tensor)
    in_dim = int(tensor.get_shape()[-1])
    rank = _rank(tensor)
    if rank > 2:
        # -- time distributed dense
        tensor = tf.reshape(tensor, shape=(-1, in_dim))

    name = opts.get("name", "")

    if weight is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        weight = tf.get_variable("{}_dense_W".format(name),
                                 initializer=initializer(shape=(in_dim, hidden_dims)))
    if bias is None:
        bias = tf.get_variable("{}_dense_b".format(name),
                               initializer=tf.zeros(shape=hidden_dims))

    out = tf.add(tf.matmul(tensor, weight), bias)

    if rank > 2:
        # reshape back to time dimension
        out = tf.reshape(out, shape=original_tensor_shape)
tensorflow.contrib.layers.xavier_initializer
7,984
import tensorflow as tf

import ultra.utils as utils


def sigmoid_prob(logits):
    return tf.sigmoid(logits - tf.reduce_mean(logits, -1, keep_dims=True))


class DLA_atten(BaseAlgorithm):
    """The Dual Learning Algorithm for unbiased learning to rank.
tensorflow.reduce_mean
7,985
from tensorflow.python.training import saver as saver_lib

          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Currently we are not syncronized with saving checkpoints, which leads to
      # runtime errors when we are calling export on the same global step.
      # Exports depend on saved checkpoints for constructing the graph and
      # getting the global step from the graph instance saved in the checkpoint.
      # If the checkpoint is stale with respect to current step, the global step
      # is taken to be the last saved checkpoint's global step and exporter
      # doesn't export the same checkpoint again with the following error.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.")

  def end(self, session=None):
    super(ExportMonitor, self).end(session=session)
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.info("Skipping export at the end since model has not been saved "
                   "yet.")
      return
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
tensorflow.python.training.saver.latest_checkpoint
7,986
import tensorflow as tf

        name: Optional scope/name for op_scope.

    Returns:
        the l1+L2 loss op.
    """
    with tf.name_scope(name):
        weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype,
                                           name='weight_l1')
        weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype,
                                           name='weight_l2')
        reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)),
                             name='value_l1')
        reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var),
                             name='value_l2')
        return tf.add(reg_l1, reg_l2, name='value')


def l1_regularizer(scale, name='l1_regularizer'):
    """Returns a function that can be used to apply L1 regularization to weights.

    L1 regularization encourages sparsity.
tensorflow.abs
7,987
import tensorflow as tf

        output_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                         scope="task_dependent")
        output_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                         scope="task_independent/Variable_1")
        var_list = [output_vars1, output_vars2]

        train_op = tf.train.AdamOptimizer(
            # learning_rate=self.params["learning_rate"]
            # **self.params.get("optimizer_params", {})
            # learning_rate=learning_rate
tensorflow.train.AdamOptimizer
7,988
import tensorflow as tf

    # set up weights for projection
    if use_proj:
        assert n_filters > projection_dim
        with tf.variable_scope('CNN_proj') as scope:
            W_proj_cnn = tf.get_variable(
                "W_proj", [n_filters, projection_dim],
                initializer=tf.random_normal_initializer(
                    mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
                dtype=DTYPE)
            b_proj_cnn = tf.get_variable(
                "b_proj", [projection_dim],
                initializer=tf.constant_initializer(0.0),
                dtype=DTYPE)

    # apply highways layers
    def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
        carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
        transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
        return carry_gate * transform_gate + (1.0 - carry_gate) * x

    if use_highway:
        highway_dim = n_filters
tensorflow.constant_initializer
7,989
from tensorflow.python.ops import math_ops

    return {}

  def get_eval_ops(self, features, logits, labels, metrics=None):
    loss = self.loss(logits, labels, features)
    result = {"loss": metric_ops.streaming_mean(loss)}

    # Adds default metrics.
    if metrics is None:
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}

    predictions = math_ops.sigmoid(logits)
    labels_float = math_ops.to_float(labels)

    default_metrics = self._default_eval_metrics()
    for metric_name, metric_op in default_metrics.items():
      result[metric_name] = metric_op(predictions, labels_float)

    class_metrics = {}
    proba_metrics = {}
    for name, metric_op in six.iteritems(metrics):
      if isinstance(name, tuple):
        if len(name) != 2:
          raise ValueError("Ignoring metric {}. It returned a tuple with "
                           "len {}, expected 2.".format(name, len(name)))
tensorflow.python.ops.math_ops.to_float
7,990
import tensorflow as tf

        full_video, time_axis=1)
    latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
    latent = tf.layers.flatten(latent)
    latent = tf.expand_dims(latent, axis=1)
    latent = tf.expand_dims(latent, axis=1)
    latent_mask = tf.layers.dense(latent, filters, name="latent_mask")
    zeros_mask = tf.zeros(
        common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)
    layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)
    extra_loss = self.get_extra_loss(latent_mean, latent_std)
    return layer, extra_loss


@registry.register_model
class NextFrameBasicStochasticDiscrete(
    basic_deterministic.NextFrameBasicDeterministic):
tensorflow.concat
7,991
import tensorflow as tf

            weights = tf.get_variable('weights', [prev_node, output_node],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
            self.nnweights.append(weights)
            biases = tf.get_variable('biases', [output_node],
                                     initializer=tf.constant_initializer(0.0))
            layer_out = tf.matmul(prev_x, weights) + biases
        # Output of Network
        y = layer_out
        # Global step
        with tf.variable_scope('training_step', reuse=tf.AUTO_REUSE):
            global_step = tf.get_variable("global_step", [],
                                          dtype=tf.int32,
                                          initializer=tf.constant_initializer(0),
                                          trainable=False)
        # Loss value
        reg_item = tf.contrib.layers.l1_l2_regularizer(L1_reg, L2_reg)
        reg_term = tf.contrib.layers.apply_regularization(reg_item, self.nnweights)
        loss_fun = self._negative_log_likelihood(y_, y)
        loss = loss_fun + reg_term
        # SGD Optimizer
        if optimizer == 'sgd':
            lr = tf.train.exponential_decay(
                learning_rate,
                global_step,
                1,
tensorflow.constant_initializer
7,992
import tensorflow as tf

        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    cross_entropy = tf.cond(n_positives > 0.,
                            lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred),
                            lambda: 0.)
    #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
tensorflow.losses.sparse_softmax_cross_entropy
7,993
import tensorflow as tf

                         reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.compat.v1.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Add the fault injection code here to instrument the graph
    # We start injecting the fault right away here unlike earlier
    fi = ti.TensorFI(sess, name="NearestNeighbor", logLevel=50)

    # loop over test data
tensorflow.compat.v1.Session
7,994
import tensorflow as tf

        if b_init is None:
            b_init = tf.constant_initializer()

        w = tf.get_variable('W', filter_shape, initializer=w_init)
        b = None

        if use_bias:
            b = tf.get_variable('b', [out_dims], initializer=b_init)

        conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,
                                   padding=padding, name='dilation_conv')

        if use_bias:
            ret = tf.add(conv, b)
        else:
            ret = conv

        return ret
tensorflow.nn.atrous_conv2d
7,995
import tensorflow as tf "Only used if `use_tpu` is True. Total number of TPU cores to use.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = modeling.BertModel(
tensorflow.logging.info
7,996
import tensorflow as tf

    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tensorflow.cast
7,997
import tensorflow as tf

        mask = tf.ones(shape=[1, h, w, 1])
        update_mask = tf.layers.conv2d(mask, filters=1,
                                       dilation_rate=(dilation, dilation),
                                       name='mask' + id,
                                       kernel_size=size,
                                       kernel_initializer=tf.constant_initializer(1.0),
                                       strides=stride,
                                       padding="SAME",
                                       use_bias=False,
                                       trainable=False)
        mask_ratio = slide_window / (update_mask + 1e-8)
        update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
        mask_ratio = mask_ratio * update_mask

    with tf.variable_scope('parconv'):
        x = tf.layers.conv2d(input, filters=channels, name='conv' + id,
                             kernel_size=size, kernel_initializer=init,
                             strides=stride, padding="SAME", use_bias=False)
        x = x * mask_ratio

        if use_bias:
            bias = tf.get_variable("bias" + id, [channels],
                                   initializer=tf.constant_initializer(0.0))
            x = tf.nn.bias_add(x, bias)

        return x * update_mask
tensorflow.variable_scope
7,998
from tensorflow.contrib.learn.python.learn.estimators import composable_model

        model_dir=model_dir, config=config)

    num_ps_replicas = config.num_ps_replicas if config else 0

    self._linear_model = composable_model.LinearComposableModel(
        num_label_columns=target_column.num_label_columns,
        optimizer=linear_optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas)

    self._dnn_model = composable_model.DNNComposableModel(
        num_label_columns=target_column.num_label_columns,
        hidden_units=dnn_hidden_units,
        optimizer=dnn_optimizer,
        activation_fn=dnn_activation_fn,
        dropout=dnn_dropout,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None

    self._linear_feature_columns = linear_feature_columns
tensorflow.contrib.learn.python.learn.estimators.composable_model.DNNComposableModel
7,999