Columns:
  seed      string, lengths 25 to 2.89k
  seed_api  string, lengths 14 to 102
  index     int64, values 0 to 14.8k
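Each record below lists a seed code snippet, the fully qualified TensorFlow API it exercises, and its row index. As a minimal sketch of how records in this layout could be consumed, the parser below yields (seed, seed_api, index) tuples; the `load_rows` helper and the file path are illustrative assumptions, not part of the dataset itself.

from typing import Iterator, Optional, Tuple

def load_rows(path: str) -> Iterator[Tuple[str, Optional[str], int]]:
    """Yield (seed, seed_api, index) records from a plain-text dump in the
    'seed:' / 'seed_api:' / 'index:' layout used below (an assumed layout)."""
    seed_lines: list = []
    seed_api: Optional[str] = None
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if line == "seed:":
                # start of a new record: reset accumulators
                seed_lines, seed_api = [], None
            elif line.startswith("seed_api: "):
                seed_api = line[len("seed_api: "):]
            elif line.startswith("index: "):
                # end of the record: emit it
                yield "\n".join(seed_lines), seed_api, int(line[len("index: "):])
            else:
                seed_lines.append(line)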
seed:
import tensorflow as tf
shadow_loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)
algo = TRPO(vfn=vfn, policy=policy, dim_state=dim_state, dim_action=dim_action,
            **FLAGS.TRPO.as_dict())
advtask = ADVTASK(dim_state, dim_action, policy, vfn, warmup_policy, warmup_vfn, task,
                  alpha=alpha, beta=beta, nsample=nsample, atype=atype)
tf.get_default_session().run(tf.global_variables_initializer())
print("norm params:", normalizers_parameters)
print("norm_copy params:", normalizers_copy_parameters)
norm_before = tf.get_default_session().run(normalizers_parameters)
print("norm_before:", norm_before)
assert FLAGS.algorithm != 'MF', "don't support model free for now"
print(f"n_envs for task: {nsample}//{FLAGS.plan.max_steps}={nsample//FLAGS.plan.max_steps}")
seed_api: tensorflow.get_default_session
index: 5500
seed:
import tensorflow as tf
train_op = self.optimize(loss, num_async_replicas=num_async_replicas)

if common_layers.is_on_tpu():
    _remove_summaries()  # summaries not currently working on TPU
    return tf.contrib.tpu.TPUEstimatorSpec(
        tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
else:
    return tf.estimator.EstimatorSpec(
seed_api: tensorflow.contrib.tpu.TPUEstimatorSpec
index: 5501
seed:
import tensorflow as tf
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)

# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                        tf.reshape(self.preds_by_word,
                                   [-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
                                     self.preds_by_word.get_shape())
self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
seed_api: tensorflow.reshape
index: 5502
seed:
import tensorflow as tf
if num_channels_in is None:
    num_channels_in = self.top_size
name = 'affine' + str(self.counts['affine'])
self.counts['affine'] += 1
with tf.variable_scope(name):
    init_factor = 2. if activation == 'relu' else 1.
    kernel = tf.get_variable(
        'weights', [num_channels_in, num_out_channels],
seed_api: tensorflow.variable_scope
index: 5503
seed:
import tensorflow as tf
elif self.optim_type == 'rprop':
    self.optimizer = tf.train.RMSPropOptimizer(self.lr)
elif self.optim_type == 'sgd':
    self.optimizer = tf.train.GradientDescentOptimizer(self.lr)
elif self.optim_type == 'adamW':
    self.optimizer = AdamWOptimizer(self.config.weight_decay,
seed_api: tensorflow.train.GradientDescentOptimizer
index: 5504
seed:
import tensorflow as tf
def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
seed_api: tensorflow.identity
index: 5505
seed:
import tensorflow as tf
else:
    # during inference, compute the end logits based on beam search
    start_top_log_probs, start_top_index = tf.nn.top_k(
        start_log_probs, k=FLAGS.start_n_top)
    start_index = tf.one_hot(start_top_index,
                             depth=seq_len, axis=-1, dtype=tf.float32)
    start_features = tf.einsum("lbh,bkl->bkh", output, start_index)
    end_input = tf.tile(output[:, :, None],
                        [1, 1, FLAGS.start_n_top, 1])
    start_features = tf.tile(start_features[None],
                             [seq_len, 1, 1, 1])
    end_input = tf.concat([end_input, start_features], axis=-1)
    end_logits = tf.layers.dense(
        end_input,
        xlnet_config.d_model,
        kernel_initializer=initializer,
        activation=tf.tanh,
        name="dense_0")
    end_logits = tf.contrib.layers.layer_norm(end_logits,
seed_api: tensorflow.tile
index: 5506
seed:
import tensorflow as tf
    def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

    scaffold_fn = tpu_scaffold
else:
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                    init_string)

output_spec = None
seed_api: tensorflow.logging.info
index: 5507
seed:
import tensorflow as tf
def creat_optimizer(self, readout):
    action = tf.placeholder(tf.float32, [None, self.ACTIONS])
    y = tf.placeholder(tf.float32, [None])
    readout_action = tf.reduce_sum(tf.multiply(readout, action), reduction_indices=1)
seed_api: tensorflow.placeholder
index: 5508
seed:
from tensorflow.contrib.layers.python.layers import utils
)

mean, variance = utils.smart_cond(
    use_batch_stats,
seed_api: tensorflow.contrib.layers.python.layers.utils.smart_cond
index: 5509
seed:
import tensorflow as tf
inter_block_logits_masked = exp_mask_for_high_rank(inter_block_logits, rep_mask_split)  # bs,bn,bl,vec
inter_block_soft = tf.nn.softmax(inter_block_logits_masked, 2)  # bs,bn,bl,vec
inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2)  # bs,bn,vec

with tf.variable_scope('self_attn_inter_block'):
    inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)
    block_ct_res = directional_attention_with_dense(
        inter_block_attn_output, inter_block_attn_output_mask, direction, 'disa',
seed_api: tensorflow.variable_scope
index: 5510
seed:
from tensorflow.contrib.framework import deprecated_arg_values


class ExportMonitor(EveryN):
    """Monitor that exports Estimator every N steps."""

    # TODO(philstahlfeld): Investigate switching export.export_estimator
    # configuration values to **kwargs so that updates to the export_estimator
    # function don't have to be reflected here.
    @deprecated_arg_values(
        "2016-09-23",
        "The signature of the input_fn accepted by export is changing to be "
        "consistent with what's used by tf.Learn Estimator's train/evaluate. "
        "input_fn (and in most cases, input_feature_key) will both become "
        "required args.",
seed_api: tensorflow.contrib.framework.deprecated_arg_values
index: 5511
seed:
import tensorflow as tf
if not self.graph.finalized:
    self.graph.finalize()
tf.logging.info('Start training')
for i in range(iterations):
    loss, summaries, _ = self.sess.run(
seed_api: tensorflow.logging.info
index: 5512
seed:
import tensorflow as tf
    weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
    values=masked_lm_example_loss, weights=masked_lm_weights)

next_sentence_log_probs = tf.reshape(
    next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
    next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
    labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
    values=next_sentence_example_loss)

return {
    "masked_lm_accuracy": masked_lm_accuracy,
seed_api: tensorflow.reshape
index: 5513
seed:
import tensorflow as tf
filters_out = filters * 4 if bottleneck else filters

def projection_shortcut(inputs):
    return conv2d_fixed_padding(
        inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
        data_format=data_format)

# Only the first block per block_layer uses projection_shortcut and strides
inputs = block_fn(inputs, filters, training, projection_shortcut, strides,
                  data_format)

for _ in range(1, blocks):
    inputs = block_fn(inputs, filters, training, None, 1, data_format)

return tf.identity(inputs, name)


class Model(object):
    """Base class for building the Resnet Model."""

    def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
                 kernel_size, conv_stride, first_pool_size, first_pool_stride,
                 block_sizes, block_strides, final_size,
                 resnet_version=DEFAULT_VERSION, data_format=None,
                 dtype=DEFAULT_DTYPE):
        """Creates a model for classifying an image.
seed_api: tensorflow.identity
index: 5514
seed:
import tensorflow as tf
tvars = tf.trainable_variables()

initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
    (assignment_map, initialized_variable_names
     ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint,
                                                     different_vocabulary=False)
    if use_tpu:
        def tpu_scaffold():
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
    else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
seed_api: tensorflow.train.init_from_checkpoint
index: 5515
seed:
import tensorflow as tf
    logits = logits[:, current_output_position, :, :]
    return tf.squeeze(logits, axis=[1, 2])

initial_ids = tf.zeros([batch_size], dtype=tf.int32)

if self.has_input:
    inputs_old = features["inputs"]
    features["inputs"] = tf.expand_dims(features["inputs"], 1)
    if len(features["inputs"].shape) < 5:
        features["inputs"] = tf.expand_dims(features["inputs"], 4)
    # Expand the inputs in to the beam size.
    features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
    s = common_layers.shape_list(features["inputs"])
    features["inputs"] = tf.reshape(features["inputs"],
                                    [s[0] * s[1], s[2], s[3], s[4]])

target_modality = self._problem_hparams.target_modality
vocab_size = target_modality.top_dimensionality
# Setting decode length to input length + decode_length
decode_length = tf.constant(decode_length)
if "partial_targets" not in features:
    decode_length += common_layers.shape_list(features["inputs"])[1]
seed_api: tensorflow.tile
index: 5516
seed:
import tensorflow as tf
x_flat = tf.reshape(x, [-1, 1])
c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2)
shape = common_layers.shape_list(c)
new_shape = shape
new_shape.append(self.hparams.num_blocks)
new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks))
c = tf.to_int32(tf.reshape(c, shape=new_shape))
h1_shape = shape_x
h1_shape.append(self.hparams.hidden_size)
h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)
c_int = self.bit_to_int(
    c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2)
seed_api: tensorflow.reshape
index: 5517
seed:
import tensorflow as tf
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
    tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
                               tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
    tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
    [num_batch * num_prior, 1])

# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask, tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
seed_api: tensorflow.cast
index: 5518
seed:
import tensorflow as tf
batch_size = 8
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
seed_api: tensorflow.placeholder
index: 5519
seed:
from tensorflow.python.framework import ops
    return list(zip(noisy_gradients, variables))


def _multiply_gradients(grads_and_vars, gradient_multipliers):
    """Multiply specified gradients."""
    multiplied_grads_and_vars = []
    for grad, var in grads_and_vars:
        if (grad is not None and
                (var in gradient_multipliers or var.name in gradient_multipliers)):
            key = var if var in gradient_multipliers else var.name
            multiplier = gradient_multipliers[key]
            if isinstance(grad, ops.IndexedSlices):
                grad_values = grad.values * multiplier
                grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
            else:
                grad *= math_ops.cast(multiplier, grad.dtype)
        multiplied_grads_and_vars.append((grad, var))
    return multiplied_grads_and_vars
seed_api: tensorflow.python.framework.ops.IndexedSlices
index: 5520
seed:
import tensorflow as tf
if do_norm:
    conv = tf.layers.batch_normalization(conv, momentum=0.9)

if activation_function == "relu":
    conv = tf.nn.relu(conv, name='relu')
if activation_function == "leakyrelu":
    conv = tf.nn.leaky_relu(conv, alpha=relu_factor)
if activation_function == "elu":
    conv = tf.nn.elu(conv, name='elu')

return conv

def general_deconv2d(self, input_data, filters=64, kernel_size=7, stride=1,
                     stddev=0.02, activation_function="relu", padding="VALID",
                     do_norm=True, relu_factor=0, name="deconv2d"):
    with tf.variable_scope(name):
        deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size,
                                            (stride, stride), padding,
                                            activation=None)

        if do_norm:
            deconv = tf.layers.batch_normalization(deconv, momentum=0.9)

        if activation_function == "relu":
            deconv = tf.nn.relu(deconv, name='relu')
        if activation_function == "leakyrelu":
            deconv = tf.nn.leaky_relu(deconv, alpha=relu_factor)
        if activation_function == "elu":
            deconv = tf.nn.elu(deconv, name='elu')
seed_api: tensorflow.variable_scope
index: 5521
seed:
import tensorflow as tf
grads = [tf.random_uniform([10, 10], -1., 1.) for _ in range(5)]
grads_and_vars = list(zip(grads, range(5)))

hparams = {
    "type": "clip_by_global_norm",
    "kwargs": {
        "clip_norm": 0.1
    }
}
gn_grad_clip_fn = opt.get_gradient_clip_fn(hparams)
gn_grads_and_vars = gn_grad_clip_fn(grads_and_vars)
gn_grads, _ = zip(*gn_grads_and_vars)
gn_grads_true, _ = tf.clip_by_global_norm(
    grads, hparams["kwargs"]["clip_norm"])

hparams = {
    "type": "clip_by_value",
    "kwargs": {
        "clip_value_min": -0.01,
        "clip_value_max": 0.01
    }
}
v_grad_clip_fn = opt.get_gradient_clip_fn(hparams)
v_grads_and_vars = v_grad_clip_fn(grads_and_vars)
seed_api: tensorflow.clip_by_global_norm
index: 5522
seed:
import tensorflow as tf
    pooled_outputs.append(pooled)

with tf.name_scope("preFc"):
    # combine all pooled outputs
    total_filters = num_filter * len(filter_list)
    # concat all the pooled weights
    H_pool = tf.concat(pooled_outputs, 3)
    # flatten it for fully connected layer
    H_pool_flat = tf.reshape(H_pool, [-1, total_filters])

with tf.name_scope("dropout"):
    H_drop = tf.nn.dropout(H_pool_flat, keep_prob=keep_prob)

# Final (unnormalized) layer
with tf.name_scope("output"):
    W = tf.get_variable("W", shape=[total_filters, nb_classes],
                        initializer=tf.contrib.layers.xavier_initializer())
    # add final layer bias
    b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
    # calc l2 losses
    l2_loss += tf.nn.l2_loss(W)
    l2_loss += tf.nn.l2_loss(b)
    # do logit = W*X+b
    logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
    predictions = tf.nn.softmax(logit, name="predictions")
seed_api: tensorflow.name_scope
index: 5523
seed:
import tensorflow as tf
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
seed_api: tensorflow.train.import_meta_graph
index: 5524
seed:
import tensorflow as tf
def sample_compute(_):
    pairs = sample_func()
    loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
    pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
    p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
                lambda: tf.print('csrt acc ', [pct]),
                lambda: tf.no_op())
    with tf.control_dependencies([p]):
        return tf.reduce_mean(loss)

loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample),
                 dtype=tf.float32, parallel_iterations=32)
final_loss = tf.reduce_mean(loss)
return final_loss


def contra_traj_lossV6(pred, tgt, horizon=12):
seed_api: tensorflow.reduce_mean
index: 5525
seed:
import tensorflow as tf
    Returns:
        loss value, means negative log-likelihood.
    """
    logL = 0
    # pre-calculate cumsum
    cumsum_y_pred = tf.cumsum(y_pred)
    hazard_ratio = tf.exp(y_pred)
    cumsum_hazard_ratio = tf.cumsum(hazard_ratio)
    if self.train_data['ties'] == 'noties':
        log_risk = tf.log(cumsum_hazard_ratio)
        likelihood = y_pred - log_risk
        # dimension for E: np.array -> [None, 1]
        uncensored_likelihood = likelihood * y_true
        logL = -tf.reduce_sum(uncensored_likelihood)
    else:
        # Loop for death times
        for t in self.train_data['failures']:
            tfail = self.train_data['failures'][t]
            trisk = self.train_data['atrisk'][t]
            d = len(tfail)
            dr = len(trisk)

            logL += -cumsum_y_pred[tfail[-1]] + (0 if tfail[0] == 0 else cumsum_y_pred[tfail[0]-1])

            if self.train_data['ties'] == 'breslow':
                s = cumsum_hazard_ratio[trisk[-1]]
                logL += tf.log(s) * d
seed_api: tensorflow.reduce_sum
index: 5526
seed:
import tensorflow as tf
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_inputs - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min
seed_api: tensorflow.nn.softplus
index: 5527
seed:
import tensorflow as tf
def load_agent_ckpt(ckpt_dir, tf_agent, global_step=None):
    if global_step is None:
        global_step = tf.compat.v1.train.get_or_create_global_step()
    train_checkpointer = common.Checkpointer(
        ckpt_dir=ckpt_dir, agent=tf_agent, global_step=global_step)
    train_checkpointer.initialize_or_restore().assert_existing_objects_matched()
seed_api: tensorflow.compat.v1.train.get_or_create_global_step
index: 5528
seed:
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
    loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

model = estimator.GradientBoostedDecisionTreeRanker(
    head=head_fn,
    learner_config=learner_config,
    num_trees=1,
    examples_per_layer=3,
    model_dir=model_dir,
    config=config,
    use_core_libs=True,
    feature_columns=[
        core_feature_column.numeric_column("f1"),
        core_feature_column.numeric_column("f2")
    ],
    ranking_model_pair_keys=("a", "b"))

model.fit(input_fn=_ranking_train_input_fn, steps=1000)
model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
model.predict(input_fn=_infer_ranking_train_input_fn)


class CoreGradientBoostedDecisionTreeEstimator(test_util.TensorFlowTestCase):
seed_api: tensorflow.python.feature_column.feature_column_lib.numeric_column
index: 5529
seed:
import tensorflow as tf
def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=True)
seed_api: tensorflow.get_variable_scope
index: 5530
seed:
import tensorflow as tf
        '`rightmost_transposed_ndims` and `perm`.')
if rightmost_transposed_ndims is not None:
    rightmost_transposed_ndims = tf.convert_to_tensor(
        value=rightmost_transposed_ndims,
seed_api: tensorflow.convert_to_tensor
index: 5531
seed:
import tensorflow as tf
crit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))
rep_loss = tf.reduce_mean(tf.square(pred - x2d))
KK = tf.matmul(K, K, transpose_b=True)
K_trace = tf.expand_dims(tf.expand_dims(tf.trace(KK), -1), -1)
K_loss = tf.reduce_mean(tf.abs(KK / K_trace - tf.eye(2)))
seed_api: tensorflow.matmul
index: 5532
seed:
import tensorflow as tf
def _latent_projections(self, latents):
    bs, dim = latents.get_shape().as_list()

    with tf.variable_scope("discriminator_z_projection", reuse=tf.AUTO_REUSE) as scope:
        k1 = tf.get_variable("kernel1", [dim, dim * 4])
        k2 = tf.get_variable("kernel2", [dim * 4, dim])
        z_proj = tf.matmul(tf.nn.leaky_relu(tf.matmul(latents, k1), name=scope.name), k2)
        z_proj = z_proj / tf.reshape(tf.norm(z_proj, ord=2, axis=-1), [bs, 1])
        return z_proj

def create_loss(self, features, labels, params, is_training=True):
    """Build the loss tensors for discriminator and generator.
seed_api: tensorflow.matmul
index: 5533
seed:
import tensorflow as tf
def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    name_to_features = {
        "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
        "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
    }
seed_api: tensorflow.FixedLenFeature
index: 5534
seed:
import tensorflow as tf
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
    dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
seed_api: tensorflow.get_variable
index: 5535
import tensorflow as tf "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), }
tensorflow.FixedLenFeature
5,536
seed:
import tensorflow as tf
    epsilon=self.rprop_epsilon)
_opt_op = trainer.apply_gradients(grads)

# so when you call _train, you first do the gradient step, then you apply ema
with tf.control_dependencies([_opt_op]):
    _train = tf.group(ema_apply_op)

# Ops/Summaries to run, and their names for logging
seed_api: tensorflow.control_dependencies
index: 5537
seed:
import tensorflow as tf
):
    # data for self-attention
    rep_map_dp = dropout(rep_map, keep_prob, is_train)
    rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)
    rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)

    # mask generation
    dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
    head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])

    if direction is None:
        direct_mask = tf.not_equal(head_idxs, dep_idxs)  # [bs, slh, sld]
    else:
        if direction == 'forward':
            direct_mask = tf.greater(head_idxs, dep_idxs)  # [bs, slh, sld]
        else:
            direct_mask = tf.less(head_idxs, dep_idxs)  # [bs, slh, sld]
    # [bs, slh, slh]
    rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1),
                                   tf.expand_dims(rep_head_mask, 2))
    attn_mask = tf.logical_and(direct_mask, rep_mask_tile)  # [bs, slh, sld]

    # tensor tile
    rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1])  # bs,slh,sld,vec
    with tf.variable_scope('attention'):  # bs,sl,sl,vec
        f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
        dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent')  # bs,sld,vec
        dependent_etd = tf.expand_dims(dependent, 1)  # bs,1,sld,vec
        head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head')  # bs,slh,vec
seed_api: tensorflow.greater
index: 5538
seed:
import tensorflow as tf
def resnet_model_fn(inputs, training):
    """Our model_fn for ResNet to be used with our Estimator."""

    network = resnet_model.imagenet_resnet_v2(
        resnet_size=18, num_classes=class_num, mode='se', data_format=None)
    inputs = network(inputs=inputs, is_training=training)
    feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')
    inputs = tf.layers.dense(inputs=inputs, units=class_num)
    # inputs = tf.layers.dense(inputs=feat, units=class_num)
    inputs = tf.identity(inputs, 'final_dense')

    return inputs, feat
seed_api: tensorflow.nn.l2_normalize
index: 5539
seed:
import tensorflow as tf
        output = tf.nn.sigmoid(dense(d_dense_2, n_l1, input_dim, 'd_output'))
        return output


def discriminator_gauss(x, reuse=False):
    """
    Discriminator that is used to match the posterior distribution with a given gaussian distribution.
    :param x: tensor of shape [batch_size, z_dim]
    :param reuse: True -> Reuse the discriminator variables,
                  False -> Create or search of variables before creating
    :return: tensor of shape [batch_size, 1]
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    with tf.name_scope('Discriminator_Gauss'):
        dc_den1 = tf.nn.relu(dense(x, z_dim, n_l1, name='dc_g_den1'))
        dc_den2 = tf.nn.relu(dense(dc_den1, n_l1, n_l2, name='dc_g_den2'))
        output = dense(dc_den2, n_l2, 1, name='dc_g_output')
        return output


def discriminator_categorical(x, reuse=False):
    """
    Discriminator that is used to match the posterior distribution with a given categorical distribution.
    :param x: tensor of shape [batch_size, n_labels]
    :param reuse: True -> Reuse the discriminator variables,
seed_api: tensorflow.get_variable_scope
index: 5540
seed:
import tensorflow as tf
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=labels, name='xentropy')
weight = tf.convert_to_tensor(weight,
                              dtype=logits.dtype.base_dtype,
                              name='loss_weight')
loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')
seed_api: tensorflow.convert_to_tensor
index: 5541
seed:
import tensorflow as tf
initial_data = tf.concat([initial_state, initial_context,
                          tf.expand_dims(initial_pos, axis=1), initial_weights], axis=1)
context_size = initial_context.shape[1].value

def get_logits(state, ids, time):  # for beam-search decoding
    with tf.variable_scope('decoder_{}'.format(decoder.name)):
        state, context, pos, prev_weights = tf.split(
            state, [cell_state_size, context_size, 1, -1], axis=1)
        input_ = embed(ids)
        pos = tf.squeeze(pos, axis=1)
        pos = tf.cond(tf.equal(time, 0),
                      lambda: pos,
                      lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))

        if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
            output = state
        else:
            # Output is always the right-most part of the state (even with multi-layer RNNs)
seed_api: tensorflow.squeeze
index: 5542
seed:
import tensorflow as tf
    sequence_length=text_len,
    initial_state_fw=state_fw,
    initial_state_bw=state_bw)

text_outputs = tf.concat([fw_outputs, bw_outputs], 2)  # [num_sentences, max_sentence_length, emb]
text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
if layer > 0:
    highway_gates = tf.sigmoid(util.projection(
        text_outputs, util.shape(text_outputs, 2)))  # [num_sentences, max_sentence_length, emb]
    text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
current_inputs = text_outputs
seed_api: tensorflow.nn.dropout
index: 5543
seed:
import tensorflow as tf
pooling_result = tf.cond(
    tf.equal(sl_unhead, 0),
    lambda: tf.zeros([bs, 0, hn], tf.float32),
    lambda: mean_pooling_for_unselected_head(
        unhead_org_idx, sl_unhead, rep_unhead_mask,
        input_idx, sl, rep_mask, rep_map, None)  # todo: point !
)

with tf.variable_scope('output'):
    if keep_unselected:
        range_head = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_head])
        scatter_attn = tf.cond(
            tf.equal(sl_head, 0),
            lambda: tf.zeros([bs, sl+1, hn], tf.float32),
            lambda: tf.scatter_nd(
                tf.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn])
        )
        range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead])
        scatter_pooling = tf.cond(
            tf.equal(sl_unhead, 0),
            lambda: tf.zeros([bs, sl+1, hn], tf.float32),
            lambda: tf.scatter_nd(
                tf.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn])
        )
        self_attn_input = rep_map
        context_features = tf.add(scatter_attn[:, :-1], scatter_pooling[:, :-1], 'context_features')
        output_mask = rep_mask
    else:
seed_api: tensorflow.stack
index: 5544
seed:
import tensorflow as tf
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
    predict_examples.append(PaddingInputExample())

predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
                                        FLAGS.max_seq_length, tokenizer,
                                        predict_file)

tf.logging.info("***** Running prediction*****")
tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                len(predict_examples), num_actual_predict_examples,
                len(predict_examples) - num_actual_predict_examples)
tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
    input_file=predict_file,
    seq_length=FLAGS.max_seq_length,
    is_training=False,
    drop_remainder=predict_drop_remainder)
seed_api: tensorflow.logging.info
index: 5545
seed:
import tensorflow as tf
    # exit()
    return avg_loss


def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)
    loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small))
    if hard_ratio < 1.0:
        hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32)
        loss = tf.reshape(loss, [-1])
        hard_loss, _ = tf.math.top_k(loss, k=hard_num)
        return hard_loss
    return loss


def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)
    loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small))
seed_api: tensorflow.reshape
index: 5546
seed:
from tensorflow.python.framework import ops
ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4))


@ops.RegisterShape("LRNGrad")
def _LRNGradShape(op):
    """Shape function for LRNGrad op."""
    in_grads_shape = op.inputs[0].get_shape().with_rank(4)
    in_image_shape = op.inputs[1].get_shape().with_rank(4)
    out_image_shape = op.inputs[2].get_shape().with_rank(4)
    return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]


ops.RegisterShape("Softmax")(
    common_shapes.unchanged_shape_with_rank(2))


@ops.RegisterShape("InTopK")
def _InTopKShape(op):
    """Shape function for InTopK op."""
    predictions_shape = op.inputs[0].get_shape().with_rank(2)
    targets_shape = op.inputs[1].get_shape().with_rank(1)
    batch_size = predictions_shape[0].merge_with(targets_shape[0])
    return [tensor_shape.vector(batch_size.value)]
seed_api: tensorflow.python.framework.ops.RegisterShape
index: 5547
seed:
import tensorflow as tf
        # end of a sentence
        if all_tokens[i] == "[SEP]":
            sentence["tokens"] = tokens
            sentence["ppl"] = float(np.exp(sentence_loss / word_count_per_sent))
            sentences.append(sentence)
        i += 1
    if output_file is not None:
        tf.logging.info("Saving results to %s" % output_file)
        writer.write(json.dumps(sentences, indent=2, ensure_ascii=False))


def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
seed_api: tensorflow.logging.info
index: 5548
import tensorflow as tf """ if split_name not in split_to_sizes: raise ValueError('split name %s was not recognized.' % split_name) file_pattern = os.path.join(dataset_dir, file_pattern % split_name) # Allowing None in the signature so that dataset_factory can use the default. if reader is None: reader = tf.TFRecordReader # Features in Pascal VOC TFRecords. keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/height': tf.FixedLenFeature([1], tf.int64), 'image/width': tf.FixedLenFeature([1], tf.int64), 'image/channels': tf.FixedLenFeature([1], tf.int64), 'image/shape': tf.FixedLenFeature([3], tf.int64), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64), 'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64), 'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64), }
tensorflow.FixedLenFeature
5,549
seed:
import tensorflow as tf
    if len(conflicts) > 0:
        tf.logging.info('Variables with incompatible shapes: \n\t{}'.format(
            '\n\t'.join(conflicts)))
else:
    var_list = None

with tf.device('/cpu:0'):
    saver = tf.train.Saver(var_list=var_list, save_relative_paths=True)
saver.restore(self.sess, checkpoint_path)


def save(self, checkpoint_path):
seed_api: tensorflow.device
index: 5550
seed:
import tensorflow as tf
elif activation == "relu":
    A = tf.nn.relu(h)
seed_api: tensorflow.nn.relu
index: 5551
seed:
import tensorflow as tf
    with tf.tpu.bfloat16_scope():
        return two_stream_loss(FLAGS, features, labels, mems, is_training)
else:
    return two_stream_loss(FLAGS, features, labels, mems, is_training)


def get_classification_loss(
        FLAGS, features, n_class, is_training):
    """Loss for downstream classification tasks."""

    bsz_per_core = tf.shape(features["input_ids"])[0]

    inp = tf.transpose(features["input_ids"], [1, 0])
    seg_id = tf.transpose(features["segment_ids"], [1, 0])
    inp_mask = tf.transpose(features["input_mask"], [1, 0])
    label = tf.reshape(features["label_ids"], [bsz_per_core])

    xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
    run_config = xlnet.create_run_config(is_training, True, FLAGS)

    xlnet_model = xlnet.XLNetModel(
        xlnet_config=xlnet_config,
        run_config=run_config,
        input_ids=inp,
        seg_ids=seg_id,
        input_mask=inp_mask)
seed_api: tensorflow.transpose
index: 5552
seed:
import tensorflow as tf
batch_image, batch_label = get_batch(image, label, trainnum, 0)
work = trainwork()
inf = work.inference(batch_image)
loss = work.softmax_loss(inf, batch_label)
opti = work.optimer(loss, learnrate)

test_image_batch, test_label_batch = get_test_batch(test_image, test_label, testnum)
test_inf = work.test_inference(test_image_batch)
test_labels = tf.one_hot(test_label_batch, classnum)
test_pre = tf.reshape(test_inf, [testnum, classnum])
correct_prediction = tf.equal(tf.argmax(test_inf, 1), tf.argmax(test_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)

valid_image_batch, valid_label_batch = get_valid_batch(valid_image, valid_label, validnum)
valid_inf = work.valid_inference(valid_image_batch)
valid_labels = tf.one_hot(valid_label_batch, classnum)
seed_api: tensorflow.reshape
index: 5553
seed:
import tensorflow as tf
def __forward_pass(self, x, reuse):
    fc_size1 = 384
    fc_size2 = 192

    # convolutional layers
    with tf.variable_scope('conv1'):
        layer1, weights1 = new_conv_layer(x, name="conv1", num_input_channels=3,
                                          num_filters=64, filter_size=5,
                                          ac_fun=tf.nn.relu, pool_ksize=[1, 3, 3, 1])
    with tf.variable_scope('conv2'):
        layer2, weights2 = new_conv_layer(input=layer1, name="conv2",
                                          num_input_channels=64, num_filters=64,
                                          filter_size=5, ac_fun=tf.nn.relu,
                                          pool_ksize=[1, 3, 3, 1])
    with tf.name_scope('flatten'):
        layer3, num_features = flatten_layer(layer2)

    # fully connected layers
    with tf.variable_scope('fc1'):
        layer4, weights4 = new_fc_layer(input=layer3, name="fc1",
                                        num_inputs=num_features, num_outputs=fc_size1)
        # print(layer4)
    with tf.variable_scope('fc2'):
        logits, weights5 = new_fc_layer(input=layer4, name="fc2",
                                        num_inputs=fc_size1, num_outputs=fc_size2)

    # add histograms
    if not reuse:
seed_api: tensorflow.name_scope
index: 5554
seed:
import tensorflow as tf
    span_emb = tf.concat(span_emb_list, 1)  # [k, emb]
    return span_emb  # [k, emb]

def get_mention_scores(self, span_emb):
    with tf.variable_scope("mention_scores"):
        return util.ffnn(span_emb, self.config["ffnn_depth"],
                         self.config["ffnn_size"], 1, self.dropout)  # [k, 1]

def softmax_loss(self, antecedent_scores, antecedent_labels):
    gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels))  # [k, max_ant + 1]
    marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1])  # [k]
    log_norm = tf.reduce_logsumexp(antecedent_scores, [1])  # [k]
    return log_norm - marginalized_gold_scores  # [k]

def bucket_distance(self, distances):
    """
    Places the given values (designed for distances) into 10 semi-logscale buckets:
    [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
    """
    logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances)) / math.log(2))) + 3
seed_api: tensorflow.reduce_logsumexp
index: 5555
seed:
import tensorflow as tf
    return safe_get(name, list(shape),
                    initializer=tf.constant_initializer(weights),
                    dtype=tf.float32)


def batched_matrix_vector_multiply(vector, matrix):
    """ computes x^T A in mini-batches. """
    vector_batch_as_matricies = tf.expand_dims(vector, [1])
    mult_result = tf.matmul(vector_batch_as_matricies, matrix)
    squeezed_result = tf.squeeze(mult_result, [1])
    return squeezed_result
seed_api: tensorflow.expand_dims
index: 5556
seed:
import tensorflow as tf
                     activation=tf.nn.relu,
                     name='l1',
                     trainable=trainable)
mu = 2 * tf.layers.dense(inputs=l1,
                         units=action_dim,  # number of hidden units
                         activation=tf.nn.tanh,
                         name='mu',
                         trainable=trainable)
sigma = tf.layers.dense(inputs=l1,
                        units=action_dim,  # output units
                        activation=tf.nn.softplus,  # get action probabilities
                        name='sigma',
                        trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
seed_api: tensorflow.layers.dense
index: 5557
seed:
import tensorflow as tf
    return accuracy


def num_correct_prediction(logits, labels):
    '''
    Evaluate the quality of the logits at predicting the label
    '''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct


def optimize(loss, learning_rate, global_step):
    '''
seed_api: tensorflow.cast
index: 5558
seed:
import tensorflow as tf
# The output is (mean, var).
if self._compute_variance and not self._compute_weighted:
    return [
        analyzer_nodes.TensorInfo(
            tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
    ] * 2
else:
    return [
seed_api: tensorflow.as_dtype
index: 5559
seed:
import tensorflow as tf
# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000)  # 5000 for training (nn candidates)
Xte, Yte = mnist.test.next_batch(200)  # 200 for testing

# tf Graph Input
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
seed_api: tensorflow.negative
index: 5560
seed:
import tensorflow as tf
    recall = recall_score(labels_all, predicted_score)
    apk_sc = rank_metrics.apk(actual, predicted, k=50)
    return roc_sc, auprc_score, accuracy, precision, recall, f, apk_sc


def construct_placeholders(edge_types):
    placeholders = {
        'batch': tf.placeholder(tf.int32, name='batch'),
        'batch_neg': tf.placeholder(tf.int32, name='batch_neg'),
        'batch_node': tf.placeholder(tf.int32, name='batch_node'),
        'adj_min_batch': tf.placeholder(tf.float32, name='adj_min_batch'),
        'sim_min_batch': tf.placeholder(tf.float32, name='sim_min_batch'),
        'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'),
        'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'),
        'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'),
        'degrees': tf.placeholder(tf.int32),
seed_api: tensorflow.placeholder
index: 5561
seed:
import tensorflow as tf
num_block_layers = 3
dense_layer_depth = 16

def lstm_network(input, scope='lstm_network'):
    with tf.variable_scope(scope):
        # tf.nn.rnn_cell
        lstm_cell1 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer1, forget_bias=1.0)
        lstm_cell2 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer2, forget_bias=1.0)

        lstm_cells = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell1, lstm_cell2],
                                                 state_is_tuple=True)

        # tf.nn.rnn_cell
        # lstm_cell1 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer1, forget_bias=1.0)
        # lstm_cell2 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer2, forget_bias=1.0)
        # lstm_cells = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True)

        # initial_state = lstm_cells.zero_state(batch_size, tf.float32)

        _, states = tf.nn.dynamic_rnn(lstm_cells, input, dtype=tf.float32, initial_state=None)
seed_api: tensorflow.contrib.rnn.MultiRNNCell
index: 5562
seed:
import tensorflow as tf
def z_conv(self, id, input, channels, size, stride=1, padding="SAME",
           use_bias=False, dilation=1):
    # zero mean conv
    if type(size) == int:
        size = [size, size]
    in_ch = input.get_shape().as_list()[-1]
    # init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
    init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
    filters = tf.get_variable('zero_conv_weights' + id, initializer=init,
                              shape=[size[0], size[1], in_ch, channels])
    filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True)

    if padding == "PARTIAL":
        with tf.variable_scope('mask'):
            _, h, w, _ = input.get_shape().as_list()
seed_api: tensorflow.get_variable
index: 5563
seed:
import tensorflow as tf
    # [n_envs * n_steps, n_act]
    grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs,
                        phi_i)
    # [n_envs * n_steps, n_act]
    # Directly computed gradient of KL divergence wrt f
    kl_grad = - f_polyak_i / (f_i_ + eps)
    k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1)
    adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / (
        tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps))  # [n_envs * n_steps]

    # Calculate stats (before doing adjustment) for logging.
    avg_norm_k = avg_norm(kl_grad)
    avg_norm_g = avg_norm(grad)
    avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
    avg_norm_adj = tf.reduce_mean(tf.abs(adj))

    grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad
    # These are trust region adjusted gradients wrt f ie statistics of policy pi
    grads_f = -grad / (self.n_envs * self.n_steps)
    grads_policy = tf.gradients(f_i_, self.params, grads_f)
    grads_q = tf.gradients(loss_q * self.q_coef, self.params)
    grads = [gradient_add(g1, g2, param, verbose=self.verbose)
             for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]

    avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
    norm_grads_q = tf.global_norm(grads_q)
    norm_grads_policy = tf.global_norm(grads_policy)
else:
    grads = tf.gradients(loss, self.params)
    norm_grads = None
seed_api: tensorflow.reshape
index: 5564
seed:
import tensorflow as tf
self.epsilon = epsilon
self.name = name

self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)
self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)
self.batch_size = int(x.get_shape()[0])
assert x is not None
seed_api: tensorflow.square
index: 5565
seed:
import tensorflow as tf
for h, n_out in enumerate(layers_width[1:]):
    # Hidden layer
    if h < len(layers_width) - 2:
        # Perform affine mapping at each layer of the neural network
        Z = tf.layers.dense(Z, n_basis // 2)
        # Define variational parameters
        alpha_mean = tf.get_variable('alpha_mean_layer' + str(h),
                                     shape=[1, 1, n_basis, n_out],
                                     initializer=tf.random_normal_initializer())
        alpha_logstd = tf.get_variable('alpha_logstd_layer' + str(h),
                                       shape=[1, 1, n_basis, n_out],
                                       initializer=tf.random_normal_initializer())
        alpha_std = tf.exp(alpha_logstd)
        # Compute epsilon from {n_samples} standard Gaussian
        # epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
        epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
        hyp_params = tf.get_variable('hyp_params_layer' + str(h),
                                     shape=[2],
                                     initializer=tf.random_normal_initializer())
        l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
        epsilon = tf.sinh(epsilon * l2) / tf.cosh(epsilon * l2) ** l1 / l2
        # Compute A_{h+1}
seed_api: tensorflow.random_normal_initializer
index: 5566
seed:
import tensorflow as tf
               entropy, log_prob):
    indices = tf.range(0, layer_id, dtype=tf.int32)
    start_id = 4 * (layer_id - 2)
    prev_layers = []
    for i in range(2):  # index_1, index_2
        next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
        prev_c, prev_h = next_c, next_h
        query = anchors_w_1.gather(indices)
        query = tf.reshape(query, [layer_id, self.lstm_size])
        query = tf.tanh(query + tf.matmul(next_h[-1], self.w_attn_2))
        query = tf.matmul(query, self.v_attn)
        logits = tf.reshape(query, [1, layer_id])
        if self.temperature is not None:
            logits /= self.temperature
        if self.tanh_constant is not None:
            logits = self.tanh_constant * tf.tanh(logits)
        index = tf.multinomial(logits, 1)
        index = tf.to_int32(index)
        index = tf.reshape(index, [1])
        arc_seq = arc_seq.write(start_id + 2 * i, index)
seed_api: tensorflow.matmul
index: 5567
seed:
import tensorflow as tf
                             output_activation, dropout_rate)
rnd_pred_act = act_limit * rnd_pred_act

with tf.variable_scope('rnd_targ_cri'):
    rnd_targ_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                                  list(hidden_sizes) + [1], activation, None), axis=1)

with tf.variable_scope('rnd_pred_cri'):
    rnd_pred_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                                  list(hidden_sizes) + [1], activation, None), axis=1)
    rnd_pred_cri_in_ph = tf.concat([x_ph, a_ph], axis=-1)
    rnd_pred_cri_in_dim = rnd_pred_cri_in_ph.shape.as_list()[1]
    rnd_pred_cri_dropout_mask_generator = DropoutMaskGenerator(
        rnd_pred_cri_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
seed_api: tensorflow.concat
index: 5568
seed:
import tensorflow as tf
        tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
    tf.summary.scalar("regularization_loss", model.regularization_loss)
    tf.summary.scalar("stop_token_loss", model.stop_token_loss)
    tf.summary.scalar("loss", model.loss)
    tf.summary.scalar("learning_rate", model.learning_rate)  # Control learning rate decay speed
    if hparams.tacotron_teacher_forcing_mode == "scheduled":
        tf.summary.scalar("teacher_forcing_ratio", model.ratio)  # Control teacher forcing
        # ratio decay when mode = "scheduled"
    gradient_norms = [tf.norm(grad) for grad in model.gradients]
    tf.summary.histogram("gradient_norm", gradient_norms)
    tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms))  # visualize
    # gradients (in case of explosion)
    return tf.summary.merge_all()


def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss,
                   stop_token_loss, loss):
    values = [
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss",
                         simple_value=before_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss",
                         simple_value=after_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss",
                         simple_value=stop_token_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss",
                         simple_value=loss),
seed_api: tensorflow.summary.merge_all
index: 5569
import tensorflow as tf "dtype=float32)") def testReprWorksCorrectlyScalar(self): # Usually we'd write np.float(X) here, but a recent Eager bug would # erroneously coerce the value to float32 anyway. We therefore use constants # here, until the bug is resolved in TensorFlow 1.12. normal = tfd.Normal(loc=tf.constant(0, tf.float16), scale=tf.constant(1, tf.float16)) self.assertEqual( repr(normal), "<tfp.distributions.Normal" " 'Normal/'" " batch_shape=()" " event_shape=()"
tensorflow.constant
5,570
seed:
import tensorflow as tf
                             tf.expand_dims(k, 0),
                             util.shape(context_outputs, 0),
                             True)  # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0)  # [k]

top_span_starts = tf.gather(candidate_starts, top_span_indices)  # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices)  # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices)  # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices)  # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices)  # [k]
top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices)  # [k]
top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts)  # [k]

c = tf.minimum(self.config["max_top_antecedents"], k)

if self.config["coarse_to_fine"]:
    top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = \
        self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
else:
    top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = \
        self.distance_pruning(top_span_emb, top_span_mention_scores, c)

dummy_scores = tf.zeros([k, 1])  # [k, 1]
for i in range(self.config["coref_depth"]):
    with tf.variable_scope("coref_layer", reuse=(i > 0)):
        top_antecedent_emb = tf.gather(top_span_emb, top_antecedents)  # [k, c, emb]
        top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(
            top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets,
            top_span_speaker_ids, genre_emb)  # [k, c]
        top_antecedent_weights = tf.nn.softmax(
            tf.concat([dummy_scores, top_antecedent_scores], 1))  # [k, c + 1]
        top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1),
                                        top_antecedent_emb], 1)  # [k, c + 1, emb]
seed_api: tensorflow.minimum
index: 5571
seed:
import tensorflow.contrib.graph_editor as ge
elif checkpoints == 'speed':
    # checkpoint all expensive ops to maximize running speed
    checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
elif checkpoints == 'memory':
seed_api: tensorflow.contrib.graph_editor.filter_ts_from_regex
index: 5572
seed:
import tensorflow as tf
    labels=masked_lm_ids,
    predictions=masked_lm_predictions,
    weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
    values=masked_lm_example_loss, weights=masked_lm_weights)

next_sentence_log_probs = tf.reshape(
    next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
    next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
    labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
    values=next_sentence_example_loss)
seed_api: tensorflow.argmax
index: 5573
seed:
import tensorflow as tf
    predictions['image_info'] = features['image_info']
    if params['include_mask']:
        predictions['mask_outputs'] = tf.nn.sigmoid(model_outputs['mask_outputs'])

    if params['use_tpu']:
        return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

# Set up training loss and learning rate.
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rates.step_learning_rate_with_linear_warmup(
    global_step,
seed_api: tensorflow.estimator.EstimatorSpec
index: 5574
seed:
import tensorflow as tf
# lang2_fname = filename + ".lang2"
lang1_fname = filename + ".source"
lang2_fname = filename + ".target"
if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
    tf.logging.info("Skipping compile data, found files:\n%s\n%s",
                    lang1_fname, lang2_fname)
seed_api: tensorflow.gfile.Exists
index: 5575
seed:
import tensorflow as tf
tf.add_to_collection('debug_layers', self.x_preprocessed)

with tf.variable_scope('conv1_x'):
    print('Building unit: conv1')
    self.conv1 = self._conv('conv1', self.x_preprocessed,
                            padding=[[0, 0], [3, 3], [3, 3], [0, 0]],
                            num_filters=64, kernel_size=(7, 7), stride=(2, 2),
                            l2_strength=self.wd, bias=self.bias)
    self.conv1 = self._bn('bn1', self.conv1)
    self.conv1 = self._relu('relu1', self.conv1)
    _debug(self.conv1)
    self.conv1 = tf.pad(self.conv1, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), "CONSTANT")
    self.conv1 = tf.nn.max_pool(self.conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                                padding='VALID', name='max_pool1')
    _debug(self.conv1)
    print('conv1-shape: ' + str(self.conv1.shape.as_list()))

with tf.variable_scope('conv2_x'):
    self.conv2 = self._residual_block('conv2_1', self.conv1, 64)
    _debug(self.conv2)
    self.conv2 = self._residual_block('conv2_2', self.conv2, 64)
    _debug(self.conv2)

with tf.variable_scope('conv3_x'):
    self.conv3 = self._residual_block('conv3_1', self.conv2, 128, pool_first=True, strides=2)
seed_api: tensorflow.nn.max_pool
index: 5576
seed:
import tensorflow as tf
datasetRoot = config.validation.data.root_path
data_list_path = os.path.join(datasetRoot, config.validation.data.path)
n_batch = batch_override or config.validation.n_batch[partial_level]

# read in datalist and create dataset
with open(data_list_path) as f:
    data_path_list = [datasetRoot + x[:-1] for x in f.readlines()]
n_data = len(data_path_list)

dataset = tf.data.Dataset.from_tensor_slices(data_path_list)
if shuffle_repeat:
    dataset = dataset.shuffle(n_data).repeat()
dataset = dataset.map(self.data_map)

# validation
validation_dataset = dataset.batch(n_batch).prefetch(4)
validation_iterator = validation_dataset.make_one_shot_iterator()
seed_api: tensorflow.data.Dataset.from_tensor_slices
index: 5577
seed:
import tensorflow as tf
    pos_ = tf.minimum(pos_, encoder_input_length - 1)
    pos_ = tf.maximum(pos_, 0)  # TODO: when pos is < 0, use <S> or </S>
    weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)),
                                      depth=attn_length))
    weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
    weighted_average.append(weighted_average_)

    weighted_average = tf.concat(weighted_average, axis=1)
    weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
    weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)),
                                     depth=attn_length))
    weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
seed_api: tensorflow.concat
index: 5578
seed:
import tensorflow as tf
gstack = tf.concat([self.prev_g, cut_g], axis=1)
self.last_c_g = gstack[:, 1:]
# print self.last_c_g

gsum = tf.reduce_sum(gstack, axis=1)
phi = tf.get_variable("phi", (self.g_dim, self.k))
w = tf.matmul(gsum, phi)
w = tf.expand_dims(w, [2])
# Calculate policy and sample
logits = tf.reshape(tf.matmul(U, w), [-1, num_acts])
self.pi = tf.nn.softmax(logits)
self.log_pi = tf.nn.log_softmax(logits)
self.sample = policy_utils.categorical_sample(
    tf.reshape(logits, [-1, num_acts]), num_acts)[0, :]


def build_value(self, _input):
    with tf.variable_scope('VF'):
        hidden = tf.layers.dense(inputs=_input,
                                 units=self.vf_hidden_size,
                                 activation=tf.nn.elu)

        w = tf.get_variable("weights", (self.vf_hidden_size, 1))
seed_api: tensorflow.nn.log_softmax
index: 5579
seed:
import tensorflow as tf
         name, scale=True, train=True,
         epsilon=1e-8, decay=.1, axes=[0], bn_lag=DEFAULT_BN_LAG):
    """Batch normalization."""
    # create variables
    with tf.variable_scope(name):
        var = variable_on_cpu(
            "var", [dim], tf.constant_initializer(1.), trainable=False)
        mean = variable_on_cpu(
            "mean", [dim], tf.constant_initializer(0.), trainable=False)
        step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)
        if scale:
            gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.))
        beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.))
seed_api: tensorflow.variable_scope
index: 5580
import tensorflow as tf """ # This assert should be only used with an instance that commutes with sum. assert stage.commutes_with_sum num_summands = len(server_test_data) expected_sum = np.sum([d.decoded_x for d in server_test_data], axis=0) sum_encoded_x = {} for k in server_test_data[0].encoded_x: sum_encoded_x[k] = np.sum([d.encoded_x[k] for d in server_test_data], axis=0) with tf.Graph().as_default(): with self.session() as sess: decode_sum_encoded_x = sess.run( stage.decode(sum_encoded_x, decode_params, num_summands, shape)) self.assertAllClose( expected_sum, decode_sum_encoded_x, rtol=DEFAULT_RTOL, atol=DEFAULT_ATOL)
tensorflow.Graph
5,581
seed:
import tensorflow as tf
    self._score_summaries.update(self._proposal_targets)
    # self._score_summaries.update(self._anchor_targets)

    return rois, roi_scores

def _anchor_component(self):
    with tf.variable_scope('ANCHOR_' + 'default'):
        # just to get the shape right
        # After the original input image passes through VGG16's conv5_3 it is
        # downscaled 16x, giving the size of the RPN's input feature map.
        height = tf.to_int32(tf.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0])))
        width = tf.to_int32(tf.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0])))
seed_api: tensorflow.variable_scope
index: 5582
seed:
import tensorflow as tf
tf.summary.image('input', images)
tf.summary.image('score_map', score_maps)
tf.summary.image('score_map_pred', f_score * 255)
tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
seed_api: tensorflow.summary.image
index: 5583
seed:
import tensorflow as tf
def _attn(q, k, v, train=False, scale=False):
    # w = [-1, head, n_ctx, n_ctx]
    w = tf.matmul(q, k)
seed_api: tensorflow.matmul
index: 5584
import tensorflow as tf """ Test RNN graph 0 step """ def test_rnn(test_data_x,test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) test_data_yp = np.insert(test_data_y,0,0,axis=0)[:-1] "read the trained graph" g['saver'].restore(sess, checkpoint)
tensorflow.global_variables_initializer
5,585
import tensorflow as tf params["discriminator_from_rgb_layers"][i][0][:] for i in range(len(params["discriminator_from_rgb_layers"])) ] # Create list to hold fromRGB 1x1 convs. from_rgb_conv_tensors = [ self.from_rgb_conv_layers[i]( inputs=tf.zeros( shape=[1] + from_rgb[i][0:3], dtype=tf.float32 ) ) for i in range(len(from_rgb)) ] print_obj(
tensorflow.zeros
5,586
seed:
import tensorflow as tf
    return new_state

def output_step_scan(self, dummy, new_state):
    if self.dale_ratio:
        new_output = tf.matmul(
            tf.nn.relu(new_state),
            tf.matmul(
                tf.abs(self.W_out) * self.output_Connectivity,
                self.Dale_out,
                name="in_2"),
            transpose_b=True,
            name="3") \
            + self.b_out
seed_api: tensorflow.nn.relu
index: 5587
import tensorflow as tf """ states = dict() for name in sorted(self.states_memory): states[name] = tf.gather(params=self.states_memory[name], indices=indices) internals = dict()
tensorflow.gather
5,588
seed:
import tensorflow as tf
    images, num_cols=FLAGS.num_images_per_class)
tf.summary.image('generated_images', reshaped_img, max_outputs=1)
seed_api: tensorflow.summary.image
index: 5589
seed:
import tensorflow as tf
self.set_training_op()

# not used at the moment, could be useful at a certain point
# self.create_random_update_op()

# there are cases in which multiple losses exist
if isinstance(self.loss, dict):
    for k, v in self.loss.items():
        self.loss[k] = tf.check_numerics(v, "self.loss" + str(k) + " is not finite")
else:
    self.loss = tf.check_numerics(self.loss, "self.loss is not finite")

# session will be created after init

def create_datasets_with_handles(self, dataset):
    datasets_nodes, handle, ds_initializers, ds_handles = dataset.get_dataset_with_handle(
        self.batch_size["train"], self.batch_size["eval"])
    self.datasets_initializers = ds_initializers
    self.datasets_handles_nodes = ds_handles
    self.ds_handle = handle
seed_api: tensorflow.check_numerics
index: 5590
seed:
import tensorflow as tf
    with tf.variable_scope(name) as scope:
        conv = tf.nn.conv2d(x, W, [1, stride, stride, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, bias)
        output = tf.nn.relu(pre_activation, name=scope.name)
        return output

# Define a pooling layer; defaults to max pooling.
def max_pool_2x2(self, name, x):
    with tf.variable_scope(name) as scope:
        maxpool = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        return maxpool

# Build the DQN.
def creat_network(self):
    # Network parameters (weights).
    # First conv layer: 8x8 kernel, 4 input channels, 32 output channels.
    W_conv1 = self.weight_variable('W_conv1', [8, 8, 4, 32])
    b_conv1 = self.bias_variable('b_conv1', [32])
    # Second conv layer: 4x4 kernel, 32 input channels, 64 output channels.
    W_conv2 = self.weight_variable('W_conv2', [4, 4, 32, 64])
seed_api: tensorflow.nn.max_pool
index: 5591
seed:
import tensorflow as tf
eval_config.batch_size = 1
eval_config.num_steps = 1

with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
        train_input = PTBInput(config=config, data=train_data, name="TrainInput")
        with tf.variable_scope("Model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config, input_=train_input)
        tf.summary.scalar("Training Loss", m.cost)
        tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
        valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
        tf.summary.scalar("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
        test_input = PTBInput(
            config=eval_config, data=test_data, name="TestInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mtest = PTBModel(is_training=False, config=eval_config,
                             input_=test_input)

    models = {"Train": m, "Valid": mvalid, "Test": mtest}
seed_api: tensorflow.name_scope
index: 5592
seed:
import tensorflow as tf
    return images, sparse_labels


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv_scale(x, W):
    return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID')


def inference(x):
    """
    Creates a model with pooling across space and scales.
seed_api: tensorflow.Variable
index: 5593
seed:
from tensorflow.python.framework import ops
    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
    output_shape.assert_is_fully_defined()
    filter_height = int(filter_shape[0])
    filter_width = int(filter_shape[1])
    filter_in_depth = int(filter_shape[2])
    output_count = np.prod(output_shape.as_list())
    return ops.OpStats("flops", (output_count * filter_in_depth * filter_height *
                                 filter_width * 2))


@ops.RegisterStatistics("Conv2D", "weight_parameters")
def _calc_conv_weight_params(graph, node):
seed_api: tensorflow.python.framework.ops.OpStats
index: 5594
seed:
import tensorflow as tf
    self.rewards_ph_n + (1 - self.terminals_ph_n) * (
        self.gamma ** self.n_step_length) * self.value_target_n)
qf1_loss_n = 0.5 * tf.reduce_mean(((q_backup_n - qf1) ** 2) * self.weight_ph)
qf1_loss_n_col = tf.reduce_mean(((q_backup_n - qf1) ** 2), 1)
qf2_loss_n = 0.5 * tf.reduce_mean(((q_backup_n - qf2) ** 2) * self.weight_ph)

if self.n_step:
    value_for_priority = qf1_loss_col + qf1_loss_n_col
else:
seed_api: tensorflow.reduce_mean
index: 5595
seed:
import tensorflow as tf
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
                                         lambda: tf.shape(x)))

x = tf.placeholder(dtype=tf.int32, shape=[1])
seed_api: tensorflow.shape
index: 5596
seed:
from tensorflow.python.ops import array_ops
cm_dtype = dtypes.int64 if weights is not None else dtypes.float64
total_cm = _create_local('total_confusion_matrix',
                         shape=[num_classes, num_classes], dtype=cm_dtype)

# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)

# Flatten the input if its rank > 1.
predictions_rank = predictions.get_shape().ndims
if predictions_rank > 1:
    predictions = array_ops.reshape(predictions, [-1])

labels_rank = labels.get_shape().ndims
if labels_rank > 1:
    labels = array_ops.reshape(labels, [-1])

weights = _mask_weights(ignore_mask, weights)

if weights is not None:
    weights_rank = weights.get_shape().ndims
    if weights_rank > 1:
        weights = array_ops.reshape(weights, [-1])
seed_api: tensorflow.python.ops.array_ops.reshape
index: 5597
seed:
import tensorflow as tf
        in_size: size of the hidden state vectors
        mats: list of hidden state vectors
    """
    pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction on every word.
    def GetWordPred(o_):
        logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
        return tf.nn.softmax(logits)

    # self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats])
    # self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape())
    # self.probs = tf.mul(tf.expand_dims(self._mask, 2), self.preds_by_word)

    self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
    self.preds_by_instance = tf.pack(
        [self.preds_by_word[:, i, :]
         for i in range(self.preds_by_word.get_shape()[1])])
seed_api: tensorflow.nn.xw_plus_b
index: 5598
seed:
import tensorflow as tf
    shape = tf.cast(shape, tf.int64)
    attn_dist = tf.reshape(attn_dist, shape=[-1])  # [batch_size*passage_length]
    one_hot_spare_rep = tf.SparseTensor(
        indices=indices, values=attn_dist,
        dense_shape=shape)  # [batch_size, passage_length, extended_vsize]

    if passage_mask is not None:
        passage_mask = tf.expand_dims(passage_mask, axis=-1)
        one_hot_spare_rep = one_hot_spare_rep * passage_mask

    one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1)  # [batch_size, extended_vsize]
    vocab_dist = tf.add(vocab_dist, one_hot_spare_rep)

    if self.options.add_first_word_prob_for_phrase:
        vocab_dist = tf.nn.softmax(vocab_dist)  # normalize

    return vocab_dist  # [batch_size, extended_vsize]


def linear(args, output_size, bias=True, bias_start=0.0, scope=None):
    if args is None or (isinstance(args, (list, tuple)) and not args):
        raise ValueError("`args` must be specified")
seed_api: tensorflow.add
index: 5599