Columns: seed (string, lengths 25 to 2.89k), seed_api (string, lengths 14 to 102), index (int64, 0 to 14.8k)
import tensorflow as tf order_m: TensorLike, theta: TensorLike, phi: TensorLike, name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike: # pylint: disable=line-too-long with tf.name_scope(name): degree_l = tf.convert_to_tensor(value=degree_l) order_m = tf.convert_to_tensor(value=order_m) theta = tf.convert_to_tensor(value=theta) phi = tf.convert_to_tensor(value=phi) var_type = theta.dtype sign_m = tf.math.sign(order_m) order_m = tf.abs(order_m)
tensorflow.convert_to_tensor
4,500
import tensorflow as tf initializer=initializer) w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
tensorflow.reshape
4,501
import tensorflow as tf facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
tensorflow.layers.dense
4,502
import tensorflow as tf tf.app.flags.DEFINE_integer('seed', 1, "initial random seed") tf.app.flags.DEFINE_bool('validation', False, "") tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch") tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch") tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch") tf.app.flags.DEFINE_integer('eval_freq', 5, "") tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training") tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay") tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch") tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate") tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate") tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start") tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}") if FLAGS.dataset == 'cifar10': from cifar10 import inputs, unlabeled_inputs elif FLAGS.dataset == 'svhn': from svhn import inputs, unlabeled_inputs else: raise NotImplementedError
tensorflow.app.flags.DEFINE_float
4,503
import tensorflow as tf def _get_placeholder_shape(self, name): if name == "input_0": return self.a.shape if name == "input_1": return self.b.shape if name == "input_2": return self.c.shape def test_tensor_rearrange(): tensor_rearrange = TensorRearrange(seed=713) in_node_a = tensor_rearrange.get_placeholder("input_0") in_node_b = tensor_rearrange.get_placeholder("input_1") in_node_c = tensor_rearrange.get_placeholder("input_2") stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]], [in_node_a, in_node_b, in_node_c]) # should be 11,5,4 list_of_parts = tf.dynamic_partition(tf.transpose(stitched, perm=[1, 2, 0]), [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]], num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11 node_a = tf.div(list_of_parts[0], list_of_parts[1]) node_b = tf.divide(list_of_parts[2], list_of_parts[3]) trace_node = tf.trace(node_a) + node_b # there is a broadcast here out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + tf.Variable(tf.random_normal(shape=(2, 3))) placeholders = [in_node_a, in_node_b, in_node_c] predictions = [out_node] # Run and persist
tensorflow.dynamic_stitch
4,504
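The seed above exercises tf.dynamic_stitch, which scatters the entries of several input tensors into one output according to per-tensor index lists. A minimal sketch with toy values (illustrative only, not taken from the row above):

import tensorflow as tf

# merged[indices[m][i]] = data[m][i]: entries are placed by destination index.
indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([10.0, 30.0]), tf.constant([20.0, 40.0])]
merged = tf.dynamic_stitch(indices, data)  # -> [10.0, 20.0, 30.0, 40.0]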
import tensorflow as tf flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
tensorflow.app.run
4,505
from tensorflow.python.platform import tf_logging as logging return try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: logging.info("Skipping exporting for the same step.") class CheckpointSaver(BaseMonitor): """Saves checkpoints every N steps.""" def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None,
tensorflow.python.platform.tf_logging.info
4,506
from tensorflow.python.framework import ops ops.RegisterShape("Imag")(common_shapes.unchanged_shape) ops.RegisterShape("Inv")(common_shapes.unchanged_shape) ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape) ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
4,507
import tensorflow as tf # 3. Sometimes we want to know which device TensorFlow is using. When a previously saved model is loaded and its graph has ops pinned to fixed devices, the serving machine may offer different devices to the graph. To support this, just enable soft device placement in the config config = tf.ConfigProto() config.allow_soft_placement = True sess_soft = tf.Session(config=config) # 4. When using a GPU, TensorFlow by default grabs most of the GPU memory. That is often what we want, but we can also allocate GPU memory more carefully. If TensorFlow should not hold all GPU memory up front, we can enable the memory-growth option so the allocation grows gradually up to the limit config.gpu_options.allow_growth = True sess_grow = tf.Session(config=config) # 5. To hard-limit the fraction of GPU memory TensorFlow may use, set per_process_gpu_memory_fraction in the config config.gpu_options.per_process_gpu_memory_fraction = 0.4 sess_limited = tf.Session(config=config) # 6. Sometimes we want the code to be robust enough to decide how many GPUs to use; TensorFlow has built-in functions to detect them. This is useful when we want the code to exploit GPU compute when enough GPU memory is available and to assign specific ops to the GPU if tf.test.is_built_with_cuda(): pass
tensorflow.Session
4,508
import tensorflow.contrib.graph_editor as ge checkpoints_other) debug_print("ops_to_copy = %s", ops_to_copy) if not ops_to_copy: # we're done! break copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected_other, checkpoints_other, copied_ops) # gradient flowing through the checkpointed node boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts] substitute_backprops = [d_checkpoints[r] for r in ts] dv = tf_gradients(boundary, checkpoints_disconnected_other+xs,
tensorflow.contrib.graph_editor.reroute_ts
4,509
import tensorflow as tf facts = tf.concat(facts, 2) print ("querry_size mismatch") query = tf.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.concat
4,510
import tensorflow as tf output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
tensorflow.nn.softmax
4,511
import tensorflow as tf config = tf.ConfigProto(allow_soft_placement=True) with tf.Session(config=config) as sess: sess.run(tf.global_variables_initializer()) sentence_id = 0 with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:
tensorflow.global_variables_initializer
4,512
import tensorflow as tf help='data_location') args = parser.parse_args() vdata = np.load(args.data_location) tf.reset_default_graph() idim = (36, 64) keep_prob = tf.placeholder(tf.float32, name='keep_prob') tftrain = tf.placeholder(tf.bool, name='tftrain') batch_size=100 if (args.experiment_type == "reach") or (args.experiment_type == "push"): idim = (48, 48) tfinput = tf.placeholder(tf.float32, (3, batch_size) + idim + (3, ), name='x') if args.experiment_type == "reach":
tensorflow.placeholder
4,513
import tensorflow as tf for matrix in matrices: matrix_shape = tf.shape(matrix) row_before_length = current_column current_column += matrix_shape[-1] row_after_length = ret_columns - current_column row_blocks.append(tf.pad( tensor=matrix, paddings=tf.concat( [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32), [(row_before_length, row_after_length)]], axis=0))) blocked = tf.concat(row_blocks, -2) blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols))) return blocked
tensorflow.rank
4,514
import tensorflow as tf rate=rate, activation_fn=None, normalizer_fn=None, scope=scope)) return tf.add_n(branch_logits)
tensorflow.add_n
4,515
import tensorflow as tf # Convolution on patches. if transpose: q = tf.nn.conv2d(p, w, strides, 'VALID', data_format='NCHW', use_cudnn_on_gpu=True) else: q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True) # Allocate output tensor. if use_var: y = sbnet_module.sparse_scatter_var( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_bstride=tf.constant(block_params.bstrides, dtype=tf.int32), dynamic_boffset=tf.constant([0, 0], dtype=tf.int32), add=False, transpose=transpose, atomic=atomic) else: y = sbnet_module.sparse_scatter( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_boffset=tf.constant([0, 0], dtype=tf.int32), add=False,
tensorflow.constant
4,516
import tensorflow as tf return cw_1d elif z_dim == 2: return cw_2d elif z_dim >= 20: return cw else: raise ValueError('Not defined for this latent dimension') def cw_sampling(X, y=None): def phi_sampling(s, D): return tf.pow(1.0 + 4.0*s/(2.0*D-3), -0.5) D = tf.cast(tf.shape(X)[1], tf.float32) N = tf.cast(tf.shape(X)[0], tf.float32) D_int = tf.cast(D, tf.int32) N_int = tf.cast(N, tf.int32) if y is None: y = silverman_rule_of_thumb(N) YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32), scale_diag=tf.ones(D_int, tf.float32)) Y = YDistr.sample(N_int) T = 1.0/(2.0*N*tf.sqrt(m.pi*y)) A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2) A = tf.reduce_sum(phi_sampling(A0/(4*y), D)) B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2) B = tf.reduce_sum(phi_sampling(B0/(4*y), D))
tensorflow.cast
4,517
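In the cw_sampling seed above, phi_sampling implements the kernel

\phi_D(s) = \left(1 + \frac{4s}{2D - 3}\right)^{-1/2}

and A0, B0 are the pairwise squared Euclidean distances within the sample X and within the Gaussian sample Y, obtained by broadcasting tf.expand_dims(., 0) against tf.expand_dims(., 1) before summing over the feature axis.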
import tensorflow as tf f.write("final loss:" + str(format(loss_out)) +" \n") f.write("final test accuracy:" + str(format(accuracy_out)) +" \n") f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n") else : # Running a new session print("Starting 2nd session...") with tf.Session() as sess: # Initialize variables sess.run(init) f.write("---Restore model \n") # Restore model weights from previously saved model
tensorflow.Session
4,518
import tensorflow as tf # Initialize the mean mean = tf.get_variable(name + "_mean", shape, dtype=dtype) # Initialize the standard deviation pre_sigma = tf.get_variable(name + "_standard_deviation", shape, initializer=std_init, dtype=dtype) standard_deviation = tf.nn.softplus(pre_sigma) + 1e-5 # The famous reparametrization formula for the factorized Gaussian noise = tf.random_normal([num_samples] + shape, 0.0, 1.0, dtype) weights = mean + standard_deviation * noise return weights, mean, standard_deviation, pre_sigma, noise
tensorflow.random_normal
4,519
import tensorflow as tf def testParetoLogCdfMultidimensional(self): batch_size = 6 scale = tf.constant([[2., 4., 5.]] * batch_size) scale_v = [2., 4., 5.] concentration = tf.constant([[1.]] * batch_size)
tensorflow.constant
4,520
import tensorflow as tf batch_size = 8 hparams = { "pretrained_model_name": None, "regr_strategy": "cls_time" } inputs = tf.placeholder(tf.int32, shape=[batch_size, 6]) regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(tf.global_variables_initializer())
tensorflow.placeholder
4,521
import tensorflow as tf # Calculate the output size. x_shape = tf.shape(x)
tensorflow.shape
4,522
import tensorflow as tf collect_op = utils.collect_gradients(gradients, variables) scale = 1.0 / params.update_cycle gradients, variables = utils.scale_gradients(grads_and_vars, scale) # Gradient clipping to avoid gradient explosion if isinstance(params.clip_grad_norm or None, float): gradients, _ = tf.clip_by_global_norm(gradients, params.clip_grad_norm) # Update variables grads_and_vars = list(zip(gradients, variables)) with tf.control_dependencies([collect_op]): train_op = opt.apply_gradients(grads_and_vars, global_step)
tensorflow.clip_by_global_norm
4,523
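tf.clip_by_global_norm, used in the seed above, rescales a whole list of gradients so that their joint norm does not exceed clip_norm. A minimal sketch with toy gradients (values are illustrative only):

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]  # global norm = sqrt(9 + 16 + 144) = 13
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
# Every tensor in `clipped` is scaled by clip_norm / global_norm = 5/13,
# so the clipped list has global norm 5.0; `global_norm` reports the original 13.0.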
import tensorflow as tf return (dist_entropy, sample_masks, sample_log_prob) def main(unused_argv): tf.set_random_seed(FLAGS.random_seed) run_config_args = { 'model_dir': FLAGS.model_dir,
tensorflow.set_random_seed
4,524
import tensorflow as tf vars_to_load = {} for var in resnet_vars: if var.name not in optimizer_vars: var_name = var.name # Trim the index of the variable. if ':' in var_name: var_name = var_name[:var_name.rindex(':')] if params['skip_checkpoint_variables'] and re.match( params['skip_checkpoint_variables'], var_name[len(prefix):]): continue vars_to_load[var_name[len(prefix):]] = var_name tf.logging.info( 'Optimizer vars: %s.' % ', '.join(var for var in optimizer_vars)) tf.logging.info('Will train: %s.' % vars_to_load) tf.train.init_from_checkpoint(params['resnet_checkpoint'], vars_to_load) if not vars_to_load: raise ValueError('Variables to load is empty.') return tf.train.Scaffold() # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) grads_and_vars = optimizer.compute_gradients(total_loss, var_list) gradients, variables = zip(*grads_and_vars) grads_and_vars = [] # Special treatment for biases (beta is named as bias in reference model) # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/optimizer.py#L113 # pylint: disable=line-too-long
tensorflow.logging.info
4,525
import tensorflow as tf for i, layer_size in enumerate(dnn_sizes): layer_name = 'dnn_{}'.format(i) with tf.variable_scope(layer_name): dnn_w = tf.get_variable('W', shape=[last_layer_size, layer_size], initializer=dnn_init, dtype=dtype) dnn_b = tf.get_variable('b', shape=[layer_size], initializer=tf.constant_initializer(0.0), dtype=dtype) projected = tf.nn.bias_add(tf.matmul(last_layer, dnn_w), dnn_b) # TODO: argument nonlinearity, change bias to 0.1 if relu if dnn_nonlin == 'tanh': last_layer = tf.nn.tanh(projected) elif dnn_nonlin == 'sigmoid': last_layer = tf.nn.sigmoid(projected) elif dnn_nonlin == 'relu': last_layer = tf.nn.relu(projected) else: raise NotImplementedError() if mode == 'train' and dnn_keep_prob < 1.0: last_layer = tf.nn.dropout(last_layer, dnn_keep_prob) last_layer_size = layer_size print('{}: {}'.format(layer_name, last_layer.get_shape()))
tensorflow.nn.sigmoid
4,526
import tensorflow as tf if return_final_state: return final_state else: return outputs @layer def reshape_layer(tensor, shape, **opts): out = tf.reshape(tensor, shape=shape) return out @layer def dense_layer(tensor, hidden_dims, weight=None, bias=None, **opts): original_tensor_shape = tf.shape(tensor) in_dim = int(tensor.get_shape()[-1]) rank = _rank(tensor) if rank > 2: # -- time distributed dense tensor = tf.reshape(tensor, shape=(-1, in_dim)) name = opts.get("name", "") if weight is None: initializer = tf.contrib.layers.xavier_initializer(uniform=True) weight = tf.get_variable("{}_dense_W".format(name), initializer=initializer(shape=(in_dim, hidden_dims))) if bias is None: bias = tf.get_variable("{}_dense_b".format(name), initializer=tf.zeros(shape=hidden_dims))
tensorflow.shape
4,527
import tensorflow as tf shifted_sum_x2, shift, name="normalize_moments") return mean, variance def build_moving_stats(): return ( tf.identity(self._moving_mean), tf.identity(self._moving_variance), ) mean, variance = utils.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats, )
tensorflow.identity
4,528
import tensorflow as tf WN: with normalization """ norm = argus_integral_phalf(m_low, m_high, m0, c) return argus_pdf(m, m0, c) / norm # // --- Observable --- # RooRealVar mes("mes","m_{ES} (GeV)",5.20,5.30) ; # // --- Build Gaussian signal PDF --- # RooRealVar sigmean("sigmean","B^{#pm} mass",5.28,5.20,5.30) ; # RooRealVar sigwidth("sigwidth","B^{#pm} width",0.0027,0.001,1.) ; sigmean = tf.Variable(5.28, name="sigmean", dtype=tf.float64) sigwidth = tf.Variable(0.0027, name="sigwidth", dtype=tf.float64) vdict['sigmean'] = sigmean vdict['sigwidth'] = sigwidth # RooGaussian gauss("gauss","gaussian PDF",mes,sigmean,sigwidth) ; def gaussian_pdf(x, mean, std): val = tf.div(tf.exp(-tf.pow((x - mean) / std, 2) / two), (sqrt2pi * std), name="gaussian_pdf") return val
tensorflow.Variable
4,529
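The gaussian_pdf helper in the seed above is the standard normal density written with TensorFlow ops,

f(x; \mu, \sigma) = \frac{1}{\sigma\sqrt{2\pi}} \exp\!\left(-\frac{(x-\mu)^2}{2\sigma^2}\right),

where two and sqrt2pi are presumably module-level constants (2 and sqrt(2*pi)) defined outside the excerpt.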
import tensorflow as tf self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024]) def testUnknowBatchSize(self): batch_size = 1 height, width = 224, 224 num_classes = 1000 inputs = tf.placeholder(tf.float32, (None, height, width, 3)) logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3))
tensorflow.placeholder
4,530
import tensorflow as tf test(model, eval_data) print("Colorbot is ready to generate colors!") while True: try: color_name = six.moves.input( "Give me a color name (or press enter to exit): ") except EOFError: return if not color_name: return _, chars, length = parse(color_name) with tf.device(device): (chars, length) = (tf.identity(chars), tf.identity(length)) chars = tf.expand_dims(chars, 0) length = tf.expand_dims(length, 0) preds = tf.unstack(model((chars, length), training=False)[0]) # Predictions cannot be negative, as they are generated by a ReLU layer; # they may, however, be greater than 1. clipped_preds = tuple(min(float(p), 1.0) for p in preds) rgb = tuple(int(p * 255) for p in clipped_preds) print("rgb:", rgb) data = [[clipped_preds]] if HAS_MATPLOTLIB: plt.imshow(data)
tensorflow.device
4,531
import tensorflow as tf wd=weight_decay) outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases) if bn: outputs = batch_norm_for_conv1d(outputs, is_training, bn_decay=bn_decay, scope='bn') if activation_fn is not None:
tensorflow.nn.bias_add
4,532
import tensorflow as tf for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var) with tf.variable_scope("input_info", reuse=False): tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph)) tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph))
tensorflow.variable_scope
4,533
import tensorflow as tf for name, op in ops.items(): tf.add_to_collection(name, op) self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params,
tensorflow.get_collection_ref
4,534
import tensorflow as tf if self.job_name and FLAGS.cross_replica_sync: # Block all replicas until all replicas are ready for next step. fetches.append(self.add_sync_queues_and_barrier( 'sync_queues_step_end_', [main_fetch_group])) variable_mgr_post_init_ops = self.variable_mgr.get_post_init_ops() if variable_mgr_post_init_ops: post_init_op_group = tf.group(*variable_mgr_post_init_ops) else: post_init_op_group = None local_var_init_op = tf.local_variables_initializer() summary_op = tf.summary.merge_all() is_chief = (not self.job_name or self.task_index == 0) summary_writer = None if (is_chief and FLAGS.summary_verbosity and FLAGS.train_dir and FLAGS.save_summaries_steps > 0): summary_writer = tf.summary.FileWriter(FLAGS.train_dir, tf.get_default_graph()) # We run the summaries in the same thread as the training operations by # passing in None for summary_op to avoid a summary_thread being started.
tensorflow.local_variables_initializer
4,535
import tensorflow as tf if self.data_format == 'NCHW': image = tf.transpose(image, [0, 3, 1, 2]) logits = self.get_logits(image, label) loss = ImageNetModel.compute_loss_and_error( logits, label, label_smoothing=self.label_smoothing) if self.weight_decay > 0: wd_loss = regularize_cost(self.weight_decay_pattern, tf.contrib.layers.l2_regularizer(self.weight_decay), name='l2_regularize_loss') add_moving_summary(loss, wd_loss) total_cost = tf.add_n([loss, wd_loss], name='cost') else: total_cost = tf.identity(loss, name='cost') add_moving_summary(total_cost) if self.loss_scale != 1.: logger.info("Scaling the total loss by {} ...".format(self.loss_scale)) return total_cost * self.loss_scale else: return total_cost @abstractmethod
tensorflow.add_n
4,536
import tensorflow as tf df.reset_state() scene_data = df.get_data() saver = tf.train.Saver(tf.global_variables()) with tf.Session() as sess: ckpt = tf.train.get_checkpoint_state('./model_pretrain') if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): print("loading checkpoint...") saver.restore(sess, ckpt.model_checkpoint_path)
tensorflow.Session
4,537
import tensorflow as tf tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
tensorflow.logging.info
4,538
import tensorflow as tf dropout = self.dropout) with tf.variable_scope("Context_to_Query_Attention_Layer"): # C = tf.tile(tf.expand_dims(c,2),[1,1,self.q_maxlen,1]) # Q = tf.tile(tf.expand_dims(q,1),[1,self.c_maxlen,1,1]) # S = trilinear([C, Q, C*Q], input_keep_prob = 1.0 - self.dropout) S = optimized_trilinear_for_attention([c, q], self.c_maxlen, self.q_maxlen, input_keep_prob = 1.0 - self.dropout) mask_q = tf.expand_dims(self.q_mask, 1) S_ = tf.nn.softmax(mask_logits(S, mask = mask_q)) mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask = mask_c), dim = 1),(0,2,1)) self.c2q = tf.matmul(S_, q) self.q2c = tf.matmul(tf.matmul(S_, S_T), c) attention_outputs = [c, self.c2q, c * self.c2q, c * self.q2c] with tf.variable_scope("Model_Encoder_Layer"): inputs = tf.concat(attention_outputs, axis = -1) self.enc = [conv(inputs, d, name = "input_projection")] for i in range(3): if i % 2 == 0: # dropout every 2 blocks self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout) self.enc.append( residual_block(self.enc[i], num_blocks = 7, num_conv_layers = 2, kernel_size = 5, mask = self.c_mask, num_filters = d, num_heads = nh, seq_len = self.c_len,
tensorflow.variable_scope
4,539
from tensorflow.python.framework import ops def every_n_step_begin(self, step): super(LoggingTrainable, self).every_n_step_begin(step) # Get a list of trainable variables at the beginning of every N steps. # We cannot get this in __init__ because train_op has not been generated. trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=self._scope) self._names = {} for var in trainables:
tensorflow.python.framework.ops.get_collection
4,540
import tensorflow as tf init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
tensorflow.logging.info
4,541
import tensorflow as tf np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1, labels_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all) return None def apply_attack_carlini(hps): # Construct graph images, labels = input_name.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200 Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False) Res.build_graph() saver = tf.train.Saver() # Open session and restore checkpoint sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) tf.train.start_queue_runners(sess) sess.run(tf.global_variables_initializer()) ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) num_sample = hps.batch_size * FLAGS.eval_batch_count # Initialize results to save entropy_test_adv_all = np.array([]) confidence_test_adv_all = np.array([]) entropy_test_nor_all = np.array([]) confidence_test_nor_all = np.array([]) logits_adv_all = np.reshape(np.array([]), (0, 64)) logits_nor_all = np.reshape(np.array([]), (0, 64))
tensorflow.global_variables_initializer
4,542
import tensorflow as tf x = flat_observations for size in self.hparams.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) logits = tf.layers.dense(x, self.hparams.problem.num_actions) logits = tf.expand_dims(logits, axis=1) with tf.variable_scope("value"): x = flat_observations for size in self.hparams.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1) logits = clip_logits(logits, self.hparams) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicy(PolicyBase): """Small cnn network with categorical output."""
tensorflow.layers.dense
4,543
import tensorflow as tf A = tf.transpose(A / 128.0 - 1.0, [0, 3, 1, 2]) B = tf.transpose(B / 128.0 - 1.0, [0, 3, 1, 2])
tensorflow.transpose
4,544
import tensorflow as tf loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, }
tensorflow.metrics.accuracy
4,545
from tensorflow.python.framework import ops merged_shape = merged_shape.merge_with(input_.get_shape()) return [merged_shape] @ops.RegisterShape("Select") def _SelectShape(op): # All three inputs must have the same shape. return [op.inputs[0].get_shape() .merge_with(op.inputs[1].get_shape()) .merge_with(op.inputs[2].get_shape())] @ops.RegisterShape("ArgMax") @ops.RegisterShape("ArgMin") def _ArgOpShape(op): """Common shape function for arg-reduction ops.""" dimension_shape = op.inputs[1].get_shape() dimension_shape.assert_is_compatible_with(tensor_shape.scalar()) input_shape = op.inputs[0].get_shape() if input_shape.ndims is None: return [tensor_shape.unknown_shape()] elif input_shape.ndims <= 1: return [tensor_shape.scalar()] dimension = tensor_util.ConstantValue(op.inputs[1]) if dimension is None:
tensorflow.python.framework.ops.RegisterShape
4,546
import tensorflow as tf is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tensorflow.trainable_variables
4,547
import tensorflow as tf print(var.name) self.test_image_A = tf.placeholder(tf.float32,[None, self.image_size,self.image_size,self.input_dim], name='test_A') self.test_image_B = tf.placeholder(tf.float32,[None, self.image_size, self.image_size,self.output_c_dim], name='test_B') self.saver = tf.train.Saver() def train_network(self):
tensorflow.train.Saver
4,548
import tensorflow as tf train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
tensorflow.logging.info
4,549
import tensorflow as tf num_layers = len(weights) + 1 H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0 for l in range(0,num_layers-2): W1, W2 = weights[l] b = biases[l] H1 = tf.add(tf.matmul(H, W1), b) H2 = tf.matmul(H, W2) H = tf.tanh(tf.add(H1 * H2, H1)) W1, W2 = weights[-1] b = biases[-1] H1 = tf.add(tf.matmul(H, W1), b) H2 = tf.matmul(H, W2) Y = tf.add(H1 * H2, H1) return Y def fwd_gradients_0(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0] return tf.gradients(g, self.dummy_x0_tf)[0] def fwd_gradients_1(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0] return tf.gradients(g, self.dummy_x1_tf)[0] def net_U0(self, x): lambda_1 = self.lambda_1
tensorflow.add
4,550
import tensorflow as tf self.conv3 = tf.layers.conv2d(self.pool2, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2) self.conv4 = tf.layers.conv2d(self.pool3, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train) # b. Flatten input data self.flatten = tf.reshape(self.drop3, [-1, self.config.cifar10_cnn["fc1_nb_units"]]) # Create connected layers: fc1, fc2 with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected], normalizer_fn=tf.contrib.layers.batch_norm, normalizer_params={"is_training": self.train}): self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"]) self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None) # Compute loss with tf.name_scope("loss"): self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
tensorflow.reshape
4,551
import tensorflow as tf else: start_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1) end_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1) self.logits = [mask_logits(start_logits, mask=tf.reshape(self.c_mask, [N, -1])), mask_logits(end_logits, mask=tf.reshape(self.c_mask, [N, -1]))] self.logits1, self.logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2), tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = tf.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits)
tensorflow.nn.softmax
4,552
import tensorflow as tf self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0] num_replicas = FLAGS.num_gpus if self._name == "Train" else 1 self._initial_state = util.import_state_tuples( self._initial_state, self._initial_state_name, num_replicas) self._final_state = util.import_state_tuples( self._final_state, self._final_state_name, num_replicas) @property def input(self):
tensorflow.add_to_collection
4,553
import tensorflow as tf source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb] target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb] return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k] def flatten_emb_by_sentence(self, emb, text_len_mask): num_sentences = tf.shape(emb)[0] max_sentence_length = tf.shape(emb)[1] emb_rank = len(emb.get_shape()) if emb_rank == 2:
tensorflow.shape
4,554
import tensorflow as tf from datasets import rocstories from analysis import rocstories as rocstories_analysis from text_utils import TextEncoder from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path def gelu(x): return 0.5*x*(1+tf.tanh(math.sqrt(2/math.pi)*(x+0.044715*tf.pow(x, 3)))) def swish(x): return x*tf.nn.sigmoid(x) opt_fns = { 'adam':adam, } act_fns = { 'relu':tf.nn.relu, 'swish':swish,
tensorflow.nn.sigmoid
4,555
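The two activations defined in this seed correspond to the usual tanh approximation of GELU and to swish,

\mathrm{gelu}(x) \approx 0.5\,x\left(1 + \tanh\!\left(\sqrt{2/\pi}\,(x + 0.044715\,x^3)\right)\right), \qquad \mathrm{swish}(x) = x\,\sigma(x),

where \sigma is the logistic sigmoid.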
import tensorflow as tf num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 4), res[0].shape) # Test externally provided output projection. w = tf.get_variable("proj_w", [2, 5]) b = tf.get_variable("proj_b", [5]) with tf.variable_scope("proj_seq2seq"): dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape)
tensorflow.get_variable
4,556
import tensorflow as tf tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids])) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids ) return feature def filed_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file ): writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 5000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask)
tensorflow.python_io.TFRecordWriter
4,557
import tensorflow as tf def __init__(self, config): # Import TF-Hub module self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1") def predict(self, payload): # Preprocess image hr_image = preprocess_image(payload["image_b64"]) # Run model fake_image = self.hub_module(hr_image) # convert to base64 img = get_image(tf.squeeze(fake_image)) im_file = io.BytesIO() img.save(im_file, format="PNG") im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8") return im_bytes
tensorflow.squeeze
4,558
import tensorflow as tf return new_state def rnn_output(self, new_state): if self.dale_ratio: new_output = tf.matmul(tf.nn.relu(new_state), tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out, name="in_2"), transpose_b=True, name="3") \ + self.b_out else: new_output = tf.matmul(tf.nn.relu(new_state),
tensorflow.abs
4,559
import tensorflow as tf if name == 'rpn_cls_prob_reshape': input_shape = tf.shape(bottom) # Use of -1 in tf.reshape(): -1 marks a dimension whose size is not known up front; it is inferred from the original array and the other specified sizes # Each row holds the foreground/background scores of one anchor: first all locations for the first anchor type, then all locations for the second anchor type, and so on bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]]) reshaped_score = tf.nn.softmax(bottom_reshaped, name=name) return tf.reshape(reshaped_score, input_shape) # [1,none,none,2] return tf.nn.softmax(bottom, name=name) def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name): with tf.variable_scope(name): rois, rpn_scores = tf.py_func(proposal_top_layer, [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._feat_stride, self._anchors, self._num_anchors], [tf.float32, tf.float32]) rois.set_shape([cfg.FLAGS.rpn_top_n, 5]) rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1]) return rois, rpn_scores def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
tensorflow.variable_scope
4,560
import tensorflow as tf shape = tf.shape(image) deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
tensorflow.stack
4,561
import tensorflow as tf head: numpy array - [num_classes] Weighting the loss of each class Optional: Prioritize some classes Returns: loss: Loss tensor of type float. """ with tf.name_scope('segment_loss'): # logits = tf.reshape(logits, (-1, num_classes)) epsilon = tf.constant(value=1e-7) labels = tf.to_float(labels) # labels = tf.to_float(tf.reshape(labels, (-1, num_classes))) softmax = tf.nn.softmax(logits) + epsilon if head is not None: cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head), axis=[1]) else: cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])
tensorflow.to_float
4,562
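Written out, the (optionally class-weighted) cross entropy computed in the segment_loss seed is

\mathrm{loss}_i = -\sum_{c} h_c \, y_{ic}\, \log\!\big(\mathrm{softmax}(\mathrm{logits})_{ic} + \epsilon\big),

where h is the optional per-class head weight vector, \epsilon = 10^{-7} is added for numerical stability, and the weights are dropped when head is None.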
import tensorflow as tf def prelu(_x, scope=''): """parametric ReLU activation""" with tf.variable_scope(name_or_scope=scope, default_name="prelu"): _alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1], dtype=_x.dtype, initializer=tf.constant_initializer(0.1)) return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x) def calc_auc(raw_arr): """Summary
tensorflow.maximum
4,563
from tensorflow.python.ops import nn raise ValueError("n_classes must be >= 2") super(_MultiClassTargetColumn, self).__init__( loss_fn=loss_fn, num_label_columns=1 if n_classes == 2 else n_classes, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False): if self.num_label_columns == 1: logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _default_eval_metrics(self): if self._num_label_columns == 1: return _get_default_binary_metrics_for_eval(thresholds=[.5]) return {} def get_eval_ops(self, features, logits, targets, metrics=None): loss = self.loss(logits, targets, features) result = {"loss": metrics_lib.streaming_mean(loss)}
tensorflow.python.ops.nn.softmax
4,564
import tensorflow as tf self.d_loss_real = self._sigmoid_kl_with_logits(self.end_points_D['D_on_data_logits'], 1. - d_label_smooth) class_loss_weight = 1. self.d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits( logits=self.end_points_D['class_logits'], labels=tf.to_int64(targets)) self.test_loss = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D_val['logits'], targets, 1))) self.error_rate = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D['class_logits'], targets, 1))) if gpu_idx == 0: update = tf.assign(num_error_rate, num_error_rate + 1.) with tf.control_dependencies([update]): tc = tf.maximum(.01, 1. / num_error_rate) update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate) with tf.control_dependencies([update]): self.d_loss_class = tf.identity(self.d_loss_class) self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits( logits=self.end_points_D['D_on_G_logits'], labels=tf.zeros_like(self.end_points_D['D_on_G_logits'])) self.d_loss_class = tf.reduce_mean(self.d_loss_class) self.d_loss_real = tf.reduce_mean(self.d_loss_real) self.d_loss_fake = tf.reduce_mean(self.d_loss_fake) if is_fm_loss:
tensorflow.control_dependencies
4,565
import tensorflow as tf if direction == 'forward': direct_mask_un = tf.greater(unhead_idxs, undep_idxs) # [bs, sluh, sld] else: direct_mask_un = tf.less(unhead_idxs, undep_idxs) # [bs, sluh, sld] # [bs, sluh, sld] rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_unhead_mask, 2)) pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un) # [bs, sluh, sld] # data for pooling pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1]) # bs,sluh,sld,hn # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn] pooling_data = mask_for_high_rank(pooling_data, pooling_mask) # [bs,sluh,sld,hn] pooling_data_sum = tf.reduce_sum(pooling_data, -2) # [bs,sluh,hn] pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True) # [bs,sluh] pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den) pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32) return pooling_result def scaled_tanh(x, scale=5.): return scale * tf.nn.tanh(1./scale * x)
tensorflow.equal
4,566
import tensorflow as tf def _transpose(self, x, perm): sample_batch_ndims = tf.rank(x) - self.rightmost_transposed_ndims perm = tf.concat([ tf.range(sample_batch_ndims), sample_batch_ndims + perm, ], axis=0) return tf.transpose(a=x, perm=perm) def _maybe_validate_rightmost_transposed_ndims( rightmost_transposed_ndims, validate_args, name=None): """Checks that `rightmost_transposed_ndims` is valid.""" with tf.name_scope(name, 'maybe_validate_rightmost_transposed_ndims',
tensorflow.transpose
4,567
import tensorflow as tf print(training_hooks, "==training_hooks==", "==task_index==", kargs.get("task_index", 1)) estimator_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op,
tensorflow.estimator.EstimatorSpec
4,568
import tensorflow as tf max_sequence_len=None, dropout_keep_prob=None, weights=None): super(WordSeqModel, self).__init__(batch_size, max_sequence_len, out_vocab_size, c2v) in_size = self._inputs[0].get_shape()[1].value # Also, output confidence scores at every word. confidence_mat = tf.get_variable('confidence_mat', [in_size, 1]) confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat) for o_ in self._inputs]) # dropout on confidence_scores random_tensor = (1.0 - self._dropout_keep_prob + tf.random_uniform(tf.shape(confidence_scores))) binary_tensor = -50.0 * tf.floor(random_tensor)
tensorflow.get_variable
4,569
import tensorflow as tf common_layers.shape_list(recent_output)[1], :, :] cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) samples = tf.concat([recent_output, cur_sample], axis=1) if not context.in_eager_mode():
tensorflow.concat
4,570
import tensorflow as tf self.assertEqual(1 + 0, metrics_dict['wer'].total_value) self.assertEqual(0 + 4, metrics_dict['wer'].total_weight) self.assertEqual(1 + 0, metrics_dict['norm_wer'].total_value) self.assertEqual(0 + 4, metrics_dict['norm_wer'].total_weight) def testBProp(self): with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() tf.global_variables_initializer().run() test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) mdl.train_op.run() def testBPropSmoothDecay(self): with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams() p.train.lr_schedule = ( schedule.ContinuousLearningRateSchedule.Params().Set( start_step=350000, half_life_steps=45000)) mdl = p.Instantiate() mdl.FPropDefaultTheta()
tensorflow.global_variables_initializer
4,571
import tensorflow as tf inputs = batch_norm(inputs, training, self.data_format) inputs = tf.nn.relu(inputs) # The current top layer has shape # `batch_size x pool_size x pool_size x final_size`. # ResNet does an Average Pooling layer over pool_size, # but that is the same as doing a reduce_mean. We do a reduce_mean # here because it performs better than AveragePooling2D. axes = [2, 3] if self.data_format == 'channels_first' else [1, 2] inputs = tf.reduce_mean(inputs, axes, keepdims=True) inputs = tf.identity(inputs, 'final_reduce_mean') inputs = tf.reshape(inputs, [-1, self.final_size]) inputs = tf.layers.dense(inputs=inputs, units=self.num_classes) inputs = tf.identity(inputs, 'final_dense') return inputs
tensorflow.reduce_mean
4,572
import tensorflow as tf simple function serves an illustrative purpose. It is *not* a useful random number generator, and should only be used in tests. Args: seed: A random seed. num_elements: Number of random values to generate. Returns: A `Tensor` of shape `(num_elements)` containing pseudorandom values. """ def next_num(num): # This creates a cycle of length 136. return tf.mod((num * 13), 137) num = tf.reshape(tf.mod(seed, 136) + 1, (1,)) result = num for _ in range(num_elements - 1): num = next_num(num) result = tf.concat([result, num], 0) return tf.to_float(result) @encoding_stage.tf_style_encoding_stage class PlusRandomNumEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, adding random values given a random seed.
tensorflow.mod
4,573
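The cycle length of 136 claimed in the next_num comment follows because 137 is prime and 13 is a primitive root modulo 137, so num -> (13 * num) mod 137 visits every nonzero residue before repeating. A quick plain-Python check (not part of the seed):

seen, num = set(), 1
while num not in seen:
    seen.add(num)
    num = (num * 13) % 137
print(len(seen))  # 136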
import tensorflow as tf with tf.control_dependencies([averages_op]): for i, dep_op in enumerate(dep_ops): dep_ops[i] = tf.identity(dep_op, name=dep_op.name.split(':')[0]) return dep_ops def exp_average(vec, curr_avg, decay=0.9): vec_avg = tf.reduce_mean(vec, 0) avg = tf.assign(curr_avg, curr_avg * decay + vec_avg * (1-decay)) return avg def gather_vec_pairs(vecs, gather_inds): """ gather obj-subj feature pairs """ vec_pairs = tf.gather(vecs, gather_inds)
tensorflow.assign
4,574
import tensorflow as tf sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: input_image = tf.constant(input_image, dtype=tf.float32) output = model.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5 output = tf.reverse(output, [-1]) output = tf.saturate_cast(output, tf.uint8) # load pretrained model vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name) assign_ops.append(tf.assign(var, var_value)) sess.run(assign_ops)
tensorflow.get_collection
4,575
import tensorflow as tf Returns: Updated learning rate value. """ super().__init__() self.initial_learning_rate = initial_learning_rate self.maximal_learning_rate = maximal_learning_rate self.step_size = step_size self.scale_fn = scale_fn self.scale_mode = scale_mode self.name = name def __call__(self, step): with tf.name_scope(self.name or "CyclicalLearningRate"): initial_learning_rate = tf.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate" ) dtype = initial_learning_rate.dtype maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype) step_size = tf.cast(self.step_size, dtype) cycle = tf.floor(1 + step / (2 * step_size)) x = tf.abs(step / step_size - 2 * cycle + 1) mode_step = cycle if self.scale_mode == "cycle" else step
tensorflow.name_scope
4,576
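The two quantities computed in the CyclicalLearningRate seed trace a triangular wave over training steps,

\mathrm{cycle} = \left\lfloor 1 + \frac{\mathrm{step}}{2\,\mathrm{step\_size}} \right\rfloor, \qquad x = \left|\frac{\mathrm{step}}{\mathrm{step\_size}} - 2\,\mathrm{cycle} + 1\right|,

so x falls from 1 to 0 and back to 1 once per cycle. The remainder of the method is cut off above; in the usual CLR formulation the rate is then presumably initial_learning_rate + (maximal_learning_rate - initial_learning_rate) * max(0, 1 - x), modulated by scale_fn.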
import tensorflow as tf layers = [ tf.concat([token_embeddings, token_embeddings], axis=2) ] n_lm_layers = len(lm_graph.lstm_outputs['forward']) for i in range(n_lm_layers): layers.append( tf.concat( [lm_graph.lstm_outputs['forward'][i], lm_graph.lstm_outputs['backward'][i]], axis=-1 ) )
tensorflow.concat
4,577
import tensorflow as tf w = tf.matmul(gsum, phi) w = tf.expand_dims(w, [2])
tensorflow.expand_dims
4,578
import tensorflow as tf processed_image = utils.process_image(image, mean_pixel) with tf.variable_scope("inference"): image_net = vgg_net(weights, processed_image) conv_final_layer = image_net["conv5_3"] pool5 = utils.max_pool_2x2(conv_final_layer) W6 = utils.weight_variable([7, 7, 512, 4096], name="W6") b6 = utils.bias_variable([4096], name="b6") conv6 = utils.conv2d_basic(pool5, W6, b6) relu6 = tf.nn.relu(conv6, name="relu6") if FLAGS.debug: utils.add_activation_summary(relu6) relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob) W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7") b7 = utils.bias_variable([4096], name="b7") conv7 = utils.conv2d_basic(relu_dropout6, W7, b7) relu7 = tf.nn.relu(conv7, name="relu7") if FLAGS.debug: utils.add_activation_summary(relu7) relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob) W8 = utils.weight_variable([1, 1, 4096, 150], name="W8") b8 = utils.bias_variable([150], name="b8") # W_h = utils.weight_variable([1, 7, 7, 4], name="Wh")
tensorflow.nn.dropout
4,579
import tensorflow as tf def split_states(x, n): x_shape = shape_list(x) m = x_shape[-1] new_x_shape = x_shape[:-1]+[n, m//n] return tf.reshape(x, new_x_shape) def merge_states(x): x_shape = shape_list(x) new_x_shape = x_shape[:-2]+[np.prod(x_shape[-2:])] return tf.reshape(x, new_x_shape) def split_heads(x, n, k=False): #[-1,n_ctx,head,head_emb] if k: return tf.transpose(split_states(x, n), [0, 2, 3, 1]) else: return tf.transpose(split_states(x, n), [0, 2, 1, 3])
tensorflow.reshape
4,580
from tensorflow.python.framework import ops Returns: A `Tensor` with the same type as `value`. """ with ops.op_scope([value, bias], name, "BiasAdd") as name: value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name) ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape) ops.RegisterShape("BiasAddGrad")(common_shapes.bias_add_grad_shape) # pylint: disable=protected-access def bias_add_v1(value, bias, name=None): """Adds `bias` to `value`. This is a deprecated version of bias_add and will soon to be removed. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions.
tensorflow.python.framework.ops.RegisterShape
4,581
import tensorflow as tf """Name of dataset: imagenet or flowers. If not specified, it is automatically guessed based on --data_dir.""") tf.flags.DEFINE_string('resize_method', 'bilinear', """Method for resizing input images: crop,nearest,bilinear,bicubic or area. The 'crop' mode requires source images to be at least as large as the network input size, while the other modes support any sizes and apply random bbox distortions before resizing (even with --nodistortions).""") tf.flags.DEFINE_boolean('distortions', True, """Enable/disable distortions during image preprocessing. These include bbox and color distortions.""") tf.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") tf.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #tf.flags.DEFINE_string('data_format', 'NCHW',
tensorflow.flags.DEFINE_boolean
4,582
from tensorflow.contrib.framework import deprecated input_fn, default_output_alternative_key=default_output_alternative_key, assets_extra=assets_extra, as_text=as_text, exports_to_keep=exports_to_keep) @property def model_dir(self): return self._estimator.model_dir @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def weights_(self): hiddenlayer_weights = [ self.get_variable_value("dnn/hiddenlayer_%d/weights" % i) for i, _ in enumerate(self._hidden_units) ] logits_weights = [self.get_variable_value("dnn/logits/weights")] return hiddenlayer_weights + logits_weights
tensorflow.contrib.framework.deprecated
4,583
import tensorflow as tf assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale(): with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), )
tensorflow.nn.softmax
4,584
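The kl tensor in the parameter-space-noise seed above is the per-state KL divergence between the original and the adaptively perturbed policies,

D_{\mathrm{KL}}(\pi \,\|\, \tilde{\pi}) = \sum_{a} \pi(a \mid s)\,\big(\log \pi(a \mid s) - \log \tilde{\pi}(a \mid s)\big),

with \pi = softmax(q_values) and \tilde{\pi} = softmax(q_values_adaptive); mean_kl is its batch average, used to adapt param_noise_scale.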
import tensorflow as tf def exp_average(vec, curr_avg, decay=0.9): vec_avg = tf.reduce_mean(vec, 0) avg = tf.assign(curr_avg, curr_avg * decay + vec_avg * (1-decay)) return avg def gather_vec_pairs(vecs, gather_inds): """ gather obj-subj feature pairs """ vec_pairs = tf.gather(vecs, gather_inds) vec_len = int(vec_pairs.get_shape()[2]) * 2 vec_pairs = tf.reshape(vec_pairs, [-1, vec_len]) return vec_pairs def pad_and_gather(vecs, mask_inds, pad=None): """ pad a vector with a zero row and gather with input inds """
tensorflow.gather
4,585
from tensorflow.python.eager import context else: cur_sample = samples[:, common_layers.shape_list(recent_output)[1], :, :] cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) samples = tf.concat([recent_output, cur_sample], axis=1) if not context.in_eager_mode(): samples.set_shape([None, None, None, 1]) # Assuming we have one shard for logits. logits = tf.concat([recent_logits, logits[:, -1:]], 1)
tensorflow.python.eager.context.in_eager_mode
4,586
import tensorflow as tf ---------- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__ """ def __init__( self, inputs=None, train_labels=None, vocabulary_size=80000, embedding_size=200, num_sampled=64, nce_loss_args=None, E_init=tf.random_uniform_initializer(minval=-1.0, maxval=1.0), E_init_args=None, nce_W_init=tf.truncated_normal_initializer(stddev=0.03), nce_W_init_args=None, nce_b_init=tf.constant_initializer(value=0.0), nce_b_init_args=None, name='word2vec', ): if nce_loss_args is None: nce_loss_args = {} if E_init_args is None: E_init_args = {} if nce_W_init_args is None:
tensorflow.random_uniform_initializer
4,587
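A minimal sketch of tf.random_uniform_initializer backing an embedding matrix, as in the layer defaults above; the variable name and the (smaller) vocabulary size are arbitrary.

import tensorflow as tf

embeddings = tf.get_variable(
    name='embeddings', shape=[10000, 200],
    initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    vals = sess.run(embeddings)
    print(vals.min(), vals.max())   # entries drawn uniformly from [-1, 1)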
import tensorflow as tf data = tf.fill([1024, 1024], 0) p = tf.Variable([1])
tensorflow.Variable
4,588
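A short sketch contrasting tf.Variable with a constant-filled tensor, extending the two lines above (TF 1.x graph mode).

import tensorflow as tf

data = tf.fill([1024, 1024], 0)   # a constant tensor, not trainable state
p = tf.Variable([1])              # mutable state that persists across session.run calls

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())   # variables must be initialized first
    print(sess.run(p))                            # [1]
    sess.run(p.assign([5]))                       # variables can be updated in place
    print(sess.run(p))                            # [5]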
import tensorflow as tf 
            self.train = tf.compat.v1.placeholder(tf.bool)

        # The CNN architecture = conv/pool layers + flatten layer + connected layers
        with tf.name_scope("cnn"):
            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = tf.layers.conv2d(self.X,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same',
                                          activation=tf.nn.relu)
            self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train)

            self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
tensorflow.layers.conv2d
4,589
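A self-contained sketch of the conv / dropout / max-pool block above, with fixed hyperparameters in place of the config lookups; it assumes the TF 1.x tf.layers API.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])   # e.g. CIFAR-10 images
is_training = tf.placeholder(tf.bool)

conv1 = tf.layers.conv2d(x, filters=32, kernel_size=3,
                         padding='same', activation=tf.nn.relu)
drop1 = tf.layers.dropout(conv1, rate=0.25, training=is_training)
pool1 = tf.layers.max_pooling2d(drop1, pool_size=2, strides=2)   # -> (None, 16, 16, 32)

print(pool1.shape)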
from tensorflow.python.framework import ops A `Tensor` with the same type as `features`. """ with ops.op_scope([features], name, "Relu6") as name: features = ops.convert_to_tensor(features, name="features") return gen_nn_ops._relu6(features, name=name)
tensorflow.python.framework.ops.convert_to_tensor
4,590
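A small sketch of ops.convert_to_tensor, the internal spelling of the public tf.convert_to_tensor used in the Relu6 wrapper above.

import numpy as np
from tensorflow.python.framework import ops

t1 = ops.convert_to_tensor([1.0, 2.0, 3.0], name="features")
t2 = ops.convert_to_tensor(np.zeros((2, 2)), name="weights")

print(t1.dtype, t1.shape)   # float32, shape (3,)
print(t2.dtype, t2.shape)   # float64 (inherited from the NumPy array), shape (2, 2)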
from tensorflow.python.ops import data_flow_ops aggmeth = tf.AggregationMethod.DEFAULT grads = tf.gradients(loss, params, aggregation_method=aggmeth) if FLAGS.staged_vars: grad_dtypes = [grad.dtype for grad in grads] grad_shapes = [grad.shape for grad in grads] grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes) grad_stage_op = grad_stage.put(grads) # In general, this decouples the computation of the gradients and # the updates of the weights. # During the pipeline warm up, this runs enough training to produce # the first set of gradients. gpu_grad_stage_ops.append(grad_stage_op)
tensorflow.python.ops.data_flow_ops.StagingArea
4,591
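A hedged sketch of data_flow_ops.StagingArea as a one-slot pipeline buffer; StagingArea is an internal TF 1.x API, and the tensors staged here are toy stand-ins for the gradients above.

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

images = tf.random_normal([8, 4])
labels = tf.random_uniform([8], maxval=10, dtype=tf.int32)

stage = data_flow_ops.StagingArea(
    dtypes=[tf.float32, tf.int32], shapes=[[8, 4], [8]])
put_op = stage.put([images, labels])          # producer side
staged_images, staged_labels = stage.get()    # consumer side, dequeues what was put

with tf.Session() as sess:
    sess.run(put_op)   # warm up the buffer once before consuming
    print(sess.run([tf.shape(staged_images), tf.shape(staged_labels)]))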
import tensorflow as tf def decode(self, serialized_example, items=None): """Decodes the given serialized TF-example. Args: serialized_example: a serialized TF-example tensor. items: the list of items to decode. These must be a subset of the item keys in self._items_to_handlers. If `items` is left as None, then all of the items in self._items_to_handlers are decoded. Returns: the decoded items, a list of tensor. """ context, sequence = tf.parse_single_sequence_example( serialized_example, self._context_keys_to_features, self._sequence_keys_to_features) tokens_raw = sequence[self.tokens_feature_name] tokens = tf.string_split(tokens_raw, delimiter=self.delimiter).values # Optionally prepend a special token if self.prepend_token is not None: tokens = tf.concat([[self.prepend_token], tokens], 0) # Optionally append a special token if self.append_token is not None: tokens = tf.concat([tokens, [self.append_token]], 0) sequence[self.tokens_feature_name] = tokens # Merge context and sequence features example = {} example.update(context) example.update(sequence)
tensorflow.string_split
4,592
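A minimal sketch of tf.string_split (removed in TF 2.x) plus the optional start/end token trick from the decoder above; the marker strings are made up.

import tensorflow as tf

tokens_raw = tf.constant(["the quick brown fox"])
tokens = tf.string_split(tokens_raw, delimiter=" ").values   # SparseTensor -> dense token values

# Optionally wrap the sequence in special markers, mirroring prepend_token / append_token.
tokens = tf.concat([["SEQUENCE_START"], tokens, ["SEQUENCE_END"]], 0)

with tf.Session() as sess:
    print(sess.run(tokens))
    # [b'SEQUENCE_START' b'the' b'quick' b'brown' b'fox' b'SEQUENCE_END']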
import tensorflow as tf # the true input symbols for the decoder with feed_previous=False. dec_fp_true = sess.run(dec_op_fp_true) output_symbols_fp_true = np.argmax(dec_fp_true, axis=2) dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(), output_symbols_fp_true[:-1])) sess.run(update_fp_true) sess.run(update_fp_false, {holder: inp for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)}) for v_true, v_false in matched_variables: self.assertAllClose(v_true.eval(), v_false.eval()) def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False) return tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
tensorflow.nn.seq2seq.embedding_rnn_seq2seq
4,593
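A hedged sketch mirroring the test helper above; it assumes the pre-1.0 tf.nn.seq2seq module, and in TF 1.x the same function lives in tf.contrib.legacy_seq2seq.

import tensorflow as tf

num_encoder_symbols, num_decoder_symbols, seq_len = 5, 5, 3
enc_inp = [tf.placeholder(tf.int32, [None]) for _ in range(seq_len)]
dec_inp = [tf.placeholder(tf.int32, [None]) for _ in range(seq_len)]

cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
outputs, state = tf.nn.seq2seq.embedding_rnn_seq2seq(
    enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols,
    embedding_size=2, feed_previous=True)   # feed_previous=True: greedy decoding

print(len(outputs))   # one logits tensor per decoder time step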
import tensorflow as tf def inference(self, forward_only=None): embed_inputs = tf.nn.embedding_lookup(self.embedding_init, self.x) ## (batch_size, seq_len, 100) with tf.variable_scope('hidden', reuse=forward_only): with tf.variable_scope('lstm_cell'): lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self.num_hidden, use_peepholes=False, # forget_bias=0.0, activation=tf.nn.relu, # initializer=tf.truncated_normal_initializer(stddev=0.1), # initializer=tf.random_uniform_initializer(-0.003, 0.003), initializer=tf.contrib.layers.xavier_initializer(),
tensorflow.variable_scope
4,594
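A compact sketch of variable sharing with tf.variable_scope, analogous to reusing the 'hidden' scope when the inference graph is built a second time; the layer itself is invented.

import tensorflow as tf

def dense_layer(x):
    w = tf.get_variable('w', [3, 2], initializer=tf.zeros_initializer())
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 3])

with tf.variable_scope('hidden'):              # first call creates hidden/w
    train_out = dense_layer(x)
with tf.variable_scope('hidden', reuse=True):  # second call reuses the same weights
    eval_out = dense_layer(x)

print([v.name for v in tf.global_variables()])  # only one variable: hidden/w:0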
import tensorflow as tf reuse = not train # create variables with tf.variable_scope(name) as scope: if reuse: scope.reuse_variables() var = variable_on_cpu( "var", [dim], tf.constant_initializer(1.), trainable=False) mean = variable_on_cpu( "mean", [dim], tf.constant_initializer(0.), trainable=False) step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) # choose the appropriate moments if train: used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") cur_mean, cur_var = used_mean, used_var if bn_lag > 0.: used_var = stable_var(input_=input_, mean=used_mean, axes=axes) cur_var = used_var used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean)) used_mean /= (1. - bn_lag**(step + 1)) used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var)) used_var /= (1. - bn_lag**(step + 1)) else: used_mean, used_var = mean, var cur_mean, cur_var = used_mean, used_var
tensorflow.nn.moments
4,595
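A tiny sketch of tf.nn.moments, the call that supplies the batch mean and variance consumed by batch-norm code like the above.

import tensorflow as tf

x = tf.constant([[1.0, 2.0],
                 [3.0, 6.0]])
mean, variance = tf.nn.moments(x, axes=[0])   # reduce over the batch dimension

with tf.Session() as sess:
    print(sess.run([mean, variance]))   # means [2., 4.], variances [1., 4.]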
import tensorflow as tf # Tensorboard visualization tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss) tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss) tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss) tf.summary.scalar(name='Generator Loss', tensor=generator_loss) tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss) tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent) tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution) tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label) tf.summary.histogram(name='Real Categorical Distribution', values=categorial_distribution) tf.summary.image(name='Input Images', tensor=input_images, max_outputs=10) tf.summary.image(name='Generated Images', tensor=generated_images, max_outputs=10) summary_op = tf.summary.merge_all()
tensorflow.summary.histogram
4,596
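A standalone sketch of tf.summary.histogram feeding TensorBoard, detached from the autoencoder losses above; the log directory is hypothetical.

import tensorflow as tf

latent = tf.random_normal([128, 2], name='encoder_output')
tf.summary.histogram(name='Encoder Distribution', values=latent)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/summaries', sess.graph)  # hypothetical log dir
    summary = sess.run(summary_op)
    writer.add_summary(summary, global_step=0)
    writer.close()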
import tensorflow as tf # tensor tile rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1]) # bs,slh,sld,vec with tf.variable_scope('attention'): # bs,sl,sl,vec f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.)) dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent') # bs,sld,vec dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sld,vec head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head') # bs,slh,vec head_etd = tf.expand_dims(head, 2) # bs,slh,1,vec logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,slh,sld,vec logits_masked = exp_mask_for_high_rank(logits, attn_mask) # bs,slh,sld,vec attn_score = tf.nn.softmax(logits_masked, 2) # bs,slh,sld,vec attn_score = mask_for_high_rank(attn_score, attn_mask) attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2) # bs,slh,vec -> head_org_idx return attn_result
tensorflow.expand_dims
4,597
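A toy sketch of the broadcasting trick above: tf.expand_dims inserts length-1 axes so the dependent and head sequences combine pairwise.

import tensorflow as tf

dependent = tf.zeros([4, 7, 10])   # bs, sl_dep, vec
head = tf.ones([4, 5, 10])         # bs, sl_head, vec

dependent_etd = tf.expand_dims(dependent, 1)   # bs, 1, sl_dep, vec
head_etd = tf.expand_dims(head, 2)             # bs, sl_head, 1, vec
logits = dependent_etd + head_etd              # broadcasts to bs, sl_head, sl_dep, vec

print(logits.shape)   # (4, 5, 7, 10)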
import tensorflow as tf seq_lengths = tf.fill([batch_size], seq_length) # Perform beam search indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder( inputs=inputs, sequence_length=seq_lengths, beam_width=beam_width, blank_index=blank_index, top_paths=1, blank_label=0) decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0]) decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32) decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0]) decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32) # Adjust event vals according to representation decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded) decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u+shift, decoded_u) # Set default vals decoded = tf.where(tf.equal(decoded, 0), def_val, decoded) decoded_u = tf.where(tf.equal(decoded_u, 0), def_val, decoded_u) # We know the shape pf decoded_u, and first dim for decoded decoded_u.set_shape([batch_size, seq_length]) decoded = tf.reshape(decoded, [batch_size, -1]) return decoded_u, decoded
tensorflow.equal
4,598
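A small sketch of the tf.equal / tf.where masking pattern above, with a toy decoded tensor and invented shift and default values; tf.fill is used here so the replacement value matches the tensor's shape.

import tensorflow as tf

decoded = tf.constant([[0, 2, 0, 1]])
shift, def_val = 10, -1

# Shift non-blank labels, then replace the remaining blanks with a default value.
decoded = tf.where(tf.not_equal(decoded, 0), decoded + shift, decoded)
decoded = tf.where(tf.equal(decoded, 0), tf.fill(tf.shape(decoded), def_val), decoded)

with tf.Session() as sess:
    print(sess.run(decoded))   # [[-1 12 -1 11]]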
import tensorflow as tf self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3 mean, var = tf.nn.moments(tgtimg_z, axes=[0]) print(var.get_shape())
tensorflow.nn.moments
4,599