Columns: seed (string, length 25 to 2.89k), seed_api (string, length 14 to 102), index (int64, 0 to 14.8k)
from tensorflow.python.framework import ops as _ops
      send_device=send_device, send_device_incarnation=0,
      recv_device=recv_device, client_terminated=False,
      name=name if name else "Recv")
  return result


_ops.RegisterShape("_Recv")(None)


def _Send(tensor, tensor_name, send_device, recv_device, name=None):
  r"""Sends the named tensor from send_device to recv_device.

  Args:
    tensor: A `Tensor`. The tensor to send.
tensorflow.python.framework.ops.RegisterShape
14,600
import tensorflow as tf
    final_loss = tf.reduce_sum(loss)
    return final_loss, cstr_pct

def contra_traj_lossV8(pred, tgt, horizon=12):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
    horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
    tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
tensorflow.reshape
14,601
import tensorflow as tf
        target_params = tf_util.get_trainable_vars("target/values_fn/vf")

        # Polyak averaging for target variables
        self.target_update_op = [
            tf.assign(target, (1 - self.tau) * target + self.tau * source)
            for target, source in zip(target_params, source_params)
        ]

        # Initializing target to match source variables
tensorflow.assign
14,602
import tensorflow as tf
    dec_inp_dict = {}
    dec_inp_dict["0"] = [
        tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
    dec_inp_dict["1"] = [
        tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
    dec_symbols_dict = {"0": 5, "1": 6}
    cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
    outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
        enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
    sess.run([tf.global_variables_initializer()])
    res = sess.run(outputs_dict["0"])
    self.assertEqual(3, len(res))
    self.assertEqual((2, 5), res[0].shape)
tensorflow.nn.seq2seq.one2many_rnn_seq2seq
14,603
import tensorflow as tf
    # Individual components of the loss that will need summaries.
    clone_loss = None
    regularization_loss = None
    # Compute and aggregate losses on the clone device.
    with tf.device(clone.device):
        all_losses = []
        clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
        if clone_losses:
            clone_loss = tf.add_n(clone_losses, name='clone_loss')
            if num_clones > 1:
                clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                                    name='scaled_clone_loss')
tensorflow.get_collection
14,604
import tensorflow as tf
               decay=.1, axes=[0], bn_lag=DEFAULT_BN_LAG):
    """Batch normalization."""
    # create variables
    with tf.variable_scope(name):
        var = variable_on_cpu(
            "var", [dim], tf.constant_initializer(1.), trainable=False)
        mean = variable_on_cpu(
            "mean", [dim], tf.constant_initializer(0.), trainable=False)
        step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)
        if scale:
            gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.))
        beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.))

    # choose the appropriate moments
    if train:
        used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
        cur_mean, cur_var = used_mean, used_var
tensorflow.constant_initializer
14,605
import tensorflow as tf
    z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")

    # pred_annotation, logits = inference(image, keep_probability,z)
    # tf.summary.image("input_image", image, max_outputs=2)
    # tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
    # tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
    # loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
    #                                                                       labels=tf.squeeze(annotation, squeeze_dims=[3]),
    #                                                                       name="entropy")))

    mask_ = tf.ones([FLAGS.batch_size,64,64,3])
    mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])

    mask2__ = tf.ones([FLAGS.batch_size,78,78,3])
    mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])
    mask2 = mask2_ - mask

    pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)

    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tensorflow.ones
14,606
import tensorflow as tf
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }

    def fake_model_preprocessor_fn(image):
        return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))

    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
tensorflow.shape
14,607
import tensorflow as tf
            writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))
            writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))

    if FLAGS.use_hvd and hvd.rank() == 0 and (FLAGS.do_train or FLAGS.do_train_eval):
        (cpath, cname) = os.path.split(FLAGS.bert_config_file)
        tf.gfile.Copy(FLAGS.bert_config_file, os.path.join(FLAGS.output_dir, cname), True)

    input_files = []
    if FLAGS.input_file is not None:
        for input_pattern in FLAGS.input_file.split(","):
            input_files.extend(tf.gfile.Glob(input_pattern))
    if FLAGS.input_dir is not None:
        for filename in tf.gfile.ListDirectory(FLAGS.input_dir):
            input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.input_dir, filename)))

    tf.logging.info("*** Input Files ***")
    for input_file in input_files:
        tf.logging.info(" %s" % input_file)

    validation_input_files = []
    if FLAGS.validation_input_file is None and FLAGS.validation_input_dir is None:
        validation_input_files = input_files
    else:
        if FLAGS.validation_input_file is not None:
            for input_pattern in FLAGS.validation_input_file.split(","):
                validation_input_files.extend(tf.gfile.Glob(input_pattern))
        if FLAGS.validation_input_dir is not None:
            for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir):
                validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename)))
tensorflow.logging.info
14,608
import tensorflow as tf
        if self.hparams.use_scales:
            dist /= tf.reshape(self.hparams.scales,
                               [1, 1, self.hparams.moe_num_experts])
        nearest_idx = tf.argmax(-dist, axis=-1)
        nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
        return nearest_hot

    def embedding_lookup(self, x, means):
tensorflow.one_hot
14,609
import tensorflow as tf
                 self.has_own_variables) or self._initial_vs_reuse
        if len(self._vs_name):
            ret.append(tf.variable_scope(self._vs_name, reuse=reuse))
        else:
            if reuse:
                ret.append(tf.variable_scope(
                    tf.get_variable_scope(), reuse=True))
            else:
                # work around https://github.com/tensorflow/tensorflow/issues/14703
                ret.append(tf.variable_scope(tf.get_variable_scope()))
        # always clear existing ns  # TODO check existing ns
        if len(self._name) and self._name != self._vs_name:
            ret.append(tf.name_scope(self._name + '/'))
tensorflow.get_variable_scope
14,610
import tensorflow as tf
    num_layers = int(log(final_size, 2)) - 1
    with tf.compat.v1.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
        with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
            with slim.arg_scope([slim.conv2d_transpose],
                                normalizer_fn=normalizer_fn,
                                stride=2,
                                kernel_size=4):
                net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)

                # First upscaling is different because it takes the input vector.
                current_depth = depth * 2 ** (num_layers - 1)
                scope = 'deconv1'
                net = slim.conv2d_transpose(
                    net, current_depth, stride=1, padding='VALID', scope=scope)
tensorflow.expand_dims
14,611
import tensorflow as tf
    Returns:
        a `float` decov loss
    """
    with tf.name_scope(name):
        x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
        m = tf.reduce_mean(x, 0, True)
        z = tf.expand_dims(x - m, 2)
        corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
        corr_frob_sqr = tf.reduce_sum(tf.square(corr))
        corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
        loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
        return loss

def center_loss(features, label, alpha, num_classes, name='center_loss'):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
    (http://ydwen.github.io/papers/WenECCV16.pdf)
tensorflow.square
14,612
import tensorflow as tf
        w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
        b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
        w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
        b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
tensorflow.get_variable
14,613
import tensorflow as tf
    def testAssignNonStrictShapeChecking(self):
        with self.test_session():
            data = tf.fill([1024, 1024], 0)
            p = tf.Variable([1])
            a = tf.assign(p, data, validate_shape=False)
tensorflow.fill
14,614
import tensorflow as tf
        variable_summaries(w)
        if isinstance(bias, float):
            bias = tf.get_variable("biases", [output_dim], tf.float32, tf.constant_initializer(bias))
        variable_summaries(bias)
tensorflow.constant_initializer
14,615
import tensorflow as tf
    # to int then back
    mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32')
    mask_wo_bos_eos = tf.reverse_sequence(
        mask_wo_bos_eos,
        lm_graph.sequence_lengths - 1,
        seq_axis=1,
        batch_axis=0,
    )
    mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]
    mask_wo_bos_eos = tf.reverse_sequence(
        mask_wo_bos_eos,
        sequence_length_wo_bos_eos,
        seq_axis=1,
        batch_axis=0,
    )
    mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')

    return {
tensorflow.reverse_sequence
14,616
import tensorflow as tf
                            init_string)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization_multigpus.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            output_spec = tf.estimator.EstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold=scaffold_fn
            )
        elif mode == tf.estimator.ModeKeys.EVAL:
tensorflow.estimator.EstimatorSpec
14,617
import tensorflow as tf
        if out_dim is not None:
            with tf.variable_scope(name) :
                self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
14,618
import tensorflow as tf
        z0_clip = tf.clip_by_value(z0, zero, max_z)
        z1_clip = tf.clip_by_value(z1, zero, max_z)
        dim3 = width
        dim2 = width * height
        dim1 = width * height * depth
        base = _repeat(
            tf.range(num_batch) * dim1, out_depth * out_height * out_width)
        base_z0_y0 = base + z0_clip * dim2 + y0_clip * dim3
        base_z0_y1 = base + z0_clip * dim2 + y1_clip * dim3
        base_z1_y0 = base + z1_clip * dim2 + y0_clip * dim3
        base_z1_y1 = base + z1_clip * dim2 + y1_clip * dim3
tensorflow.range
14,619
import tensorflow as tf
        assert(len(shape) > 1)
        nh = h[0].get_shape()[-1].value
        return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
    else:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])

def lstm(xs, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, x in enumerate(xs):
        c = c
        h = h
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
tensorflow.variable_scope
14,620
import tensorflow as tf
        dataset = dataset.filter(target_right_length)
    if max_eval_target_length > 0 and not training:
        dataset = dataset.filter(eval_target_right_length)

    return dataset


# TODO(lukaszkaiser): find a single more abstract way of text pre-processing.
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def wmt_preprocess(dataset, training, max_length=-1, max_eval_length=-1):
    """Preprocessing for LM1B: filter out targets exceeding maximum length."""

    def train_right_length(example, target):
        l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])
        return tf.less(l, max_length + 1)

    def eval_right_length(example, target):
        l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])
        return tf.less(l, max_eval_length + 1)

    if max_length > 0 and training:
        dataset = dataset.filter(train_right_length)
    if max_eval_length > 0 and not training:
        dataset = dataset.filter(eval_right_length)

    return dataset
tensorflow.less
14,621
from tensorflow.python.framework import ops
        # one batch. So we use (device, operation) as the key.
        return self.resource_handle.device, self.op

    def batch_runner_fn(self):
        return _scheduled_stamp_resource_op_runner


def _move_tensors(tensors, device):
    """Moves a list of tensors to a device by concatenating/splitting them."""
    # Reset the device setting to avoid weird interactions with device merging
    # logic.
    with ops.device(None):
        if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
            with ops.device(tensors[0].device):
                values = array_ops.stack(tensors)
            with ops.device(device):
                return array_ops.unstack(values)
        else:
            with ops.device(tensors[0].device):
                sizes = array_ops.stack(
                    [array_ops.shape(tensor)[0] for tensor in tensors])
                values = array_ops.concat(tensors, axis=0)
            with ops.device(device):
                sizes = array_ops.unstack(sizes)
                return list(array_ops.split(values, sizes, axis=0))


def _scheduled_stamp_resource_op_runner(batch, stamp):
    """Runs a batch operation on a stamped resource."""
    if not batch:
tensorflow.python.framework.ops.device
14,622
import tensorflow as tf
        z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1])
        y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1])
        x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1])
        z_s_flat = tf.reshape(z_s, [-1])
        y_s_flat = tf.reshape(y_s, [-1])
        x_s_flat = tf.reshape(x_s, [-1])

        input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, z_s_flat, out_size)
        output = tf.reshape(
            input_transformed,
            tf.stack([num_batch, out_depth, out_height, out_width, num_channels]))
        return output

    with tf.variable_scope(name):
        output = _transform(theta, voxels, out_size, z_near, z_far)
        return output
tensorflow.stack
14,623
import tensorflow as tf
def softmax(target, axis, name=None):
    max_axis = tf.reduce_max(target, axis, keep_dims=True)
    target_exp = tf.exp(target - max_axis)
    normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
    softmax = target_exp / normalize
tensorflow.exp
14,624
import tensorflow as tf
        # = 1, for j > 10.
        expected_cdf = x.copy() / 10
        expected_cdf[x < 1] = 0.
        expected_cdf[x > 10] = 1.
        self.assertAllClose(expected_cdf, qdist.cdf(x).eval())

    def test_sampling_from_batch_of_normals(self):
        batch_shape = (2,)
        with self.test_session():
            qdist = distributions.QuantizedDistribution(
                base_dist_cls=distributions.Normal,
                lower_cutoff=0.,
                upper_cutoff=None,
                mu=tf.zeros(
                    batch_shape, dtype=tf.float32),
                sigma=tf.ones(
                    batch_shape, dtype=tf.float32))

            samps = qdist.sample_n(n=5000, seed=42)
            samps_v = samps.eval()

            # With lower_cutoff = 0, the interval j=0 is (-infty, 0], which holds 1/2
            # of the mass of the normals.
            # rtol chosen to be 2x as large as necessary to pass.
            self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03)
tensorflow.zeros
14,625
import tensorflow as tf
    logz.configure_output_dir(logdir)
    # Log experimental parameters
    args = inspect.getargspec(QLearner)[0]
    params = {k: str(locals_[k]) if k in locals_ else None for k in args}
    params['exp_name'] = locals_['q_func'].__name__ + locals_['double_q'] * '_doubleQ'
    logz.save_params(params)

def get_num_params():
    total_parameters = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        print('%d parameters in %s' %(variable_parameters ,variable.name))
        total_parameters += variable_parameters
    print('Total : %d' %total_parameters)
tensorflow.trainable_variables
14,626
import tensorflow as tf
            shape=[1, 1, n_basis, n_out],
            initializer=tf.random_normal_initializer())
        alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h),
            shape=[1, 1, n_basis, n_out],
            initializer=tf.random_normal_initializer())
        alpha_std = tf.exp(alpha_logstd)
        # Compute epsilon from {n_samples} standard Gaussian
        # epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
        epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
        hyp_params = tf.get_variable('hyp_params_layer'+str(h),
            shape=[2],
            initializer=tf.random_normal_initializer())
        l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
        epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
        # Compute A_{h+1}
        A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
        # Compute z_{h}A_{h+1}
        Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
        Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
        # Compute u_{h+1} and v_{h+1}
        U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
        Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
        KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
    # Output layer
tensorflow.exp
14,627
import tensorflow as tf
        defun=True)


if __name__ == "__main__":
    tf.enable_eager_execution()
    tf.test.main()
tensorflow.test.main
14,628
import tensorflow as tf
        if input_data_type != data_type:
            images = tf.cast(images, data_type)
        network = ConvNetBuilder(
            images, input_nchan, phase_train, self.data_format, data_type)
        self.model_conf.add_inference(network)
        # Add the final fully-connected class layer
        logits = network.affine(nclass, activation='linear')
        if not phase_train:
            top_1_op = tf.reduce_sum(
                tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type))
            top_5_op = tf.reduce_sum(
                tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type))
            return (logits, top_1_op, top_5_op)
        loss = loss_function(logits, labels)
        params = self.variable_mgr.trainable_variables_on_device(device_num)
        l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params])
        weight_decay = FLAGS.weight_decay
        if weight_decay is not None and weight_decay != 0.:
            loss += weight_decay * l2_loss

        aggmeth = tf.AggregationMethod.DEFAULT
        grads = tf.gradients(loss, params, aggregation_method=aggmeth)
tensorflow.nn.in_top_k
14,629
import tensorflow as tf
            input_files.extend(tf.gfile.Glob(input_pattern))

    tf.logging.info("*** Input Files ***")
    for input_file in input_files:
tensorflow.logging.info
14,630
import tensorflow as tf
    input_layer = tf.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=filter_list[0],
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=filter_list[1],
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
tensorflow.layers.conv2d
14,631
import tensorflow as tf
    def fwd_gradients_0(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]
        return tf.gradients(g, self.dummy_x0_tf)[0]

    def fwd_gradients_1(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
        return tf.gradients(g, self.dummy_x1_tf)[0]

    def net_U0(self, x):
        lambda_1 = self.lambda_1
        lambda_2 = tf.exp(self.lambda_2)
        U = self.neural_net(x, self.weights, self.biases)
        U_x = self.fwd_gradients_0(U, x)
        U_xx = self.fwd_gradients_0(U_x, x)
        F = -lambda_1*U*U_x + lambda_2*U_xx
        U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T)
        return U0

    def net_U1(self, x):
        lambda_1 = self.lambda_1
tensorflow.exp
14,632
import tensorflow as tf
            w = tf.expand_dims(w, [2])

            # Calculate policy and sample
            logits = tf.reshape(tf.matmul(U, w), [-1, num_acts])
            self.pi = tf.nn.softmax(logits)
            self.log_pi = tf.nn.log_softmax(logits)
            self.sample = policy_utils.categorical_sample(
tensorflow.nn.softmax
14,633
import tensorflow as tf
    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer()
    )
    with tf.variable_scope("loss"):
        if is_training:
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        output_layer = tf.reshape(output_layer, [-1, hidden_size])
        logits = tf.matmul(output_layer, output_weight, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11])
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        # labels = tf.cast(labels,dtype=tf.float32)
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_sum(per_example_loss)
        return (loss, per_example_loss, logits)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    def model_fn(features, labels, mode, params):
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
tensorflow.reduce_sum
14,634
import tensorflow as tf
        img *= 255.0/img.max()

    file_name = 'heatmap_{}_{}.jpg'.format(save_image_with_heatmap.counter, ind)
    imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
    return save_image_with_heatmap.counter

def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None):
    predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])

    pred_max = tf.reduce_max(predictions, axis=-1)
    pred_indices = tf.argmax(predictions, axis=-1)
    pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)

    width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
    pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32)

    if clip_at_zero:
tensorflow.reduce_max
14,635
import tensorflow as tf
            writer.write(output_line)
            num_written_lines += 1
        assert num_written_lines == num_actual_predict_examples


if __name__ == "__main__":
    flags.mark_flag_as_required("data_dir")
    flags.mark_flag_as_required("task_name")
    flags.mark_flag_as_required("vocab_file")
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("output_dir")
    tf.app.run()
tensorflow.app.run
14,636
import tensorflow as tf
        if self.demo:
            self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context")
            self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question")
            self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char")
            self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char")
            self.start_label = tf.placeholder(tf.int32, [None], "answer_label1")
            self.end_label = tf.placeholder(tf.int32, [None], "answer_label2")
        else:
            self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len], "context")
            self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len], "question")
            self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len], "context_char")
            self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len], "question_char")
            self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1")
            self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2")
        self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size)
tensorflow.placeholder
14,637
import tensorflow as tf
def correlation(x, y):
    x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
    y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
    x = tf.nn.l2_normalize(x, -1)
    y = tf.nn.l2_normalize(y, -1)
    return -tf.reduce_sum(x*y, axis=-1) # higher the better

def kd(x, y):
    x_prob = tf.nn.softmax(x)
    print(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape())
    return -tf.reduce_sum(x_prob * y, axis=-1) # higher the better

def mse(x, y):
    x = x - tf.reduce_mean(x, axis=-1, keepdims=True)
    y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
    return tf.reduce_sum((x-y)**2, axis=-1) # lower the better

def kd_distance(x, y, dist_type):
    if dist_type == "person":
        return correlation(x,y)
    elif dist_type == "kd":
        return kd(x, y)
    elif dist_type == "mse":
        return mse(x, y)

def model_fn_builder(
        model_config,
        num_labels,
        init_checkpoint,
        model_reuse=None,
tensorflow.reduce_sum
14,638
import tensorflow as tf
    idim = (36, 64)

    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    tftrain = tf.placeholder(tf.bool, name='tftrain')
    batch_size=100

    if (args.experiment_type == "reach") or (args.experiment_type == "push"):
        idim = (48, 48)
    tfinput = tf.placeholder(tf.float32, (3, batch_size) + idim + (3, ), name='x')

    if args.experiment_type == "reach":
        test = ContextAEReach()
    elif args.experiment_type == "push":
        test = ContextAEPush()
    elif args.experiment_type == "pushreal":
tensorflow.placeholder
14,639
import tensorflow as tf
            self._batch_env.reset, [indices], observ_dtype, name='reset')
        observ = tf.check_numerics(observ, 'observ')
        reward = tf.zeros_like(indices, tf.float32)
        done = tf.zeros_like(indices, tf.bool)
        with tf.control_dependencies([
            tf.scatter_update(self._observ, indices, observ),
tensorflow.zeros_like
14,640
import tensorflow as tf
test_inf=work.test_inference(test_image_batch)
test_labels=tf.one_hot(test_label_batch,classnum)
test_pre = tf.reshape(test_inf, [testnum, classnum])
correct_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)

valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)
valid_inf=work.valid_inference(valid_image_batch)
valid_labels=tf.one_hot(valid_label_batch,classnum)
#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))
valid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32))
valid_pre = tf.argmax(valid_pre, 1)
valid_true = tf.argmax(valid_labels, 1)

target_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo', 'class ym', 'class shq', 'class shj',
                'class no', 'class yh', 'class fb']

init = tf.initialize_all_variables()
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
#init=tf.initialize_all_variables()
tensorflow.argmax
14,641
import tensorflow as tf
        assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the ACER model must be " \
                                                           "an instance of common.policies.ActorCriticPolicy."

        if isinstance(self.action_space, Discrete):
            self.n_act = self.action_space.n
            continuous = False
        elif isinstance(self.action_space, Box):
            # self.n_act = self.action_space.shape[-1]
            # continuous = True
            raise NotImplementedError("WIP: Acer does not support Continuous actions yet.")
        else:
            raise ValueError("Error: ACER does not work with {} actions space.".format(self.action_space))

        self.n_batch = self.n_envs * self.n_steps

        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
            self.set_random_seed(self.seed)

            n_batch_step = None
            if issubclass(self.policy, RecurrentActorCriticPolicy):
                n_batch_step = self.n_envs
            n_batch_train = self.n_envs * (self.n_steps + 1)

            step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                     n_batch_step, reuse=False, **self.policy_kwargs)

            self.params = tf_util.get_trainable_vars("model")

            with tf.variable_scope("train_model", reuse=True,
tensorflow.Graph
14,642
import tensorflow as tf
    def neural_net(self, X, weights, biases):
        num_layers = len(weights) + 1

        H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
        for l in range(0,num_layers-2):
            W1, W2 = weights[l]
            b = biases[l]
            H1 = tf.add(tf.matmul(H, W1), b)
            H2 = tf.matmul(H, W2)
            H = tf.tanh(tf.add(H1 * H2, H1))
        W1, W2 = weights[-1]
        b = biases[-1]
        H1 = tf.add(tf.matmul(H, W1), b)
        H2 = tf.matmul(H, W2)
        Y = tf.add(H1 * H2, H1)
        return Y

    def fwd_gradients_0(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]
        return tf.gradients(g, self.dummy_x0_tf)[0]

    def fwd_gradients_1(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
        return tf.gradients(g, self.dummy_x1_tf)[0]

    def net_U0(self, x):
        lambda_1 = self.lambda_1
tensorflow.matmul
14,643
import tensorflow as tf
        self._losses['cross_entropy'] = cross_entropy
        self._losses['loss_box'] = loss_box
        self._losses['rpn_cross_entropy'] = rpn_cross_entropy
        self._losses['rpn_loss_box'] = rpn_loss_box

        loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box
        self._losses['total_loss'] = loss

        self._event_summaries.update(self._losses)

        return loss

    def create_architecture(self, sess, mode, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
        self._image = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 3])
        self._im_info = tf.placeholder(tf.float32, shape=[self._batch_size, 3])  # image size after rescaling, plus the rescale ratio
        self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])  # gt_boxes coordinates after rescaling, plus the class label
        self._tag = tag

        self._num_classes = num_classes
        self._mode = mode
        self._anchor_scales = anchor_scales
        self._num_scales = len(anchor_scales)

        self._anchor_ratios = anchor_ratios
        self._num_ratios = len(anchor_ratios)
tensorflow.placeholder
14,644
import tensorflow as tf """ del num_attention_heads # unused input_shape = get_shape_list(input_tensor) hidden_size = input_shape[2] with tf.variable_scope(name): w = tf.get_variable( name="kernel", shape=[hidden_size, output_size], initializer=initializer) b = tf.get_variable(
tensorflow.get_variable
14,645
import tensorflow as tf
        saver = tf.train.Saver(max_to_keep=None)
        if self.is_summary:
            training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES)
            training_epoch_summary_op = tf.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES)
            validation_batch_summary_op = tf.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES)
            validation_epoch_summary_op = tf.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES)

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=self.cnf.get('gpu_memory_fraction', 0.9))
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
        sess.run(init)
        if start_epoch > 1:
            weights_from = "weights/model-epoch-%d.ckpt" % (start_epoch - 1)

        if weights_from:
            self._load_weights(sess, saver, weights_from)

        learning_rate_value = self.lr_policy.initial_lr
        log.info("Initial learning rate: %f " % learning_rate_value)
        if self.is_summary:
            train_writer, validation_writer = summary.create_summary_writer(
tensorflow.ConfigProto
14,646
import tensorflow as tf
            tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_ids":
            tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_weights":
            tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
        "next_sentence_labels":
            tf.FixedLenFeature([1], tf.int64),
    }
tensorflow.FixedLenFeature
14,647
import tensorflow as tf
        # L1 weight regularization
        reg += self.L1_in * tf.reduce_mean(tf.abs(self.W_in) * self.input_Connectivity)
        reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
        if self.dale_ratio:
            reg += self.L1_out * tf.reduce_mean(tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out))
tensorflow.abs
14,648
import tensorflow as tf
    def _add_losses(self, sigma_rpn=3.0):
        with tf.variable_scope('loss_' + self._tag):
            # RPN, class loss
            rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
            rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])

            # get the indices of foreground and background anchors
            rpn_select = tf.where(tf.not_equal(rpn_label, -1))
            rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
            rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])

            rpn_cross_entropy = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

            # RPN, bbox loss
            rpn_bbox_pred = self._predictions['rpn_bbox_pred']
            rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
            rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
tensorflow.gather
14,649
import tensorflow as tf
            tgtctx_z = linear(tgtctx_h4, featsize, 'hz_lin')

        with tf.variable_scope("conv") as scope:
            srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv'))
tensorflow.variable_scope
14,650
import tensorflow as tf
        else:
            val = save.save(sess, save_path, global_step=global_step_int)
            expected_save_path = "%s-%d" % (save_path, global_step_int)
        self.assertEqual(expected_save_path, val)


class SaveRestoreShardedTest(tf.test.TestCase):

    def testBasics(self):
        save_path = os.path.join(self.get_temp_dir(), "sharded")

        # Build a graph with 2 parameter nodes on different devices.
        with tf.Session(
            target="",
            config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
            with sess.graph.device("/cpu:0"):
                v0 = tf.Variable(10, name="v0")
            with sess.graph.device("/cpu:1"):
                v1 = tf.Variable(20, name="v1")
            save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
            tf.initialize_all_variables().run()
            val = save.save(sess, save_path)
            self.assertEqual(save_path + "-?????-of-00002", val)
            meta_graph_filename = save._MetaGraphFilename(val)
            self.assertEqual(save_path + ".meta", meta_graph_filename)

        # Restore a different "v0" from shard 0 of the saved files.
        with tf.Session(
tensorflow.ConfigProto
14,651
import tensorflow as tf
    self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
    return self_attention

def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
tensorflow.layers.dense
14,652
import tensorflow as tf
        layers_without_bos_eos.append(layer_wo_bos_eos)

    # concatenate the layers
    lm_embeddings = tf.concat(
        [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos],
        axis=1
    )
tensorflow.expand_dims
14,653
import tensorflow as tf
        h_conv3_flat = tf.reshape(h_conv3, [-1, 1600], 'h_conv3_flat')
        h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_conv3_flat, W_fc1), b_fc1, 'h_fc1'))
        readout = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, 'h_fc2')
        return s, readout, h_fc1

    def creat_optimizer(self,readout):
        action = tf.placeholder(tf.float32,[None,self.ACTIONS])
        y = tf.placeholder(tf.float32,[None])
        readout_action = tf.reduce_sum(tf.multiply(readout,action),reduction_indices=1)
        cost =tf.reduce_mean(tf.square(y-readout_action))
        train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
        return train_step,y,action

    # Given an initial state s_t at time t, play the game from there
    def process_game(self,s_t):
        # Run the CNN to obtain the vector of Q-values
        read_out_t = self.sess.run(self.readout,feed_dict={self.s:[s_t]})[0]
        a_t =np.zeros([self.ACTIONS])
        action_index =0
        if self.time % self.FRAME_PER_ACTION == 0:
            if random.random()<= self.epsilon:
                # choose an action at random
                print("----------- choosing a random action --------------")
tensorflow.square
14,654
from tensorflow.python.ops import math_ops
        weights=weights)
    metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)
    update = math_ops.div(
tensorflow.python.ops.math_ops.add
14,655
import tensorflow as tf
            target_placeholder,
            tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
        ]
        queue_types = [tf.float32, target_type, tf.int32]
        if self.local_condition:
            self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))
            queue_types.append(tf.float32)
        if self.global_condition:
            self._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))
            queue_types.append(tf.int32)

        # Create queue for buffering data
tensorflow.placeholder
14,656
import tensorflow as tf
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.

    Returns:
      samples: an integer `Tensor`. Top samples from the beam search
    """
    batch_size = common_layers.shape_list(features["inputs"])[0]

    def symbols_to_logits_fn(ids):
      """Go from ids to logits."""
      ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
      ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
      if "partial_targets" in features:
        pt = features["partial_targets"]
        pt_length = common_layers.shape_list(pt)[1]
        pt = tf.tile(pt, [1, beam_size])
        pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
        ids = tf.concat([pt, ids], axis=1)
        features["targets"] = ids
tensorflow.expand_dims
14,657
import tensorflow as tf
def double_factorial(n: TensorLike) -> TensorLike:
    n = tf.convert_to_tensor(value=n)

    two = tf.ones_like(n) * 2
    result = tf.ones_like(n)
    _, result, _ = tf.while_loop(
        cond=_double_factorial_loop_condition,
        body=_double_factorial_loop_body,
        loop_vars=[n, result, two])
tensorflow.ones_like
14,658
import tensorflow as tf
    def testParetoLogCdf(self):
        batch_size = 6
        scale = tf.constant([3.] * batch_size)
        scale_v = 3.
        concentration = tf.constant([2.])
        concentration_v = 2.

        x = [3., 3.1, 4., 5., 6., 7.]

        pareto = tfd.Pareto(concentration, scale)
        log_cdf = pareto.log_cdf(x)
tensorflow.constant
14,659
import tensorflow as tf
    batch_size = params["batch_size"]

    name_to_features = {
        "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "masked_lm_positions": tf.FixedLenFeature(
            [max_predictions_per_seq], tf.int64
        ),
        "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_weights": tf.FixedLenFeature(
            [max_predictions_per_seq], tf.float32
        ),
        "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
    }

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    if is_training:
        d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
        d = d.repeat()
        d = d.shuffle(buffer_size=len(input_files))

        # `cycle_length` is the number of parallel files that get read.
        cycle_length = min(num_cpu_threads, len(input_files))
tensorflow.FixedLenFeature
14,660
import tensorflow as tf
            grads = list(zip(grads, self.params))

        with tf.variable_scope("input_info", reuse=False):
            tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
            tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
            tf.summary.scalar('advantage', tf.reduce_mean(adv))
            tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))

            if self.full_tensorboard_log:
                tf.summary.histogram('rewards', self.reward_ph)
                tf.summary.histogram('learning_rate', self.learning_rate)
                tf.summary.histogram('advantage', adv)
                tf.summary.histogram('action_probability', self.mu_ph)
                if tf_util.is_image(self.observation_space):
                    tf.summary.image('observation', train_model.obs_ph)
                else:
                    tf.summary.histogram('observation', train_model.obs_ph)

        trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
                                            epsilon=self.rprop_epsilon)
        _opt_op = trainer.apply_gradients(grads)

        # so when you call _train, you first do the gradient step, then you apply ema
        with tf.control_dependencies([_opt_op]):
            _train = tf.group(ema_apply_op)

        # Ops/Summaries to run, and their names for logging
        assert norm_grads is not None
tensorflow.summary.image
14,661
import tensorflow as tf
print(sess.run(sequence_var))

rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
runif_var = tf.random_uniform([row_dim, col_dim], minval=0, maxval=4)

print(sess.run(rnorm_var))
print(sess.run(runif_var))

ops.reset_default_graph()
sess = tf.Session()

my_var = tf.Variable(tf.zeros([1,20]))
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", graph=sess.graph)

initialize_op = tf.global_variables_initializer()
sess.run(initialize_op)
tensorflow.Session
14,662
import tensorflow as tf
        tower_preds = []
        tower_metrics = []
        for i in range(self.n_gpus):
            worker = '/gpu:{}'.format(i)
            device_setter = tf.train.replica_device_setter(
                worker_device=worker, ps_device='/cpu:0', ps_tasks=1)
            with tf.name_scope('{}_{}'.format(mode, i)) as scope:
                with tf.device(device_setter):
                    net_outputs = self._model(shards[i], mode, **self.config)
                    if mode == Mode.TRAIN:
                        loss = self._loss(net_outputs, shards[i], **self.config)
                        loss += tf.reduce_sum(
                            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope))
tensorflow.device
14,663
import tensorflow as tf
        with tf.name_scope(name, "click_softmax_cross_entropy",[output]):
            label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)
        return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)

    def click_loglikelihood(self, labels, propensity,train_output, name=None):
tensorflow.reduce_sum
14,664
import tensorflow as tf elif params.initializer == "normal_unit_scaling": return tf.variance_scaling_initializer(params.initializer_gain, mode="fan_avg", distribution="normal") elif params.initializer == "uniform_unit_scaling": return tf.variance_scaling_initializer(params.initializer_gain, mode="fan_avg", distribution="uniform") else: raise ValueError("Unrecognized initializer: %s" % params.initializer)
tensorflow.variance_scaling_initializer
14,665
import tensorflow as tf
from utils.external import resnet_model as ResNet
from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model')
tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio')
tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate')
tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size')
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient')
tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient')
tensorflow.app.flags.DEFINE_integer
14,666
import tensorflow as tf
        self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())

    def testScaleGradientsCheckNumerics(self):
        """ScaleGradients when enable_check_numerics=True."""
        FLAGS.enable_check_numerics = True
        p = self.TestParams()
        p.input = base_input_generator.BaseSequenceInputGenerator.Params()
        task = p.cls(p)
        task.CreateVariable(
            'a',
            py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
        var_a = task.theta.a
        # Make a NaN gradient.
        var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
        has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)

        with self.session():
            tf.global_variables_initializer().run()
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         'is not finite'):
                self.assertTrue(has_nan_or_inf.eval())
                self.assertEqual(0., grad_scale.eval())
                # The final gradient must be finite.
                self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
                self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
tensorflow.log
14,667
import tensorflow as tf
    # products of decays isn't ideal numerically, in particular if any of the
    # decays are zero it results in NaNs.
    with tf.name_scope(name, values=[sequence, decay, initial_value]):
        if sequence_lengths is not None:
            # Zero out sequence and decay beyond sequence_lengths.
            with tf.control_dependencies(
                [tf.assert_equal(sequence.shape[0], decay.shape[0])]):
                mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
                                        dtype=sequence.dtype)
                mask = tf.transpose(mask)

            # Adding trailing dimensions to mask to allow for broadcasting.
            to_seq = mask.shape.dims + [1] * (sequence.shape.ndims - mask.shape.ndims)
            sequence *= tf.reshape(mask, to_seq)
            to_decay = mask.shape.dims + [1] * (decay.shape.ndims - mask.shape.ndims)
            decay *= tf.reshape(mask, to_decay)

        sequences = [sequence, decay]

        if reverse:
            sequences = [_reverse_seq(s, sequence_lengths) for s in sequences]

        summed = tf.scan(lambda a, x: x[0] + x[1] * a,
                         sequences,
                         initializer=tf.convert_to_tensor(initial_value),
                         parallel_iterations=1,
                         back_prop=back_prop)
tensorflow.reshape
14,668
import tensorflow as tf
        new_h = u * state + (1 - u) * c
        return new_h, new_h

def prelu(_x, scope=''):
    """parametric ReLU activation"""
    with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
        _alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
                                 dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
        return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)

def calc_auc(raw_arr):
    """Summary

    Args:
        raw_arr (TYPE): Description
tensorflow.constant_initializer
14,669
import tensorflow as tf
        w = tf.get_variable(name='weights',
                            trainable=is_pretrain,
                            shape=[kernel_size[0],kernel_size[1],in_channels,out_channels],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(name='bias',
                            trainable=is_pretrain,
                            shape=[out_channels],
                            initializer=tf.constant_initializer(0.0))
        x = tf.nn.conv2d(x,w,strides,padding='SAME',name='conv')
        x = tf.nn.bias_add(x,b,name='bias_add')
        x = tf.nn.relu(x,name='relu')
        return x
tensorflow.constant_initializer
14,670
import tensorflow as tf
        list of variables in `scope`.
    """
    return tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name
    )

def scope_name():
    """Returns the name of current scope as a string, e.g. deepq/q_func"""
    return tf.get_variable_scope().name

def absolute_scope_name(relative_scope_name):
    """Appends parent scope name to `relative_scope_name`"""
    return scope_name() + "/" + relative_scope_name

def default_param_noise_filter(var):
    if var not in tf.trainable_variables():
tensorflow.get_variable_scope
14,671
import tensorflow as tf
        if is_training:
            d = d.repeat()
            d = d.shuffle(buffer_size=100)

        d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
        return d

    return input_fn


def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    processors = {
        "cola": classifier_utils.ColaProcessor,
        "mnli": classifier_utils.MnliProcessor,
        "mismnli": classifier_utils.MisMnliProcessor,
        "mrpc": classifier_utils.MrpcProcessor,
        "rte": classifier_utils.RteProcessor,
        "sst-2": classifier_utils.Sst2Processor,
        "sts-b": classifier_utils.StsbProcessor,
        "qqp": classifier_utils.QqpProcessor,
        "qnli": classifier_utils.QnliProcessor,
tensorflow.logging.set_verbosity
14,672
import tensorflow as tf
        var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
        has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)

        with self.session():
            tf.global_variables_initializer().run()
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         'is not finite'):
                self.assertTrue(has_nan_or_inf.eval())
                self.assertEqual(0., grad_scale.eval())
                # The final gradient must be finite.
                self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
                self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())


class TeacherTask(base_model.BaseTask):

    @base_layer.initializer
    def __init__(self, params):
        super(TeacherTask, self).__init__(params)
        p = self.params
        with tf.variable_scope(p.name):
            self.CreateVariable('x',
tensorflow.is_finite
14,673
import tensorflow as tf
                        1,
                        scope='feature_projection' + str(i)))
            # Resize to decoder_height/decoder_width.
            for j, feature in enumerate(decoder_features_list):
                decoder_features_list[j] = tf.image.resize_bilinear(
                    feature, [decoder_height, decoder_width], align_corners=True)
                if is_training:
                    decoder_features_list[j].set_shape(
tensorflow.image.resize_bilinear
14,674
import tensorflow as tf
            self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad')
        else:
            self.train_op = self.optimizer.minimize(self.loss)

    def _attention(self, output, name='attn', reuse=None):
        with tf.variable_scope(name, reuse=reuse):
            W = tf.get_variable(name="attn_W",
                                shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                # initializer=tf.truncated_normal_initializer(),
tensorflow.variable_scope
14,675
from tensorflow.python.platform import gfile
            # Exercise the second helper.

            # Adding s2 again (old s2 is removed first, then new s2 appended)
            s2 = save2.save(sess, os.path.join(save_dir, "s2"))
            self.assertEqual([s3, s2], save2.last_checkpoints)
            # Created by the first helper.
            self.assertTrue(gfile.Exists(s1))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
            # Deleted by the first helper.
            self.assertFalse(gfile.Exists(s3))
            self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
            self.assertTrue(gfile.Exists(s2))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))

            # Adding s1 (s3 should now be deleted as oldest in list)
            s1 = save2.save(sess, os.path.join(save_dir, "s1"))
            self.assertEqual([s2, s1], save2.last_checkpoints)
            self.assertFalse(gfile.Exists(s3))
            self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
            self.assertTrue(gfile.Exists(s2))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
            self.assertTrue(gfile.Exists(s1))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
tensorflow.python.platform.gfile.Exists
14,676
import tensorflow as tf
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))

        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

        gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
        bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
tensorflow.split
14,677
import tensorflow as tf
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
tensorflow.layers.dense
14,678
from tensorflow.contrib import metrics as contrib_metrics
                "eval_accuracy": accuracy,
                "eval_loss": loss,
            }

        elif task_name == "sts-b":

            def metric_fn(per_example_loss, label_ids, logits, is_real_example):
                """Compute Pearson correlations for STS-B."""
                # Display labels and predictions
                concat1 = contrib_metrics.streaming_concat(logits)
                concat2 = contrib_metrics.streaming_concat(label_ids)

                # Compute Pearson correlation
                pearson = contrib_metrics.streaming_pearson_correlation(
                    logits, label_ids, weights=is_real_example)

                # Compute MSE
                # mse = tf.metrics.mean(per_example_loss)
                mse = tf.metrics.mean_squared_error(
                    label_ids, logits, weights=is_real_example)

                loss = tf.metrics.mean(
                    values=per_example_loss, weights=is_real_example)
tensorflow.contrib.metrics.streaming_pearson_correlation
14,679
from tensorflow.python.ops import array_ops
    def loss_fn(logits, target):
        check_shape_op = logging_ops.Assert(
            math_ops.less_equal(array_ops.rank(target), 2),
            ["target's shape should be either [batch_size, 1] or [batch_size]"])
        with ops.control_dependencies([check_shape_op]):
            target = array_ops.reshape(
                target, shape=[array_ops.shape(target)[0], 1])
        return losses.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
tensorflow.python.ops.array_ops.shape
14,680
import tensorflow as tf
        sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
            can be a single Tensor or a 2-tuple (numerator and denominator).
    Returns:
        losses: dict<str loss_name, Tensor avg_loss>
    """
    losses = {}
    for loss_name in sharded_losses[0]:
        all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
        if isinstance(all_shards[0], tuple):
            sharded_num, sharded_den = zip(*all_shards)
            mean_loss = (
                tf.add_n(sharded_num) / tf.maximum(1.0, tf.add_n(sharded_den)))
        else:
            mean_loss = tf.reduce_mean(all_shards)
        losses[loss_name] = mean_loss
    return losses


def summarize_features(features, num_shards=1):
    with tf.name_scope("input_stats"):
        for (k, v) in six.iteritems(features):
            if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:
tensorflow.add_n
14,681
import tensorflow as tf
    Raises:
        ValueError: if anchors or groundtruth_boxes are not of type box_list.BoxList
    """
    if not isinstance(anchors, box_list.BoxList):
        raise ValueError('anchors must be an BoxList')
    if not isinstance(groundtruth_boxes, box_list.BoxList):
        raise ValueError('groundtruth_boxes must be an BoxList')

    if groundtruth_labels is None:
        groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
        groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
    shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:],
                                   tf.shape(self._unmatched_cls_target))

    with tf.control_dependencies([shape_assert]):
        match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
                                                             anchors)
        match = self._matcher.match(match_quality_matrix, **params)
        reg_targets = self._create_regression_targets(anchors,
                                                      groundtruth_boxes,
                                                      match)
        cls_targets = self._create_classification_targets(groundtruth_labels,
                                                          match)
tensorflow.expand_dims
14,682
import tensorflow as tf
        # input tensors
        self.input_x = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_x")
        self.input_tags = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_tags")
        self.input_deps = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_dependency")
        self.input_head = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_head")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
tensorflow.placeholder
14,683
import tensorflow as tf
        lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl})
        outputs = lowering.export_to_tf_tensor(mtf_samples)
        if self.has_input:
            ndims = len(outputs.shape.as_list())
            actual_batch_size = tf.shape(features["inputs"])[0]
            outputs = tf.slice(
                outputs, [0] * ndims, [actual_batch_size] + [-1] * (ndims - 1))
        predictions = {
tensorflow.shape
14,684
import tensorflow as tf
        if self.optim_type == 'adagrad':
            self.optimizer = tf.train.AdagradOptimizer(self.lr)
        elif self.optim_type == 'adam':
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        elif self.optim_type == 'rprop':
            self.optimizer = tf.train.RMSPropOptimizer(self.lr)
tensorflow.train.AdamOptimizer
14,685
import tensorflow as tf
    inputs_shape = list(map(int, inputs.get_shape()))
    predictions_shape = list(map(int, predictions.get_shape()))
    nr_mix = int(predictions_shape[-1] / 10)
    logit_probs = predictions[:, :, :, :nr_mix]
    predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])
    means = predictions[:, :, :, :, :nr_mix]
    log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)
    coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])
    inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])
    m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],
                    [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
    m3 = tf.reshape(
        means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +
        coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
        [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
    means = tf.concat([
        tf.reshape(means[:, :, :, 0, :],
                   [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]),
        m2, m3
    ], axis=3)
    centered_inputs = inputs - means
    inv_stdv = tf.exp(-log_scales)
tensorflow.reshape
14,686
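Editor's sketch (not a dataset row): the basic tf.reshape behaviour behind the seed above, which splits a channel dimension into per-mixture components; shapes here are illustrative.

import tensorflow as tf

# tf.reshape reinterprets a tensor with a new shape of the same total size.
x = tf.range(24)
y = tf.reshape(x, [2, 3, 4])
with tf.Session() as sess:
    print(sess.run(tf.shape(y)))  # [2 3 4]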
from tensorflow.python.ops import math_ops dnn_grads = grads[0:len(dnn_vars)] linear_grads = grads[len(dnn_vars):] train_ops = self._get_linear_training_ops( linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars) train_step = control_flow_ops.group(*train_ops, name="combined_training_op") with ops.control_dependencies([train_step]): with ops.get_default_graph().colocate_with(global_step): return state_ops.assign_add(global_step, 1).op, loss def _run_metrics(self, predictions, targets, metrics, weights): result = {} targets = math_ops.cast(targets, predictions.dtype) for name, metric in six.iteritems(metrics or {}): if "weights" in inspect.getargspec(metric)[0]: result[name] = metric(predictions, targets, weights=weights) else: result[name] = metric(predictions, targets) return result def _get_eval_ops(self, features, targets, metrics=None): """See base class.""" logits = self._logits(features) result = {"loss": metrics_lib.streaming_mean(self._loss( logits, targets,
tensorflow.python.ops.math_ops.cast
14,687
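Editor's sketch (not a dataset row): math_ops.cast as in the seed above; math_ops is a TensorFlow-internal module (the public equivalent is tf.cast), and the tensors are illustrative.

import tensorflow as tf
from tensorflow.python.ops import math_ops

# cast converts dtypes, e.g. aligning integer targets with float predictions
# before computing a metric, as the seed does.
predictions = tf.constant([0.2, 0.8])
targets = tf.constant([0, 1])
targets = math_ops.cast(targets, predictions.dtype)
with tf.Session() as sess:
    print(sess.run(targets))  # [0. 1.]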
from tensorflow.python.framework import ops @ops.RegisterShape("LessEqual") @ops.RegisterShape("LogicalAnd") @ops.RegisterShape("LogicalOr") @ops.RegisterShape("Maximum") @ops.RegisterShape("Minimum") @ops.RegisterShape("Mod") @ops.RegisterShape("Mul") @ops.RegisterShape("NotEqual") @ops.RegisterShape("Pow") @ops.RegisterShape("Sub") def _BroadcastShape(op): """Common shape function for binary operators that broadcast their inputs.""" shape_x = op.inputs[0].get_shape() shape_y = op.inputs[1].get_shape()
tensorflow.python.framework.ops.RegisterShape
14,688
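Editor's sketch (not a dataset row): the ops.RegisterShape mechanism shown in the seed above. This API belongs to old TF 1.x and has since been removed; "MyCustomOp" is a hypothetical op name used purely for illustration.

from tensorflow.python.framework import ops

# In TF 1.x, RegisterShape associated a Python shape function with an op type;
# passing None defers shape inference to the op's C++ shape function.
ops.RegisterShape("MyCustomOp")(None)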
import tensorflow as tf # set some important options if self._gpu == -1: sess_config = tf.ConfigProto(device_count = {'GPU': 0}, allow_soft_placement=True) else:
tensorflow.ConfigProto
14,689
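Editor's sketch (not a dataset row): tf.ConfigProto as in the seed above, assuming TF 1.x.

import tensorflow as tf

# device_count={'GPU': 0} hides GPUs from the session, and allow_soft_placement
# lets TF fall back to another device when the requested one is unavailable.
sess_config = tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)
with tf.Session(config=sess_config) as sess:
    print(sess.run(tf.constant(1.0)))  # 1.0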
import tensorflow as tf noise_shape = [shape[0], 1, shape[-1]] args = tf.cond(is_train, lambda: tf.nn.dropout( args, keep_prob, noise_shape=noise_shape) * scale, lambda: args) return args def softmax_mask(val, mask): return -INF * (1 - tf.cast(mask, tf.float32)) + val def pointer(inputs, state, hidden, mask, scope="pointer"): with tf.variable_scope(scope): u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [1, tf.shape(inputs)[1], 1]), inputs], axis=2) #[N,PL,2d] s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(tf.squeeze(s, [2]), mask)#[N,PL] a = tf.expand_dims(tf.nn.softmax(s1), axis=2)#[N,PL,1] res = tf.reduce_sum(a * inputs, axis=1) return res, s1 # attention_sum probability def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"): with tf.variable_scope(scope): d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
tensorflow.expand_dims
14,690
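Editor's sketch (not a dataset row): the expand-and-tile idiom from the attention pointer in the seed above, assuming TF 1.x; the shapes are illustrative.

import tensorflow as tf

# tf.expand_dims adds a length-1 time axis to the decoder state so it can be
# tiled across every encoder position before concatenation.
state = tf.ones([2, 5])                                     # [batch, hidden]
tiled = tf.tile(tf.expand_dims(state, axis=1), [1, 7, 1])   # [batch, 7, hidden]
with tf.Session() as sess:
    print(sess.run(tf.shape(tiled)))  # [2 7 5]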
import tensorflow as tf idx_z1_y0_x0 = base_z1_y0 + x0_clip idx_z1_y0_x1 = base_z1_y0 + x1_clip idx_z1_y1_x0 = base_z1_y1 + x0_clip idx_z1_y1_x1 = base_z1_y1 + x1_clip # Use indices to lookup pixels in the flat image and restore # channels dim im_flat = tf.reshape(im, tf.stack([-1, channels])) im_flat = tf.to_float(im_flat) i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0) i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1) i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0) i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1) i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0) i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1) i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0) i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1) # Finally calculate interpolated values. x0_f = tf.to_float(x0) x1_f = tf.to_float(x1) y0_f = tf.to_float(y0) y1_f = tf.to_float(y1) z0_f = tf.to_float(z0) z1_f = tf.to_float(z1) # Check the out-of-boundary case. x0_valid = tf.to_float( tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
tensorflow.gather
14,691
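Editor's sketch (not a dataset row): tf.gather as used by the seed above to look up pixels from a flattened image; the values are illustrative.

import tensorflow as tf

# tf.gather picks rows of a tensor by index.
im_flat = tf.constant([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])  # [4, channels]
idx = tf.constant([3, 0, 2])
pixels = tf.gather(im_flat, idx)
with tf.Session() as sess:
    print(sess.run(pixels))  # [[3. 3.] [0. 0.] [2. 2.]]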
import tensorflow as tf x = tf.placeholder_with_default(input=[2., 2., 5.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) with self.assertRaisesOpError("not in the support"): x = tf.placeholder_with_default(input=[1., 3., 5.], shape=[3]) log_prob = pareto.log_prob(x) self.evaluate(log_prob) def testParetoLogPdfMultidimensional(self): batch_size = 6 scale = tf.constant([[2., 4., 5.]] * batch_size) scale_v = [2., 4., 5.] concentration = tf.constant([[1.]] * batch_size) concentration_v = 1. x = np.array([[6., 7., 9.2, 5., 6., 7.]], dtype=np.float32).T pareto = tfd.Pareto(concentration, scale) log_prob = pareto.log_prob(x) self.assertEqual(log_prob.shape, (6, 3)) self.assertAllClose(
tensorflow.constant
14,692
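Editor's sketch (not a dataset row): the tf.constant batching idiom from the seed above, assuming TF 1.x.

import tensorflow as tf

# tf.constant embeds fixed values in the graph; repeating a row batch_size
# times, as the seed does, yields a [batch_size, 3] parameter tensor.
batch_size = 6
scale = tf.constant([[2., 4., 5.]] * batch_size)
with tf.Session() as sess:
    print(sess.run(tf.shape(scale)))  # [6 3]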
import tensorflow as tf # logloss not being an upper bound on the indicator function. weighted_loss = weights * losses_utils.weighted_surrogate_loss( labels, logits, surrogate_type=surrogate_type, positive_weights=1.0 + lambdas * (1.0 - target_precision), negative_weights=lambdas * target_precision) maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) lambda_term = lambdas * (1.0 - target_precision) * label_priors * maybe_log2 loss = tf.reshape(weighted_loss - lambda_term, original_shape) other_outputs = { 'lambdas': lambdas_variable,
tensorflow.log
14,693
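Editor's sketch (not a dataset row): tf.log, the TF 1.x element-wise natural logarithm referenced by the seed above, which scales its lambda term by log(2) when the cross-entropy surrogate is used.

import tensorflow as tf

maybe_log2 = tf.log(2.0)
with tf.Session() as sess:
    print(sess.run(maybe_log2))  # ~0.6931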
import tensorflow as tf def export_ops(self, name): self._name = name ops = {self.with_prefix(self._name, 'cost'): self._cost} if self._is_training: ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update) if self._rnn_params: ops.update(rnn_params=self._rnn_params) for name, op in ops.items(): tf.add_to_collection(name, op) self._initial_state_name = self.with_prefix(self._name, 'initial') self._final_state_name = self.with_prefix(self._name, 'final') for state_tuple in self._initial_state: tf.add_to_collection(self._initial_state_name, state_tuple.c) tf.add_to_collection(self._initial_state_name, state_tuple.h) for state_tuple in self._final_state: tf.add_to_collection(self._final_state_name, state_tuple.c) tf.add_to_collection(self._final_state_name, state_tuple.h)
tensorflow.add_to_collection
14,694
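Editor's sketch (not a dataset row): the collection pattern from the seed above, assuming TF 1.x; the collection name is illustrative.

import tensorflow as tf

# tf.add_to_collection stores ops/tensors in a named graph collection so they
# can be retrieved later (e.g. after importing a MetaGraph) with tf.get_collection.
cost = tf.constant(1.5)
tf.add_to_collection('model/cost', cost)
retrieved = tf.get_collection('model/cost')[0]
with tf.Session() as sess:
    print(sess.run(retrieved))  # 1.5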
import tensorflow as tf if init: x = tf.nn.conv2d_transpose(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3]), target_shape, [1] + list(stride) + [1], padding=pad) init_scale = .01 m_init, v_init = tf.nn.moments(x, [0, 1, 2]) scale_init = init_scale / tf.sqrt(v_init + 1e-10) with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]): x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters])) else: V = maybe_avg(V) g = maybe_avg(g) b = maybe_avg(b) W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 3]) # calculate convolutional layer output x = tf.nn.conv2d_transpose(x, W, target_shape, [1] + list(stride) + [1], padding=pad) x = tf.nn.bias_add(x, b) return x def transposed_gated_resnet(x, aux, dim=(32, [3, 3], [1, 1]), activation=tf.nn.elu, scope="transposed_gated_resnet", residual=True, dropout=.0, conv=conv2d, training=True, ema=None, init=False): out = conv(activation(x), [dim[0], dim[1], [1, 1]], scope="%s_conv_in" % scope, training=training, ema=ema, init=init) in_shp = x.get_shape().as_list() assert in_shp[1] == in_shp[2] if aux is not None: aux_shp = aux.get_shape().as_list()
tensorflow.reshape
14,695
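Editor's sketch (not a dataset row): the broadcasting role tf.reshape plays in the weight-normalization code above; the NHWC layout and filter count here are illustrative assumptions, not taken from the seed.

import tensorflow as tf

# Reshaping a per-filter scale vector to [1, 1, 1, num_filters] lets it
# broadcast over an NHWC feature map.
num_filters = 8
g = tf.ones([num_filters])
scale = tf.reshape(g, [1, 1, 1, num_filters])
x = tf.zeros([2, 4, 4, num_filters])
y = scale * x
with tf.Session() as sess:
    print(sess.run(tf.shape(y)))  # [2 4 4 8]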
import tensorflow as tf print('\ntranspose(D)=') print(sess.run(tf.transpose(D))) print('\ninverse(D)=') print(sess.run(tf.matrix_inverse(D))) print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D)))) print('\ncholesky(D):')
tensorflow.matrix_inverse
14,696
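Editor's sketch (not a dataset row): tf.matrix_inverse as printed by the seed above, assuming TF 1.x; the matrix is illustrative.

import tensorflow as tf

# tf.matrix_inverse inverts a (batch of) square matrices.
D = tf.constant([[2., 0.], [0., 4.]])
with tf.Session() as sess:
    print(sess.run(tf.matrix_inverse(D)))      # [[0.5 0.] [0. 0.25]]
    print(sess.run(tf.matrix_determinant(D)))  # 8.0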
import tensorflow as tf def reduce_fn(accum, elem): return tf.size(elem, out_type=tf.int64, name='vocabulary_size') + accum return _get_tensor_value( dataset.batch(tf.int32.max).reduce( tf.constant(0, tf.int64), reduce_fn)) else: with tf.io.gfile.GFile(vocab_path, 'rb') as f: return sum(1 for _ in f) def vocabulary_by_name(self, vocab_filename: str) -> List[bytes]: """Like vocabulary_file_by_name but returns a list.""" vocab_path = self.vocabulary_file_by_name(vocab_filename) if not vocab_path:
tensorflow.io.gfile.GFile
14,697
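Editor's sketch (not a dataset row): the tf.io.gfile.GFile line-counting idiom from the seed above; the vocab path and contents are hypothetical.

import tensorflow as tf

# GFile opens files through TensorFlow's filesystem layer, which also handles
# remote paths such as GCS buckets.
vocab_path = '/tmp/vocab.txt'
with tf.io.gfile.GFile(vocab_path, 'w') as f:
    f.write('the\ncat\nsat\n')
with tf.io.gfile.GFile(vocab_path, 'rb') as f:
    print(sum(1 for _ in f))  # 3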
from tensorflow.python.ops import array_ops if (x.get_shape().ndims is not None and self._is_all_constant_helper(size, *start_sum)): start = sum(tensor_util.constant_value(s) for s in start_sum) stop = start + tensor_util.constant_value(size) slice_ = x.get_shape()[start:stop].as_list() if all(s is not None for s in slice_): return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name) # Fall-through intended. return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,)) sample_ndims = self.get_sample_ndims(x, name=name) return (slice_shape((), sample_ndims, name="sample_shape"), slice_shape((sample_ndims,), self.batch_ndims, name="batch_shape"), slice_shape((sample_ndims, self.batch_ndims), self.event_ndims, name="event_shape"))
tensorflow.python.ops.array_ops.shape
14,698
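Editor's sketch (not a dataset row): array_ops.shape as used by the seed above; array_ops is a TensorFlow-internal module (the public equivalent is tf.shape), and the tensor shape is illustrative.

import tensorflow as tf
from tensorflow.python.ops import array_ops

# array_ops.shape returns the runtime shape as a 1-D tensor, which the seed
# then slices into sample, batch, and event parts.
x = tf.zeros([3, 4, 5])
shape = array_ops.shape(x)
batch_part = array_ops.slice(shape, (1,), (2,))
with tf.Session() as sess:
    print(sess.run(batch_part))  # [4 5]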
import tensorflow as tf scope_name = 'decoder_{}'.format(decoder.name) scope_name += '/' + '_'.join(encoder.name for encoder in encoders) def embed(input_): embedded_input = tf.nn.embedding_lookup(embedding, input_) if decoder.use_dropout and decoder.word_keep_prob is not None: noise_shape = [1, 1] if decoder.pervasive_dropout else [tf.shape(input_)[0], 1] embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape) if decoder.use_dropout and decoder.embedding_keep_prob is not None: size = tf.shape(embedded_input)[1] noise_shape = [1, size] if decoder.pervasive_dropout else [tf.shape(input_)[0], size] embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob, noise_shape=noise_shape) return embedded_input
tensorflow.nn.dropout
14,699
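Editor's sketch (not a dataset row): the word-level dropout idiom from the seed above, using the TF 1.x tf.nn.dropout signature; batch size and embedding width are illustrative.

import tensorflow as tf

# keep_prob keeps each element with that probability; a noise_shape of
# [batch, 1] drops whole embedding rows at once, as the seed's word dropout does.
embedded = tf.ones([2, 4])
dropped = tf.nn.dropout(embedded, keep_prob=0.5, noise_shape=[2, 1])
with tf.Session() as sess:
    print(sess.run(dropped))  # each row is all 0s or all 2s (scaled by 1/keep_prob)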