seed: string (lengths 25–2.89k)
seed_api: string (lengths 14–102)
index: int64 (0–14.8k)
import tensorflow as tf

        self.alpha_c = alpha_c
        self.selector = selector
        self.dropout = dropout
        self.V = len(word_to_idx)
        self.L = dim_feature[0]
        self.D = dim_feature[1]
        self.M = dim_embed
        self.H = dim_hidden
        self.T = n_time_step
        self._start = word_to_idx['<START>']
        self._null = word_to_idx['<NULL>']

        self.weight_initializer = tf.contrib.layers.xavier_initializer()
        self.const_initializer = tf.constant_initializer(0.0)
        self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)

        # Place holder for features and captions
        self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
        self.captions = tf.placeholder(tf.int32, [None, self.T + 1])

    def _get_initial_lstm(self, features):
        with tf.variable_scope('initial_lstm'):
            features_mean = tf.reduce_mean(features, 1)
            w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
            b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
tensorflow.constant_initializer
7,700
import tensorflow as tf

        # case 2
        hparams = {
            "pretrained_model_name": None,
            "regr_strategy": "cls_time"
        }
        regressor = XLNetRegressor(hparams=hparams)
        logits = regressor(inputs)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_ = sess.run(logits)
            self.assertEqual(logits_.shape, (batch_size,))

        # case 3
        hparams = {
            "pretrained_model_name": None,
            "regr_strategy": "time_wise"
        }
        regressor = XLNetRegressor(hparams=hparams)
tensorflow.global_variables_initializer
7,701
import tensorflow as tf

  def _get_lstm_cell(self, config, is_training):
    if config.rnn_mode == BASIC:
      return tf.contrib.rnn.BasicLSTMCell(
          config.hidden_size, forget_bias=0.0, state_is_tuple=True,
          reuse=not is_training)
    if config.rnn_mode == BLOCK:
      return tf.contrib.rnn.LSTMBlockCell(
          config.hidden_size, forget_bias=0.0)
    raise ValueError("rnn_mode %s not supported" % config.rnn_mode)

  def _build_rnn_graph_lstm(self, inputs, config, is_training):
    """Build the inference graph using canonical LSTM cells."""
    # Slightly better results can be obtained with forget gate biases
tensorflow.contrib.rnn.LSTMBlockCell
7,702
import tensorflow as tf

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
        # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
        tmp1 = tf.tensordot(facts, w1, axes=1)
        tmp2 = tf.tensordot(query, w2, axes=1)
tensorflow.random_normal
7,703
import tensorflow as tf

    wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                          tf.reshape(wvs, [-1, in_size]))
    wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
    wvsum = tf.reduce_sum(wvs_weighted_reshaped, 0)

    pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction for each tweet.
    def GetWordPred(o_):
tensorflow.get_variable
7,704
import tensorflow as tf

    best_target_per_prior = tf.math.reduce_max(ious, axis=1)
    best_target_per_prior_index = tf.math.argmax(ious, axis=1)
    # size: num_targets
    best_prior_per_target = tf.math.reduce_max(ious, axis=0)
    best_prior_per_target_index = tf.math.argmax(ious, axis=0)

    targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64')

    best_target_per_prior_index = tf.tensor_scatter_nd_update(
        best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets)
    # 2.0 is used to make sure every target has a prior assigned
    best_target_per_prior = tf.tensor_scatter_nd_update(
        best_target_per_prior, tf.expand_dims(best_prior_per_target_index, 1),
        tf.ones_like(best_prior_per_target_index, dtype=tf.float32) * 2.0)
    # size: num_priors
    labels = tf.gather(gt_labels, best_target_per_prior_index)
    labels = tf.where(tf.less(best_target_per_prior, iou_threshold),
                      tf.constant(0, dtype='int64'), labels)
tensorflow.expand_dims
7,705
import tensorflow as tf

class SetFromFlat(object):
    def __init__(self, var_list, dtype=tf.float32):
        assigns = []
        shapes = list(map(var_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])

        self.theta = theta = tf.placeholder(dtype, [total_size])
        start = 0
        assigns = []
        for (shape, v) in zip(shapes, var_list):
            size = intprod(shape)
            assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
            start += size
        self.op = tf.group(*assigns)

    def __call__(self, theta):
        get_session().run(self.op, feed_dict={self.theta: theta})

class GetFlat(object):
    def __init__(self, var_list):
        self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])

    def __call__(self):
tensorflow.reshape
7,706
import tensorflow as tf

    :param padding: 'VALID' or 'SAME'
    :param stride: int so only support square stride
    :param w_init: initializer for convolution weights
    :param b_init: initializer for bias
    :param split: split channels as used in Alexnet mainly group for GPU memory save.
    :param use_bias: whether to use bias.
    :param data_format: default set to NHWC according tensorflow
    :return: tf.Tensor named ``output``
    """
    with tf.variable_scope(name):
        in_shape = inputdata.get_shape().as_list()
        channel_axis = 3 if data_format == 'NHWC' else 1
        in_channel = in_shape[channel_axis]
        assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
        assert in_channel % split == 0
        assert out_channel % split == 0

        padding = padding.upper()
tensorflow.variable_scope
7,707
import tensorflow as tf

      new_mean = tf.assign_sub(
          mean,
          tf.check_numerics(
              decay * (mean - cur_mean), "NaN in moving mean."))
tensorflow.check_numerics
7,708
import tensorflow as tf

  return pmm


def _evaluate_legendre_polynomial_loop_cond(x, n, l, m, pmm, pmm1):
  return tf.cast(tf.math.count_nonzero(n <= l), tf.bool)


def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1):
  n_float = tf.cast(n, dtype=x.dtype)
  m_float = tf.cast(m, dtype=x.dtype)
  pmn = (x * (2.0 * n_float - 1.0) * pmm1 - (n_float + m_float - 1) * pmm) / (
      n_float - m_float)
  pmm = tf.where(tf.less_equal(n, l), pmm1, pmm)
  pmm1 = tf.where(tf.less_equal(n, l), pmn, pmm1)
  n += 1
  return x, n, l, m, pmm, pmm1


def _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1):
  n = m + 2
  x, n, l, m, pmm, pmm1 = tf.while_loop(
      cond=_evaluate_legendre_polynomial_loop_cond,
      body=_evaluate_legendre_polynomial_loop_body,
      loop_vars=[x, n, l, m, pmm, pmm1])
  return pmm1
tensorflow.less_equal
7,709
import tensorflow as tf

    graph_def = tf.get_default_graph().as_graph_def()
    self.default_encoding_stage()
    new_graph_def = tf.get_default_graph().as_graph_def()
    tf.test.assert_equal_graph_def(graph_def, new_graph_def)

  def test_encoding_stage_name(self):
tensorflow.test.assert_equal_graph_def
7,710
import tensorflow as tf

                              unconnected_gradients=tf.UnconnectedGradients.NONE)
        train_op = optimizer.apply_gradients(zip(grads, tf_sparse_demo.trainable_variables))
        with tf.control_dependencies([train_op]):
            loss = tf.identity(loss)
            return loss, embedding_vector

    dataset = utils.tf_dataset(*random_samples, batchsize=args.global_batch_size,
                               to_sparse_tensor=True, repeat=1)
    train_iterator = dataset.make_initializable_iterator()
    iterator_init = train_iterator.initializer
    inputs, labels = train_iterator.get_next()

    graph_results = _train_step(inputs, labels, training=True)

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    restore_op = list()
    for i, embedding_weight in enumerate(tf_sparse_demo.embedding_weights):
        restore_op.append(embedding_weight.assign(tf.concat(init_tensors[i], axis=0)))

    emb_values = list()
    for embedding_weight in tf_sparse_demo.embedding_weights:
        if args.save_params:
            filepath = r"./embedding_variables/"
            utils.try_make_dirs(filepath)
            emb_values.append(embedding_weight.read_value())
        else:
            emb_values = tf.constant(1.0)
tensorflow.local_variables_initializer
7,711
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops

    config = test_configs[config_name]
    num_layers = config["num_layers"]
    num_units = config["num_units"]
    batch_size = config["batch_size"]
    seq_length = config["seq_length"]

    with ops.Graph().as_default(), ops.device("/device:GPU:0"):
      model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
      params_size_t = model.params_size()
      input_data = variables.Variable(
          array_ops.ones([seq_length, batch_size, num_units]))
      input_h = variables.Variable(
          array_ops.ones([num_layers, batch_size, num_units]))
      input_c = variables.Variable(
tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops.CudnnLSTM
7,712
import tensorflow as tf

        X = self._do_conv(X, w, h, ch, ch_out, filter_size=1, is_train=is_train)
        X = tf.reshape(X, (-1, w_out, h_out, ch_out))  # Sanity shape check

        return X
tensorflow.reshape
7,713
from tensorflow.contrib.learn.python.learn.datasets import base

  base.shrink_csv(train_path, 1000)
  base.shrink_csv(test_path, 1000)
tensorflow.contrib.learn.python.learn.datasets.base.shrink_csv
7,714
from tensorflow.examples.tutorials.mnist import input_data

    w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())
    b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))
    y = tf.matmul(h2, w3) + b3

    # losses
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))
    # optimizer
    optimizer = tf.train.GradientDescentOptimizer(args.lr)
    # define one-step train ops
    train_op = optimizer.minimize(cross_entropy)
    return x, y, gt, train_op


if __name__ == "__main__":
    max_train_step = args.max_train_step
    batch_size = args.batch_size
    mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
    x, y, gt, train_op = model()

    # create saver
    saver = tf.train.Saver()

    if os.path.exists('./mnist'):
        print('=> directory is existed!')
    else:
        print('=> creating temporary directory ...')
        os.makedirs('./mnist')

    with tf.Session() as sess:
        if args.model_path == '':
            tf.global_variables_initializer().run()
tensorflow.examples.tutorials.mnist.input_data.read_data_sets
7,715
import tensorflow as tf

    inputs = tf.identity(inputs, 'initial_max_pool')

    for i, num_blocks in enumerate(self.block_sizes):
      num_filters = self.num_filters * (2**i)
      inputs = block_layer(
          inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
          block_fn=self.block_fn, blocks=num_blocks,
          strides=self.block_strides[i], training=training,
          name='block_layer{}'.format(i + 1), data_format=self.data_format)

    # Only apply the BN and ReLU for model that does pre_activation in each
    # building/bottleneck block, eg resnet V2.
    if self.pre_activation:
      inputs = batch_norm(inputs, training, self.data_format)
      inputs = tf.nn.relu(inputs)

    # The current top layer has shape
    # `batch_size x pool_size x pool_size x final_size`.
    # ResNet does an Average Pooling layer over pool_size,
    # but that is the same as doing a reduce_mean. We do a reduce_mean
    # here because it performs better than AveragePooling2D.
    axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
    inputs = tf.reduce_mean(inputs, axes, keepdims=True)
    inputs = tf.identity(inputs, 'final_reduce_mean')

    inputs = tf.reshape(inputs, [-1, self.final_size])
tensorflow.nn.relu
7,716
import tensorflow as tf

    YDistr = tf.contrib.distributions.MultivariateNormalDiag(
        loc=tf.zeros(D_int, tf.float32), scale_diag=tf.ones(D_int, tf.float32))
    Y = YDistr.sample(N_int)
    T = 1.0/(2.0*N*tf.sqrt(m.pi*y))

    A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
tensorflow.sqrt
7,717
from tensorflow.python.framework import ops


@contextlib.contextmanager
def _handle_graph(handle):
  with handle.graph.as_default():
    yield


def _enclosing_tpu_context():
  # pylint: disable=protected-access
  context = ops.get_default_graph()._get_control_flow_context()
  # pylint: enable=protected-access
  while context is not None and not isinstance(
      context, control_flow_ops.XLAControlFlowContext):
    context = context.outer_context
  return context


class ReplicatedVariable(VariableBase):
tensorflow.python.framework.ops.get_default_graph
7,718
import tensorflow as tf

        inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w])
        mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True)
        inputdata = (inputdata - mean) / tf.sqrt(var + esp)

        # gamma and beta for each channel
        gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')
        beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')
        gamma = tf.reshape(gamma, [1, c, 1, 1])
        beta = tf.reshape(beta, [1, c, 1, 1])

        # reshape back as in the paper, [n, c, h, w, c] to [n, h, w, c]
        output = tf.reshape(inputdata, [-1, c, h, w])
        output = output * gamma + beta
        output = tf.transpose(output, [0, 2, 3, 1])

        return output

    @staticmethod
    def squeeze(inputdata, axis=None, name=None):
        """
        :param inputdata:
        :param axis:
        :param name:
        :return:
        """
tensorflow.transpose
7,719
import tensorflow as tf

        self._create_model(train_triples)
        self.sess.run(tf.initialize_all_variables())
tensorflow.initialize_all_variables
7,720
import tensorflow as tf

      [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)

  image = _aspect_preserving_resize(image, resize_side)
  image = _random_crop([image], output_height, output_width)[0]
  image.set_shape([output_height, output_width, 3])
  image = tf.to_float(image)
  image = tf.image.random_flip_left_right(image)
  return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])


def preprocess_for_eval(image, output_height, output_width, resize_side):
  """Preprocesses the given image for evaluation.
tensorflow.image.random_flip_left_right
7,721
import tensorflow as tf

  result_m_zero = _spherical_harmonics_normalization(
      degree_l, zeros, var_type) * evaluate_legendre_polynomial(
          degree_l, zeros, tf.cos(theta))
  result_branch = _evaluate_spherical_harmonics_branch(
tensorflow.cos
7,722
import tensorflow as tf

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the nets to the corresponding
      activation.
  """
  assert data_format in ['NDHWC', 'NCDHW']
  # Final pooling and prediction
  with tf.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers.batch_norm, layers.dropout], is_training=is_training):
      net, end_points = s3dg_base(
          inputs,
          first_temporal_kernel_size=first_temporal_kernel_size,
          temporal_conv_startat=temporal_conv_startat,
tensorflow.variable_scope
7,723
import tensorflow as tf

            dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
            add=True,
            transpose=transpose)
    else:
        y = sbnet_module.sparse_scatter(
            q,
            indices.bin_counts,
            indices.active_block_indices,
            x,
            dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),
            dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),
            dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
            add=True,
            transpose=transpose)
    return y


def sparse_conv2d_matmul(x, w, blk_indices, strides, padding):
    """
    Performs 2D convolution using matrix multiplication on a sparse feature map.
tensorflow.constant
7,724
import tensorflow as tf

        for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]
    ]
    alpha_fixed_progress = [
        sess.run(
            networks._discriminator_alpha(block_id, tf.constant(1.2, tf.float32)))
        for block_id in range(1, 5)
    ]

    self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)
    self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)

  def test_blend_images_in_stable_stage(self):
    x_np = np.random.normal(size=[2, 8, 8, 3])
    x = tf.constant(x_np, tf.float32)
    x_blend = networks.blend_images(
        x,
        progress=tf.constant(0.0),
        resolution_schedule=networks.ResolutionSchedule(
            scale_base=2, num_resolutions=2),
        num_blocks=2)
    with self.test_session(use_gpu=True) as sess:
      x_blend_np = sess.run(x_blend)
      x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))
    self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)

  def test_blend_images_in_transition_stage(self):
    x_np = np.random.normal(size=[2, 8, 8, 3])
tensorflow.constant
7,725
import tensorflow as tf

      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

    # Start a second session. In that session the variables
    # have not been initialized either.
    with self.test_session(graph=tf.Graph()) as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver([v0, v1])

      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)

      # Restore the saved values in the parameter nodes.
tensorflow.train.Saver
7,726
import tensorflow as tf

            initialized_variable_names,
        ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
        if use_tpu:

            def tpu_scaffold():
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                return tf.train.Scaffold()

            scaffold_fn = tpu_scaffold
        else:
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(
                " name = %s, shape = %s%s", var.name, var.shape, init_string
            )

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(
tensorflow.logging.info
7,727
import tensorflow as tf

        else:
            initial_state = dense(initial_state, cell_state_size, use_bias=True,
                                  name='initial_state_projection', activation=activation_fn)

        if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
            initial_output = initial_state
        else:
            # Last layer's state is the right-most part. Output is the left-most part of an LSTM's state.
            initial_output = initial_state[:, -cell_output_size:]

        time = tf.constant(0, dtype=tf.int32, name='time')
        outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
        samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
        inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))

        states = tf.TensorArray(dtype=tf.float32, size=time_steps)
        weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
        attns = tf.TensorArray(dtype=tf.float32, size=time_steps)

        initial_symbol = inputs.read(0)  # first symbol is BOS
tensorflow.constant
7,728
import tensorflow as tf

  if mode == tf.estimator.ModeKeys.TRAIN:
    train_op = optimization.create_optimizer(
        total_loss, learning_rate, num_train_steps, num_warmup_steps,
        use_tpu, optimizer)

    output_spec = contrib_tpu.TPUEstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        scaffold_fn=scaffold_fn)
  elif mode == tf.estimator.ModeKeys.EVAL:
    if task_name not in ["sts-b", "cola"]:
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions,
            weights=is_real_example)
        loss = tf.metrics.mean(
            values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }
    elif task_name == "sts-b":
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        """Compute Pearson correlations for STS-B."""
        # Display labels and predictions
tensorflow.argmax
7,729
import tensorflow as tf

        print('------valid_classification_report-----')
        print(classification_report(valid_true_total, valid_pre_total, target_names=target_names))
        print('------valid_classification_report-----')
        print('------valid_confusion_matrix-----')
        cm = confusion_matrix(y_true=valid_true_total, y_pred=valid_pre_total)
        print(cm)
        print('------valid_confusion_matrix-----')

        coord.request_stop()
        coord.join(threads)


def predict_time(loop=100):
    feed_dict = {
        testnum: 1
    }
    with tf.Session(config=config) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        tf.train.Saver().restore(sess, path)
        total = 0.0
        for i in range(loop):
            a = datetime.now()
            accuracy_np = sess.run([accuracy], feed_dict=feed_dict)
            b = datetime.now()
            c = (b - a).microseconds
            total += c
        print('predict_time(ms): ', total / (loop * 1000))
        coord.request_stop()
        coord.join(threads)
tensorflow.Session
7,730
from tensorflow.contrib.boosted_trees.proto import learner_pb2

  def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
    learner_config = learner_pb2.LearnerConfig()
tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig
7,731
import tensorflow as tf

    centroids_mask = None
    centroids, lookup = get_unique(weights)
    num_centroids = tf.size(centroids)

    if self.preserve_sparsity:
      sparsity_mask = tf.math.divide_no_nan(weights, weights)
      zero_idx = tf.argmin(tf.abs(centroids), axis=-1)
      centroids_mask = 1.0 - tf.one_hot(zero_idx, num_centroids)
      result = {SPARSITY_MASK: sparsity_mask}
tensorflow.math.divide_no_nan
7,732
import tensorflow as tf

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    summary_op = tf.summary.merge_all()
    # save moving average
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # batch norm updates
    with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
        train_op = tf.no_op(name='train_op')

    saver = tf.train.Saver(tf.global_variables())
    summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())

    init = tf.global_variables_initializer()

    if FLAGS.pretrained_model_path is not None:
        variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path,
                                                             slim.get_trainable_variables(),
tensorflow.no_op
7,733
import tensorflow as tf

tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
tensorflow.app.flags.DEFINE_string
7,734
from tensorflow.contrib.framework import tensor_util

    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_relative_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or tuple.
  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())

  predictions, normalizer = tensor_util.remove_squeezable_dimensions(
      predictions, normalizer)
  predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
  relative_errors = math_ops.select(
      math_ops.equal(normalizer, 0.0),
      array_ops.zeros_like(labels),
      math_ops.div(math_ops.abs(labels - predictions), normalizer))
  return streaming_mean(relative_errors, weights, metrics_collections,
                        updates_collections, name or 'mean_relative_error')


def streaming_mean_squared_error(predictions, labels, weights=None,
                                 metrics_collections=None,
tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions
7,735
import tensorflow as tf

        elif encoder.bidir and encoder.final_state == 'last_both':
            encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
tensorflow.concat
7,736
import tensorflow as tf

    tf.summary.image('training_masks', training_masks)
    tf.summary.scalar('model_loss', model_loss)
    tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss


def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g, _ in grad_and_vars:
            expanded_g = tf.expand_dims(g, 0)
            grads.append(expanded_g)

        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads


class MixedPrecisionOptimizer(tf.train.Optimizer):
    """An optimizer that updates trainable variables in fp32."""

    def __init__(self, optimizer, scale=None, name="MixedPrecisionOptimizer",
tensorflow.concat
7,737
import tensorflow as tf

    # synchronization point across all towers.
    grads = average_gradients(tower_grads)

    # Add a summary to track the learning rate.
    summaries.append(tf.summary.scalar('learning_rate', lr))

    # Add histograms for gradients.
    for grad, var in grads:
      if grad is not None:
        summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))

    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(tf.summary.histogram(var.op.name, var))

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Group all updates to into a single train op.
    train_op = tf.group(apply_gradient_op, variables_averages_op)

    # Create a saver.
    saver = tf.train.Saver(tf.global_variables())

    # Build the summary operation from the last tower summaries.
tensorflow.trainable_variables
7,738
import tensorflow as tf "grouped_minibatch_stddev", "centered_grouped_image", centered_grouped_image ) # Calculate variance over group. # shape = ( # cur_batch_size / group_size, # image_size, # image_size, # num_channels # ) grouped_variance = tf.reduce_mean( input_tensor=tf.square(x=centered_grouped_image), axis=0, name="grouped_variance" ) print_obj( "grouped_minibatch_stddev", "grouped_variance", grouped_variance ) # Get stddev image using ops common to both grouped & ungrouped. stddev_feature_map = self.minibatch_stddev_common( variance=grouped_variance,
tensorflow.square
7,739
import tensorflow as tf

tf.app.flags.DEFINE_integer(
    'resnet_size', 50, 'The size of the ResNet model to use.')

tf.app.flags.DEFINE_integer(
    'train_epochs', None, 'The number of epochs to use for training.')
tensorflow.app.flags.DEFINE_integer
7,740
import tensorflow as tf

        # intermediate projection to embedding size (before projecting to vocabulary size)
        # this is useful to reduce the number of parameters, and
        # to use the output embeddings for output projection (tie_embeddings parameter)
        output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')

        if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
            bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
            output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
        else:
            output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
        return output_

    if decoder.use_dropout:  # FIXME: why no pervasive dropout here?
        initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)

    with tf.variable_scope(scope_name):
        activation_fn = None if decoder.initial_state == 'linear' else tf.nn.tanh
        if decoder.initial_state == 'trained':
            initial_state = get_variable(shape=[cell_state_size], name='initial_state')
            initial_state = tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
        elif decoder.initial_state == 'zero':
            initial_state = tf.zeros(shape=[batch_size, cell_state_size])
        elif decoder.layer_norm:
            initial_state = dense(initial_state, cell_state_size, use_bias=False, name='initial_state_projection')
            initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn,
                                                         scope='initial_state_layer_norm')
tensorflow.nn.dropout
7,741
import tensorflow as tf

        aspect_ratio_range=[0.75, 1.33],
        area_range=[0.08, 1.0],
        max_attempts=10,
        use_image_if_no_bounding_boxes=True)
    is_bad = tf.reduce_sum(tf.cast(tf.equal(bbox_size, jpeg_shape), tf.int32)) >= 2

    def good():
        offset_y, offset_x, _ = tf.unstack(bbox_begin)
        target_height, target_width, _ = tf.unstack(bbox_size)
        crop_window = tf.stack([offset_y, offset_x, target_height, target_width])

        image = tf.image.decode_and_crop_jpeg(
            byte, crop_window, channels=3, **JPEG_OPT)
        image = uint8_resize_bicubic(image, [224, 224])
        return image
tensorflow.unstack
7,742
import tensorflow as tf

    targets: Target classes of shape `[T, B]`
    sequence_length: An int32 tensor of shape `[B]` corresponding
      to the length of each input

  Returns:
    A tensor of shape [T, B] that contains the loss per example, per time step.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
    loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

  return losses


def dice_loss(predictions, targets, weights=1., name='dice_loss'):
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
7,743
import tensorflow as tf

        self.EPS_LEN = 100000
        # GPU setup
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
                                                           'rewards': self.rewards, 'advantage': self.advantage})
        self.dataset = self.dataset.shuffle(buffer_size=10000)
        self.dataset = self.dataset.batch(self.MINIBATCH)
        self.dataset = self.dataset.cache()
        self.dataset = self.dataset.repeat(self.EPOCHS)
        self.data_iter = self.dataset.make_initializable_iterator()
        batch = self.data_iter.get_next()
        # Call ppo net
        pi_old, pi_old_params = self.build_anet(batch['state'], 'oldpi')
        pi, pi_params = self.build_anet(batch['state'], 'pi')
tensorflow.placeholder
7,744
import tensorflow as tf

      initializer=modeling.create_initializer(bert_config.initializer_range))
  output_bias = tf.get_variable(
      "output_bias", shape=[2], initializer=tf.zeros_initializer())
tensorflow.zeros_initializer
7,745
import tensorflow as tf

        inputs=inputs,
        sequence_length=seq_lengths,
        beam_width=beam_width,
        blank_index=blank_index,
        top_paths=1,
        blank_label=0)

    decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
    decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
    decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
tensorflow.sparse.SparseTensor
7,746
import tensorflow as tf

            alpha_var: variable, alpha for weighted sum of fade-in of layers.
            params: dict, user passed parameters.
            trans_idx: int, index of current growth transition.

        Returns:
            Final logits tensor of discriminator.
        """
        print_obj(
            "\nEntered create_growth_transition_discriminator_network",
            "trans_idx",
            trans_idx
        )
        print_obj("create_growth_transition_discriminator_network", "X", X)

        with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
            # Growing side chain.
            growing_from_rgb_conv_layer = self.from_rgb_conv_layers[trans_idx + 1]
            growing_block_layers = self.conv_layer_blocks[trans_idx + 1]

            # Pass inputs through layer chain.
            growing_block_conv = growing_from_rgb_conv_layer(inputs=X)
            print_obj(
                "\ncreate_growth_transition_discriminator_network",
                "growing_block_conv",
                growing_block_conv
            )

            for i in range(len(growing_block_layers)):
tensorflow.variable_scope
7,747
import tensorflow as tf

    self._testMultiSaverCollectionSave()
    self._testMultiSaverCollectionRestore()

  def testBinaryAndTextFormat(self):
    test_dir = self._TestDir("binary_and_text")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session(graph=tf.Graph()):
      # Creates a graph.
      tf.Variable(10.0, name="v0")
      # Exports the graph as binary format.
      tf.train.export_meta_graph(filename, as_text=False)
    with self.test_session(graph=tf.Graph()):
tensorflow.Graph
7,748
import tensorflow as tf

        input_fn=imagenet_eval.input_fn,
        steps=eval_steps,
        hooks=eval_hooks)
    tf.logging.info('Evaluation results: %s' % eval_results)
tensorflow.logging.info
7,749
import tensorflow as tf

            if ids == "":
                ids = id
            else:
                ids = ids + "_" + id
        return ids

    def _create_regularizers_hook(self, config):
        wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # see keras_utils.py: activity_and_contractive_regularizers
        ac_regularizers = tf.get_collection(AC_REGULARIZATION)
        custom_regularizers = tf.get_collection(CUSTOM_REGULARIZATION)

        if wb_regularizers:
            wb_regularizers_names = [r.name for r in wb_regularizers]
        else:
            wb_regularizers = [tf.zeros([1])]
            wb_regularizers_names = ["none"]
        wb_regularizers_fileNames = {"fileName": "wb_regularizers"}
tensorflow.get_collection
7,750
import tensorflow as tf

    rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(
        tf.reshape(detections['rotations_3d'][i], [3, 3]), 1)
        for i in range(num_boxes)], axis=0)
    rotations_y = tf.reshape(rotations_y, [-1, 1])
    predicted_boxes = tf.concat([detections['translations_3d'],
                                 detections['sizes_3d'],
tensorflow.reshape
7,751
from tensorflow.python.framework import tensor_shape


@ops.RegisterShape("AssignAdd")
@ops.RegisterShape("AssignSub")
def _AssignUpdateShape(op):
  """Shape function for the AssignAdd and AssignSub dense update ops."""
  return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]


@ops.RegisterShape("CountUpTo")
def _CountUpToShape(op):
  """Shape function for the CountUpTo op."""
  return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())]


@ops.RegisterShape("ScatterAdd")
@ops.RegisterShape("ScatterSub")
@ops.RegisterShape("ScatterUpdate")
def _ScatterUpdateShape(op):
  """Shape function for the sparse update ops."""
  var_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape()
tensorflow.python.framework.tensor_shape.scalar
7,752
import tensorflow as tf

            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b))
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b)
        return Y

    def fwd_gradients_0(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]
        return tf.gradients(g, self.dummy_x0_tf)[0]

    def fwd_gradients_1(self, U, x):
        g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
        return tf.gradients(g, self.dummy_x1_tf)[0]

    def net_U0(self, x):
        lambda_1 = self.lambda_1
        lambda_2 = tf.exp(self.lambda_2)
        U = self.neural_net(x, self.weights, self.biases)
        U_x = self.fwd_gradients_0(U, x)
        U_xx = self.fwd_gradients_0(U_x, x)
        F = -lambda_1*U*U_x + lambda_2*U_xx
        U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T)
        return U0

    def net_U1(self, x):
        lambda_1 = self.lambda_1
tensorflow.gradients
7,753
import tensorflow as tf

  if 'train' == name:
    record_filename = _get_output_filename(dataset_dir, name, shard, FLAGS.train_shards)
  elif 'test' == name:
    record_filename = _get_output_filename(dataset_dir, name)
  else:
    raise ValueError('Illegal dataset name')
  tfrecord_writer = tf.python_io.TFRecordWriter(record_filename)

  for filename in filenames:
    with tf.gfile.Open(filename, 'r') as f:
      data = cPickle.load(f)

    images = data['data']
    num_images = images.shape[0]

    images = images.reshape((num_images, 3, 32, 32))
    labels = data['labels']

    with tf.Graph().as_default():
      image_placeholder = tf.placeholder(dtype=tf.uint8)
tensorflow.gfile.Open
7,754
from tensorflow.contrib import seq2seq

    if decoder_fn is None:
        outputs, final_state = tf.nn.dynamic_rnn(cell, tensor,
                                                 sequence_length=sequence_length,
                                                 initial_state=initial_state,
                                                 dtype=tf.float32)
        final_context_state = None
    else:
        # TODO: turn off sequence_length?
        outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(
            cell, decoder_fn, inputs=None, sequence_length=sequence_length)

    if return_final_state:
        return final_state
    else:
tensorflow.contrib.seq2seq.dynamic_rnn_decoder
7,755
import tensorflow as tf

        for i in range(len(sliced_output)):
            for j in range(i+1, len(sliced_output)):
                cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j])
                cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j]
                cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j]))
                if loss == None:
                    loss = cur_label_weight * cur_pair_loss * cur_propensity
                loss += cur_label_weight * cur_pair_loss * cur_propensity
tensorflow.exp
7,756
import tensorflow as tf

        [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      value = tf.layers.dense(x, 1)[..., 0]
  mean = tf.check_numerics(mean, "mean")
  logstd = tf.check_numerics(logstd, "logstd")
  value = tf.check_numerics(value, "value")

  policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))

  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
tensorflow.check_numerics
7,757
import tensorflow as tf

        eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
        file_based_convert_examples_to_features(
            eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d", len(eval_examples))
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
tensorflow.logging.info
7,758
import tensorflow as tf

    input = op.inputs[0]
    return tf.zeros_like(input)
tensorflow.zeros_like
7,759
import tensorflow as tf

    affine1 = tf.nn.relu_layer(inpOp, weights, biases)
    return affine1


def l2_loss(tensor, weight=1.0, scope=None):
    """Define a L2Loss, useful for regularize, i.e. weight decay.
    Args:
      tensor: tensor to regularize.
      weight: an optional weight to modulate the loss.
      scope: Optional scope for op_scope.
    Returns:
      the L2 loss op.
    """
    with tf.name_scope(scope):
        weight = tf.convert_to_tensor(weight,
                                      dtype=tensor.dtype.base_dtype,
                                      name='loss_weight')
        loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
    return loss


def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name):
    with tf.variable_scope(name):
        if pnorm == 2:
            pwr = tf.square(inpOp)
        else:
            pwr = tf.pow(inpOp, pnorm)
tensorflow.convert_to_tensor
7,760
from tensorflow.python.framework import ops ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape) @ops.RegisterShape("Add") @ops.RegisterShape("Complex") @ops.RegisterShape("Div") @ops.RegisterShape("Equal") @ops.RegisterShape("Greater") @ops.RegisterShape("GreaterEqual") @ops.RegisterShape("Less") @ops.RegisterShape("LessEqual") @ops.RegisterShape("LogicalAnd") @ops.RegisterShape("LogicalOr") @ops.RegisterShape("Maximum") @ops.RegisterShape("Minimum") @ops.RegisterShape("Mod") @ops.RegisterShape("Mul") @ops.RegisterShape("NotEqual") @ops.RegisterShape("Pow") @ops.RegisterShape("Sub")
tensorflow.python.framework.ops.RegisterShape
7,761
import tensorflow as tf

        self._cur_user = self.user_memory(self.input_users)

        # Item memories a query
        self._cur_item = self.item_memory(self.input_items)
        self._cur_item_negative = self.item_memory(self.input_items_negative)

    def _construct_placeholders(self):
        self.input_users = tf.placeholder(tf.int32, [None], 'UserID')
        self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')
        self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')

        # Add our placeholders
        add_to_collection(GraphKeys.PLACEHOLDER,
                          [self.input_users, self.input_items, self.input_items_negative])
tensorflow.placeholder
7,762
import tensorflow as tf

    Raises:
      ValueError: if neither `loc` nor `covariance_matrix` are specified.
    """
    parameters = dict(locals())

    # Convert the covariance_matrix up to a scale_tril and call MVNTriL.
    with tf.name_scope(name) as name:
      with tf.name_scope("init", values=[loc, covariance_matrix]):
        dtype = dtype_util.common_dtype([loc, covariance_matrix], tf.float32)
        loc = loc if loc is None else tf.convert_to_tensor(
            loc, name="loc", dtype=dtype)
        if covariance_matrix is None:
          scale_tril = None
        else:
          covariance_matrix = tf.convert_to_tensor(
              covariance_matrix, name="covariance_matrix", dtype=dtype)
          if validate_args:
            covariance_matrix = control_flow_ops.with_dependencies([
tensorflow.convert_to_tensor
7,763
import tensorflow as tf

        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
tensorflow.contrib.tpu.TPUConfig
7,764
import tensorflow as tf

                   weights, states, attns, initial_weights, samples, initial_context),
        parallel_iterations=decoder.parallel_iterations,
        swap_memory=decoder.swap_memory)

    outputs = outputs.stack()
    weights = weights.stack()  # batch_size, encoders, output time, input time
    states = states.stack()
    attns = attns.stack()
    samples = samples.stack()

    # put batch_size as first dimension
    outputs = tf.transpose(outputs, perm=(1, 0, 2))
    weights = tf.transpose(weights, perm=(1, 0, 2))
    states = tf.transpose(states, perm=(1, 0, 2))
    attns = tf.transpose(attns, perm=(1, 0, 2))
    samples = tf.transpose(samples)

    return outputs, weights, states, attns, samples, get_logits, initial_data


def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
                    align_encoder_id=0, encoder_input_length=None,
                    feed_argmax=True, rewards=None, use_baseline=True,
                    training=True, global_step=None,
                    monotonicity_weight=None, monotonicity_dist=None, monotonicity_decay=None, **kwargs):
    decoder = decoders[0]
    targets = targets[0]  # single decoder

    if encoder_input_length is None:
        encoder_input_length = []
        for encoder_inputs_ in encoder_inputs:
tensorflow.transpose
7,765
from tensorflow.contrib.learn.python.learn.estimators import run_config

  def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig
7,766
import tensorflow as tf

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    # Trainable parameters
tensorflow.array_ops.transpose
7,767
import tensorflow as tf

      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, do_serve):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
tensorflow.one_hot
7,768
from tensorflow.python.framework import ops
tensorflow.python.framework.ops.reset_default_graph
7,769
import tensorflow as tf

      cell = tf.nn.rnn_cell.GRUCell(2)
      inp = [tf.constant(0.5, shape=[2, 2])] * 2
      enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
      attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                  for e in enc_outputs])
      dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
      dec, mem = tf.nn.seq2seq.attention_decoder(
          dec_inp, enc_state,
          attn_states, cell, output_size=4,
          num_heads=2)
      sess.run([tf.global_variables_initializer()])
      res = sess.run(dec)
tensorflow.nn.seq2seq.attention_decoder
7,770
import tensorflow as tf

        self.task = task
        self.freeze = freeze

        worker_device = "/job:worker/task:{}/cpu:0".format(task)
        with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)):
            with tf.variable_scope("global"):
                self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)
                self.global_step = tf.get_variable("global_step", [], tf.int32,
                                                   initializer=tf.constant_initializer(0, dtype=tf.int32),
                                                   trainable=False)

        with tf.device(worker_device):
            with tf.variable_scope("local"):
                self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)
                pi.global_step = self.global_step

            self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac")
            self.adv = tf.placeholder(tf.float32, [None], name="adv")
            self.r = tf.placeholder(tf.float32, [None], name="r")

            log_prob_tf = tf.nn.log_softmax(pi.logits)
            prob_tf = tf.nn.softmax(pi.logits)
tensorflow.variable_scope
7,771
import tensorflow as tf

    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      depth = 256
      branch_logits = []

      if model_options.add_image_level_feature:
        # modified by Paul Voigtlaender
        if is_training:
          pool_height = scale_dimension(model_options.crop_size[0],
                                        1. / model_options.output_stride)
          pool_width = scale_dimension(model_options.crop_size[1],
                                       1. / model_options.output_stride)
          image_feature = slim.avg_pool2d(
              features, [pool_height, pool_width], [pool_height, pool_width],
              padding='VALID')
        else:
          pool_height = tf.shape(features)[1]
          pool_width = tf.shape(features)[2]
          image_feature = tf.reduce_mean(features, axis=[1,2])[:, tf.newaxis, tf.newaxis, :]
        image_feature = slim.conv2d(
            image_feature, depth, 1, scope=_IMAGE_POOLING_SCOPE)
        image_feature = tf.image.resize_bilinear(
            image_feature, [pool_height, pool_width], align_corners=True)
        if is_training:
          image_feature.set_shape([None, pool_height, pool_width, depth])
        branch_logits.append(image_feature)

      # Employ a 1x1 convolution.
      branch_logits.append(slim.conv2d(features, depth, 1, scope=_ASPP_SCOPE + str(0)))
tensorflow.shape
7,772
import tensorflow as tf

                scale=False, decay=decay, activation_fn=activation_fn,
                updates_collections=None, scope=vs)  # updates_collections=None
        except ValueError:
            layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True,
                scale=False, decay=decay, activation_fn=activation_fn,
                updates_collections=None, scope=vs, reuse=True)  # updates_collections=None
        else:
            layer = tf.contrib.layers.batch_norm(layer, is_training=False, center=True,
                scale=False, decay=decay, activation_fn=activation_fn,
                updates_collections=None, scope=vs, reuse=True)  # updates_collections=None
    elif norm_type == 'layer_norm':  # layer_norm
        # Take activation_fn out to apply lrelu
        try:
            layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True,
                scale=False, scope=vs))  # updates_collections=None
        except ValueError:
            layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True,
                scale=False, scope=vs, reuse=True))
    elif norm_type == 'selu':
        layer = selu(layer)
    else:
        raise NotImplementedError('Other types of norm not implemented.')

    return layer


class VBN(object):
    """
    Virtual Batch Normalization
    """

    def __init__(self, x, name, epsilon=1e-5):
tensorflow.contrib.layers.layer_norm
7,773
import tensorflow as tf

def batch_norm_conv(x, b_train, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)
tensorflow.constant
7,774
import tensorflow as tf

      A tuple containing:

      - Input tensor of the restored model.
      - Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - new session of the restored model.
    """
    model_dir = tempfile.mkdtemp()
    saver.save(sess, model_dir)

    # Reset the graph.
    tf.reset_default_graph()
    x, prediction, output_class = self.buildModel(lstm_layer, is_dynamic_rnn)

    new_sess = tf.compat.v1.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess

  def getInferenceResult(self, x, output_class, sess):
    """Get inference result given input tensor and output tensor.

    Args:
      x: The input tensor.
      output_class: The output tensor.
      sess: Current session.

    Returns:
      A tuple containing:
tensorflow.compat.v1.Session
7,775
import tensorflow as tf "url.") tf.flags.DEFINE_string( "tpu_zone", None,
tensorflow.flags.DEFINE_string
7,776
import tensorflow as tf

    pad = tf.expand_dims(tf.expand_dims(pad, axis=0), axis=1)
    pad = tf.tile(pad, [batch_size, 1, 1])
tensorflow.tile
7,777
import tensorflow as tf """ x_means_hot = self.nearest_neighbor(x, means) x_means_hot_flat = tf.reshape( x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size]) x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = tf.transpose(x_means, [1, 0, 2]) q_loss = tf.reduce_mean( tf.squared_difference(tf.stop_gradient(x), x_means)) e_loss = tf.reduce_mean( tf.squared_difference(x, tf.stop_gradient(x_means)))
tensorflow.transpose
7,778
import tensorflow as tf

config = ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
session = tf.Session(config=config).as_default()
tensorflow.Session
7,779
import tensorflow as tf

def time_string():
    return datetime.now().strftime("%Y-%m-%d %H:%M")


def model_train_mode(args, feeder, hparams, global_step):
    with tf.variable_scope("Tacotron_model", reuse=tf.AUTO_REUSE) as scope:
        model = create_model("Tacotron", hparams)
        model.initialize(feeder.inputs, feeder.input_lengths, feeder.speaker_embeddings,
                         feeder.mel_targets, feeder.token_targets,
                         targets_lengths=feeder.targets_lengths, global_step=global_step,
                         is_training=True, split_infos=feeder.split_infos)
tensorflow.variable_scope
7,780
import tensorflow as tf

tf.app.flags.DEFINE_string('ws_prune_ratio_prtl', 'optimal',
                           'WS: pruning ratio protocol (\'uniform\' | \'heurist\' | \'optimal\')')
tf.app.flags.DEFINE_integer('ws_nb_rlouts', 200, 'WS: # of roll-outs for the RL agent')
tf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50,
                            'WS: minimal # of roll-outs for the RL agent to start training')
tf.app.flags.DEFINE_string('ws_reward_type', 'single-obj',
                           'WS: reward type (\'single-obj\' OR \'multi-obj\')')
tf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2, 'WS: learning rate for layerwise regression')
tf.app.flags.DEFINE_integer('ws_nb_iters_rg', 20, 'WS: # of iterations for layerwise regression')
tf.app.flags.DEFINE_float('ws_lrn_rate_ft', 3e-4, 'WS: learning rate for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_feval', 25, 'WS: # of iterations for fast evaluation')
tf.app.flags.DEFINE_float('ws_prune_ratio_exp', 3.0, 'WS: pruning ratio\'s exponent term')
tf.app.flags.DEFINE_float('ws_iter_ratio_beg', 0.1, 'WS: iteration ratio (at starting time)')
tf.app.flags.DEFINE_float('ws_iter_ratio_end', 0.5, 'WS: iteration ratio (at ending time)')
tf.app.flags.DEFINE_float('ws_mask_update_step', 500, 'WS: step size for updating the pruning mask')


def calc_prune_ratio(vars_list):
    """Calculate the overall pruning ratio for the given list of variables.

    Args:
    * vars_list: list of variables

    Returns:
    * prune_ratio: overall pruning ratio of the given list of variables
    """
    nb_params_nnz = tf.add_n([tf.count_nonzero(var) for var in vars_list])
    nb_params_all = tf.add_n([tf.size(var) for var in vars_list])
    prune_ratio = 1.0 - tf.cast(nb_params_nnz, tf.float32) / tf.cast(nb_params_all, tf.float32)

    return prune_ratio
tensorflow.app.flags.DEFINE_float
7,781
import tensorflow as tf

    def bias_variable(self, name, shape):
        with tf.variable_scope(name) as scope:
            biases = tf.get_variable('biaess',
                                     shape=shape,
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.01))
        return biases

    # Define a convolutional layer scoped by `name`, with input x, kernel W,
    # stride `stride`, and bias `bias`; the activation defaults to relu.
    def conv2d(self, name, x, W, stride, bias):
        with tf.variable_scope(name) as scope:
            conv = tf.nn.conv2d(x, W, [1, stride, stride, 1], padding='SAME')
            pre_activation = tf.nn.bias_add(conv, bias)
            output = tf.nn.relu(pre_activation, name=scope.name)
        return output

    # Define a pooling layer, max pooling by default.
    def max_pool_2x2(self, name, x):
        with tf.variable_scope(name) as scope:
            maxpool = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        return maxpool

    # Build the DQN.
    def creat_network(self):
        # Network parameters (weights).
        W_conv1 = self.weight_variable('W_conv1', [8, 8, 4, 32])  # first conv layer: 8x8 kernel, 4 input channels, 32 output channels
tensorflow.nn.relu
7,782
import tensorflow as tf

from src.autoencoders.optimizer import OptimizerAE, OptimizerVAE
from src.autoencoders.preprocessing import *
import networkx as nx
import numpy as np
from collections import namedtuple
import os
import scipy.sparse as sp
import tensorflow as tf
import time

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

flags = namedtuple('FLAGS', ['dataset', 'task', 'model', 'dropout', 'epochs', 'features',
                             'learning_rate', 'hidden', 'dimension', 'nb_run', 'prop_val',
                             'prop_test', 'validation', 'verbose', 'kcore', 'k', 'nb_iterations'])
FLAGS = flags('custom', 'link_prediction', 'gcn_ae', 0., 200, False, 0.01, 32, 16, 1,
              5., 10., False, True, False, 2, 10)


def fit_model(g, model_name):
    # Lists to collect average results
    mean_roc = []
tensorflow.compat.v1.logging.set_verbosity
7,783
from tensorflow.python.ops import math_ops

    self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda")

  def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype)
    g_t = grad

    var_update = state_ops.assign_sub(var,
tensorflow.python.ops.math_ops.cast
7,784
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")])

    classifier.fit(input_fn=_train_input_fn, steps=15)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)

  def testThatLeafIndexIsInPredictions(self):
tensorflow.contrib.layers.python.layers.feature_column.real_valued_column
7,785
import tensorflow as tf

        if type == 'scramble':
            self.augment = self.scramble
        elif type == 'mix_scramble':
            self.augment = self.mix_scramble
        elif type == 'blur':
            self.augment = self.gaussian_blur
            self.pointwise_filter = tf.eye(3, batch_shape=[1, 1])
        elif type == 'high_low_pass':
            self.augment = self.high_low_pass
            self.kernel = self.gaussian_kernel(size, mean, std)
            self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
            self.pointwise_filter = tf.eye(3, batch_shape=[1, 1])
            self.paddings = [[size, size], [size, size], [0, 0]]
        elif type == 'no_op':
            self.augment = self.no_op

    def gaussian_kernel(self, size, mean, std):
        """Makes 2D gaussian Kernel for convolution."""
        d = tfp.distributions.Normal(mean, std)
        vals = d.prob(tf.range(start=-size, limit=size + 1, dtype=tf.float32))
tensorflow.tile
7,786
import tensorflow as tf """ Like the testing function but this one is for calculate the inference time and measure the frame per second """ print("INFERENCE mode will begin NOW..") # load the best model checkpoint to test on it self.load_best_model() # output_node: network/output/Argmax # input_node: network/input/Placeholder # for n in tf.get_default_graph().as_graph_def().node: # if 'input' in n.name:#if 'Argmax' in n.name: # import pdb; pdb.set_trace() print("Saving graph...") tf.train.write_graph(self.sess.graph_def, ".", 'graph.pb') print("Graph saved successfully.\n\n") exit(1) # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # idx of image idx = 0 # create the FPS Meter fps_meter = FPSMeter() # loop by the number of iterations for cur_iteration in tt:
tensorflow.train.write_graph
7,787
import tensorflow as tf

    loss = tf.reduce_mean(loss)
    return loss


def contra_step_lossV2(pred, tgt):
    # Step-wise contrastive loss
    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)

    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
    loss = tf.reduce_mean(loss)
    return loss


def contra_step_lossV3(pred, tgt, margin=1.0):
    # Step-wise contrastive loss
tensorflow.where
7,788
import tensorflow as tf

    def loop_body(b, ign_mask):
        true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])
        iou = box_iou(pred_box[b], true_box)
tensorflow.boolean_mask
7,789
import tensorflow as tf

        self.train_summary = tf.summary.merge_all(key='train')
        self.eval_summary = tf.summary.merge_all(key='eval')
        self.saver = tf.train.Saver(tf.global_variables())

    def separate_gradient_update(self):
        denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
        ranking_model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "ranking_model")
        self.weighs_propen = denoise_params
        if self.hparams.l2_loss > 0:
            for p in denoise_params:
                # self.weighs_propen = p
                # p = tf.Print(p, [p], message="show the weights")
                self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p))
tensorflow.get_collection
7,790
import tensorflow as tf

        x = tf.expand_dims(self.s, [0])

        self.manager_lstm = SingleStepLSTM(x,
                                           self.g_dim,
                                           step_size=tf.shape(self.obs)[:1])
        g_hat = self.manager_lstm.output
        self.g = tf.nn.l2_normalize(g_hat, dim=1)
tensorflow.shape
7,791
import tensorflow as tf

        return out

    def valid_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2) + self.b2
        return out

    def softmax_loss(self, predicts, labels):
        predicts = tf.nn.softmax(predicts)
        labels = tf.one_hot(labels, classnum)
tensorflow.matmul
7,792
import tensorflow as tf

  box1 = box1.astype(np.float32)
  box2 = box2.astype(np.float32)
  # rotates around z, while we rotate around y so need to swap
  center_1 = tf.reshape(box1[0:3][[0, 2, 1]], [1, 3])
  center_2 = tf.reshape(box2[0:3][[0, 2, 1]], [1, 3])

  rotation_z_1 = tf.reshape(box1[-1], [1])
tensorflow.reshape
7,793
import tensorflow as tf

        return loss

    def create_architecture(self, sess, mode, num_classes, tag=None,
                            anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
        self._image = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 3])
        self._im_info = tf.placeholder(tf.float32, shape=[self._batch_size, 3])  # rescaled image size and the scale factor
        self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])  # gt_boxes: rescaled coordinates plus class label
        self._tag = tag

        self._num_classes = num_classes
        self._mode = mode
tensorflow.placeholder
7,794
from tensorflow.python.ops import array_ops

  def _get_predict_ops(self, features):
    """See base class."""
    logits = self._logits(features)
    return self._logits_to_predictions(logits, proba=True)

  def _logits_to_predictions(self, logits, proba=False):
    if self._n_classes < 2:
      return array_ops.reshape(logits, [-1])

    if self._n_classes == 2:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)

  def _get_feature_ops_from_example(self, examples_batch):
    column_types = layers.create_dict_for_parse_example(
        (self._get_linear_feature_columns() or []) +
        (self._get_dnn_feature_columns() or []))
tensorflow.python.ops.array_ops.zeros_like
7,795
import tensorflow as tf

def gen_epochs_multi(raw_data_x, raw_data_y, timeindex, num_epochs, num_steps, batch_size, epoch_overlap):
    for i in range(int(num_epochs)):
        yield gen_batch_multi(raw_data_x, raw_data_y, timeindex, batch_size, num_steps, epoch_overlap)


def reset_graph():
    if 'sess' in globals() and sess:
        sess.close()
    tf.reset_default_graph()


"""
Define RNN graph
"""

def build_multilayer_rnn_graph_with_dynamic_rnn(cell_type, activation, state_size, num_steps,
                                                num_layers, input_size_x, input_size_y,
                                                learning_rate, lambda_l2_reg, random_seed=0):
tensorflow.reset_default_graph
7,796
import tensorflow as tf

    return collected


def merge_parameters(params1, params2):
    params = tf.contrib.training.HParams()
    for (k, v) in params1.values().items():
        params.add_hparam(k, v)

    params_dict = list(params.values())  ## key value pair
tensorflow.contrib.training.HParams
7,797
import tensorflow as tf

      return tf.constant(0, dtype=x.dtype)

  def _transpose(self, x, perm):
    sample_batch_ndims = tf.rank(x) - self.rightmost_transposed_ndims
    perm = tf.concat([
        tf.range(sample_batch_ndims),
tensorflow.rank
7,798
import tensorflow as tf

    print('Training...done')

    tf.reset_default_graph()
    sess.close()
tensorflow.reset_default_graph
7,799