Columns:
  seed      string   length 25 to 2.89k characters
  seed_api  string   length 14 to 102 characters
  index     int64    values 0 to 14.8k
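Each record that follows pairs a seed code snippet with the fully qualified TensorFlow API it exercises (seed_api) and a running row index. Below is a minimal sketch of iterating records with this schema; the filename seed_rows.jsonl and the JSON Lines export it reads from are illustrative assumptions, not part of this listing:

# Minimal sketch, assuming the records shown below have been exported to a
# hypothetical local JSON Lines file "seed_rows.jsonl", one JSON object per
# row with the fields "seed", "seed_api", and "index".
import json
from collections import Counter

api_counts = Counter()
with open("seed_rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        snippet = row["seed"]       # code snippet exercising the API (string)
        api_name = row["seed_api"]  # fully qualified API, e.g. "tensorflow.split"
        row_index = row["index"]    # integer row id
        api_counts[api_name] += 1

# For example, list the most frequently exercised APIs.
for api_name, count in api_counts.most_common(5):
    print(f"{count:5d}  {api_name}")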
import tensorflow as tf def __init__(self, cell): super(CellWrapper, self).__init__() self.cell = cell self.num_splits = len(cell.state_size) if isinstance(cell.state_size, tuple) else 1 @property def state_size(self): return sum(self.cell.state_size) @property def output_size(self): return self.cell.output_size def __call__(self, inputs, state, scope=None): state = tf.split(value=state, num_or_size_splits=self.num_splits, axis=1) new_h, new_state = self.cell(inputs, state, scope=scope) return new_h, tf.concat(new_state, 1) def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, training=True, **kwargs): """ Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`. The result is a list of the outputs produced by those encoders (for each time-step), and their final state. :param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder. :param encoders: list of encoder configurations :param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder) :return:
tensorflow.split
3,900
from tensorflow.python.framework import ops @ops.RegisterShape("Hardlabel")
tensorflow.python.framework.ops.RegisterShape
3,901
import tensorflow as tf pred_label = tf.argmax(logits, axis=-1, output_type=tf.int32) prob = tf.nn.softmax(logits) max_prob = tf.reduce_max(prob, axis=-1)
tensorflow.reduce_max
3,902
import tensorflow as tf # size: num_priors labels = tf.gather(gt_labels, best_target_per_prior_index) labels = tf.where(tf.less(best_target_per_prior, iou_threshold), tf.constant(0, dtype='int64'), labels) # labels[best_target_per_prior < iou_threshold] = 0 # the backgournd id boxes = tf.gather(gt_boxes, best_target_per_prior_index) return boxes, labels class MatchPrior(object): def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold): self.center_form_priors = center_form_priors
tensorflow.gather
3,903
import tensorflow as tf tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": # flags.mark_flag_as_required("input_file") # flags.mark_flag_as_required("bert_config_file") # flags.mark_flag_as_required("output_dir") tf.app.run()
tensorflow.logging.info
3,904
import tensorflow as tf # equivalent int32 value. if tf_input_dtype == tf.string: in0 = tf.strings.to_number(in0, tf.int32) in1 = tf.strings.to_number(in1, tf.int32) add = tf.add(in0, in1, "ADD") sub = tf.subtract(in0, in1, "SUB") # Cast or convert result to the output dtype. if tf_output0_dtype == tf.string: cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0") else:
tensorflow.subtract
3,905
import tensorflow as tf self.out2 = truthoutput_h4 print(self.out.get_shape()) self.recon1 = tf.nn.l2_loss(tgtimg - self.out) self.recon2 = tf.nn.l2_loss(tgtimg - self.out2) if ablation_type == "None": self.loss = self.recon1 + self.recon2 + self.simloss
tensorflow.nn.l2_loss
3,906
import tensorflow as tf dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)] with tf.variable_scope("other"): d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=tf.constant(True)) sess.run([tf.global_variables_initializer()]) tf.get_variable_scope().reuse_variables() d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
tensorflow.constant
3,907
import tensorflow as tf self.forward() total_params() if trainable: self.lr = tf.minimum(config.learning_rate, 0.001 / tf.log(999.) * tf.log(tf.cast(self.global_step, tf.float32) + 1)) self.opt = tf.train.AdamOptimizer(learning_rate = self.lr, beta1 = 0.8, beta2 = 0.999, epsilon = 1e-7) grads = self.opt.compute_gradients(self.loss) gradients, variables = zip(*grads) capped_grads, _ = tf.clip_by_global_norm( gradients, config.grad_clip) self.train_op = self.opt.apply_gradients(
tensorflow.train.AdamOptimizer
3,908
import tensorflow as tf with tf.contrib.summary.create_file_writer( logdir=model_dir, filename_suffix=".host_call").as_default(): with tf.contrib.summary.always_record_summaries(): for i, name in enumerate(metric_names):
tensorflow.contrib.summary.always_record_summaries
3,909
import tensorflow as tf output_projection=(w, b)) targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0] def SampledLoss(labels, inputs): labels = tf.reshape(labels, [-1, 1]) return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes) return tf.nn.seq2seq.model_with_buckets( enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq, softmax_loss_function=SampledLoss) # Now we construct the copy model. batch_size = 8 inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)] weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)] with tf.variable_scope("root"): _, losses = SampleGRUSeq2Seq(inp, out, weights) updates = [] params = tf.global_variables() optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) for i in range(len(buckets)): full_grads = tf.gradients(losses[i], params) grads, _ = tf.clip_by_global_norm(full_grads, 30.0) update = optimizer.apply_gradients(zip(grads, params)) updates.append(update) sess.run([tf.global_variables_initializer()]) steps = 6
tensorflow.ones_like
3,910
import tensorflow as tf # Use indices to lookup pixels in the flat image and restore # channels dim im_flat = tf.reshape(im, tf.stack([-1, channels])) im_flat = tf.to_float(im_flat) i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0) i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1) i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0) i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1) i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0) i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1) i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0) i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1) # Finally calculate interpolated values. x0_f = tf.to_float(x0) x1_f = tf.to_float(x1) y0_f = tf.to_float(y0) y1_f = tf.to_float(y1) z0_f = tf.to_float(z0) z1_f = tf.to_float(z1) # Check the out-of-boundary case. x0_valid = tf.to_float(
tensorflow.gather
3,911
import tensorflow as tf print(labeled_sdfs) mean_x = tf.reduce_mean(labeled_poses[1][:, 0])
tensorflow.reduce_mean
3,912
from tensorflow.python.ops import math_ops Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ with variable_scope.variable_scope( name, 'false_negatives', [predictions, labels]): predictions.get_shape().assert_is_compatible_with(labels.get_shape()) is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1), math_ops.equal(predictions, 0)) return _count_condition(is_false_negative, weights, metrics_collections, updates_collections) def _broadcast_weights(weights, values): """Broadcast `weights` to the same shape as `values`. This returns a version of `weights` following the same broadcast rules as
tensorflow.python.ops.math_ops.equal
3,913
import tensorflow as tf return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.) #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy_loss') tf.summary.scalar('cross_entropy_loss', cross_entropy) loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred)) #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets)) loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1)) loc_loss = tf.identity(loc_loss, name='location_loss') tf.summary.scalar('location_loss', loc_loss) tf.losses.add_loss(loc_loss)
tensorflow.summary.scalar
3,914
import tensorflow as tf tf.get_variable('W_in', [N_rec, N_in], initializer=W_in_initializer, trainable=self.W_in_train) # Recurrent weight matrix: # (gamma (Dale) or normal (non-Dale) initialization) self.W_rec = \ tf.get_variable( 'W_rec', [N_rec, N_rec], initializer=W_rec_initializer, trainable=self.W_rec_train) # Output weight matrix: # (uniform initialization as in pycog) self.W_out = tf.get_variable('W_out', [N_out, N_rec], initializer=W_out_initializer, trainable=self.W_out_train) # Recurrent bias: self.b_rec = tf.get_variable('b_rec', [N_rec], initializer=b_rec_initializer, trainable=self.b_rec_train) # Output bias: self.b_out = tf.get_variable('b_out', [N_out], initializer=b_out_initializer, trainable=self.b_out_train) # ------------------------------------------------ # Non-trainable variables:
tensorflow.get_variable
3,915
import tensorflow as tf gamma = tf.reshape(gamma, [1, c, 1, 1]) beta = tf.reshape(beta, [1, c, 1, 1]) # convert according to the paper: [n, c, h, w, c] to [n, h, w, c] output = tf.reshape(inputdata, [-1, c, h, w]) output = output * gamma + beta output = tf.transpose(output, [0, 2, 3, 1])
tensorflow.reshape
3,916
import tensorflow as tf def _forward(self, x): return self._transpose(x, self.perm) def _inverse(self, y): return self._transpose(y, tf.argsort(self.perm)) def _inverse_log_det_jacobian(self, y): return tf.constant(0, dtype=y.dtype) def _forward_log_det_jacobian(self, x): return tf.constant(0, dtype=x.dtype) def _transpose(self, x, perm): sample_batch_ndims = tf.rank(x) - self.rightmost_transposed_ndims perm = tf.concat([ tf.range(sample_batch_ndims), sample_batch_ndims + perm, ], axis=0) return tf.transpose(a=x, perm=perm)
tensorflow.constant
3,917
import tensorflow as tf else: with tf.io.gfile.GFile(vocab_path, 'rb') as f:
tensorflow.io.gfile.GFile
3,918
import tensorflow as tf buckets = [(4, 4), (8, 8)] perplexities = [[], []] # Results for each bucket. tf.set_random_seed(111) random.seed(111) np.random.seed(111) with self.test_session() as sess: # We use sampled softmax so we keep output projection separate. w = tf.get_variable("proj_w", [24, classes]) w_t = tf.transpose(w) b = tf.get_variable("proj_b", [classes]) # Here comes a sample Seq2Seq model using GRU cells. def SampleGRUSeq2Seq(enc_inp, dec_inp, weights): """Example sequence-to-sequence model that uses GRU cells.""" def GRUSeq2Seq(enc_inp, dec_inp): cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2, state_is_tuple=True) return tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=classes, num_decoder_symbols=classes, embedding_size=24, output_projection=(w, b)) targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0] def SampledLoss(labels, inputs): labels = tf.reshape(labels, [-1, 1]) return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes) return tf.nn.seq2seq.model_with_buckets( enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq, softmax_loss_function=SampledLoss) # Now we construct the copy model.
tensorflow.nn.rnn_cell.GRUCell
3,919
import tensorflow as tf num_units = inputs.get_shape()[-1] with tf.variable_scope(scope):
tensorflow.variable_scope
3,920
import tensorflow as tf # Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail # to all be the same size along the batch dimension. for elem_shape in elem_shapes: if (not elem_shape or not elem_shape[0] or elem_shape[0] != elem_shapes[0][0]): return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop) arg_tuples = zip(*[tf.unstack(elem) for elem in elems]) outputs = [fn(arg_tuple) for arg_tuple in arg_tuples] else: if not isinstance(elems, tf.Tensor): raise ValueError('`elems` must be a Tensor or list of Tensors.') elems_shape = elems.shape.as_list() if not elems_shape or not elems_shape[0]: return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop) outputs = [fn(arg) for arg in tf.unstack(elems)] # Stack `outputs`, which is a list of Tensors or list of lists of Tensors if all([isinstance(output, tf.Tensor) for output in outputs]): return tf.stack(outputs) else: if all([isinstance(output, list) for output in outputs]): if all([all( [isinstance(entry, tf.Tensor) for entry in output_list]) for output_list in outputs]): return [tf.stack(output_tuple) for output_tuple in zip(*outputs)] raise ValueError('`fn` should return a Tensor or a list of Tensors.')
tensorflow.map_fn
3,921
from tensorflow.contrib import layers def _define_vars(self, params): pass def inference_graph(self, data): with ops.device(self.device_assigner): # Compute activations for the neural network. nn_activations = [layers.fully_connected(data, self.params.layer_size)] for _ in range(1, self.params.num_layers): # pylint: disable=W0106 nn_activations.append( layers.fully_connected( nn_activations[-1],
tensorflow.contrib.layers.fully_connected
3,922
import tensorflow as tf training=None, mask=None ): feature_hidden = inputs original_feature_hidden = inputs # flatten inputs if len(original_feature_hidden.shape) > 2: feature_hidden = tf.reshape( feature_hidden, [-1, feature_hidden.shape[-1]] ) # pass it through fc_stack feature_hidden = self.fc_stack( feature_hidden,
tensorflow.reshape
3,923
import tensorflow as tf with tf.Session() as sess: input0_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_INPUT0:0") input1_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_INPUT1:0") output0_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_OUTPUT0:0") output1_tensor = tf.get_default_graph().get_tensor_by_name( "TENSOR_OUTPUT1:0") tf.saved_model.simple_save(sess, model_version_dir + "/model.savedmodel", inputs={ "INPUT0": input0_tensor, "INPUT1": input1_tensor
tensorflow.get_default_graph
3,924
import tensorflow as tf } example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def full_featurespec(): return { 'bounding_box_samples': tf.io.FixedLenFeature([100000, 4], tf.float32), 'depth_renders': tf.io.FixedLenFeature([20, 224, 224, 1], tf.float32), 'mesh_name': tf.io.FixedLenFeature([], tf.string), 'near_surface_samples': tf.io.FixedLenFeature([100000, 4], tf.float32), 'grid': tf.io.FixedLenFeature([32, 32, 32], tf.float32), 'world2grid': tf.io.FixedLenFeature([4, 4], tf.float32), 'surface_point_samples': tf.io.FixedLenFeature([10000, 6], tf.float32) } def parse_tf_example(example_proto):
tensorflow.io.FixedLenFeature
3,925
import tensorflow as tf output = tf.matmul(input, cross_stitch) # need to call .value to convert Dimension objects to normal value input1_shape = list(-1 if s.value is None else s.value for s in input1.shape) input2_shape = list(-1 if s.value is None else s.value for s in input2.shape) output1 = tf.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape) output2 = tf.reshape(output[:, input1_reshaped.shape[1]:], shape=input2_shape) return output1, output2 def main(args): train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 = load_data()
tensorflow.reshape
3,926
import tensorflow as tf np.random.seed(self.seed) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tensorflow.compat.v1.logging.set_verbosity
3,927
from tensorflow.python.ops import nn raise ValueError("Target's dtype should be integer " "Instead got %s." % target.dtype) # sparse_softmax_cross_entropy_with_logits requires [batch_size] target. if len(target.get_shape()) == 2: target = array_ops.squeeze(target, squeeze_dims=[1]) loss_vec = nn.sparse_softmax_cross_entropy_with_logits( labels=target, logits=logits) return loss_vec
tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits
3,928
import tensorflow as tf GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0 GLOBAL_RUNNING_R = [] COORD = tf.train.Coordinator() # declare shared memory
tensorflow.train.Coordinator
3,929
import tensorflow as tf def build_value(self, _input): with tf.variable_scope('VF'): hidden = tf.layers.dense(inputs=_input, units=self.vf_hidden_size,
tensorflow.layers.dense
3,930
import tensorflow as tf # Need to prepare a mask to zero out the padding symbols. # Make a batch_size x max_sequence_len matrix where each # row contains the length repeated max_sequence_len times. lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1) lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len]) # Make a matrix where each row contains [0, 1, ..., max_sequence_len] r = tf.range(0, max_sequence_len, 1) range_row = tf.expand_dims(r, 0) range_tiled = tf.tile(range_row, [batch_size, 1])
tensorflow.tile
3,931
from tensorflow.python.ops import math_ops self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda") def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype)
tensorflow.python.ops.math_ops.cast
3,932
import tensorflow.contrib.graph_editor as ge if x.op and x.op.name is not None: grad_node = tf.stop_gradient(x, name=x.op.name+"_sg") else: grad_node = tf.stop_gradient(x) checkpoints_disconnected[x] = grad_node # partial derivatives to the checkpointed tensors and xs ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys], stop_at_ts=checkpoints, within_ops=fwd_ops) debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints) debug_print("ops_to_copy = %s", ops_to_copy) debug_print("Processing list %s", ys) copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops) # get gradients with respect to current boundary + original x's copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys] boundary = list(checkpoints_disconnected.values()) dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs)
tensorflow.contrib.graph_editor.sgv
3,933
import tensorflow as tf @staticmethod def layergn(inputdata, name, group_size=32, esp=1e-5): """ :param inputdata: :param name: :param group_size: :param esp: :return: """ with tf.variable_scope(name): inputdata = tf.transpose(inputdata, [0, 3, 1, 2]) n, c, h, w = inputdata.get_shape().as_list() group_size = min(group_size, c) inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w]) mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True) inputdata = (inputdata - mean) / tf.sqrt(var + esp) # per-channel gamma and beta gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma') beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta') gamma = tf.reshape(gamma, [1, c, 1, 1]) beta = tf.reshape(beta, [1, c, 1, 1]) # convert according to the paper: [n, c, h, w, c] to [n, h, w, c] output = tf.reshape(inputdata, [-1, c, h, w]) output = output * gamma + beta output = tf.transpose(output, [0, 2, 3, 1]) return output
tensorflow.nn.moments
3,934
import tensorflow as tf print("Setting up dataset reader") image_options = {'resize': True, 'resize_size': IMAGE_SIZE} if FLAGS.mode == 'train': train_dataset_reader = dataset.BatchDatset(train_records, image_options) validation_dataset_reader = dataset.BatchDatset(valid_records, image_options) sess = tf.Session() print("Setting up Saver...") saver = tf.train.Saver() # create two summary writers to show training loss and validation loss in the same graph
tensorflow.Session
3,935
import tensorflow as tf if weights.get_shape().ndims == 1: # Weights has shape [batch_size]. Reshape to [batch_size, 1]. weights = tf.reshape(weights, [-1, 1]) if weights.get_shape().ndims == 0: # Weights is a scalar. Change shape of weights to match logits. weights *= tf.ones_like(logits) return labels, logits, weights, original_shape
tensorflow.ones_like
3,936
import tensorflow as tf index = tf.range(num_sam) divider = tf.constant(resample, dtype=tf.float32)
tensorflow.constant
3,937
from tensorflow.python.ops import array_ops Args: numerator: A scalar `float64` `Tensor`. denominator: A scalar `float64` `Tensor`. name: Name for the returned op. Returns: 0 if `denominator` == 0, else `numerator` / `denominator` """ numerator.get_shape().with_rank_at_most(1) denominator.get_shape().with_rank_at_most(1) return control_flow_ops.cond( math_ops.equal( array_ops.constant(0.0, dtype=dtypes.float64), denominator), lambda: array_ops.constant(0.0, dtype=dtypes.float64), lambda: math_ops.div(numerator, denominator), name=name) def _create_local(name, shape, collections=None, validate_shape=True, dtype=dtypes.float32): """Creates a new local variable. Args: name: The name of the new or existing variable.
tensorflow.python.ops.array_ops.constant
3,938
import tensorflow as tf def testAttentionDecoderStateIsTuple(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2, state_is_tuple=True) inp = [tf.constant(0.5, shape=[2, 2])] * 2 enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state,
tensorflow.constant
3,939
import tensorflow as tf cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False) return tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False) return tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return tf.nn.seq2seq.embedding_attention_seq2seq(
tensorflow.nn.seq2seq.embedding_tied_rnn_seq2seq
3,940
import tensorflow as tf random.seed(111) np.random.seed(111) enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size]) for i in range(num_enc_timesteps)] dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size]) for i in range(num_dec_timesteps)] dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size]) for _ in range(num_dec_timesteps)] targets = [tf.constant(i + 1, tf.int32, shape=[batch_size]) for i in range(num_dec_timesteps)] weights = [tf.constant(1.0, shape=[batch_size]) for i in range(num_dec_timesteps)] def ForwardBackward(enc_inp, dec_inp, feed_previous): scope_name = "fp_{}".format(feed_previous) with tf.variable_scope(scope_name): dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous) net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_name) optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5) update_op = optimizer.minimize(
tensorflow.constant
3,941
import tensorflow as tf # randrom horizon def contra_traj_lossV3(pred, tgt, horizon=12): # Step-wise contrastive loss horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) # pred1, pred2 = tf.split(horizon_pred, 2, axis=0) # tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0) even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = tf.gather(horizon_pred, even) pred2 = tf.gather(horizon_pred, odd) tgt1 = tf.gather(horizon_tgt, even) tgt2 = tf.gather(horizon_tgt, odd) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small))) loss = tf.reduce_mean(loss) return loss
tensorflow.gather
3,942
import tensorflow as tf dataset = dataset.map(squeeze) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def lm1b_preprocess(dataset, training, max_target_length=-1, max_eval_target_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length.""" def target_right_length(_, target): return tf.less(tf.shape(target)[0], max_target_length + 1) def eval_target_right_length(_, target): return tf.less(tf.shape(target)[0], max_eval_target_length + 1) if max_target_length > 0 and training: dataset = dataset.filter(target_right_length) if max_eval_target_length > 0 and not training: dataset = dataset.filter(eval_target_right_length) return dataset # TODO(lukaszkaiser): find a single more abstract way of text pre-processing. @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def wmt_preprocess(dataset, training, max_length=-1, max_eval_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length."""
tensorflow.shape
3,943
import tensorflow as tf else: e -= tf.reduce_max(e, axis=1, keep_dims=True) T = encoder.attn_temperature or 1.0 exp = tf.exp(e / T) * mask weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True) weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1) return weighted_average, weights def no_attention(state, hidden_states, *args, **kwargs): batch_size = tf.shape(state)[0] weighted_average = tf.zeros(shape=tf.stack([batch_size, 0])) weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]]) return weighted_average, weights def average_attention(hidden_states, encoder_input_length, *args, **kwargs): # attention with fixed weights (average of all hidden states) lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1)) mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1]) weights = tf.to_float(mask) / lengths weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1) return weighted_average, weights
tensorflow.stack
3,944
from tensorflow.python.ops import array_ops for config_name, config in test_configs.items(): num_layers = config["num_layers"] num_units = config["num_units"] batch_size = config["batch_size"] seq_length = config["seq_length"] with ops.Graph().as_default(), ops.device("/device:GPU:0"): inputs = seq_length * [ array_ops.zeros([batch_size, num_units], dtypes.float32) ] initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127) cell = rnn_cell.LSTMCell( num_units=num_units, initializer=initializer, state_is_tuple=True) multi_cell = rnn_cell.MultiRNNCell( [cell() for _ in range(num_layers)])
tensorflow.python.ops.array_ops.zeros
3,945
import tensorflow as tf yield (x, y) # yield makes this a generator; a generator function automatically suspends and pauses its execution and state after producing a value (in the end, after the for loop finishes, there are 1000 (x, y) pairs) def gen_epochs(n, num_steps): for i in range(n): yield gen_batch(gen_data(), batch_size, num_steps) '''define placeholders''' x = tf.placeholder(tf.int32, [batch_size, num_steps], name="x") y = tf.placeholder(tf.int32, [batch_size, num_steps], name='y') init_state = tf.zeros([batch_size, state_size]) '''RNN inputs''' rnn_inputs = tf.one_hot(x, num_classes) #rnn_inputs = tf.unstack(x_one_hot, axis=1) '''no longer needed; just use the cell already defined in tensorflow'''
tensorflow.placeholder
3,946
from tensorflow.python.ops import array_ops non_zero_count = math_ops.maximum(count, array_ops.ones_like(count),
tensorflow.python.ops.array_ops.ones_like
3,947
import tensorflow as tf # TODO(koz4k): Translate it to T2TModel or remove. def feed_forward_gaussian_fun(action_space, config, observations): """Feed-forward Gaussian.""" if not isinstance(action_space, gym.spaces.box.Box): raise ValueError("Expecting continuous action space.") mean_weights_initializer = tf.initializers.variance_scaling( scale=config.init_mean_factor) logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10) flat_observations = tf.reshape(observations, [ tf.shape(observations)[0], tf.shape(observations)[1], functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)]) with tf.variable_scope("network_parameters"): with tf.variable_scope("policy"): x = flat_observations for size in config.policy_layers:
tensorflow.random_normal_initializer
3,948
import tensorflow as tf var = tf.concat(tf.unstack(var), axis=0) var = tf.expand_dims(var, dim=0) color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max) var = tf.expand_dims(var[..., 3], dim=3) bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max) return tf.summary.merge([color_s, bw_s]) # TRAINING PROGRESS EVENTS
tensorflow.summary.merge
3,949
import tensorflow as tf self._add_dynamic_cell(reduction_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train) else: self._add_static_cell(reduction_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train, is_reduction=True) else: with tf.variable_scope('normal_cell'): if use_dynamic_arch: self._add_dynamic_cell(normal_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train) else: self._add_static_cell(normal_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train) # Maybe add auxiliary heads if l in aux_head_layers: with tf.variable_scope('aux_head'): aux_logits = self._add_aux_head(*layers[-1], K, is_train) aux_logits_list.append(aux_logits) # Global average pooling (X, w, h, ch) = layers[-1] X = self._add_global_avg_pool(X, w, h, ch) # Add dropout if training if is_train: X = tf.nn.dropout(X, dropout_keep_prob) # Compute logits from X
tensorflow.variable_scope
3,950
import tensorflow as tf encode_params, decode_params = stage.get_params(state) encoded_x, state_update_tensors = stage.encode(x, encode_params) updated_state = stage.update_state(state, state_update_tensors) # Get all values out of TensorFlow as Python constants. This is a trivial # example of communication happening outside of TensorFlow. with self.session(graph=server_graph): (x, decode_params, encoded_x, state, state_update_tensors, updated_state, shape) = self.evaluate_tf_py_list([ x, decode_params, encoded_x, state, state_update_tensors, updated_state, shape ]) client_graph = tf.Graph() with client_graph.as_default(): decoded_x = stage.decode(encoded_x, decode_params, shape=shape) with self.session(graph=client_graph): decoded_x = self.evaluate(decoded_x) return TestData(x, encoded_x, decoded_x, state, state_update_tensors, updated_state) def _non_adaptive_one_to_many_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default():
tensorflow.Graph
3,951
import tensorflow as tf # define train_op gen_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05) dis_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05) # wrapper to make the optimizer work with TPUs if params['use_tpu']: gen_optimizer = tf.contrib.tpu.CrossShardOptimizer(gen_optimizer) dis_optimizer = tf.contrib.tpu.CrossShardOptimizer(dis_optimizer) gan_train_ops = tf.contrib.gan.gan_train_ops(gan_model, gan_loss, gen_optimizer, dis_optimizer) while_loop = tf.contrib.tpu.while_loop if params['use_tpu'] else tf.while_loop
tensorflow.contrib.tpu.CrossShardOptimizer
3,952
from tensorflow.python.framework import ops sensitivity = compute_sensitivity_at_specificity('value') with ops.control_dependencies( [tp_update_op, fn_update_op, tn_update_op, fp_update_op]): update_op = compute_sensitivity_at_specificity('update_op') if metrics_collections: ops.add_to_collections(metrics_collections, sensitivity) if updates_collections: ops.add_to_collections(updates_collections, update_op) return sensitivity, update_op def streaming_precision_at_thresholds(predictions, labels, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes precision values for different `thresholds` on `predictions`.
tensorflow.python.framework.ops.add_to_collections
3,953
import tensorflow as tf ''' Convolution 3D op wrapper, use RELU activation after convolution ''' in_channels = x.get_shape()[-1].value with tf.variable_scope(layer_name): w = tf.get_variable(name='weight', trainable=is_pretrain, shape=[kernel_size[0],kernel_size[1],kernel_size[2],in_channels,out_channels], initializer=tf.contrib.layers.xavier_initializer()) b = tf.get_variable(name='bias', trainable=is_pretrain, shape=[out_channels], initializer=tf.contrib.layers.xavier_initializer()) x = tf.nn.conv3d(x, w, strides=strides, padding='SAME', data_format=data_format, name='conv3d') x = tf.nn.bias_add(x, b, name='bias_add') x = tf.nn.relu(x, name='relu') return x def conv(layer_name, x, out_channels, kernel_size=[3,3], strides=[1,1,1,1], is_pretrain=True): ''' Convolution op wrapper, use RELU activation after convolution Args: layer_name: x: input tensor Returns: 4D tensor ''' # x.get_shape()[-1] : Dimension(3) # x.get_shape()[-1].value : 3
tensorflow.nn.relu
3,954
import tensorflow as tf scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****")
tensorflow.train.init_from_checkpoint
3,955
from tensorflow.contrib.layers.python.layers import utils return update_mean_op, update_second_moment_op def build_no_ops(): return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_second_moment_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, )
tensorflow.contrib.layers.python.layers.utils.constant_value
3,956
import tensorflow as tf q_i = q_value[:, 0] rho_i = tf.reshape(f_i, [-1, 1]) / (self.mu_ph + eps) rho_i_ = tf.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps)
tensorflow.reshape
3,957
import tensorflow as tf def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1]
tensorflow.nn.bias_add
3,958
import tensorflow as tf varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name="varphis")
tensorflow.placeholder
3,959
import tensorflow as tf in_shape = input_tensor.get_shape().as_list() in_channel = in_shape[3] padding = padding.upper() depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, depth_multiplier] w_init = tf.contrib.layers.variance_scaling_initializer() depthwise_filter = tf.get_variable( name='depthwise_filter_w', shape=depthwise_filter_shape, initializer=w_init ) result = tf.nn.depthwise_conv2d( input=input_tensor,
tensorflow.get_variable
3,960
import tensorflow as tf def good(): offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
tensorflow.unstack
3,961
import tensorflow as tf for i in range(self.hparams.num_blocks) ], axis=1) nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size) nearest_hot = tf.reduce_mean(nearest_hot, axis=-2) else: if self.hparams.random_top_k > 1: _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
tensorflow.reduce_mean
3,962
import tensorflow as tf predict = tf.placeholder(tf.float32, shape=[hps.batch_size, 10]) predict_nor, tsne_logit_nor = models(hps, image, FLAGS.RCE_train, logits=False, tsne_logits=True) predict_adv, tsne_logit_adv = models(hps, adv_image, FLAGS.RCE_train, logits=False, tsne_logits=True) # Calculate entropy argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1) normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1) entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot, 1) / normalized_y_nonmaximal + tf.log( normalized_y_nonmaximal) for k in range(10): adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.02 * k + 0.02, RCE_train=FLAGS.RCE_train) #adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.04,RCE_train=FLAGS.RCE_train) sess.run(tf.global_variables_initializer()) saver.restore(sess, ckpt_state.model_checkpoint_path) for i in six.moves.range(FLAGS.eval_batch_count): time_start = time.time() (nor_img,true_label) = sess.run([images,labels]) adv_img = sess.run(adv_image_craft,feed_dict={image:nor_img}) # Local logits (predict_NOR, predict_ADV, logits_part_nor, logits_part_adv) = sess.run( [predict_nor, predict_adv, tsne_logit_nor, tsne_logit_adv], feed_dict={image: nor_img, adv_image: adv_img} )
tensorflow.global_variables_initializer
3,963
import tensorflow as tf e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs) mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32) e *= mask if encoder.attn_norm_fun == 'none': weights = e elif encoder.attn_norm_fun == 'sigmoid': weights = tf.nn.sigmoid(e) elif encoder.attn_norm_fun == 'max': weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1]) else: e -= tf.reduce_max(e, axis=1, keep_dims=True) T = encoder.attn_temperature or 1.0 exp = tf.exp(e / T) * mask weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True) weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1) return weighted_average, weights def no_attention(state, hidden_states, *args, **kwargs): batch_size = tf.shape(state)[0] weighted_average = tf.zeros(shape=tf.stack([batch_size, 0])) weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]]) return weighted_average, weights def average_attention(hidden_states, encoder_input_length, *args, **kwargs):
tensorflow.reduce_sum
3,964
import tensorflow as tf tf.summary.scalar('Gradient Norm', self.norm, collections=['train']) tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train']) tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train']) clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1) pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output) for metric in self.exp_settings['metrics']: for topn in self.exp_settings['metrics_topn']: list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True) metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None) tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train']) weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights) tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train']) self.train_summary = tf.summary.merge_all(key='train') self.eval_summary = tf.summary.merge_all(key='eval') self.saver = tf.train.Saver(tf.global_variables()) def separate_gradient_update(self): denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
tensorflow.summary.scalar
3,965
import tensorflow as tf c = tf.to_int32(tf.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(self.hparams.hidden_size) h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) c_int = self.bit_to_int( c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) c_hot_flat = tf.reshape( c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) h1 = tf.transpose(h1, perm=[1, 0, 2]) h1 = tf.reshape(h1, shape=h1_shape)
tensorflow.one_hot
3,966
import tensorflow as tf final_dim] l3_shape = [ x_shape[0], self.compute_shape(l2_shape[1], self.ff_pool_strides[1][0]), self.compute_shape(l2_shape[2], self.ff_pool_strides[1][1]), self.compute_shape(l2_shape[3], self.ff_pool_strides[1][2]), final_dim] else: l2_shape = tf.identity(x_shape) # Initialize hidden layer activities if self.hidden_init == 'identity': l1_h2 = tf.identity(x) l2_h2 = tf.zeros(l2_shape, dtype=self.dtype) l3_h2 = tf.zeros(l3_shape, dtype=self.dtype) elif self.hidden_init == 'random': l1_h2 = tf.random_normal(x_shape, dtype=self.dtype) l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype) l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype) elif self.hidden_init == 'zeros': l1_h2 = tf.zeros(x_shape, dtype=self.dtype) l2_h2 = tf.zeros(l2_shape, dtype=self.dtype) l3_h2 = tf.zeros(l3_shape, dtype=self.dtype) else: raise RuntimeError
tensorflow.zeros
3,967
import tensorflow as tf output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts))
tensorflow.shape
3,968
import tensorflow as tf b: /job:localhost/replica:0/task:0/cpu:0 I tensorflow/core/common_runtime/simple_placer.cc:818] b: /job:localhost/replica:0/task:0/cpu:0 I tensorflow/core/common_runtime/simple_placer.cc:818] a: /job:localhost/replica:0/task:0/cpu:0 [[22. 28.] [49. 64.] """ # 3. Sometimes we want to know which devices TensorFlow is using. When loading a previously saved model whose graph already has devices pinned, the server may offer the graph different devices to use. To enable this, just set soft placement in the config config = tf.ConfigProto() config.allow_soft_placement = True sess_soft = tf.Session(config=config) # 4. When using the CPU, TensorFlow by default takes up most of the CPU memory. Although that is often what we want, we can be careful about allocating GPU memory. Since TensorFlow does not keep releasing GPU memory, we can, if necessary, set the GPU memory growth option so that the GPU memory allocation grows gradually up to the maximum limit config.gpu_options.allow_growth = True sess_grow = tf.Session(config=config)
tensorflow.ConfigProto
3,969
import tensorflow as tf step, loss), target_spectrogram=target, max_len=target_length) log("Input at step {}: {}".format(step, sequence_to_text(input_seq))) if step % args.embedding_interval == 0 or step == args.tacotron_train_steps or step == 1: # Get current checkpoint state checkpoint_state = tf.train.get_checkpoint_state(save_dir) # Update Projector log("\nSaving Model Character Embeddings visualization..") add_embedding_stats(summary_writer, [model.embedding_table.name], [char_embedding_meta], checkpoint_state.model_checkpoint_path)
tensorflow.train.get_checkpoint_state
3,970
import tensorflow as tf targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64') best_target_per_prior_index = tf.tensor_scatter_nd_update(best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets) # 2.0 is used to make sure every target has a prior assigned best_target_per_prior = tf.tensor_scatter_nd_update(best_target_per_prior, tf.expand_dims(best_prior_per_target_index, 1), tf.ones_like(best_prior_per_target_index, dtype=tf.float32)*2.0) # size: num_priors labels = tf.gather(gt_labels, best_target_per_prior_index) labels = tf.where(tf.less(best_target_per_prior, iou_threshold), tf.constant(0, dtype='int64'), labels) # labels[best_target_per_prior < iou_threshold] = 0 # the backgournd id boxes = tf.gather(gt_boxes, best_target_per_prior_index) return boxes, labels class MatchPrior(object): def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold): self.center_form_priors = center_form_priors
tensorflow.constant
3,971
import tensorflow as tf """ assertions = [] assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)), ['The l2 norm of each prediction quaternion vector should be 1.'])) assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), ['The l2 norm of each label quaternion vector should be 1.'])) with tf.name_scope(name): with tf.control_dependencies(assertions): product = tf.multiply(predictions, labels) internal_dot_products = tf.reduce_sum(product, [1]) logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) return logcost def log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'): """A helper function to compute the mean error between batches of quaternions. The caller is expected to add the loss to the graph.
tensorflow.multiply
3,972
import tensorflow as tf return (tf.convert_to_tensor(value=degree_l), tf.convert_to_tensor(value=order_m))
tensorflow.convert_to_tensor
3,973
import tensorflow as tf def build_trainer(self, child_model): child_model.build_valid_rl() self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) / tf.to_float(child_model.batch_size)) self.reward = self.valid_acc if self.entropy_weight is not None: self.reward += self.entropy_weight * self.sample_entropy self.sample_log_prob = tf.reduce_sum(self.sample_log_prob) self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False) baseline_update = tf.assign_sub( self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward)) with tf.control_dependencies([baseline_update]): self.reward = tf.identity(self.reward) self.loss = self.sample_log_prob * (self.reward - self.baseline)
tensorflow.reduce_sum
3,974
import tensorflow as tf batch_size = tf.shape(encoder_inputs_)[0] time_steps = tf.shape(encoder_inputs_)[1] if embeddings is not None: flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)]) flat_inputs = tf.nn.embedding_lookup(embeddings, flat_inputs) encoder_inputs_ = tf.reshape(flat_inputs, tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value])) if pos_embeddings is not None: pos_inputs_ = tf.range(time_steps, dtype=tf.int32) pos_inputs_ = tf.nn.embedding_lookup(pos_embeddings, pos_inputs_) pos_inputs_ = tf.tile(tf.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1]) encoder_inputs_ = tf.concat([encoder_inputs_, pos_inputs_], axis=2) if other_inputs is not None: encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2) if encoder.use_dropout:
tensorflow.range
3,975
import tensorflow as tf slots.append(s) return slots def apply_gradients(self, grads_and_vars, global_step=None, name=None): assert global_step is None, \ "AccumGradOptimizer doesn't support the option global_step! " \ "Please maintain it yourself." grads_and_vars = FilterNoneGrad().process(grads_and_vars) vs = [] for g, v in grads_and_vars: assert isinstance(g, tf.Tensor) and isinstance(v, tf.Variable), \ "AccumGradOptimizer only works for dense update! " \ "Types of v and g are {} and {}".format(type(v), type(g)) vs.append(v) with tf.control_dependencies(None): slots = self._create_accum_slots(vs) slots_and_vars = [(s, gv[1]) for s, gv in zip(slots, grads_and_vars)] # Create the counter on the same device as the first variable. with tf.variable_scope(self._name), \ vs[0].graph.colocate_with(vs[0]): counter = tf.Variable( 0, name="counter", trainable=False, dtype=tf.int32) with tf.name_scope('AccumGradOptimizer'): ops = [] for s, gv in zip(slots, grads_and_vars): g, v = gv ops.append(s.assign_add(g))
tensorflow.control_dependencies
3,976
import tensorflow as tf def testLogitsNotSqueezed(self): num_classes = 25 images = tf.random_uniform([1, 224, 224, 3]) logits, _ = mobilenet_v1.mobilenet_v1(images, num_classes=num_classes, spatial_squeeze=False) with self.test_session() as sess: tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)])
tensorflow.global_variables_initializer
3,977
import tensorflow as tf # Local logits (predict_NOR, predict_ADV, logits_part_nor, logits_part_adv) = sess.run( [predict_nor, predict_adv, tsne_logit_nor, tsne_logit_adv], feed_dict={image: nor_img, adv_image: adv_img} ) # Local entropy and confidence for nor_img (entropy_test_nor_help, labels_nor_help, confidence_test_nor_help) = sess.run( [entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_NOR} ) # Local entropy and confidence for adv_img (entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run( [entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV} ) entropy_test_adv_all = np.concatenate((entropy_test_adv_all, entropy_test_adv_help), axis=0) confidence_test_adv_all = np.concatenate((confidence_test_adv_all, confidence_test_adv_help), axis=0) entropy_test_nor_all = np.concatenate((entropy_test_nor_all, entropy_test_nor_help), axis=0) confidence_test_nor_all = np.concatenate((confidence_test_nor_all, confidence_test_nor_help), axis=0) logits_nor_all = np.concatenate((logits_nor_all, logits_part_nor), axis=0) labels_nor_all = np.concatenate((labels_nor_all, labels_nor_help), axis=0) logits_adv_all = np.concatenate((logits_adv_all, logits_part_adv), axis=0) labels_adv_all = np.concatenate((labels_adv_all, labels_adv_help), axis=0) labels_true_all = np.concatenate((labels_true_all, np.argmax(true_label, axis=1)), axis=0) L2_distance = np.concatenate((L2_distance,np.sqrt(np.mean(np.square(nor_img-adv_img),axis=(1,2,3)))), axis=0) nor_img_all = np.concatenate((nor_img_all, nor_img), axis=0)
tensorflow.argmax
3,978
import tensorflow as tf tf.summary.scalar("loss", weighted_error) if full_tensorboard_log: tf.summary.histogram("td_error", td_error) # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = tf.group(*update_target_expr) # compute optimization op (potentially with gradient clipping) gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars) if grad_norm_clipping is not None: for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var) with tf.variable_scope("input_info", reuse=False):
tensorflow.group
3,979
import tensorflow as tf b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w)+b
tensorflow.matmul
3,980
import tensorflow as tf if FLAGS.task.reset_policy: # NOTE: reset policy and valuefunc logger.info("Resetting Policy") pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())]) tf.get_default_session().run(tf.variables_initializer(policy.parameters())) pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())]) print ("pol_params:", np.linalg.norm(pol_params), "pol_params_after_reset:", np.linalg.norm(pol_params_after)) logger.info("Resetting Valuefunc") tf.get_default_session().run(tf.variables_initializer(vfn.parameters())) tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters())) tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters())) for p in warmup_policy.parameters(): p.invalidate() for p in warmup_vfn.parameters(): p.invalidate() for p in policy.parameters(): p.invalidate() for p in vfn.parameters(): p.invalidate()
tensorflow.get_default_session
3,981
import tensorflow as tf validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), }) image=tf.decode_raw(features['img_raw'],tf.uint8) label=tf.cast(features['label'],tf.int32) image=tf.reshape(image,[4096,1]) return image,label def get_batch(image,label,batch_size,crop_size): #print(image.shape) #print(label.shape) images,labels=tf.train.shuffle_batch([image,label], batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
tensorflow.decode_raw
3,982
import tensorflow as tf if FLAGS.use_hvd: hvd.init() if FLAGS.reduce_log and (hvd.rank() != 0): tf.logging.set_verbosity(tf.logging.ERROR) FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank())) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_train_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) if FLAGS.recover_dir is not None: if FLAGS.use_hvd: FLAGS.recover_dir = FLAGS.recover_dir if hvd.rank() == 0 else os.path.join(FLAGS.recover_dir, str(hvd.rank())) path_ckpt = os.path.join(FLAGS.output_dir, "checkpoint") path_ckpt_input = os.path.join(FLAGS.output_dir, "checkpoint_input") if FLAGS.ckpt_no is not None and not tf.gfile.Exists(path_ckpt): with tf.gfile.GFile(path_ckpt, "w") as writer: writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no))) writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no))) if FLAGS.ckpt_no_input is not None and not tf.gfile.Exists(path_ckpt_input):
tensorflow.gfile.MakeDirs
3,983
from tensorflow.python.framework import tensor_util else: # NOTE(mrry): We could in principle work out the shape from the # gradients and the attrs, but if we do not know orig_input_shape # statically, then we are unlikely to know the shape of the # gradients either. return [tensor_shape.unknown_shape(ndims=4)] @ops.RegisterShape("Conv2DBackpropFilter") def _Conv2DBackpropFilterShape(op): """Shape function for the Conv2DBackpropFilter op.""" filter_shape = tensor_util.constant_value(op.inputs[1]) if filter_shape is not None: return [tensor_shape.TensorShape(filter_shape.tolist())] else: # NOTE(mrry): We could in principle work out the shape from the # gradients and the attrs, but if we do not know filter_shape # statically, then we are unlikely to know the shape of the # gradients either. return [tensor_shape.unknown_shape(ndims=4)]
tensorflow.python.framework.tensor_util.constant_value
3,984
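A minimal sketch of tensor_util.constant_value: it tries to evaluate a tensor at graph-construction time, returning a NumPy value when the tensor is statically known and None otherwise, which is how the shape function above recovers filter_shape.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

known = tf.constant([2, 3])
unknown = tf.placeholder(tf.int32, shape=[2])
print(tensor_util.constant_value(known))    # -> array([2, 3], dtype=int32)
print(tensor_util.constant_value(unknown))  # -> None (value only known at run time)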
import tensorflow as tf >>> samples.dtype dtype('float32') """ mu, var = self.build_prior_mean_var(test_points, num_latent, True) jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06 L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter) V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples] V = tf.random_normal(V_shape, dtype=L.dtype) samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V) return tf.transpose(samples) @autoflow((tf.float64, [None, None]), (tf.float64, [None, None]), (tf.float64, [None, None])) def compute_posterior_mean_var(self, X, Y, test_points): """Computes the means and variances of the posterior(s). This is just an autoflowed version of
tensorflow.transpose
3,985
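A minimal sketch of tf.transpose: perm gives the new dimension order, matching the (2, 0, 1) permutation applied to var in the snippet above; with no perm the dimensions are simply reversed.

import tensorflow as tf

x = tf.ones([4, 5, 3])
y = tf.transpose(x, perm=[2, 0, 1])  # shape (3, 4, 5)
z = tf.transpose(tf.ones([2, 3]))    # default perm reverses dims -> shape (3, 2)
print(y.shape, z.shape)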
import tensorflow as tf if checkpoint_state is None: return for checkpoint_path in checkpoint_state.all_model_checkpoint_paths: tf.compat.v1.train.remove_checkpoint(checkpoint_path) return
tensorflow.compat.v1.train.remove_checkpoint
3,986
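A minimal sketch of tf.compat.v1.train.remove_checkpoint showing a full save/remove cycle, assuming a TF version where tf.compat.v1 is available; the checkpoint directory is made up for illustration.

import tensorflow as tf

v = tf.Variable(0, name="step")
saver = tf.train.Saver()
tf.gfile.MakeDirs("/tmp/demo_ckpt")  # hypothetical checkpoint directory
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    prefix = saver.save(sess, "/tmp/demo_ckpt/model.ckpt", global_step=0)
tf.compat.v1.train.remove_checkpoint(prefix)  # deletes the .index/.data files for that prefix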
from tensorflow.python.ops import random_ops class ParameterizedTruncatedNormalTest(tf.test.TestCase): _use_gpu = False z_limit = 6.0 # Stop at moment 10 to avoid numerical errors in the theoretical moments. max_moment = 10 def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618): try: # TruncatedNormalMoments requires scipy.stats. # Give up early if we are unable to import it. import scipy.stats # pylint: disable=g-import-not-at-top,unused-variable tf.set_random_seed(seed) with self.test_session(use_gpu=self._use_gpu): samples = random_ops.parameterized_truncated_normal(shape, mean, stddev, minval, maxval).eval() assert (~np.isnan(samples)).all() moments = calculate_moments(samples, self.max_moment) expected_moments = TruncatedNormalMoments(mean, stddev, minval, maxval) num_samples = functools.reduce(lambda x, y: x * y, shape, 1) for i in range(1, len(moments)): self.assertLess( z_test(moments, expected_moments, i, num_samples), self.z_limit) except ImportError as e: tf.logging.warn("Cannot test truncated normal op: %s" % str(e)) def validateKolmogorovSmirnov(self,
tensorflow.python.ops.random_ops.parameterized_truncated_normal
3,987
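A minimal sketch of random_ops.parameterized_truncated_normal, the internal op the test above exercises: unlike tf.truncated_normal it takes explicit lower and upper bounds (positional order: shape, means, stddevs, minvals, maxvals).

import tensorflow as tf
from tensorflow.python.ops import random_ops

# 10000 draws from N(0, 1) truncated to the interval [-0.5, 2.0]
samples = random_ops.parameterized_truncated_normal([10000], 0.0, 1.0, -0.5, 2.0)
with tf.Session() as sess:
    vals = sess.run(samples)
print(vals.min(), vals.max())  # both fall inside [-0.5, 2.0]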
import tensorflow as tf self.expert_N_buffer.add((obs,action_list[i],reward_list[i],obs_,done)) if len(n_step_buffer)== self.n_step_length: #self.expert_buffer.add(obs,action_list[i],reward_list[i],obs_,done_list[i],1) one_step = n_step_buffer[0] self.expert_buffer.add(one_step[0],one_step[1],one_step[2],one_step[3],one_step[4],1) def setup_model(self): with SetVerbosity(self.verbose): self.graph = tf.Graph() with self.graph.as_default(): self.set_random_seed(self.seed) self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph) if self.prioritized_replay: self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha) if self.prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = 100000
tensorflow.Graph
3,988
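A minimal sketch of tf.Graph: ops built inside graph.as_default() are isolated from the global default graph, the same pattern setup_model uses above to keep the model in its own graph and session.

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.constant(2.0)
    y = x * 3.0
with tf.Session(graph=graph) as sess:
    print(sess.run(y))  # -> 6.0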
import tensorflow as tf b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0)) out = tf.matmul(self.flatten, w) + b self.fc1 = tf.nn.relu(out) # fc2 with tf.variable_scope('fc2'): w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048], initializer=he_normal, regularizer=regularizer) b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0)) out = tf.matmul(self.fc1, w) + b self.fc2 = tf.nn.relu(out) # fc3 with tf.variable_scope('fc3'): w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes], initializer=initializer, regularizer=regularizer) b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(1.0)) self.fc3 = tf.matmul(self.fc2, w) + b # Calculate Mean cross-entropy loss with tf.name_scope("loss"): self.predictions = tf.argmax(self.fc3, 1, name="predictions") losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y) regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.loss = tf.reduce_mean(losses) + sum(regularization_losses)
tensorflow.variable_scope
3,989
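A minimal sketch of tf.variable_scope (the API several later rows also target): it namespaces variables created with tf.get_variable and, with reuse=True, lets later code fetch the same variables, as the fc1/fc2/fc3 blocks above rely on.

import tensorflow as tf

with tf.variable_scope("fc_demo"):
    w = tf.get_variable("w", shape=[4, 2], initializer=tf.zeros_initializer())
print(w.name)  # -> fc_demo/w:0

with tf.variable_scope("fc_demo", reuse=True):
    w_again = tf.get_variable("w")  # fetches the existing variable instead of creating one
print(w is w_again)  # -> True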
import tensorflow as tf

    def valid_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
tensorflow.matmul
3,990
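A minimal sketch of tf.matmul: a (batch, features) input multiplied by a (features, units) weight matrix, as in the valid_inference layer above; the shapes and weights here are made up for illustration.

import tensorflow as tf

images = tf.ones([8, 4096])       # batch of 8 flattened inputs
w1 = tf.ones([4096, 128]) * 0.01  # hypothetical weight matrix
b1 = tf.zeros([128])
l1 = tf.nn.relu(tf.matmul(images, w1) + b1)  # shape (8, 128)
print(l1.shape)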
import tensorflow as tf trainable=True)) setattr( self, 'ff_bias_%s' % idx, tf.get_variable( name='%s_ff_bias_%s' % (self.layer_name, idx), dtype=self.dtype, initializer=tf.ones([higher_feats], dtype=self.dtype), trainable=True)) lower_feats = higher_feats # HGRU KERNELS for idx, layer in enumerate(self.hgru_ids): with tf.variable_scope( '%s_hgru_weights_%s' % (self.layer_name, layer)): setattr( self, 'horizontal_kernels_%s' % layer, tf.get_variable( name='%s_horizontal' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=self.hgru_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]], dtype=self.dtype, uniform=self.normal_initializer), trainable=True))
tensorflow.variable_scope
3,991
import tensorflow as tf feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb] feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb] target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb] similarity_emb = top_antecedent_emb * target_emb # [k, c, emb] target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb] pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb] with tf.variable_scope("slow_antecedent_scores"): slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1] slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) # [k, c] return slow_antecedent_scores # [k, c] def get_fast_antecedent_scores(self, top_span_emb): with tf.variable_scope("src_projection"): source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb] target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb] return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k] def flatten_emb_by_sentence(self, emb, text_len_mask): num_sentences = tf.shape(emb)[0] max_sentence_length = tf.shape(emb)[1] emb_rank = len(emb.get_shape()) if emb_rank == 2: flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length]) elif emb_rank == 3: flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)]) else:
tensorflow.variable_scope
3,992
import tensorflow as tf used_mean /= (1. - bn_lag**(step + 1)) used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var)) used_var /= (1. - bn_lag**(step + 1)) else: used_mean, used_var = mean, var cur_mean, cur_var = used_mean, used_var # update variables if train: with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): with ops.colocate_with(mean): new_mean = tf.assign_sub( mean, tf.check_numerics( decay * (mean - cur_mean), "NaN in moving mean.")) with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): with ops.colocate_with(var): new_var = tf.assign_sub(
tensorflow.name_scope
3,993
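A minimal sketch of tf.name_scope: it prefixes op names (unlike tf.variable_scope it does not affect tf.get_variable); the snippet above uses the three-argument form (name, default_name, values) when building the moving-average update ops.

import tensorflow as tf

with tf.name_scope("AssignMovingAvg"):
    a = tf.add(1.0, 2.0)
print(a.name)  # -> AssignMovingAvg/Add:0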
import tensorflow as tf def _checkpoint_var_search(self, checkpoint_path): reader = tf.train.NewCheckpointReader(checkpoint_path) saved_shapes = reader.get_variable_to_shape_map() model_names = tf.model_variables() # Used by tf.slim layers if not len(tf.model_variables()): model_names = tf.global_variables() # Fallback when slim is not used model_names = set([v.name.split(':')[0] for v in model_names]) checkpoint_names = set(saved_shapes.keys()) found_names = model_names & checkpoint_names missing_names = model_names - checkpoint_names shape_conflicts = set() restored = [] with tf.variable_scope('', reuse=True): for name in found_names: # print(tf.global_variables()) # print(name, name in model_names, name in checkpoint_names) var = tf.get_variable(name) var_shape = var.get_shape().as_list() if var_shape == saved_shapes[name]: restored.append(var) else: shape_conflicts.add(name) found_names -= shape_conflicts return (restored, sorted(found_names), sorted(missing_names), sorted(shape_conflicts))
tensorflow.variable_scope
3,994
import tensorflow as tf nfilt_last = audio_nchannels for i, ((ntime, nband, nfilt), (ptime, pband)) in enumerate(zip(cnn_filter_shapes, cnn_pool)): layer_name = 'cnn_{}'.format(i) with tf.variable_scope(layer_name): filters = tf.get_variable('filters', [ntime, nband, nfilt_last, nfilt], initializer=cnn_init, dtype=dtype) biases = tf.get_variable('biases', [nfilt], initializer=tf.constant_initializer(0.1), dtype=dtype) if cnn_rnn_zack: padding = 'SAME' else: padding = 'VALID' conv = tf.nn.conv2d(layer_last, filters, [1, 1, 1, 1], padding=padding) biased = tf.nn.bias_add(conv, biases) convolved = tf.nn.relu(biased) pool_shape = [1, ptime, pband, 1] pooled = tf.nn.max_pool(convolved, ksize=pool_shape, strides=pool_shape, padding='SAME') print('{}: {}'.format(layer_name, pooled.get_shape())) export_feat_tensors[layer_name] = pooled # TODO: CNN dropout? layer_last = pooled nfilt_last = nfilt
tensorflow.nn.relu
3,995
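A minimal sketch of tf.nn.relu: it zeroes out negative entries element-wise, applied above to the bias-added convolution output.

import tensorflow as tf

x = tf.constant([[-2.0, 0.5],
                 [3.0, -0.1]])
y = tf.nn.relu(x)
with tf.Session() as sess:
    print(sess.run(y))  # -> [[0. 0.5] [3. 0.]]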
import tensorflow as tf batch_axis=0, ) layers_without_bos_eos.append(layer_wo_bos_eos) # concatenate the layers lm_embeddings = tf.concat( [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos], axis=1 ) # get the mask op without bos/eos. # tf doesn't support reversing boolean tensors, so cast # to int then back mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32') mask_wo_bos_eos = tf.reverse_sequence( mask_wo_bos_eos, lm_graph.sequence_lengths - 1, seq_axis=1, batch_axis=0, ) mask_wo_bos_eos = mask_wo_bos_eos[:, 1:] mask_wo_bos_eos = tf.reverse_sequence( mask_wo_bos_eos, sequence_length_wo_bos_eos, seq_axis=1, batch_axis=0, ) mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')
tensorflow.reverse_sequence
3,996
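A minimal sketch of tf.reverse_sequence: for each row it reverses only the first seq_lengths[i] entries along seq_axis and leaves the padding untouched, which is how the bilm code above flips variable-length masks.

import tensorflow as tf

x = tf.constant([[1, 2, 3, 0],
                 [4, 5, 0, 0]])
lengths = tf.constant([3, 2])
rev = tf.reverse_sequence(x, lengths, seq_axis=1, batch_axis=0)
with tf.Session() as sess:
    print(sess.run(rev))  # -> [[3 2 1 0] [5 4 0 0]]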
import tensorflow as tf tf.summary.image('score_map', score_maps) tf.summary.image('score_map_pred', f_score * 255) tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1]) tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
tensorflow.summary.image
3,997
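A minimal sketch of tf.summary.image, assuming TF 1.x summaries: it records a [batch, height, width, channels] tensor for TensorBoard, as the training code above does for score and geometry maps; the log directory is made up for illustration.

import tensorflow as tf

imgs = tf.random_uniform([4, 64, 64, 1])  # 4 grayscale 64x64 images
summary_op = tf.summary.image("score_map", imgs, max_outputs=3)
with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/demo_logs", sess.graph)  # hypothetical log dir
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()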
import tensorflow as tf

    features = {'member/name': tf.io.FixedLenFeature([], tf.string),
                'member/encoded': tf.io.FixedLenFeature([], tf.string),
                'member/age': tf.io.FixedLenFeature([], tf.int64),
                'member/height': tf.io.VarLenFeature(tf.float32),
                'member/prefer_prods': tf.io.VarLenFeature(tf.int64)}
    features = tf.io.parse_single_example(example_proto, features)
    images = tf.image.decode_png(features['member/encoded'], channels=3)
    # Note: the PNG originally has 4 channels, but the processing below fails on a
    # 4-channel image, so the previous line reduces it to 3 channels first.
    images = tf.image.random_brightness(images, 0.1)
    images = tf.image.random_saturation(images, 0.7, 1.3)
    images = tf.image.random_contrast(images, 0.6, 1.5)
    images = tf.image.random_flip_left_right(images)
tensorflow.image.decode_png
3,998
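A minimal sketch of tf.image.decode_png: it turns PNG bytes into a uint8 image tensor, and channels=3 drops the alpha channel exactly as the parser above does; the file path is made up for illustration.

import tensorflow as tf

png_bytes = tf.read_file("/tmp/member.png")         # hypothetical PNG file
image = tf.image.decode_png(png_bytes, channels=3)  # shape (height, width, 3)
image = tf.image.random_flip_left_right(image)
with tf.Session() as sess:
    print(sess.run(tf.shape(image)))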
import tensorflow as tf else: log_f_bc = tf.log(f_i_ + eps) # / (f_old + eps)
tensorflow.log
3,999
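A minimal sketch of tf.log: the element-wise natural logarithm, with a small eps added first (as in the snippet above) so that zero probabilities do not produce -inf.

import tensorflow as tf

eps = 1e-6
f_i = tf.constant([0.0, 0.5, 1.0])
log_f = tf.log(f_i + eps)
with tf.Session() as sess:
    print(sess.run(log_f))  # ~ [-13.8, -0.69, 0.0]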