Columns: seed (string, lengths 25–2.89k) | seed_api (string, lengths 14–102) | index (int64, 0–14.8k)
import tensorflow as tf

def f(v):
    t = func(v)
    if t is None:
        return t
    return tf.assign(v, t, use_locking=False).op

super(VariableAssignmentOptimizer, self).__init__(opt, f)
tensorflow.assign
6,600
import tensorflow as tf

prev_c, prev_h = next_c, next_h
query = anchors_w_1.gather(indices)
query = tf.reshape(query, [layer_id, self.lstm_size])
query = tf.tanh(query + tf.matmul(next_h[-1], self.w_attn_2))
query = tf.matmul(query, self.v_attn)
logits = tf.reshape(query, [1, layer_id])
if self.temperature is not None:
    logits /= self.temperature
if self.tanh_constant is not None:
    logits = self.tanh_constant * tf.tanh(logits)
tensorflow.reshape
6,601
import tensorflow as tf

noisy_decode = interpreter.build_decoder(noisy_encoding, model.config, reuse=True, masks=model.mask_list)
loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta)
self.models += [noisy_decode]
return loss

def _tensor_to_image(self, net):
    with tf.name_scope('to_image'):
        if FLAGS.new_blur:
            net = net[..., :self.batch_shape[-1]]
        net = tf.nn.relu(net)
        net = tf.cast(net <= 1, net.dtype) * net * 255
        net = tf.cast(net, tf.uint8)
tensorflow.name_scope
6,602
import tensorflow as tf tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf) tf.summary.scalar('Loss/Entropy', loss_entropy) tf.summary.scalar('Loss/Total', loss) tf.summary.scalar('Var/Epsilon', epsilon_decay) tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode())) tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev())) tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf)) self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False, batch_size=64): reg = None with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_ain = tf.expand_dims(layer_a2, axis=1) out_a, state_final_a = tf.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a) cell_out_a = tf.reshape(out_a, [-1, 256]) mu = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) sigma = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params, state_init_a, state_final_a
tensorflow.layers.dense
6,603
import tensorflow as tf if norm: d = tf.contrib.layers.batch_norm(d) d = lrelu(d,alpha=0.2) return d #def common_deconv2d(layer_input,skip_input, filters,f_size=4,stride=2,dropout_rate=0,name='common_deconv2d'): def common_deconv2d(layer_input,filters,f_size=4,stride=2,padding='SAME',dropout_rate=0,name='common_deconv2d'): """Layers used during upsampling""" with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False u = tf.contrib.layers.conv2d_transpose(layer_input,filters,f_size,stride=stride,padding=padding) if dropout_rate: u = tf.contrib.layers.dropout(u,keep_prob=dropout_rate) u = tf.contrib.layers.batch_norm(u) u = tf.nn.relu(u) # u = tf.contrib.keras.layers.concatenate([skip_input,u]) return u
tensorflow.get_variable_scope
6,604
import tensorflow as tf

images = tf.cast(images, tf.float32) / 255.0
l1 = tf.matmul(images, self.w1) + self.b1
l1 = tf.nn.relu(l1)
l2 = tf.matmul(l1, self.w2) + self.b2
l2 = tf.nn.relu(l2)
l3 = tf.matmul(l2, self.w3) + self.b3
l3 = tf.nn.relu(l3)
out = tf.matmul(l3, self.w4) + self.b4
return out

def test_inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, self.w2) + self.b2
    l2 = tf.nn.relu(l2)
    l3 = tf.matmul(l2, self.w3) + self.b3
    l3 = tf.nn.relu(l3)
    out = tf.matmul(l3, self.w4) + self.b4
    return out

def valid_inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, self.w2) + self.b2
tensorflow.nn.relu
6,605
import tensorflow as tf """ Constant learning rate decay (uses global_step param instead of x) """ return tf.train.piecewise_constant(global_step, *args, **kwargs)
tensorflow.train.piecewise_constant
6,606
import tensorflow as tf print("-------X Y----------") print(X) X = tf.reshape(X, shape=[-1, 32, 36]) print(X) print(Y) Y = tf.reshape(Y, shape=[-1, 6]) print(Y) # Weight Initialization def weight_variable(shape): # tra ve 1 gia tri random theo thuat toan truncated_ normal
tensorflow.reshape
6,607
import tensorflow as tf alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_std = tf.exp(alpha_logstd) # Compute epsilon from {n_samples} standard Gaussian # epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out]) epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out]) hyp_params = tf.get_variable('hyp_params_layer'+str(h), shape=[2], initializer=tf.random_normal_initializer()) l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1]) epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2 # Compute A_{h+1} A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1]) # Compute z_{h}A_{h+1} Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5) Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.) KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2. # Output layer else: F = tf.squeeze(tf.layers.dense(Z, n_out), [2]) return F, KL
tensorflow.matmul
6,608
import tensorflow as tf anchors.generate_detections_per_image_op( softmax_class_outputs[i], box_outputs[i], box_rois[i], features['source_ids'][i], features['image_info'][i], params['test_detections_per_image'], params['test_rpn_post_nms_topn'], params['test_nms'], params['bbox_reg_weights']) ) detections = tf.stack(detections, axis=0) if params['include_mask']: mask_outputs = mask_rcnn_architecture.mask_rcnn_fn( fpn_feats, params, is_training=False, detections=detections) else: (class_outputs, box_outputs, box_rois, class_targets, box_targets, proposal_to_label_map) = mask_rcnn_architecture.faster_rcnn_fn(
tensorflow.stack
6,609
import tensorflow as tf

def update_target_network(self):
    raise NotImplementedError("update target network!")

def get_num_trainable_vars(self):
    raise NotImplementedError("update target network!")

def apply_norm(self, net, activation_fn, phase, layer_num):
    if self.norm_type == 'layer':
        norm_net = tf.contrib.layers.layer_norm(net, center=True, scale=True, activation_fn=activation_fn)
    elif self.norm_type == 'batch':
        norm_net = tf.contrib.layers.batch_norm(net, fused=True, center=True, scale=True,
                                                activation_fn=activation_fn, is_training=phase,
                                                scope='batchnorm_' + str(layer_num))
    elif self.norm_type == 'none' or self.norm_type == 'input_norm':
        norm_net = activation_fn(net)
    else:
        raise ValueError('unknown norm type')
    return norm_net
tensorflow.contrib.layers.layer_norm
6,610
import tensorflow as tf

y = silverman_rule_of_thumb(N)

A = 1/(N*N*tf.sqrt(y))
B = 2.0/(N*tf.sqrt(y+0.5))
tensorflow.sqrt
6,611
import tensorflow as tf pad = tf.tile(pad, [batch_size, 1, 1]) # Fully Character-Level NMT without Explicit Segmentation, Lee et al. 2016 inputs = [] for w, filter_size in enumerate(encoder.convolutions, 1): filter_ = get_variable('filter_{}'.format(w), [w, encoder.embedding_size, filter_size]) if w > 1: right = (w - 1) // 2 left = (w - 1) - right pad_right = tf.tile(pad, [1, right, 1]) pad_left = tf.tile(pad, [1, left, 1]) inputs_ = tf.concat([pad_left, encoder_inputs_, pad_right], axis=1) else: inputs_ = encoder_inputs_ inputs_ = tf.nn.convolution(inputs_, filter=filter_, padding='VALID') inputs.append(inputs_) encoder_inputs_ = tf.concat(inputs, axis=2) # if encoder.convolution_activation.lower() == 'relu':
tensorflow.tile
6,612
import tensorflow as tf """ self._batch_env = batch_env batch_dims = (len(self._batch_env),) print('*~*' * 60) observ_shape = self._parse_shape(self._batch_env.observation_space) print(observ_shape) observ_dtype = self._parse_dtype(self._batch_env.observation_space) print(observ_dtype) action_shape = self._parse_shape(self._batch_env.action_space) print(action_shape) action_dtype = self._parse_dtype(self._batch_env.action_space) print(action_dtype) with tf.variable_scope('env_temporary'): self._observ = tf.Variable( lambda: tf.zeros(batch_dims + observ_shape, observ_dtype), name='observ', trainable=False) self._action = tf.Variable( lambda: tf.zeros(batch_dims + action_shape, action_dtype), name='action', trainable=False) self._reward = tf.Variable( lambda: tf.zeros(batch_dims, tf.float32), name='reward', trainable=False) self._done = tf.Variable( lambda: tf.cast(tf.ones(batch_dims), tf.bool),
tensorflow.variable_scope
6,613
import tensorflow as tf

def viz3(name, a, b, c):
    with tf.name_scope(name):
        im = tf.concat([a, b, c], axis=3)
        im = tf.transpose(im, [0, 2, 3, 1])
        im = (im + 1.0) * 128
        im = tf.clip_by_value(im, 0, 255)
        im = tf.cast(im, tf.uint8, name='viz')
    tf.summary.image(name, im, max_outputs=50)

# use the initializers from torch
with argscope([Conv2D, Deconv2D], use_bias=False,
              W_init=tf.random_normal_initializer(stddev=0.02)), \
        argscope([Conv2D, Deconv2D, InstanceNorm], data_format='NCHW'), \
tensorflow.cast
6,614
import tensorflow as tf

    Returns:
        tuple (final output, loss)
    '''
    y = output
    if add_bias:
        bias = tf.Variable([0.0])
        y = output + bias
    loss = tf.reduce_sum(tf.square(y - target))
    return y, loss

def logistic_objective(output, target, add_bias=True):
    ''' Creates final model output and loss for logistic objective

    Args:
tensorflow.square
6,615
import tensorflow as tf

def sparse_maxnorm_update(var_matrix, indices, maxnorm=1.0):
    '''Sparse update operation that ensures selected rows in var_matrix
    do not have a Euclidean norm greater than maxnorm. Rows that exceed
    it are scaled to length.

    Args:
        var_matrix: 2D mutable tensor (Variable) to operate on
        indices: 1D tensor with the row indices to constrain
        maxnorm: the maximum Euclidean norm

    Returns:
        An operation that will update var_matrix when run in a Session
    '''
    selected_rows = tf.nn.embedding_lookup(var_matrix, indices)
    row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))
    scaling = maxnorm / tf.maximum(row_norms, maxnorm)
    scaled = selected_rows * tf.expand_dims(scaling, 1)
    return tf.scatter_update(var_matrix, indices, scaled)

def dense_maxnorm_update(var_matrix, maxnorm=1.0):
    '''Dense update operation that ensures all rows in var_matrix
    do not have a Euclidean norm greater than maxnorm. Rows that exceed
    it are scaled to length.

    Args:
        var_matrix: 2D mutable tensor (Variable) to operate on
        maxnorm: the maximum Euclidean norm
tensorflow.square
6,616
import tensorflow as tf A = 1/(N*N*tf.sqrt(y)) B = 2.0/(N*tf.sqrt(y+0.5)) A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)/(4*y) B1 = euclidean_norm_squared(X, axis=1)/(2+4*y) return 1/tf.sqrt(1+y) + A*tf.reduce_sum(__phi(A1)) - B*tf.reduce_sum(__phi(B1)) def cw(X, y=None): D = tf.cast(tf.shape(X)[1], tf.float32) N = tf.cast(tf.shape(X)[0], tf.float32) if y is None: y = silverman_rule_of_thumb(N) K = 1/(2*D-3) A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2) A = (1/(N**2)) * tf.reduce_sum((1/tf.sqrt(y + K*A1)))
tensorflow.shape
6,617
import tensorflow as tf

y_h_diff = y[:, 1:] - y[:, :-1]
y_w_diff = y[:, :, 1:] - y[:, :, :-1]

h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff))
w_diff = tf.abs(tf.abs(x_w_diff) - tf.abs(y_w_diff))

return h_diff + tf.transpose(w_diff)
tensorflow.abs
6,618
import tensorflow as tf """ if weight_init is None: num_features = tensor.get_shape()[-1].value weight_init = tf.truncated_normal([num_features, size], stddev=0.01) if bias_init is None: bias_init = tf.zeros([size]) with tf.name_scope(name, 'fully_connected', [tensor]): w = tf.Variable(weight_init, name='w', dtype=tf.float32) b = tf.Variable(bias_init, name='b', dtype=tf.float32)
tensorflow.zeros
6,619
import tensorflow as tf shape: The shape of the variable to get. dtype: The dtype of the variable to get. Note that if this is a low precision dtype, the variable will be created as a tf.float32 variable, then cast to the appropriate dtype *args: Additional arguments to pass unmodified to getter. **kwargs: Additional keyword arguments to pass unmodified to getter. Returns: A variable which is cast to fp16 if necessary. """ if dtype in CASTABLE_TYPES: var = getter(name, shape, tf.float32, *args, **kwargs) return tf.cast(var, dtype=dtype, name=name + '_cast') else: return getter(name, shape, dtype, *args, **kwargs) def _model_variable_scope(self): """Returns a variable scope that the model should be created under. If self.dtype is a castable type, model variable will be created in fp32 then cast to self.dtype before being used. Returns: A variable scope for the model. """
tensorflow.cast
6,620
import tensorflow as tf def softmax_mask(val, mask): return -INF * (1 - tf.cast(mask, tf.float32)) + val def pointer(inputs, state, hidden, mask, scope="pointer"): with tf.variable_scope(scope): u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [1, tf.shape(inputs)[1], 1]), inputs], axis=2) #[N,PL,2d] s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(tf.squeeze(s, [2]), mask)#[N,PL] a = tf.expand_dims(tf.nn.softmax(s1), axis=2)#[N,PL,1] res = tf.reduce_sum(a * inputs, axis=1) return res, s1 # attention_sum probability def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"): with tf.variable_scope(scope): d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train) s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(tf.squeeze(s, [2]), mask) a = tf.expand_dims(tf.nn.softmax(s1), axis=2) res = tf.reduce_sum(a * memory, axis=1)
tensorflow.reduce_sum
6,621
import tensorflow as tf _to_dataset(input_ids), _to_dataset(token_type_ids), _to_dataset(attention_mask), _to_dataset(labels), ) ) return dataset def _filter(self, dataset: tf.data.Dataset, do_filer=True, max_sequence_length=512, **kwargs) -> tf.data.Dataset: if not do_filer: return dataset dataset = dataset.filter(lambda a, b, c, y: tf.size(a) <= max_sequence_length) return dataset def _to_dict(self, dataset: tf.data.Dataset, to_dict=True, **kwargs) -> tf.data.Dataset: num_parallel_calls = kwargs.get("num_parallel_calls", utils.AUTOTUNE) if not to_dict: dataset = dataset.map( lambda a, b, c, y: ((a, b, c), y), num_parallel_calls=num_parallel_calls, ) return dataset
tensorflow.size
6,622
import tensorflow as tf

tf.scalar_summary('nll', nll)

init_op = tf.initialize_all_variables()

# from http://stackoverflow.com/a/35907755/1199693
config = tf.ConfigProto(graph_options=tf.GraphOptions(
    # optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L2)))  # L2 does not work (probably removed)
    optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1)))

# start session
with tf.Session(config=config) as sess:
    # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
    summarize_merged = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter('./train/%i' % int(time.time()), sess.graph)
    # Run the init operation.
    sess.run(init_op)

    true_vars = {}
    for v in variables:
        key = v.name[:v.name.find(':')]
        true_vars[key] = v.eval()

    true_vars['m0'] = m0.eval()
tensorflow.merge_all_summaries
6,623
import tensorflow as tf ni = len(cell_inputs + blocks) b = len(blocks) # Count usage of inputs block_uses = [] for bi in range(b): idx1 = cell_arch[bi][0] idx2 = cell_arch[bi][2] block_use = tf.one_hot(idx1, ni, dtype=tf.int32) + tf.one_hot(idx2, ni, dtype=tf.int32) block_uses.append(block_use) block_uses = tf.add_n(block_uses) unused_indices = tf.reshape(tf.cast(tf.where(tf.equal(block_uses, 0)), tf.int32), [-1]) num_out_blocks = tf.size(unused_indices) # Select only unused blocks with tf.variable_scope('select'): stacked_blocks = tf.stack(cell_inputs + blocks) out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0) out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4)) # Combine to constant channels with tf.variable_scope('combine'):
tensorflow.equal
6,624
import tensorflow as tf

rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, initializer=initializers["w"])
rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, initializer=initializers["w"])

return TrainableVRNN(
tensorflow.nn.rnn_cell.LSTMCell
6,625
import tensorflow as tf

x = tf.cast(values, dtype=tf.int32)
y = tf.cast(answers, dtype=tf.int32)
tensorflow.cast
6,626
import tensorflow as tf

    layer_wo_bos_eos,
    lm_graph.sequence_lengths - 1,
    seq_axis=1,
    batch_axis=0,
)
layer_wo_bos_eos = layer_wo_bos_eos[:, 1:, :]
layer_wo_bos_eos = tf.reverse_sequence(
    layer_wo_bos_eos,
    sequence_length_wo_bos_eos,
    seq_axis=1,
    batch_axis=0,
)
tensorflow.reverse_sequence
6,627
import tensorflow as tf

    output_width: The width of the image after preprocessing.
    resize_side_min: The lower bound for the smallest side of the image for
        aspect-preserving resizing.
    resize_side_max: The upper bound for the smallest side of the image for
        aspect-preserving resizing.

  Returns:
    A preprocessed image.
  """
  resize_side = tf.random_uniform(
      [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)

  image = _aspect_preserving_resize(image, resize_side)
  image = _random_crop([image], output_height, output_width)[0]
  image.set_shape([output_height, output_width, 3])
  image = tf.to_float(image)
  image = tf.image.random_flip_left_right(image)
  return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
tensorflow.random_uniform
6,628
import tensorflow as tf

preprocessed_resized_image, true_image_shape = model_preprocess_fn(
    tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))
tensorflow.cast
6,629
import tensorflow as tf

        'grid': _float_feature(d['grid']),
        'world2grid': _float_feature(d['world2grid']),
        'surface_point_samples': _float_feature(d['surface_point_samples'])
    }
    example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return example_proto.SerializeToString()

def full_featurespec():
    return {
        'bounding_box_samples': tf.io.FixedLenFeature([100000, 4], tf.float32),
        'depth_renders': tf.io.FixedLenFeature([20, 224, 224, 1], tf.float32),
        'mesh_name': tf.io.FixedLenFeature([], tf.string),
        'near_surface_samples': tf.io.FixedLenFeature([100000, 4], tf.float32),
        'grid': tf.io.FixedLenFeature([32, 32, 32], tf.float32),
        'world2grid': tf.io.FixedLenFeature([4, 4], tf.float32),
        'surface_point_samples': tf.io.FixedLenFeature([10000, 6], tf.float32)
    }
tensorflow.io.FixedLenFeature
6,630
from tensorflow.python.framework import ops var_labels, update_var_labels = streaming_covariance( labels, labels, weights=weights, name='variance_labels') pearson_r = _safe_div( cov, math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)), 'pearson_r') with ops.control_dependencies( [update_cov, update_var_predictions, update_var_labels]): update_op = _safe_div(update_cov, math_ops.mul( math_ops.sqrt(update_var_predictions), math_ops.sqrt(update_var_labels)), 'update_op') if metrics_collections:
tensorflow.python.framework.ops.control_dependencies
6,631
import tensorflow as tf

N, PL, QL, CL, d, dc, nh = self._params()
if self.config.use_position_attn:
    start_logits = tf.squeeze(
        conv(self._attention(tf.concat([self.enc[1], self.enc[2]], axis=-1), name="attn1"),
             1, bias=False, name="start_pointer"), -1)
    end_logits = tf.squeeze(
        conv(self._attention(tf.concat([self.enc[1], self.enc[3]], axis=-1), name="attn2"),
             1, bias=False, name="end_pointer"), -1)
tensorflow.concat
6,632
import tensorflow as tf f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f if feature: features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature(feature.label_id) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
tensorflow.train.Features
6,633
from tensorflow.python.framework import dtypes

       if the function returns multiple values; the Operation if the
       function returns no values.

  Raises:
    ValueError: if the arguments are invalid.
  """
  if len(inputs) != len(sig.input_arg):
    raise ValueError("Expected number of arguments: %d, received: %d" %
                     (len(sig.input_arg), len(inputs)))
  name = kwargs.pop("name", None)
  attrs = _parse_kwargs_as_attrs(**kwargs)
  g = ops.get_default_graph()
  func_name = sig.name
  output_types = [dtypes.DType(x.type) for x in sig.output_arg]
  with ops.name_scope(name, func_name, inputs) as name:
    op = g.create_op(
        func_name, list(inputs), output_types, name=name, attrs=attrs,
        compute_shapes=False)
  setattr(op, "_sig", sig)  # Remember the signature.
  if op.outputs:
    if len(op.outputs) == 1:
      return op.outputs[0]
tensorflow.python.framework.dtypes.DType
6,634
import tensorflow as tf
tensorflow.app.flags.DEFINE_string
6,635
import tensorflow as tf res3 = sess.run(d3) self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testOne2ManyRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp_dict = {} dec_inp_dict["0"] = [ tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec_inp_dict["1"] = [ tf.constant(i, tf.int32, shape=[2]) for i in range(4)] dec_symbols_dict = {"0": 5, "1": 6} cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq( enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(outputs_dict["0"]) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run(outputs_dict["1"]) self.assertEqual(4, len(res)) self.assertEqual((2, 6), res[0].shape) res = sess.run([state_dict["0"]]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape)
tensorflow.nn.rnn_cell.BasicLSTMCell
6,636
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('ws_save_path', './models_ws/model.ckpt', 'WS: model\'s save path')
tf.app.flags.DEFINE_float('ws_prune_ratio', 0.75, 'WS: target pruning ratio')
tf.app.flags.DEFINE_string('ws_prune_ratio_prtl', 'optimal',
                           'WS: pruning ratio protocol (\'uniform\' | \'heurist\' | \'optimal\')')
tf.app.flags.DEFINE_integer('ws_nb_rlouts', 200, 'WS: # of roll-outs for the RL agent')
tf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50,
                            'WS: minimal # of roll-outs for the RL agent to start training')
tensorflow.app.flags.DEFINE_string
6,637
import tensorflow as tf encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride)) if encoder.highway_layers: x = encoder_inputs_ for j in range(encoder.highway_layers): size = x.shape[2].value with tf.variable_scope('highway_{}'.format(j + 1)): g = tf.layers.dense(x, size, activation=tf.nn.sigmoid, use_bias=True, name='g') y = tf.layers.dense(x, size, activation=tf.nn.relu, use_bias=True, name='y') x = g * y + (1 - g) * x encoder_inputs_ = x # Contrary to Theano's RNN implementation, states after the sequence length are zero # (while Theano repeats last state) inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob
tensorflow.layers.dense
6,638
import tensorflow as tf if optimizer == 'sgd': lr = tf.train.exponential_decay( learning_rate, global_step, 1, learning_rate_decay ) train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step) elif optimizer == 'adam': train_step = tf.train.GradientDescentOptimizer(learning_rate).\ minimize(loss, global_step=global_step) else: raise NotImplementedError('activation not recognized') # init op init_op = tf.global_variables_initializer() # Save into class members self.X = X
tensorflow.train.GradientDescentOptimizer
6,639
import tensorflow as tf def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True, rewards=None): batch_size = tf.shape(targets)[0] time_steps = tf.shape(targets)[1] logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value])) targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size])) crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_) crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps])) if rewards is not None: crossent *= tf.stop_gradient(rewards) log_perp = tf.reduce_sum(crossent * weights, axis=1) if average_across_timesteps: total_size = tf.reduce_sum(weights, axis=1) total_size += 1e-12 # just to avoid division by 0 for all-0 weights log_perp /= total_size cost = tf.reduce_sum(log_perp) if average_across_batch: return cost / tf.to_float(batch_size) else: return cost
tensorflow.reduce_sum
6,640
import tensorflow as tf def testOne2ManyRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp_dict = {} dec_inp_dict["0"] = [ tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec_inp_dict["1"] = [ tf.constant(i, tf.int32, shape=[2]) for i in range(4)] dec_symbols_dict = {"0": 5, "1": 6} cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq( enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(outputs_dict["0"]) self.assertEqual(3, len(res))
tensorflow.constant
6,641
import tensorflow as tf import tensorflow as tf from nvidia_info import get_memory_info memory_info = get_memory_info(0) total_memory = memory_info[1] memory_limit = int(fraction*total_memory) print(memory_info) if tf.version.VERSION[0]=="2": gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)]) else: gpu_options = tf.GPUOptions(allow_growth=allow_growth, per_process_gpu_memory_fraction=fraction) config = tf.ConfigProto(gpu_options=gpu_options) session = tf.Session(config=config) K.set_session(session) def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False): '''Takes as input the model, and returns a model based on the number of GPUs available on the machine or alternatively the 'gpus' user input.
tensorflow.GPUOptions
6,642
import tensorflow as tf

self.retrieve_indices = tf.make_template(
tensorflow.make_template
6,643
import tensorflow as tf

X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('d_2', X, 512, size=1, stride=1, padding="SAME")
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('d_3', X, 512, size=1, stride=1, padding="SAME")
tensorflow.nn.leaky_relu
6,644
from tensorflow.python.framework import ops

      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose shape is broadcastable to the first
      [D1, ... DN] dimensions of `predictions_idx` and `labels`.
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  default_name = _at_k_name('false_positive', k, class_id=class_id)
  with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
    fp = _sparse_false_positive_at_k(
        predictions_idx=predictions_idx, labels=labels, class_id=class_id,
        weights=weights)
    batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))

    var = contrib_variables.local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=scope)
    return var, state_ops.assign_add(var, batch_total_fp, name='update')

def _sparse_false_negative_at_k(predictions_idx, labels, class_id=None, weights=None):
tensorflow.python.framework.ops.name_scope
6,645
import tensorflow as tf

    return norm_dist, params

def build_cnet(self, state_in, name, reuse=False):
    reg = tf.contrib.layers.l2_regularizer(1e-3)

    with tf.variable_scope(name, reuse=reuse):
        layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
        layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)

        vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)

    params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
    return vf, params

# Update the network
def train(self, s, a, r, adv):
    start = time()
tensorflow.layers.dense
6,646
import tensorflow as tf xs = (tf.split(x, n_gpu, 0) for x in xs) for i, xs in enumerate(zip(*xs)): do_reuse = True if i > 0 else None with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse): clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse) if lm_coef > 0: train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses) else: train_loss = tf.reduce_mean(clf_losses) params = find_trainable_variables("model") grads = tf.gradients(train_loss, params) grads = list(zip(grads, params))
tensorflow.reduce_mean
6,647
import tensorflow as tf auc += ((x - prev_x) * (y + prev_y) / 2.) prev_x = x prev_y = y return auc def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'):
tensorflow.array_ops.transpose
6,648
from tensorflow.python.framework import ops cm_diag = math_ops.to_float(array_ops.diag_part(total_cm)) denominator = sum_over_row + sum_over_col - cm_diag # If the value of the denominator is 0, set it to 1 to avoid # zero division. denominator = math_ops.select( math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator)) iou = math_ops.div(cm_diag, denominator) return math_ops.reduce_mean(iou, name=name) mean_iou = compute_mean_iou('mean_iou') if metrics_collections: ops.add_to_collections(metrics_collections, mean_iou) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_iou, update_op def _next_array_size(required_size, growth_factor=1.5): """Calculate the next size for reallocating a dynamic array. Args: required_size: number or tf.Tensor specifying required array capacity. growth_factor: optional number or tf.Tensor specifying the growth factor between subsequent allocations.
tensorflow.python.framework.ops.add_to_collections
6,649
from tensorflow.python.framework import ops

    tf_index = math_ops.cast(tf_index, dtypes.int32)

    # Now, we have the implicit threshold, so compute the specificity:
    return math_ops.div(tn[tf_index],
                        tn[tf_index] + fp[tf_index] + kepsilon,
                        name)

  specificity = compute_specificity_at_sensitivity('value')
  with ops.control_dependencies(
      [tp_update_op, fn_update_op, tn_update_op, fp_update_op]):
    update_op = compute_specificity_at_sensitivity('update_op')

  if metrics_collections:
    ops.add_to_collections(metrics_collections, specificity)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return specificity, update_op

def streaming_sensitivity_at_specificity(
    predictions, labels, specificity, weights=None, num_thresholds=200,
    metrics_collections=None, updates_collections=None, name=None):
  """Computes the specificity at a given sensitivity.
tensorflow.python.framework.ops.add_to_collections
6,650
import tensorflow as tf

    self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss

denoise_gradients = tf.gradients(self.exam_loss, denoise_params)
ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params)
if self.hparams.max_gradient_norm > 0:
tensorflow.gradients
6,651
import tensorflow as tf out_width = out_size[2] grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far) grid = tf.expand_dims(grid, 0) grid = tf.reshape(grid, [-1]) grid = tf.tile(grid, tf.stack([num_batch])) grid = tf.reshape(grid, tf.stack([num_batch, 4, -1])) # Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1). t_g = tf.matmul(theta, grid) z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1]) y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1]) x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1]) z_s_flat = tf.reshape(z_s, [-1]) y_s_flat = tf.reshape(y_s, [-1]) x_s_flat = tf.reshape(x_s, [-1]) input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, z_s_flat, out_size) output = tf.reshape( input_transformed,
tensorflow.slice
6,652
import tensorflow as tf

        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]

        return self.b_module.bspmm(sp_ids=sp_indices, sp_values=sp_values, sp_shape=sp_shape,
                                   rhs=dense_matrices, adjoint_a=adjoint_a, adjoint_b=adjoint_b)

class BatchedSpMDT:
    def __init__(self):
        self.b_module = tf.load_op_library('./batched.so')

    def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
tensorflow.load_op_library
6,653
import tensorflow as tf

                        'use_nccl', True,
                        'Whether to use nccl all-reduce primitives where possible')

# Distributed training flags.
tf.flags.DEFINE_string('job_name', '', 'One of "ps", "worker", "". Empty for local training')
tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts')
tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts')
tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job')
tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers')
tf.flags.DEFINE_boolean('cross_replica_sync', True, '')

# Summary and Save & load checkpoints.
tf.flags.DEFINE_integer('summary_verbosity', 0,
                        """Verbosity level for summary ops. Pass 0 to disable
                        both summaries and checkpoints.""")
tf.flags.DEFINE_integer('save_summaries_steps', 0,
                        """How often to save summaries for trained models.
                        Pass 0 to disable summaries.""")
tensorflow.flags.DEFINE_string
6,654
import tensorflow as tf parser.add_argument('--model_path', type=str, default='', help='the path of checkpoint file') args = parser.parse_args() def model(): x = tf.placeholder(tf.float32, [None, 784], name='x') gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth') with tf.variable_scope('layer1'): w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer()) b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0)) h1 = tf.nn.relu(tf.matmul(x, w1) + b1) with tf.variable_scope('layer2'): w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer()) b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0)) h2 = tf.nn.relu(tf.matmul(h1, w2) + b2) with tf.variable_scope('layer3'): w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer()) b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0)) y = tf.matmul(h2, w3) + b3 # losses cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y)) # optimizer optimizer = tf.train.GradientDescentOptimizer(args.lr) # define one-step train ops train_op = optimizer.minimize(cross_entropy) return x, y, gt, train_op if __name__ == "__main__": max_train_step = args.max_train_step
tensorflow.variable_scope
6,655
import tensorflow as tf print("Loading Weights") weights = np.load(self.load_weights_path) init_state_initializer = tf.constant_initializer(weights['init_state']) W_in_initializer = tf.constant_initializer(weights['W_in']) W_rec_initializer = tf.constant_initializer(weights['W_rec']) W_out_initializer = tf.constant_initializer(weights['W_out']) b_rec_initializer = tf.constant_initializer(weights['b_rec']) b_out_initializer = tf.constant_initializer(weights['b_out']) self.input_connectivity_mask = weights['input_Connectivity'] self.recurrent_connectivity_mask = weights['rec_Connectivity'] self.output_connectivity_mask = weights['output_Connectivity'] self.init_state = tf.get_variable('init_state', [N_batch, N_rec], initializer=init_state_initializer) # ------------------------------------------------ # Trainable variables: # Weight matrices and bias weights # ------------------------------------------------ # Input weight matrix: # (uniform initialization as in pycog) self.W_in = \ tf.get_variable('W_in', [N_rec, N_in], initializer=W_in_initializer, trainable=self.W_in_train) # Recurrent weight matrix:
tensorflow.get_variable
6,656
import tensorflow as tf for key, value in metrics.items(): tf.summary.scalar(key, value) # learning rate schedule self.global_step = tf.train.get_or_create_global_step() lrn_rate, self.nb_iters_train = self.setup_lrn_rate(self.global_step) # overall pruning ratios of trainable & maskable variables pr_trainable = calc_prune_ratio(self.trainable_vars) pr_maskable = calc_prune_ratio(self.maskable_vars) tf.summary.scalar('pr_trainable', pr_trainable) tf.summary.scalar('pr_maskable', pr_maskable) # build masks and corresponding operations for weight sparsification self.masks, self.prune_op = self.__build_masks() # optimizer & gradients optimizer_base = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum) if not FLAGS.enbl_multi_gpu: optimizer = optimizer_base else: optimizer = mgw.DistributedOptimizer(optimizer_base)
tensorflow.summary.scalar
6,657
from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf
import numpy as np
import datetime
import os
import argparse
import matplotlib.pyplot as plt
from matplotlib import gridspec
from tensorflow.examples.tutorials.mnist import input_data

# Get the MNIST data
mnist = input_data.read_data_sets('./Data', one_hot=True)

# Parameters
input_dim = 784
n_l1 = 1000
n_l2 = 1000
z_dim = 10
batch_size = 100
n_epochs = 1000
learning_rate = 0.001
tensorflow.examples.tutorials.mnist.input_data.read_data_sets
6,658
import tensorflow as tf

# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
    [tf.nn.l2_loss(v) for v in tf.trainable_variables()
     if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
tensorflow.trainable_variables
6,659
import tensorflow as tf

obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t"))
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1"))
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

# q network evaluation
tensorflow.placeholder
6,660
import tensorflow as tf start_resolutions=(4, 4), scale_base=2, num_resolutions=3)) fake_loss = tf.reduce_sum(tf.square(x)) grad_norms = [ _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')), _get_grad_norm(
tensorflow.trainable_variables
6,661
import tensorflow as tf weights = tf.to_float(weights) weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1) return weighted_average, weights def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None, context=None, **kwargs): batch_size = tf.shape(state)[0] attn_length = tf.shape(hidden_states)[1] if context is not None and encoder.use_context: state = tf.concat([state, context], axis=1) state_size = state.get_shape()[1].value with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)): encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1)) if pos is not None: pos = tf.reshape(pos, [-1, 1]) pos = tf.minimum(pos, encoder_input_length - 1)
tensorflow.concat
6,662
import tensorflow as tf

def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name):
    with tf.variable_scope(name):
        if pnorm == 2:
            pwr = tf.square(inpOp)
        else:
            pwr = tf.pow(inpOp, pnorm)

        subsamp = tf.nn.avg_pool(pwr,
                                 ksize=[1, kH, kW, 1],
                                 strides=[1, dH, dW, 1],
                                 padding=padding)
        subsamp_sum = tf.multiply(subsamp, kH*kW)

        if pnorm == 2:
            out = tf.sqrt(subsamp_sum)
tensorflow.nn.avg_pool
6,663
import tensorflow as tf "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from "
tensorflow.flags.DEFINE_string
6,664
import tensorflow as tf sess.run(my_var.initializer) sess.run(my_var) row_dim = 2 col_dim = 3 zero_var = tf.Variable(tf.zeros([row_dim, col_dim])) ones_var = tf.Variable(tf.ones([row_dim, col_dim])) sess.run(zero_var.initializer) sess.run(ones_var.initializer) print(sess.run(zero_var)) print(sess.run(ones_var)) zero_similar = tf.Variable(tf.zeros_like(zero_var)) ones_similar = tf.Variable(tf.ones_like(ones_var)) sess.run(ones_similar.initializer) sess.run(zero_similar.initializer) print(sess.run(ones_similar)) print(sess.run(zero_similar)) fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1)) sess.run(fill_var.initializer) print(sess.run(fill_var)) const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9])) const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim])) sess.run(const_var.initializer) sess.run(const_fill_var.initializer)
tensorflow.zeros_like
6,665
import tensorflow as tf

top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1)  # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels)  # [k]
loss = tf.reduce_sum(loss)  # []
tensorflow.reduce_sum
6,666
import tensorflow as tf

trainable_vars = tf.trainable_variables()
if self.config.clip_weight:
    # clip_weight
    tvars = tf.trainable_variables()
    grads = tf.gradients(self.loss, tvars)
    grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.config.max_norm_grad)
    grad_var_pairs = zip(grads, tvars)
    self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad')
else:
    self.train_op = self.optimizer.minimize(self.loss)
tensorflow.clip_by_global_norm
6,667
from tensorflow.python.framework import ops

def _bilateral_slice_grad(op, grad):
  grid_tensor = op.inputs[0]
  guide_tensor = op.inputs[1]
  return _hdrnet.bilateral_slice_grad(grid_tensor, guide_tensor, grad)

@ops.RegisterGradient('BilateralSliceApply')
def _bilateral_slice_grad(op, grad):
  grid_tensor = op.inputs[0]
  guide_tensor = op.inputs[1]
  input_tensor = op.inputs[2]
  has_offset = op.get_attr('has_offset')
tensorflow.python.framework.ops.RegisterGradient
6,668
import tensorflow as tf """ assertions = [] assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)), ['The l2 norm of each prediction quaternion vector should be 1.'])) assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), ['The l2 norm of each label quaternion vector should be 1.'])) with tf.name_scope(name): with tf.control_dependencies(assertions): product = tf.multiply(predictions, labels) internal_dot_products = tf.reduce_sum(product, [1]) logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) return logcost def log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'): """A helper function to compute the mean error between batches of quaternions. The caller is expected to add the loss to the graph. Args: predictions: A Tensor of size [batch_size, 4].
tensorflow.reduce_sum
6,669
import tensorflow as tf

        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
tensorflow.logging.info
6,670
import tensorflow as tf mvn_static = tfd.MultivariateNormalDiag( loc=np.zeros([2, 2]), name="MVN") self.assertEqual( str(mvn_static), "tfp.distributions.MultivariateNormalDiag(" "\"MVN/\", " "batch_shape=(2,), " "event_shape=(2,), " "dtype=float64)") # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return mvn_dynamic = tfd.MultivariateNormalDiag( loc=tf.placeholder_with_default( input=np.ones((3, 3), dtype=np.float32), shape=[None, 3]), name="MVN2") self.assertEqual( str(mvn_dynamic), "tfp.distributions.MultivariateNormalDiag(" "\"MVN2/\", " "batch_shape=(?,), " # Partially known.
tensorflow.executing_eagerly
6,671
import tensorflow as tf q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q1', reuse=True): q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) elif nn_type == 'mlp_dropout': with tf.variable_scope('pi'): pi = act_limit * mlp_dropout(x, list(hidden_sizes)+[act_dim], activation, output_activation) with tf.variable_scope('q'): q = tf.squeeze(mlp_dropout(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) with tf.variable_scope('q', reuse=True): q_pi = tf.squeeze(mlp_dropout(tf.concat([x,pi], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) elif nn_type == 'mlp_variational': with tf.variable_scope('pi'): pi_in_dim = x.shape.as_list()[1] pi_dropout_mask_generator = DropoutMaskGenerator(pi_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) pi_dropout_mask_phs = pi_dropout_mask_generator.generate_dropout_mask_placeholders() pi, pi_reg = mlp_variational(x, pi_dropout_mask_phs, list(hidden_sizes) + [act_dim], activation, output_activation, dropout_rate) pi = act_limit * pi
tensorflow.concat
6,672
from tensorflow.python.framework import ops

@ops.RegisterShape("CountUpTo")
def _CountUpToShape(op):
  """Shape function for the CountUpTo op."""
  return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())]

@ops.RegisterShape("ScatterAdd")
@ops.RegisterShape("ScatterSub")
@ops.RegisterShape("ScatterUpdate")
def _ScatterUpdateShape(op):
  """Shape function for the sparse update ops."""
  var_shape = op.inputs[0].get_shape()
tensorflow.python.framework.ops.RegisterShape
6,673
import tensorflow as tf

    @dynamic_batching.batch_fn
    def f(a, b):
      return a + b

    outputs = []
    for _ in xrange(1000):
      outputs.append(f(tf.ones([1, 10]), tf.ones([1, 10])))
    op_to_benchmark = tf.group(*outputs)

    tf.train.start_queue_runners()

    self.run_op_benchmark(
        name='batching_many_small',
        sess=session,
tensorflow.group
6,674
import tensorflow as tf

def get_next_sentence_output(bert_config, input_tensor, labels):
  """Get loss and log probs for the next sentence prediction."""

  # Simple binary classification. Note that 0 is "next sentence" and 1 is
  # "random sentence". This weight matrix is not used after pre-training.
  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
tensorflow.variable_scope
6,675
import tensorflow as tf

filter_regex = get_vocab_newline_characters_regex(x.dtype, file_format)
if vocab_ordering_type == _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION:
    labels = tf.reshape(labels, [-1])
    reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences(
tensorflow.reshape
6,676
import tensorflow as tf

def convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True,
               use_batch_norm=True, weight_decay=0.0):
    with tf.variable_scope(name):
        l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
        kernel = tf.get_variable("weights", [kH, kW, nIn, nOut],
                                 initializer=tf.truncated_normal_initializer(stddev=1e-1),
                                 regularizer=l2_regularizer, dtype=inpOp.dtype)
        cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)
tensorflow.truncated_normal_initializer
6,677
import tensorflow as tf tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
tensorflow.train.Scaffold
6,678
import tensorflow as tf

      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size)

  if FLAGS.do_train:
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    train_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=True)
    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
tensorflow.logging.info
6,679
import tensorflow as tf

self.optimizer, self.y, self.a = self.creat_optimizer(self.readout)  # build the optimization scheme, using the Adam optimizer
self.game_state = game.GameState()
self.sess = tf.Session()  # create the class's TensorFlow session
self.start = self.init_step()  # set up the created system's initial state
tensorflow.Session
6,680
import tensorflow as tf input_training_masks_split = tf.split(input_training_masks, len(gpus)) tower_grads = [] reuse_variables = None for i, gpu_id in enumerate(gpus): #with tf.device('/gpu:%d' % gpu_id): with tf.name_scope('model_%d' % gpu_id) as scope: iis = input_images_split[i] isms = input_score_maps_split[i] igms = input_geo_maps_split[i] itms = input_training_masks_split[i] total_loss, model_loss = tower_loss(iis, isms, igms, itms, reuse_variables) batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)) reuse_variables = True grads = opt.compute_gradients(total_loss) tower_grads.append(grads) grads = average_gradients(tower_grads) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) summary_op = tf.summary.merge_all() # save moving average variable_averages = tf.train.ExponentialMovingAverage(
tensorflow.get_collection
6,681
import tensorflow as tf if channels == 3: scales = cutoff_data.dflux_dt_dflux_dtime_scales(band) cutoffs = cutoff_data.dflux_dt_dflux_dtime_cutoffs(band) cutoffs_batch_window = tf.expand_dims(tf.expand_dims(cutoffs, 0), 0) scales_batch_window = tf.expand_dims( tf.expand_dims(tf.expand_dims(scales, 0), 0), -1 ) init_layer_per_cutoff = tf.expand_dims(initial_layer_features, -1) graph_typecheck.assert_shape( cutoffs_batch_window, [1, 1, channels, cutoff_data.embedding_size] ) graph_typecheck.assert_shape(scales_batch_window, [1, 1, channels, 1]) graph_typecheck.assert_shape( init_layer_per_cutoff, [batch_size, twice_window_size, channels, 1] )
tensorflow.expand_dims
6,682
import tensorflow as tf

        if not inverse:
            z = tf.math.exp(log_lambdas)*x
            ldj = tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
        else:
            z = x*tf.math.exp(-log_lambdas)
            ldj = -tf.math.reduce_sum(log_lambdas, axis=[1,2,3])
        return z, ldj

class Exponentiate(Parameterize):
    """
tensorflow.math.reduce_sum
6,683
import tensorflow as tf d.input_ids: tf.io.VarLenFeature(tf.int64), d.token_type_ids: tf.io.VarLenFeature(tf.int64), d.attention_mask: tf.io.VarLenFeature(tf.int64), d.labels: tf.io.VarLenFeature(tf.int64), } dataset = dataset.map( lambda x: tf.io.parse_example(x, features), num_parallel_calls=utils.AUTOTUNE, ).prefetch(utils.AUTOTUNE) dataset = dataset.map( lambda x: ( tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32), tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32), tf.cast(tf.sparse.to_dense(x[d.attention_mask]), tf.int32), tf.cast(tf.sparse.to_dense(x[d.labels]), tf.int32), ), num_parallel_calls=utils.AUTOTUNE, ).prefetch(utils.AUTOTUNE) # do transformation return d(dataset, **kwargs) def parse_examples_to_dataset(self): if not self.examples: logging.info("self.examples is empty or None, skipped.")
tensorflow.sparse.to_dense
6,684
import tensorflow as tf

def standard_normal_ll(input_):
    """Log-likelihood of standard Gaussian distribution."""
    res = -.5 * (tf.square(input_) + numpy.log(2. * numpy.pi))

    return res

def standard_normal_sample(shape):
    """Samples from standard Gaussian distribution."""
    return tf.random_normal(shape)

SQUEEZE_MATRIX = numpy.array([[[[1., 0., 0., 0.]], [[0., 0., 1., 0.]]],
                              [[[0., 0., 0., 1.]], [[0., 1., 0., 0.]]]])

def squeeze_2x2_ordered(input_, reverse=False):
    """Squeezing operation with a controlled ordering."""
tensorflow.random_normal
6,685
import tensorflow as tf

    # the last batch.
    eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)
tensorflow.gfile.GFile
6,686
import tensorflow as tf U = tf.reshape(flat_logits, [-1, num_acts, self.k]) # Calculate w cut_g = tf.stop_gradient(self.g) cut_g = tf.expand_dims(cut_g, [1]) gstack = tf.concat([self.prev_g, cut_g], axis=1) self.last_c_g = gstack[:, 1:] # print self.last_c_g gsum = tf.reduce_sum(gstack, axis=1) phi = tf.get_variable("phi", (self.g_dim, self.k)) w = tf.matmul(gsum, phi) w = tf.expand_dims(w, [2]) # Calculate policy and sample logits = tf.reshape(tf.matmul(U, w), [-1, num_acts]) self.pi = tf.nn.softmax(logits) self.log_pi = tf.nn.log_softmax(logits) self.sample = policy_utils.categorical_sample( tf.reshape(logits, [-1, num_acts]), num_acts)[0, :] def build_value(self, _input): with tf.variable_scope('VF'): hidden = tf.layers.dense(inputs=_input, units=self.vf_hidden_size, activation=tf.nn.elu) w = tf.get_variable("weights", (self.vf_hidden_size, 1)) return tf.matmul(hidden, w)
tensorflow.matmul
6,687
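The logits computation above relies on batched tf.matmul: when both operands carry a leading batch dimension, the product is taken per example. A shape-only sketch with hypothetical dimensions:

import tensorflow as tf

U = tf.ones([4, 6, 16])  # hypothetical [batch, num_acts, k] action embeddings
w = tf.ones([4, 16, 1])  # hypothetical [batch, k, 1] goal-derived weights
logits = tf.reshape(tf.matmul(U, w), [-1, 6])  # per-example matmul -> [4, 6]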
import tensorflow as tf
    model :dict, containing the model's parameters, loss function, independent variable, and dependent variable
    """
    np.random.seed(1024)
    # Define the independent and dependent variables
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    ## Writing the values to be predicted in matrix form greatly speeds things up
    y = tf.placeholder(tf.float64, shape=[None, 1], name="y")
    # Define the parameter estimates and the predicted values
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name="y_pred")
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
tensorflow.placeholder
6,688
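A minimal TF1-style sketch of how the placeholders above get their values at run time; the data and graph are illustrative, not the snippet's regression model:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float64, shape=[None, 3], name="x")
total = tf.reduce_sum(x)
with tf.Session() as sess:
    # Placeholders only receive concrete values through feed_dict.
    print(sess.run(total, feed_dict={x: np.ones([5, 3])}))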
import tensorflow as tf X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name) elif norm == 'G': X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if dropout > 0.0: X = tf.layers.dropout(X, dropout, training=is_train) if slope < 1.0: X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X) return X def decoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00, use_bias=True): with tf.variable_scope(name) as scope: if scale > 1:
tensorflow.nn.leaky_relu
6,689
import tensorflow as tf tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2) tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2) #debug loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.squeeze(annotation, squeeze_dims=[3]), name="entropy"))) # loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, # labels=annotation, # name="entropy"))) loss_summary = tf.summary.scalar("entropy", loss) trainable_var = tf.trainable_variables() if FLAGS.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) train_op = train(loss, trainable_var) print("Setting up summary op...") summary_op = tf.summary.merge_all() print("Setting up image reader...") train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir) print(len(train_records)) print(len(valid_records)) print("Setting up dataset reader")
tensorflow.trainable_variables
6,690
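tf.trainable_variables() as used above returns every variable created with trainable=True in the current graph; a minimal sketch of iterating over it (variable names are made up):

import tensorflow as tf

w = tf.Variable(tf.zeros([3, 3]), name="w")
b = tf.Variable(tf.zeros([3]), name="b", trainable=False)
for var in tf.trainable_variables():
    # Only "w" shows up here, because "b" was created with trainable=False.
    print(var.name, var.shape)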
import tensorflow as tf message = ( 'Cannot resume an existing run since the logging directory does not ' 'contain a configuration file.') raise IOError(message) with tf.gfile.GFile(config_path, 'r') as file_: print('try to load') config = yaml.load(file_, yaml.Loader) message = 'Resume run and write summaries and checkpoints to {}.'
tensorflow.gfile.GFile
6,691
import tensorflow as tf coord.request_stop() coord.join(threads) def test_and_valid(test_loop=1,valid_loop=1,test_num=64,valid_num=64): feed_dict={ testnum: test_num, validnum: valid_num } with tf.Session(config=config) as sess: sess.run(init) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) tf.train.Saver().restore(sess,path) #test test_acc_avg = 0.0 test_true_total=np.array([]) test_pre_total=np.array([]) for i in range(0, test_loop): accuracy_np = sess.run([accuracy],feed_dict=feed_dict) test_pre_1, test_true_1 = sess.run([test_pre, test_true],feed_dict=feed_dict) test_pre_1 = np.array(test_pre_1)
tensorflow.train.Coordinator
6,692
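The Coordinator and queue-runner calls above follow the usual TF1 input-pipeline lifecycle; a hedged minimal sketch (the loop body stands in for whatever ops the graph actually runs):

import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for _ in range(10):
        pass  # sess.run(...) on ops fed by the queue runners
    coord.request_stop()
    coord.join(threads)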
import tensorflow as tf for h in range(1, horizon + 1): new_w = epi_len - h + 1 weights = np.zeros([epi_len, epi_len]) for i in range(new_w): weights[i:i + h, i] = 1.0 weights_list += [weights] weights_tensors = tf.stack([tf.convert_to_tensor(weights, dtype=tf.float32) for weights in weights_list]) rand_horizon = tf.random_uniform((), 0, horizon, dtype=tf.int32) new_w = epi_len - rand_horizon cur_weights = tf.slice(weights_tensors[tf.cast(rand_horizon, tf.int32)], [0, 0], [epi_len, new_w]) # cur_weights = tf.slice(weights_tensors, [tf.cast(rand_horizon, tf.int32), 0, 0], [1, epi_len, new_w])
tensorflow.convert_to_tensor
6,693
import tensorflow as tf for _ in range(n_iters): _, samples = step_fn(dynamics, optimizer, samples) def fit(dynamics, samples, optimizer, step_fn=step, n_iters=5000, verbose=True, logdir=None): """Fit L2HMC sampler with given log-likelihood function.""" if logdir: summary_writer = tf.contrib.summary.create_file_writer(logdir) for i in range(n_iters): loss, samples = step_fn(dynamics, optimizer, samples) if verbose: print("Iteration %d: loss %.4f" % (i, loss)) if logdir: with summary_writer.as_default(): with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar("loss", loss) class L2hmcTest(tf.test.TestCase): """Unit tests for l2hmc in both eager and graph mode."""
tensorflow.contrib.summary.create_file_writer
6,694
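The logging above uses the TF1-era tf.contrib.summary API for eager execution; a rough sketch of that pattern, assuming a TF 1.x install with contrib and a made-up logdir (the exact scalar signature may differ between 1.x releases):

import tensorflow as tf

tf.enable_eager_execution()
writer = tf.contrib.summary.create_file_writer("/tmp/l2hmc_logs")
with writer.as_default():
    with tf.contrib.summary.always_record_summaries():
        for step in range(3):
            tf.contrib.summary.scalar("loss", 1.0 / (step + 1), step=step)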
from tensorflow.python.framework import tensor_shape ops.RegisterShape("Softmax")( common_shapes.unchanged_shape_with_rank(2)) @ops.RegisterShape("InTopK") def _InTopKShape(op): """Shape function for InTopK op.""" predictions_shape = op.inputs[0].get_shape().with_rank(2) targets_shape = op.inputs[1].get_shape().with_rank(1) batch_size = predictions_shape[0].merge_with(targets_shape[0]) return [tensor_shape.vector(batch_size.value)] @ops.RegisterShape("TopK") @ops.RegisterShape("TopKV2") def _TopKShape(op): """Shape function for TopK and TopKV2 ops.""" input_shape = op.inputs[0].get_shape().with_rank_at_least(1) if len(op.inputs) >= 2: k = tensor_util.constant_value(op.inputs[1]) else:
tensorflow.python.framework.tensor_shape.vector
6,695
from tensorflow.python.ops import array_ops for config_name, config in test_configs.items(): config = test_configs[config_name] num_layers = config["num_layers"] num_units = config["num_units"] batch_size = config["batch_size"] seq_length = config["seq_length"] with ops.Graph().as_default(), ops.device("/device:GPU:0"): model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units) params_size_t = model.params_size() input_data = variables.Variable( array_ops.ones([seq_length, batch_size, num_units])) input_h = variables.Variable( array_ops.ones([num_layers, batch_size, num_units])) input_c = variables.Variable( array_ops.ones([num_layers, batch_size, num_units])) params = variables.Variable( array_ops.ones([params_size_t]), validate_shape=False) output, output_h, output_c = model( is_training=True, input_data=input_data, input_h=input_h,
tensorflow.python.ops.array_ops.ones
6,696
import tensorflow as tf def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name): with tf.variable_scope(name): filt, conv_biases = self.get_conv_var(kernal_size, in_channels, out_channels, name) conv = tf.nn.conv2d(bottom, filt, [1,stride,stride,1], padding='SAME') bias = tf.nn.bias_add(conv, conv_biases) tf.summary.histogram('weight', filt) tf.summary.histogram('bias', conv_biases) return bias def conv_bn_relu(self, bottom,name, kernel_size, output_channels, initializer,stride=1, bn=False,training=False,relu=True): input_channels = bottom.get_shape().as_list()[-1] with tf.variable_scope(name) as scope: kernel = self.variable('weights', [kernel_size, kernel_size, input_channels, output_channels], initializer, regularizer=tf.contrib.layers.l2_regularizer(0.0005))
tensorflow.summary.histogram
6,697
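tf.summary.histogram as called above only registers a summary op; the values land on disk once the merged summary is evaluated and written. A minimal TF1 sketch with made-up names and logdir:

import tensorflow as tf

weights = tf.Variable(tf.truncated_normal([3, 3, 16, 32], stddev=0.1), name="weights")
tf.summary.histogram("weights", weights)
summary_op = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("/tmp/histogram_demo", sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)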
import tensorflow as tf features = {'member/name': tf.io.FixedLenFeature([], tf.string), 'member/encoded': tf.io.FixedLenFeature([], tf.string),
tensorflow.io.FixedLenFeature
6,698
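A minimal sketch of consuming the fixed-length features declared above with tf.io.parse_single_example; it assumes the 'member/encoded' field holds a JPEG image, which is not stated in the snippet:

import tensorflow as tf

features = {'member/name': tf.io.FixedLenFeature([], tf.string),
            'member/encoded': tf.io.FixedLenFeature([], tf.string)}

def _parse(serialized_example):
    parsed = tf.io.parse_single_example(serialized_example, features)
    image = tf.io.decode_jpeg(parsed['member/encoded'], channels=3)  # assumption: JPEG-encoded bytes
    return parsed['member/name'], image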
import tensorflow as tf self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testDynamicAttentionDecoder1(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): cell = tf.nn.rnn_cell.GRUCell(2) inp = tf.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32) attn_states = enc_outputs dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state, attn_states, cell, output_size=4) sess.run([tf.global_variables_initializer()]) res = sess.run(dec)
tensorflow.constant
6,699