seed (stringlengths 25-2.89k) | seed_api (stringlengths 14-102) | index (int64 0-14.8k)
---|---|---
import tensorflow as tf
@pytest.fixture
def mu(session_tf):
return tf.convert_to_tensor(Datum.mu_data)
@pytest.fixture
def sqrt_diag(session_tf):
return tf.convert_to_tensor(Datum.sqrt_diag_data)
@pytest.fixture
def K(session_tf):
return tf.convert_to_tensor(Datum.K_data)
@pytest.fixture
def K_batch(session_tf):
return tf.convert_to_tensor(Datum.K_batch_data)
@pytest.fixture
def sqrt(session_tf):
return tf.convert_to_tensor(Datum.sqrt_data)
@pytest.fixture()
def I(session_tf):
return tf.convert_to_tensor(Datum.I)
@pytest.mark.parametrize('white', [True, False])
def test_diags(session_tf, white, mu, sqrt_diag, K):
"""
The covariance of q(x) can be Cholesky matrices or diagonal matrices.
Here we make sure the behaviours overlap.
| tensorflow.convert_to_tensor | 3,300 |
import tensorflow as tf
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0))
q_func_results = q_func(observations_ph.get(), num_actions, scope="q_func")
q_values = q_func_results['q']
s_value = q_func_results['s']
a_values = q_func_results['a']
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=[output_actions, q_values, s_value, a_values, update_eps_expr],
givens={update_eps_ph: test_epsilon, stochastic_ph: False},
updates=[update_eps_expr])
return act
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
| tensorflow.where | 3,301 |
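As a reference for the row above, here is a minimal eager-mode sketch of the same epsilon-greedy selection with `tf.where`, assuming TF 2.x; the batch size, action count, epsilon, and the random `q_values` stand-in are illustrative.

```python
import tensorflow as tf

batch_size, num_actions, eps = 4, 6, 0.1
q_values = tf.random.uniform([batch_size, num_actions])          # stand-in for the q_func output
deterministic_actions = tf.argmax(q_values, axis=1)              # greedy actions (int64)
random_actions = tf.random.uniform(
    [batch_size], minval=0, maxval=num_actions, dtype=tf.int64)  # uniform random actions
chose_random = tf.random.uniform([batch_size]) < eps             # per-sample exploration flag
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
```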
import tensorflow as tf
counts = tf_utils.reorder_histogram(bucket_vocab, counts,
tf.size(boundaries) - 1)
| tensorflow.size | 3,302 |
import tensorflow as tf
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
| tensorflow.variable_scope | 3,303 |
import tensorflow as tf
model.get_pooled_output(),
num_labels,
label_ids,
dropout_prob)
label_loss = tf.reduce_sum(per_example_loss * features["label_ratio"]) / (1e-10+tf.reduce_sum(features["label_ratio"]))
tf.get_variable_scope().reuse_variables()
(tgt_loss,
| tensorflow.reduce_sum | 3,304 |
import tensorflow.contrib.layers as layers
from dqn_utils import *
from atari_wrappers import *
def atari_model(img_in, num_actions, scope, reuse=False):
# as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
def atari_learn(env,
session,
num_timesteps):
# This is just a rough estimate
num_iterations = float(num_timesteps) / 4.0
| tensorflow.contrib.layers.flatten | 3,305 |
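For comparison with the row above, a hedged sketch of the same convolutional stack written with `tf.keras` layers (the `tf.contrib.layers` API is gone in TF 2.x); the filter sizes follow the comments above, while the input shape and function name are illustrative.

```python
import tensorflow as tf

def atari_q_network(num_actions):
    # Mirrors the conv stack above: 32x8 stride 4, 64x4 stride 2, 64x3 stride 1,
    # then a 512-unit dense layer and a linear output with one Q-value per action.
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 8, strides=4, activation="relu"),
        tf.keras.layers.Conv2D(64, 4, strides=2, activation="relu"),
        tf.keras.layers.Conv2D(64, 3, strides=1, activation="relu"),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation="relu"),
        tf.keras.layers.Dense(num_actions),
    ])

q_net = atari_q_network(num_actions=6)
q_values = q_net(tf.zeros([1, 84, 84, 4]))  # e.g. a stack of four 84x84 grayscale frames
```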
import tensorflow as tf
self.end_points_G = self.model.generator([batch_size_train, 100], True, None, batch_size_val)
if gpu_idx == 0:
G_means = tf.reduce_mean(self.end_points_G['softmax'], 0, keep_dims=True)
G_vars = tf.reduce_mean(tf.square(self.end_points_G['softmax'] - G_means), 0, keep_dims=True)
G = tf.Print(
self.end_points_G['softmax'],
[tf.reduce_mean(G_means), tf.reduce_mean(G_vars)],
| tensorflow.square | 3,306 |
import tensorflow as tf
name="moving_variance")
def build_batch_stats():
"""Builds the batch statistics calculation ops."""
# Copy for better stability.
# We use the moving mean as an estimate of the mean in order to perform
# a more numerically stable calculation of the batch mean.
shift = tf.add(self._moving_mean, 0)
counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
input_batch,
reduction_indices,
keep_dims=True,
shift=shift,
name="batch_norm_ss")
mean, variance = tf.nn.normalize_moments(counts,
shifted_sum_x,
| tensorflow.nn.sufficient_statistics | 3,307 |
from tensorflow.python.ops import array_ops
predictions=threshold_predictions, labels=labels, weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions,
labels=labels,
thresholds=[threshold],
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), update_op
return _streaming_metrics
class _MetricKeys(object):
AUC = "auc"
PREDICTION_MEAN = "labels/prediction_mean"
TARGET_MEAN = "labels/actual_target_mean"
ACCURACY_BASELINE = "accuracy/baseline_target_mean"
ACCURACY_MEAN = "accuracy/threshold_%f_mean"
PRECISION_MEAN = "precision/positive_threshold_%f_mean"
RECALL_MEAN = "recall/positive_threshold_%f_mean"
| tensorflow.python.ops.array_ops.squeeze | 3,308 |
import tensorflow as tf
def train_right_length(example, target):
l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])
return tf.less(l, max_length + 1)
def eval_right_length(example, target):
l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])
return tf.less(l, max_eval_length + 1)
if max_length > 0 and training:
dataset = dataset.filter(train_right_length)
| tensorflow.shape | 3,309 |
from tensorflow.python.ops import control_flow_ops
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
| tensorflow.python.ops.control_flow_ops.cond | 3,310 |
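A small eager-mode sketch of the `cond`-guarded reallocation pattern from the row above, assuming TF 2.x; `grow_if_needed` and its doubling policy are illustrative, not the library's actual logic.

```python
import tensorflow as tf

def grow_if_needed(array, new_size):
    # Doubles a 1-D buffer when new_size exceeds its current length, otherwise leaves it alone.
    def reallocate():
        return tf.concat([array, tf.zeros_like(array)], axis=0)
    return tf.cond(new_size > tf.shape(array)[0], reallocate, lambda: array)

buf = tf.zeros([4])
print(grow_if_needed(buf, 6).shape)  # (8,)
print(grow_if_needed(buf, 3).shape)  # (4,)
```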
import tensorflow as tf
channels = inp.get_shape().as_list()[3]
with tf.variable_scope(name):
moving_mean = get_variable("mean", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0), trainable=False)
moving_variance = get_variable("var", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), trainable=False)
offset = get_variable("offset", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
scale = get_variable("scale", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), regularizer=tf.nn.l2_loss)
mean, variance = tf.nn.moments(inp, axes=[0, 1, 2], shift=moving_mean)
mean_op = moving_mean.assign(decay * moving_mean + (1 - decay) * mean)
var_op = moving_variance.assign(decay * moving_variance + (1 - decay) * variance)
| tensorflow.constant_initializer | 3,311 |
import tensorflow as tf
return U0
def net_U1(self, x):
lambda_1 = self.lambda_1
lambda_2 = tf.exp(self.lambda_2)
U = self.neural_net(x, self.weights, self.biases)
U_x = self.fwd_gradients_1(U, x)
U_xx = self.fwd_gradients_1(U_x, x)
U_xxx = self.fwd_gradients_1(U_xx, x)
F = -lambda_1*U*U_x - lambda_2*U_xxx
U1 = U + self.dt*tf.matmul(F, (self.IRK_beta - self.IRK_alpha).T)
return U1
def callback(self, loss):
print('Loss:', loss)
self.loss_log.append(loss)
def train(self, nIter):
tf_dict = {self.x0_tf: self.x0, self.u0_tf: self.u0,
self.x1_tf: self.x1, self.u1_tf: self.u1,
| tensorflow.matmul | 3,312 |
import tensorflow as tf
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
| tensorflow.split | 3,313 |
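The `batch_to_seq`/`seq_to_batch` pair above round-trips between a flat `[nbatch * nsteps, nh]` tensor and a list of per-step `[nbatch, nh]` tensors. A hedged TF 2.x sketch of that round trip, with illustrative shapes:

```python
import tensorflow as tf

nbatch, nsteps, nh = 2, 3, 5
h = tf.random.normal([nbatch * nsteps, nh])

# batch_to_seq: reshape to [nbatch, nsteps, nh], then split into nsteps tensors of [nbatch, nh]
seq = [tf.squeeze(v, axis=[1])
       for v in tf.split(tf.reshape(h, [nbatch, nsteps, -1]), nsteps, axis=1)]

# seq_to_batch: concatenate the per-step tensors and flatten back to [nbatch * nsteps, nh]
h_back = tf.reshape(tf.concat(seq, axis=1), [-1, nh])
assert bool(tf.reduce_all(h == h_back))  # exact round trip, no arithmetic involved
```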
from tensorflow.python.framework import tensor_util
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch))
normal = dists.Normal([mu], [sigma],
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([mu], [sigma],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
# We now test every codepath within the underlying is_scalar_helper
| tensorflow.python.framework.tensor_util.constant_value | 3,314 |
import tensorflow as tf
# Center locations.
blk_indices_crop = blk_indices[:, 0, 0, :]
# Project back to an image.
y = tf.scatter_nd(blk_indices_crop, q, out_shape)
return y
with tf.control_dependencies([assert_shape, assert_strides]):
return tf.cond(
tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype),
_conv_nonzero)
def mask_conv2d(x, w, mask, strides, padding):
"""Masked 2D convolution. Used to check 2D sparse convolution.
:param x: [Tensor] Convolution feature map, 4D, dtype float32.
:param w: [Tensor] Convolution kernel, 4D, dtype float32.
| tensorflow.zeros | 3,315 |
import tensorflow as tf
num_channels = input_dim.get_shape().as_list()[4]
theta = tf.reshape(theta, (-1, 4, 4))
| tensorflow.reshape | 3,316 |
import tensorflow as tf
X, Y= create_placeholders(f, l) # Create Placeholders
parameters = initialize_parameters(network, activation, stdbeta)
betan = tf.identity(parameters['beta'+str(L)], name="betan") #add the output noise to the graph for later retrieval
an, hn, _ , _ = FW_prop(X, parameters, activation) #post and pre-activation output of the last layer
an = tf.identity(an, name= "an") #add the output post-activation value to the graph for later retrieval
hn = tf.identity(hn, name='hn') #add the output pre-activation value to the graph for later retrieval
#Create a saver for the Model
if save_model == True:
saver = tf.train.Saver()
#-----------------Initialize the cost and gradients---------------------------------------------------------
| tensorflow.identity | 3,317 |
import tensorflow.contrib as contrib
stitch3_1, stitch3_2 = fc3_1, fc3_2
dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_1")
dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_2")
output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1")
output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2")
with tf.variable_scope("loss"):
loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses)
| tensorflow.contrib.layers.fully_connected | 3,318 |
from tensorflow.contrib.eager.python.examples.revnet import revnet
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = revnet.RevNet(config=config)
if defun:
| tensorflow.contrib.eager.python.examples.revnet.revnet.RevNet | 3,319 |
import tensorflow as tf
def compute_triplet_loss(positive_distances, negative_distances):
losses = tf.nn.relu(positive_distances + margin - negative_distances)
losses = tf.where(
tf.stop_gradient(losses < losses.dtype.max), losses,
tf.zeros_like(losses))
num_nonzero_losses = tf.math.count_nonzero(losses)
loss = tf.math.reduce_mean(losses)
return loss, num_nonzero_losses
| tensorflow.zeros_like | 3,320 |
import tensorflow as tf
super(MixedPrecisionOptimizer, self).__init__(
name=name, use_locking=use_locking)
self._optimizer = optimizer
self._scale = float(scale) if scale is not None else 1.0
def compute_gradients(self, loss, var_list=None, *args, **kwargs):
if var_list is None:
var_list = (
tf.trainable_variables() +
tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
replaced_list = var_list
if self._scale != 1.0:
loss = tf.scalar_mul(self._scale, loss)
| tensorflow.trainable_variables | 3,321 |
import tensorflow as tf
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
| tensorflow.maximum | 3,322 |
from tensorflow.python.ops import check_ops
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
| tensorflow.python.ops.check_ops.assert_non_negative | 3,323 |
import tensorflow as tf
train_y_1 = to_categorical(train_y_1, n_class_1)
test_y_1 = to_categorical(test_y_1, n_class_1)
train_y_2 = to_categorical(train_y_2, n_class_2)
test_y_2 = to_categorical(test_y_2, n_class_2)
return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2
def apply_cross_stitch(input1, input2):
input1_reshaped = contrib.layers.flatten(input1)
input2_reshaped = contrib.layers.flatten(input2)
input = tf.concat((input1_reshaped, input2_reshaped), axis=1)
# initialize with identity matrix
cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=tf.float32,
collections=['cross_stitches', tf.GraphKeys.GLOBAL_VARIABLES],
initializer=tf.initializers.identity())
output = tf.matmul(input, cross_stitch)
# need to call .value to convert Dimension objects to normal value
input1_shape = list(-1 if s.value is None else s.value for s in input1.shape)
input2_shape = list(-1 if s.value is None else s.value for s in input2.shape)
output1 = tf.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape)
output2 = tf.reshape(output[:, input1_reshaped.shape[1]:], shape=input2_shape)
return output1, output2
def main(args):
train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 = load_data()
m = train_X.shape[0]
| tensorflow.initializers.identity | 3,324 |
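A hedged TF 2.x sketch of the cross-stitch operation from the row above, assuming the two inputs are already flat `[batch, features]` tensors; `cross_stitch`, the shapes, and the variable handling are illustrative.

```python
import tensorflow as tf

def cross_stitch(flat1, flat2, stitch):
    # Concatenate the two task activations, mix them through a learnable square matrix,
    # and split back; identity initialization makes the unit a no-op at the start.
    n1 = flat1.shape[1]
    mixed = tf.matmul(tf.concat([flat1, flat2], axis=1), stitch)
    return mixed[:, :n1], mixed[:, n1:]

a = tf.random.normal([4, 8])
b = tf.random.normal([4, 8])
stitch = tf.Variable(tf.eye(16))           # 16 = 8 + 8 concatenated features
out_a, out_b = cross_stitch(a, b, stitch)  # equals (a, b) under the identity init
```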
from tensorflow.python.framework import ops
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fn = set_ops.set_size(set_ops.set_difference(predictions_idx,
labels,
aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
weights = math_ops.to_double(weights)
fn = math_ops.mul(fn, weights)
return fn
| tensorflow.python.framework.ops.name_scope | 3,325 |
import tensorflow as tf
dim_size += max(kernel_size - stride_size, 0)
return dim_size
# calculate output shape
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
| tensorflow.shape | 3,326 |
import tensorflow as tf
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
parameters = dict(encoders=encoders[1:], decoder=encoders[0], training=training)
attention_states, encoder_state, encoder_input_length[1:] = multi_encoder(
encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters)
decoder_inputs = encoder_inputs[0][:, :-1]
batch_size = tf.shape(decoder_inputs)[0]
pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)
outputs, _, states, attns, _, _, _ = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs,
encoder_input_length=encoder_input_length[1:], **parameters
)
| tensorflow.shape | 3,327 |
import tensorflow as tf
truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),
[self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
mean, var = tf.nn.moments(tgtimg_z, axes=[0])
print(var.get_shape())
# self.simloss /= tf.reduce_mean(var)
print(tgtimg_z.get_shape())
| tensorflow.concat | 3,328 |
import tensorflow as tf
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
(tf.Tensor) A tensor containing the propensity weights.
"""
loss = None
with tf.name_scope(name, "click_weighted_pairwise_loss",[output]):
sliced_output = tf.unstack(output, axis=1)
sliced_label = tf.unstack(labels, axis=1)
sliced_propensity = tf.unstack(propensity_weights, axis=1)
for i in range(len(sliced_output)):
for j in range(i+1, len(sliced_output)):
cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j])
cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j]
cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j]))
if loss is None:
loss = cur_label_weight * cur_pair_loss * cur_propensity
else:
loss += cur_label_weight * cur_pair_loss * cur_propensity
batch_size = tf.shape(labels[0])[0]
return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1)
def click_weighted_log_loss(self, output, labels, propensity_weights, name=None):
"""Computes pointwise sigmoid loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
| tensorflow.exp | 3,329 |
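A self-contained eager restatement of the pairwise propensity-weighted loss above, assuming TF 2.x; the toy scores, labels, and propensities are illustrative.

```python
import tensorflow as tf

def click_weighted_pairwise_loss(output, labels, propensity_weights):
    # Sum a propensity-weighted pairwise term over all (i, j) positions in the ranked list.
    scores = tf.unstack(output, axis=1)
    lbls = tf.unstack(labels, axis=1)
    props = tf.unstack(propensity_weights, axis=1)
    loss = 0.0
    for i in range(len(scores)):
        for j in range(i + 1, len(scores)):
            label_weight = tf.math.sign(lbls[i] - lbls[j])
            propensity = props[i] * lbls[i] + props[j] * lbls[j]
            pair_loss = -tf.exp(scores[i]) / (tf.exp(scores[i]) + tf.exp(scores[j]))
            loss += label_weight * pair_loss * propensity
    batch_size = tf.cast(tf.shape(labels)[0], tf.float32)
    return tf.reduce_sum(loss) / batch_size

out = tf.random.normal([2, 3])
lab = tf.constant([[1., 0., 0.], [0., 1., 0.]])
prop = tf.ones([2, 3])
print(click_weighted_pairwise_loss(out, lab, prop))
```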
import tensorflow as tf
tt_ranks = []
for i in range(len(tt_cores)):
tt_ranks.append(tt_cores[i].get_shape()[0])
tt_ranks.append(tt_cores[-1].get_shape()[-1])
return tf.TensorShape(tt_ranks)
| tensorflow.TensorShape | 3,330 |
import tensorflow as tf
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
def testScaleGradientsNaN(self):
FLAGS.enable_check_numerics = False
p = self.TestParams()
p.input = base_input_generator.BaseSequenceInputGenerator.Params()
task = p.cls(p)
task.CreateVariable(
'a',
py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
var_a = task.theta.a
# Make a NaN gradient.
var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)
with self.session():
tf.global_variables_initializer().run()
self.assertTrue(has_nan_or_inf.eval())
self.assertEqual(0., grad_scale.eval())
# The final gradient must be finite.
self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())
def testScaleGradientsCheckNumerics(self):
| tensorflow.log | 3,331 |
import tensorflow as tf
end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0])
end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask
end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
else:
| tensorflow.nn.log_softmax | 3,332 |
import tensorflow as tf
pred_label = tf.argmax(distillation_loss["st_logits"], axis=-1, output_type=tf.int32)
correct = tf.equal(
tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
st_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1, output_type=tf.int32)
correct = tf.equal(
tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
except:
te_accuracy = tf.constant(0.0)
st_accuracy = tf.constant(0.0)
try:
st_accuracy = tf.reduce_mean(distillation_loss["src_f1_prob"])
te_accuracy = tf.reduce_mean(distillation_loss["tgt_f1_prob"])
except:
te_accuracy = tf.constant(0.0)
st_accuracy = tf.constant(0.0)
return {
"train":{
"loss":loss,
"logits":logits,
| tensorflow.constant | 3,333 |
import tensorflow as tf
self.memory_counter = 0
self._build_net()
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/target_net')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')
e_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/eval_hyper')
t_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')
with tf.variable_scope('soft_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time
self.summary_writer = tf.summary.FileWriter(train_log_dir, self.sess.graph)
def _build_net(self): # we use parameter sharing among agents
| tensorflow.assign | 3,334 |
import tensorflow as tf
# combine all pooled outputs
total_filters = num_filter * len(filter_list)
# concat all the pooled weights
H_pool = tf.concat(pooled_outputs, 3)
#flatten it for fully connected layer
H_pool_flat = tf.reshape(H_pool, [-1, total_filters])
with tf.name_scope("dropout"):
H_drop = tf.nn.dropout(H_pool_flat, keep_prob = keep_prob)
# Final (unnormalized) layer
with tf.name_scope("output"):
W = tf.get_variable("W",
shape=[total_filters, nb_classes],
| tensorflow.name_scope | 3,335 |
import tensorflow as tf
if trainable:
self.lr = tf.minimum(config.learning_rate, 0.001 / tf.log(999.) * tf.log(tf.cast(self.global_step, tf.float32) + 1))
self.opt = tf.train.AdamOptimizer(learning_rate = self.lr, beta1 = 0.8, beta2 = 0.999, epsilon = 1e-7)
grads = self.opt.compute_gradients(self.loss)
gradients, variables = zip(*grads)
capped_grads, _ = tf.clip_by_global_norm(
gradients, config.grad_clip)
self.train_op = self.opt.apply_gradients(
zip(capped_grads, variables), global_step=self.global_step)
def forward(self):
config = self.config
N, PL, QL, CL, d, dc, nh = config.batch_size if not self.demo else config.batch_size, self.c_maxlen, self.q_maxlen, config.char_limit, config.hidden, config.char_dim, config.num_heads
with tf.variable_scope("Input_Embedding_Layer"):
ch_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.ch), [N * PL, CL, dc])
qh_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.qh), [N * QL, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
# Bidaf style conv-highway encoder
ch_emb = conv(ch_emb, d,
bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = None)
qh_emb = conv(qh_emb, d,
bias = True, activation = tf.nn.relu, kernel_size = 5, name = "char_conv", reuse = True)
| tensorflow.variable_scope | 3,336 |
import tensorflow as tf
losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train, ul_u_eval_train)
losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test, ul_u_eval_test)
init_op = tf.global_variables_initializer()
if not FLAGS.log_dir:
logdir = None
writer_train = None
writer_test = None
else:
logdir = FLAGS.log_dir
writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g)
writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g)
saver = tf.train.Saver(tf.global_variables())
sv = tf.train.Supervisor(
is_chief=True,
logdir=logdir,
init_op=init_op,
init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1},
saver=saver,
global_step=global_step,
| tensorflow.summary.FileWriter | 3,337 |
import tensorflow as tf
# Reshape to use within a convolutional neural net. Last dimension is for
# 'features' - it would be 1 for a grayscale image, 3 for an RGB image,
# 4 for RGBA, etc.
x_image = tf.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])
x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image)
x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image)
img_summary = tf.summary.image('Input_images', x_image)
# First convolutional layer - maps one image to 32 feature maps.
with tf.variable_scope('Conv_1'):
conv1 = tf.layers.conv2d(
inputs=x_image,
filters=32,
kernel_size=[5,5],
padding='same',
| tensorflow.summary.image | 3,338 |
import tensorflow as tf
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
| tensorflow.squeeze | 3,339 |
import tensorflow as tf
y_true=heatmap_true * true_weight[:, i])
losses.append(loss)
return tf.reduce_mean(loss)
class JointsMSELoss(object):
def __init__(self):
self.mse = tf.losses.MeanSquaredError()
def __call__(self, y_pred, target, target_weight):
batch_size = y_pred.shape[0]
num_of_joints = y_pred.shape[-1]
pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))
heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)
gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))
heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)
loss = 0.0
for i in range(num_of_joints):
heatmap_pred = tf.squeeze(heatmap_pred_list[i])
heatmap_gt = tf.squeeze(heatmap_gt_list[i])
loss += 0.5 * self.mse(y_true=heatmap_pred * target_weight[:, i],
y_pred=heatmap_gt * target_weight[:, i])
return loss / num_of_joints
| tensorflow.reshape | 3,340 |
import tensorflow as tf
synthetic data will be used.""")
tf.flags.DEFINE_string('data_name', None,
"""Name of dataset: imagenet or flowers.
If not specified, it is automatically guessed
based on --data_dir.""")
tf.flags.DEFINE_string('resize_method', 'bilinear',
"""Method for resizing input images:
crop,nearest,bilinear,bicubic or area.
The 'crop' mode requires source images to be at least
as large as the network input size,
| tensorflow.flags.DEFINE_string | 3,341 |
import tensorflow as tf
n_row,n_col,n_channel = x.shape
n_patch = n_row*n_col // (self.size**2)
patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=[1,self.size,self.size,1],strides=[1,self.size,self.size,1],rates=[1, 1, 1, 1],padding='VALID')
patches = tf.reshape(patches,[n_patch,self.size,self.size,n_channel])
patches = tf.random.shuffle(patches)
# rand_idx = tf.reshape(tf.random.shuffle(tf.range(0,n_patch)),[n_patch])
# patches = tf.gather(patches, rand_idx, axis=0)
rows = tf.split(patches,n_col//self.size,axis=0)
rows = [tf.concat(tf.unstack(x),axis=1) for x in rows]
x_aug = tf.concat(rows,axis=0)
x_aug = tf.convert_to_tensor(x_aug)
return tf.concat([x, x_aug],axis=2)
def mix_scramble(self,x):
# assume square patch
| tensorflow.unstack | 3,342 |
import tensorflow as tf
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
"""Helper to create an initialized Variable with weight decay.
| tensorflow.get_variable | 3,343 |
import tensorflow as tf
use_feature_trans=self.use_feature_trans)
self.get_loss = partial(self.model_pred.get_loss, use_trans_loss=self.use_trans_loss)
with tf.variable_scope(name):
self.noise = tf.placeholder(tf.float32, shape=[self.batch_size, self.noise_dim], name='noise') # Noise vector.
self.real_pc = tf.placeholder(tf.float32, shape=[self.batch_size] + self.n_output, name='real_pc') # Ground-truth.
with tf.variable_scope('rotation'):
self.rot_label_pl = tf.placeholder(tf.int32, shape=self.batch_size, name='rot_label_pl')
self.real_pc_rotated = self.rotate_n_angles(self.real_pc, self.rot_label_pl)
self.real_pc_pred, real_pc_end_points = self.get_pred(self.real_pc_rotated)
self.real_pc_rot_loss = self.get_loss(self.real_pc_pred, self.rot_label_pl, real_pc_end_points)
| tensorflow.variable_scope | 3,344 |
import tensorflow as tf
x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup(
x_reshaped, self.means)
if self.hparams.ema:
tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta))
updated_ema_count = \
moving_averages.assign_moving_average(
self.ema_count,
tf.reduce_sum(
tf.reshape(
x_means_hot,
shape=[-1, self.hparams.num_blocks,
self.hparams.block_v_size]),
axis=0),
self.hparams.decay,
zero_debias=False)
dw = tf.matmul(
| tensorflow.reshape | 3,345 |
import tensorflow as tf
512),
max_steps = 1000)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(read_dataset('valid.csv',
| tensorflow.estimator.LatestExporter | 3,346 |
import tensorflow as tf
attention_probs = tf.nn.softmax(logits, name="attention_probs")
attention_probs = dropout(attention_probs, dropout_rate)
return tf.matmul(attention_probs, v)
| tensorflow.matmul | 3,347 |
from tensorflow.python.ops import array_ops
if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
with ops.device(tensors[0].device):
values = array_ops.stack(tensors)
with ops.device(device):
return array_ops.unstack(values)
else:
with ops.device(tensors[0].device):
sizes = array_ops.stack(
[array_ops.shape(tensor)[0] for tensor in tensors])
values = array_ops.concat(tensors, axis=0)
with ops.device(device):
sizes = array_ops.unstack(sizes)
return list(array_ops.split(values, sizes, axis=0))
def _scheduled_stamp_resource_op_runner(batch, stamp):
"""Runs a batch operation on a stamped resource."""
if not batch:
return
arg_keys = set(batch[0].args.keys())
grouped_args = collections.OrderedDict()
resource_handles = []
| tensorflow.python.ops.array_ops.unstack | 3,348 |
import tensorflow as tf
base_vae.NextFrameBaseVae):
"""Stochastic version of basic next-frame model."""
def inject_latent(self, layer, features, filters):
"""Inject a VAE-style latent."""
# Latent for stochastic model
input_frames = tf.to_float(features["inputs_raw"])
target_frames = tf.to_float(features["targets_raw"])
full_video = tf.concat([input_frames, target_frames], axis=1)
latent_mean, latent_std = self.construct_latent_tower(
full_video, time_axis=1)
latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
latent = tf.layers.flatten(latent)
latent = tf.expand_dims(latent, axis=1)
latent = tf.expand_dims(latent, axis=1)
latent_mask = tf.layers.dense(latent, filters, name="latent_mask")
zeros_mask = tf.zeros(
common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)
layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)
extra_loss = self.get_extra_loss(latent_mean, latent_std)
return layer, extra_loss
@registry.register_model
class NextFrameBasicStochasticDiscrete(
basic_deterministic.NextFrameBasicDeterministic):
"""Basic next-frame model with a tiny discrete latent."""
def inject_latent(self, layer, features, filters):
"""Inject a deterministic latent based on the target frame."""
| tensorflow.layers.dense | 3,349 |
import tensorflow as tf
train_op: op for training.
"""
# Compute gradients.
tf.scalar_summary(loss.op.name, loss)
optimizer = tf.train.AdagradOptimizer(FLAGS.learning_rate)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
| tensorflow.train.AdagradOptimizer | 3,350 |
import tensorflow as tf
height = shape[1]
width = shape[2]
channels = shape[3]
res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
res = tf.concat(
axis=2, values=[res, tf.zeros([batch_size, height, stride - 1, width, 1, channels])])
res = tf.concat(axis=4, values=[
res, tf.zeros([batch_size, height, stride, width, stride - 1, channels])
])
res = tf.reshape(res, [batch_size, stride * height, stride * width, channels])
return res
# random flip on a batch of images
def batch_random_flip(input_):
"""Simultaneous horizontal random flip."""
if isinstance(input_, (float, int)):
| tensorflow.reshape | 3,351 |
import tensorflow as tf
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
| tensorflow.io.decode_png | 3,352 |
import tensorflow as tf
num_classes=3, use_multiclass_scores=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32),
transformed_inputs[fields.InputDataFields.groundtruth_classes])
def test_use_multiclass_scores_when_not_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(image),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.multiclass_scores:
tf.placeholder(tf.float32),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
| tensorflow.constant | 3,353 |
import tensorflow as tf
self.a_grads = tf.gradients(loss_pg, self.pi_params)
self.c_grads = tf.gradients(loss_vf, self.vf_params)
| tensorflow.gradients | 3,354 |
import tensorflow as tf
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
if not _is_tensor(length):
padded_t = _set_dim_0(padded_t, length)
return padded_t
| tensorflow.concat | 3,355 |
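A hedged eager-mode sketch of the same first-dimension padding pattern, assuming TF 2.x and a `length` at least as large as the current size; `pad_first_dim` and the toy `boxes` tensor are illustrative.

```python
import tensorflow as tf

def pad_first_dim(t, length):
    # Pads dimension 0 of `t` with zeros up to `length`, keeping the remaining dims unchanged.
    pad_rows = length - tf.shape(t)[0]
    pad_shape = tf.concat([tf.expand_dims(pad_rows, 0), tf.shape(t)[1:]], axis=0)
    return tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], axis=0)

boxes = tf.ones([3, 4])
print(pad_first_dim(boxes, 5).shape)  # (5, 4)
```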
import tensorflow as tf
with tf.variable_scope(name) as scope:
if init_stddev <= 0.0:
init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
else:
init = tf.truncated_normal_initializer(stddev=init_stddev)
X = tf.layers.conv2d(X, out_channels, kernel_size=filtersize, strides=(stride, stride), padding="valid",
kernel_initializer=init)
if norm == 'I':
| tensorflow.truncated_normal_initializer | 3,356 |
import tensorflow as tf
hessians = [] #store the hessian for different epochs
residuals= [] #store the value of the residuals for different epochs
#gs = [] #store the value of the phase space factor for different epochs
if with_hessian: #if True, evaluate the Hessian and the residuals
hess = hessian(grads, tvars) #Hessian matrix
res = tf.subtract(an, Y) #residual error
#---------------------------Initialize evaluation metrics----------------------------------------------------
e_len = len(epoch_sample)
acc_train = [] #store train accuracy for each epoch
| tensorflow.subtract | 3,357 |
import tensorflow as tf
tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])
self.qh_len = tf.reshape(tf.reduce_sum(
tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])
N, PL, QL, CL, d, dc, nh = self._params()
if self.config.fix_pretrained_vector:
dc = self.char_mat.get_shape()[-1]
with tf.variable_scope("Input_Embedding_Layer"):
ch_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.ch), [N * PL * self.max_p_num, CL, dc])
qh_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.qh), [N * QL * self.max_p_num, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
ch_emb = conv(ch_emb, d,
bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None)
| tensorflow.nn.embedding_lookup | 3,358 |
import tensorflow as tf
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
if self.has_input:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 1)
| tensorflow.zeros | 3,359 |
import tensorflow as tf
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0]
- adj.sum()) * 2)
with tf.name_scope('optimizer'):
# Optimizer for Non-Variational Autoencoders
if model_name in ('gcn_ae', 'linear_ae', 'deep_gcn_ae'):
opt = OptimizerAE(preds = model.reconstructions,
labels = tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
validate_indices = False), [-1]),
pos_weight = pos_weight,
norm = norm)
# Optimizer for Variational Autoencoders
elif model_name in ('gcn_vae', 'linear_vae', 'deep_gcn_vae'):
opt = OptimizerVAE(preds = model.reconstructions,
labels = tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
validate_indices = False), [-1]),
model = model,
num_nodes = num_nodes,
pos_weight = pos_weight,
norm = norm)
# Normalization and preprocessing on adjacency matrix
adj_norm = preprocess_graph(adj)
adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0]))
# Initialize TF session
sess = tf.Session()
| tensorflow.sparse_tensor_to_dense | 3,360 |
import tensorflow as tf
with tf.variable_scope("loss"):
loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
| tensorflow.nn.softmax_cross_entropy_with_logits | 3,361 |
import tensorflow as tf
A boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])
return tf.equal(selected_indicator, 1)
def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5):
"""Subsamples minibatches to a desired balance of positives and negatives.
Arguments:
- *indicator*: boolean tensor of shape [N] whose True entries can be sampled.
- *sample_size*: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
| tensorflow.equal | 3,362 |
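A hedged TF 2.x sketch of the random indicator subsampling shown above; the project-specific `ops.indices_to_dense_vector` is replaced here by `tf.scatter_nd`, and `subsample_indicator` plus the toy mask are illustrative.

```python
import tensorflow as tf

def subsample_indicator(indicator, num_samples):
    # Randomly keeps at most `num_samples` True entries of a boolean vector.
    indices = tf.reshape(tf.random.shuffle(tf.where(indicator)), [-1])
    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
    dense = tf.scatter_nd(tf.expand_dims(selected, 1),
                          tf.ones_like(selected),
                          tf.shape(indicator, out_type=tf.int64))
    return tf.equal(dense, 1)

mask = tf.constant([True, False, True, True, False, True])
print(subsample_indicator(mask, 2))  # exactly two True entries, at random positions
```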
import tensorflow as tf
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
| tensorflow.nn.seq2seq.attention_decoder | 3,363 |
import tensorflow as tf
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
| tensorflow.shape | 3,364 |
import tensorflow as tf
def tf_kl_1d(q_mu, q_sigma, p_var=1.0):
p_var = tf.ones_like(q_sigma) if p_var is None else p_var
q_var = tf.square(q_sigma)
kl = 0.5 * (q_var / p_var + tf.square(q_mu) / p_var - 1 + tf.log(p_var / q_var))
return tf.reduce_sum(kl)
| tensorflow.square | 3,365 |
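A TF 2.x restatement of `tf_kl_1d` above with a quick numeric sanity check (`tf.log` became `tf.math.log`); the test values are illustrative.

```python
import numpy as np
import tensorflow as tf

def kl_1d(q_mu, q_sigma, p_var=1.0):
    # KL(N(q_mu, q_sigma^2) || N(0, p_var)) summed over all dimensions.
    q_var = tf.square(q_sigma)
    kl = 0.5 * (q_var / p_var + tf.square(q_mu) / p_var - 1.0 + tf.math.log(p_var / q_var))
    return tf.reduce_sum(kl)

# KL(N(0, 1) || N(0, 1)) is exactly zero; shifting the mean by 0.5 gives 0.5 * 0.5**2.
print(kl_1d(tf.constant(0.0), tf.constant(1.0)).numpy())                     # 0.0
print(np.isclose(kl_1d(tf.constant(0.5), tf.constant(1.0)).numpy(), 0.125))  # True
```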
from tensorflow.python.ops import math_ops
problem_type=ProblemType.CLASSIFICATION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
if proba:
return nn.softmax(logits)
else:
return math_ops.argmax(logits, 1)
def _default_eval_metrics(self):
if self._num_label_columns == 1:
return get_default_binary_metrics_for_eval(thresholds=[.5])
return {}
def get_eval_ops(self, features, logits, labels, metrics=None):
loss = self.loss(logits, labels, features)
| tensorflow.python.ops.math_ops.argmax | 3,366 |
import tensorflow as tf
@property
def is_training(self):
return self.hparams.mode == tf.estimator.ModeKeys.TRAIN
def infer(self, features, *args, **kwargs): # pylint: disable=arguments-differ
del args, kwargs
x = features["inputs"]
batch_size = common_layers.shape_list(x)[0]
features["targets"] = tf.zeros(shape=(batch_size, 1, 1, 1))
_, _ = self(features) # pylint: disable=not-callable
ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout]
var_scope = tf.variable_scope("glow/body", reuse=True)
# If eps=None, images are sampled from the prior.
with arg_scope(ops, init=False), var_scope:
predictions, _, _, _ = glow_ops.encoder_decoder(
"codec", self.z_sample, self.hparams, eps=None, reverse=True,
temperature=self.temperature)
return glow_ops.postprocess(predictions, self.hparams.n_bits_x)
def create_init_batch(self, features):
"""Returns a batch of size "hparams.init_batch_size" for initialization.
| tensorflow.variable_scope | 3,367 |
import tensorflow as tf
comp_len = block_num * block_len - sl
rep_tensor_comp = tf.concat([rep_tensor, tf.zeros([bs, comp_len, input_dim], tf.float32)], 1)
rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1)
| tensorflow.zeros | 3,368 |
import tensorflow as tf
Float giving dropout probability for weights (NOT keep probability).
training_only: bool
Boolean. If True (standard dropout), apply dropout only
during training. If False, apply dropout during inference as well.
Returns
-------
tf.Tensor:
A tensor with the same shape as the input tensor.
"""
if not dropout_prob:
return tensor # do nothing
keep_prob = 1.0 - dropout_prob
if training or not training_only:
tensor = tf.nn.dropout(tensor, keep_prob)
return tensor
def fully_connected_layer(tensor,
size=None,
weight_init=None,
bias_init=None,
name=None):
"""Fully connected layer.
Parameters
----------
tensor: tf.Tensor
| tensorflow.nn.dropout | 3,369 |
import tensorflow as tf
min_z = tf.cast(0.0 - labeled_sizes[i][2] / 2.0, dtype=tf.float32)
max_z = tf.cast(0.0 + labeled_sizes[i][2] / 2.0, dtype=tf.float32)
| tensorflow.cast | 3,370 |
import tensorflow as tf
)
x = self.__batch_norm("{}2c".format(bn_name_base), x)
x = tf.add(x, inputs)
return tf.nn.relu(x)
| tensorflow.nn.relu | 3,371 |
import tensorflow as tf
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
| tensorflow.argmax | 3,372 |
import tensorflow as tf
def testReducer(self):
with tf.device(self._test_device):
batch_size = 3
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
left_in = []
right_in = []
tracking = []
for _ in range(batch_size):
left_in.append(tf.random_normal((1, size * 2)))
right_in.append(tf.random_normal((1, size * 2)))
tracking.append(tf.random_normal((1, tracker_size * 2)))
out = reducer(left_in, right_in, tracking=tracking)
self.assertEqual(batch_size, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual((1, size * 2), out[0].shape)
def testReduceTreeLSTM(self):
with tf.device(self._test_device):
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
| tensorflow.random_normal | 3,373 |
import tensorflow as tf
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
| tensorflow.variable_scope | 3,374 |
import tensorflow as tf
fc1 = tf.reshape(tf.layers.dense(conv8,4*4*150,activation = tf.nn.relu),[-1,4,4,150])
concat1 = tf.concat([fc1, z],axis = 3)
# annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
print("###########################################################")
print(fc1)
# now to upscale to actual image size
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
deconv_shape2 = image_net["pool3"].get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
shape = tf.shape(image)
deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name="W_t3")
b_t3 = utils.bias_variable([3], name="b_t3")
conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))
| tensorflow.add | 3,375 |
import tensorflow as tf
variables = [p.free_state for p in self.params if not p.fixed]
variables = utils.unique(variables)
free_state = tf.concat(0, [tf.reshape(v, [-1]) for v in variables])
| tensorflow.reshape | 3,376 |
import tensorflow as tf
elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
in init_vars_name and num_of_group > 1):
tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name))
in init_vars_name and num_of_group > 1):
tvar_name = re.sub(r"/attention_\d+/", "/attention_1/",
six.ensure_str(name))
else:
tf.logging.warn("name %s does not get matched", name)
continue
# tf.logging.info("name %s match to %s", name, tvar_name)
if num_of_group > 0:
group_matched = False
for gid in range(1, num_of_group):
if (("/group_" + str(gid) + "/" in name) or
("/ffn_" + str(gid) + "/" in name) or
| tensorflow.logging.warn | 3,377 |
import tensorflow as tf
tag = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
tb_logdir = osp.join(base_dir, tag, 'tb')
save_dir = osp.join(base_dir, tag, 'train')
tf.io.gfile.makedirs(tb_logdir)
tf.io.gfile.makedirs(save_dir)
writer = tf.contrib.summary.create_file_writer(tb_logdir)
writer.set_as_default()
return writer, save_dir
| tensorflow.io.gfile.makedirs | 3,378 |
import tensorflow as tf
logits: Logits tensor, float - [batch_size, FLAGS.NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
| tensorflow.to_int64 | 3,379 |
import tensorflow as tf
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
outputs = tf.concat(outputs, 2)
self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)
self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))
self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))
self.loss = self.loss1 + self.loss2
| tensorflow.abs | 3,380 |
import tensorflow as tf
d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)
print("strides: \n", strides)
print("input shape", tensor_in_sizes)
print("filter shape", filter_in_sizes)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
with tf.device("/gpu:0"):
convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh)
convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh)
with tf.Session(config=config) as sess:
pd = sess.run(convd)
pf = sess.run(convf)
tf.reset_default_graph()
| tensorflow.device | 3,381 |
from tensorflow.python.ops import variable_scope as vs
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.variable_scope.variable_scope | 3,382 |
import tensorflow as tf
log("Loading training data from: {}".format(metadat_fpath))
log("Using model: Tacotron")
log(hparams_debug_string())
# Start by setting a seed for repeatability
tf.set_random_seed(hparams.tacotron_random_seed)
# Set up data feeder
coord = tf.train.Coordinator()
with tf.variable_scope("datafeeder") as scope:
feeder = Feeder(coord, metadat_fpath, hparams)
# Set up model:
global_step = tf.Variable(0, name="global_step", trainable=False)
model, stats = model_train_mode(args, feeder, hparams, global_step)
eval_model = model_test_mode(args, feeder, hparams, global_step)
# Embeddings metadata
| tensorflow.variable_scope | 3,383 |
import tensorflow as tf
def my_func(x, y):
return np.sinh(x) + np.cosh(y)
# scalar
with self.test_session():
x = tf.constant(1.0, tf.float32)
y = tf.constant(2.0, tf.float32)
z = tf.py_func(my_func, [x, y], [tf.float32])
self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))
# array
with self.test_session():
x = tf.constant([1.0, 2.0], tf.float64)
y = tf.constant([2.0, 3.0], tf.float64)
z = tf.py_func(my_func, [x, y], [tf.float64])
self.assertAllEqual(
z[0].eval(),
my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
# a bit exotic type (complex64)
with self.test_session():
x = tf.constant(1+2j, tf.complex64)
y = tf.constant(3+4j, tf.complex64)
z, = tf.py_func(my_func, [x, y], [tf.complex64])
self.assertAllClose(z.eval(), my_func(1+2j, 3+4j))
# a bit exotic function (rfft)
| tensorflow.constant | 3,384 |
import tensorflow as tf
rel_init = dense_maxnorm(rel_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Reshape rel_embed into square D x D matrices
rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))
# Reshape head_embed and tail_embed to be suitable for the matrix multiplication
head_embed_row = tf.expand_dims(head_embed, 1) # embeddings as row vectors
tail_embed_col = tf.expand_dims(tail_embed, 2) # embeddings as column vectors
head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)
# Output needs a squeeze into a 1d vector
raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col))
self.output, self.loss = self._create_output_and_loss(raw_output)
| tensorflow.reshape | 3,385 |
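A hedged TF 2.x sketch of the bilinear head-relation-tail scoring above; `tf.batch_matmul` no longer exists, so `tf.einsum` or batched `tf.matmul` is used instead, and the batch size and embedding dimension are illustrative.

```python
import tensorflow as tf

batch, dim = 4, 8
head = tf.random.normal([batch, dim])
rel = tf.random.normal([batch, dim * dim])
tail = tf.random.normal([batch, dim])

# Reshape each relation embedding into a D x D matrix and score head^T R tail per triple.
rel_square = tf.reshape(rel, [-1, dim, dim])
scores = tf.einsum("bi,bij,bj->b", head, rel_square, tail)

# Equivalent with batched tf.matmul (the modern replacement for tf.batch_matmul).
head_row = tf.expand_dims(head, 1)    # [batch, 1, D]
tail_col = tf.expand_dims(tail, 2)    # [batch, D, 1]
scores_mm = tf.squeeze(tf.matmul(tf.matmul(head_row, rel_square), tail_col))
```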
import tensorflow as tf
features["inputs"] = tf.expand_dims(features["inputs"], 1)
if len(features["inputs"].shape) < 5:
features["inputs"] = tf.expand_dims(features["inputs"], 4)
# Expand the inputs into the beam size.
| tensorflow.expand_dims | 3,386 |
import tensorflow as tf
drop_unused_features: bool) -> Dict[str, common_types.TensorType]:
"""Takes a dict of tensors representing raw features and transforms them."""
unbounded_raw_features, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self.transform_savedmodel_dir, raw_features))
if drop_unused_features:
graph = tf.compat.v1.get_default_graph()
graph_analyzer = graph_tools.InitializableGraphAnalyzer(
graph, raw_features,
[(t, False) for t in unbounded_raw_features.values()])
return {
name: feature
| tensorflow.compat.v1.get_default_graph | 3,387 |
import tensorflow as tf
_phase_train = _phase.assign(True)
_phase_infer = _phase.assign(False)
# TODO: move to ops
def _rank(x):
return len(x.get_shape())
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
binary_mask = tf.floor(random_tensor)
if normalize:
binary_mask = tf.reciprocal(keep_prob) * binary_mask
return binary_mask
def _global_keep_prob(keep_prob):
keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
return keep_prob
def layer(func):
class Layer(object):
| tensorflow.reciprocal | 3,388 |
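The _apply_dropout_mask helper above is the standard inverted-dropout construction: floor(keep_prob + U[0, 1)) is 1 with probability keep_prob and 0 otherwise, and multiplying by 1/keep_prob keeps the expected activation unchanged at train time. A minimal sketch of just that mask (TF 1.x API; the function name is illustrative):

import tensorflow as tf

def dropout_mask(shape, keep_prob=0.8):
    keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
    # 0/1 Bernoulli(keep_prob) mask
    binary_mask = tf.floor(keep_prob + tf.random_uniform(shape, dtype=tf.float32))
    # inverted dropout: rescale so E[mask * x] == x
    return tf.reciprocal(keep_prob) * binary_mask

masked = tf.random_uniform([2, 5]) * dropout_mask([2, 5], keep_prob=0.5)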
import tensorflow as tf
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
zero_context = tf.zeros(shape=tf.shape(attention_states[align_encoder_id][:,0])) # FIXME
with tf.variable_scope('decoder_{}'.format(decoder.name)):
| tensorflow.shape | 3,389 |
import tensorflow as tf
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
| tensorflow.logging.info | 3,390 |
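The padding loop above exists because a TPU runs every batch at exactly eval_batch_size examples, so the example count is rounded up to the next multiple and the extra PaddingInputExamples carry zero metric weight. The arithmetic, with made-up numbers:

eval_batch_size = 8
num_actual = 45                                                  # real examples
num_total = -(-num_actual // eval_batch_size) * eval_batch_size  # ceil -> 48
num_padding = num_total - num_actual                             # 3 fake examples
eval_steps = num_total // eval_batch_size                        # 6 steps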
import tensorflow as tf
prob = pareto.prob(x)
self.assertEqual(prob.shape, (6, 3))
self.assertAllClose(
self.evaluate(prob),
self._scipy_pareto(concentration_v, scale_v).pdf(x))
def testParetoLogCdf(self):
batch_size = 6
scale = tf.constant([3.] * batch_size)
scale_v = 3.
concentration = tf.constant([2.])
concentration_v = 2.
x = [3., 3.1, 4., 5., 6., 7.]
pareto = tfd.Pareto(concentration, scale)
log_cdf = pareto.log_cdf(x)
self.assertEqual(log_cdf.shape, (6,))
self.assertAllClose(
| tensorflow.constant | 3,391 |
import tensorflow as tf
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
self.loss = tf.reduce_mean(tower_losses)
tf.summary.scalar('loss', self.loss)
# Create optimizer ops
self.global_step = tf.Variable(0, trainable=False, name='global_step')
opt = tf.train.RMSPropOptimizer(self.config['learning_rate'])
with tf.control_dependencies(update_ops):
self.trainer = opt.apply_gradients(
gradvars, global_step=self.global_step)
def _eval_graph(self, data):
tower_metrics = self._gpu_tower(data, Mode.EVAL)
with tf.device('/cpu:0'):
self.metrics = {m: tf.reduce_mean(tf.stack([t[m] for t in tower_metrics]))
for m in tower_metrics[0]}
| tensorflow.control_dependencies | 3,392 |
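The inner loop above is the usual multi-tower pattern: every GPU produces (grad, var) pairs for the same shared variables, the per-variable gradients are averaged, and apply_gradients runs under the pending update ops. A minimal sketch of the averaging step (tower_grads is assumed to be a list of opt.compute_gradients() results, one per tower):

import tensorflow as tf

def average_gradients(tower_grads):
    averaged = []
    # zip(*tower_grads) groups the (grad, var) pairs that refer to the same variable
    for grads_and_var in zip(*tower_grads):
        grads = [g for g, _ in grads_and_var if g is not None]
        var = grads_and_var[0][1]
        avg = grads[0] if len(grads) == 1 else tf.multiply(tf.add_n(grads), 1.0 / len(grads))
        averaged.append((avg, var))
    return averaged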
import tensorflow as tf
monitored_values = {
value.name.split(':')[0]: value # Get rid of ':0' from name
for value in monitored_values
}
for (name, value) in monitored_values.items():
tf.summary.scalar(name, value)
summary_op = tf.summary.merge_all()
return (summary_op, monitored_values)
def _make_var(self, name, shape, dtype=None, no_reg=False, initializer=None, init_constant=None, trainable=True):
if initializer is None:
if init_constant is not None:
initializer = tf.constant_initializer(init_constant, dtype=tf.float32)
| tensorflow.summary.merge_all | 3,393 |
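The row above attaches one tf.summary.scalar per monitored value and merges them into a single op; at train time that op is evaluated and the result written by a FileWriter. A minimal sketch of the write side (TF 1.x API; the log directory is illustrative):

import tensorflow as tf

loss = tf.reduce_mean(tf.random_uniform([16]))
tf.summary.scalar('loss', loss)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    summary = sess.run(summary_op)
    writer.add_summary(summary, global_step=0)
    writer.close()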
import tensorflow as tf
tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,self.Dale_rec)),
axes=1) * \
tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt))
denom = dxt
# sum over hidden units
num = tf.reduce_sum(tf.square(num), axis=2)
denom = tf.reduce_sum(tf.square(denom), axis=2)
bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num))
nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20), 1.0 * tf.ones_like(num), 1.0 * tf.zeros_like(num)), axis=1)
| tensorflow.square | 3,394 |
import tensorflow as tf
def test_gray_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 1]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
| tensorflow.placeholder | 3,395 |
import tensorflow as tf
num_target_frames = 1
target_value_shape_suffix = [num_target_frames]
if distributional_size > 1:
target_value_shape_suffix = [num_target_frames, distributional_size]
features = {
"inputs": observations,
"epoch": tf.constant(epoch + 1),
"input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
"input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
"targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),
"target_action": tf.zeros(
obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
| tensorflow.constant | 3,396 |
import tensorflow as tf
else:
axis = [2, 3]
ch = shape[1]
new_shape = [1, ch, 1, 1]
if ch is None:
raise ValueError("Input of instancebn requires a known channel!")
mean, var = tf.nn.moments(inputdata, axis, keep_dims=True)
if not use_affine:
return tf.divide(inputdata - mean, tf.sqrt(var + epsilon), name='output')
beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))
gamma = tf.reshape(gamma, new_shape)
return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)
@staticmethod
def dropout(inputdata, keep_prob, noise_shape=None, name=None):
"""
:param name:
:param inputdata:
:param keep_prob:
:param noise_shape:
:return:
"""
| tensorflow.constant_initializer | 3,397 |
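The instancebn branch above normalizes each sample over its spatial axes and then applies learned per-channel gamma/beta. A compact NHWC-only sketch of the same computation (variable names follow the snippet; epsilon is illustrative):

import tensorflow as tf

def instance_norm(x, epsilon=1e-5):
    # x: [N, H, W, C]; per-sample, per-channel statistics over H and W
    ch = x.get_shape().as_list()[-1]
    mean, var = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
    beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer(0.0))
    gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))
    return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)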
from tensorflow.core.framework import op_def_pb2
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
def _tensor_to_argdef(t, name=None, used_names=None):
"""Convert tensor t to an argdef, with a specified name or a unique name."""
arg = op_def_pb2.OpDef.ArgDef()
if name is None:
arg.name = _make_argname_from_tensor_name(t.name)
if used_names is not None:
if arg.name in used_names:
i = 0
| tensorflow.core.framework.op_def_pb2.OpDef.ArgDef | 3,398 |
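The helper above derives a legal argument name from a tensor name: a trailing ':0' is dropped and any other output index is mangled with '_o'. A couple of illustrative inputs and outputs:

import re

def make_argname_from_tensor_name(name):
    return re.sub(":0$", "", name).replace(":", "_o")

assert make_argname_from_tensor_name("dense/BiasAdd:0") == "dense/BiasAdd"
assert make_argname_from_tensor_name("split:1") == "split_o1"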
import tensorflow as tf
features = {
d.input_ids: tf.io.VarLenFeature(tf.int64),
d.token_type_ids: tf.io.VarLenFeature(tf.int64),
d.attention_mask: tf.io.VarLenFeature(tf.int64),
d.labels: tf.io.VarLenFeature(tf.int64),
}
dataset = dataset.map(
lambda x: tf.io.parse_example(x, features),
num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
dataset = dataset.map(
lambda x: (
tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32),
tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32),
| tensorflow.io.parse_example | 3,399 |
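The pipeline above parses batched, serialized tf.train.Example protos with VarLenFeature specs, which come back as SparseTensors and are then densified and cast. A minimal TF 2.x sketch of that step (feature names are illustrative):

import tensorflow as tf

features = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "labels": tf.io.VarLenFeature(tf.int64),
}

def parse_batch(serialized_batch):
    parsed = tf.io.parse_example(serialized_batch, features)
    # VarLenFeature -> SparseTensor; densify and cast for the model
    return {k: tf.cast(tf.sparse.to_dense(v), tf.int32) for k, v in parsed.items()}

# dataset = tf.data.TFRecordDataset(files).batch(32).map(parse_batch)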