seed (string, length 25–2.89k) | seed_api (string, length 14–102) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
self.reward = self.valid_acc
if self.entropy_weight is not None:
self.reward += self.entropy_weight * self.sample_entropy
self.sample_log_prob = tf.reduce_sum(self.sample_log_prob)
self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
baseline_update = tf.assign_sub(
self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))
with tf.control_dependencies([baseline_update]):
self.reward = tf.identity(self.reward)
self.loss = self.sample_log_prob * (self.reward - self.baseline)
self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="train_step")
tf_variables = [var for var in tf.trainable_variables() if var.name.startswith(self.name)]
print("-" * 80)
for var in tf_variables:
print(var)
self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
self.loss,
tf_variables,
self.train_step,
clip_mode=self.clip_mode,
grad_bound=self.grad_bound,
l2_reg=self.l2_reg,
| tensorflow.Variable | 13,600 |
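The row above keeps a non-trainable baseline variable and nudges it toward the current reward before weighting the sampled log-probability (a REINFORCE-style controller update). A minimal sketch of that moving-average baseline, using TF 1.x-style ops and hypothetical names (`reward`, `bl_dec`) that are not part of the row:

```python
import tensorflow as tf

# Sketch only: reward and bl_dec are assumed stand-ins, not taken from the row above.
reward = tf.placeholder(tf.float32, [], name="reward")
bl_dec = 0.99  # baseline decay rate (assumed value)
baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)

# baseline <- baseline - (1 - bl_dec) * (baseline - reward)
#           = bl_dec * baseline + (1 - bl_dec) * reward   (an exponential moving average)
baseline_update = tf.assign_sub(baseline, (1 - bl_dec) * (baseline - reward))
```

Ordering this update before the loss with `tf.control_dependencies`, as the row does, means the advantage `reward - baseline` is computed against a baseline that tracks recent rewards.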
import tensorflow as tf
method=2)
tf.summary.image('Compare/gtboxes_q_gpu:%d' % i, gtboxes_in_img_q)
gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(
img_batch=tf.expand_dims(img[0, :, :, :], axis=0),
boxes=gtboxes_and_label_h[0, :, :-1],
labels=gtboxes_and_label_h[0, :, -1],
method=0)
| tensorflow.expand_dims | 13,601 |
import tensorflow as tf
ls = [-1] + l.get_shape().as_list()[1:]
xs = ls[:-1] + [3]
# unpack parameters
logit_probs = l[:, :, :, :nr_mix]
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
x1 = tf.minimum(tf.maximum(
x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
x2 = tf.minimum(tf.maximum(
x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
| tensorflow.nn.tanh | 13,602 |
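The `tf.argmax(logit_probs - tf.log(-tf.log(u)))` line above is the Gumbel-max trick: adding Gumbel(0, 1) noise to unnormalized logits and taking the argmax yields a sample from the corresponding softmax distribution, which is how the mixture indicator is drawn without an explicit softmax. A small NumPy sketch of the same idea, with assumed shapes:

```python
import numpy as np

rng = np.random.default_rng(0)
logit_probs = rng.normal(size=(4, 5))                 # [batch, nr_mix], hypothetical shape
u = rng.uniform(1e-5, 1.0 - 1e-5, size=logit_probs.shape)
gumbel = -np.log(-np.log(u))                          # Gumbel(0, 1) noise
sample = np.argmax(logit_probs + gumbel, axis=-1)     # distributed as softmax(logit_probs)
```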
import tensorflow as tf
@pytest.mark.parametrize('white', [True, False])
def test_diags(session_tf, white, mu, sqrt_diag, K):
"""
The covariance of q(x) can be Cholesky matrices or diagonal matrices.
Here we make sure the behaviours overlap.
"""
# the chols are diagonal matrices, with the same entries as the diag representation.
chol_from_diag = tf.stack([tf.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M
# run
kl_diag = gauss_kl(mu, sqrt_diag, K if white else None)
kl_dense = gauss_kl(mu, chol_from_diag, K if white else None)
np.testing.assert_allclose(kl_diag.eval(), kl_dense.eval())
@pytest.mark.parametrize('diag', [True, False])
| tensorflow.diag | 13,603 |
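The test above builds diagonal Cholesky factors by looping over columns of `sqrt_diag` with `tf.diag` and stacking the results. A minimal sketch of the same N x M x M construction with the batched `tf.linalg.diag` (the TF 2.x spelling); the sizes here are assumed:

```python
import numpy as np
import tensorflow as tf

sqrt_diag = np.random.rand(3, 2).astype(np.float32)   # M x N, hypothetical sizes
# One diagonal M x M matrix per column of sqrt_diag, stacked into N x M x M.
chol_from_diag = tf.linalg.diag(tf.transpose(sqrt_diag))
```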
import tensorflow as tf
name='y')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
| tensorflow.placeholder | 13,604 |
import tensorflow as tf
"MSE": mse, "eval_loss": loss,}
elif task_name == "cola":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Matthew's correlations for STS-B."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
predictions, label_ids, weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
predictions, label_ids, weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
predictions, label_ids, weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
predictions, label_ids, weights=is_real_example)
# Compute the Matthews correlation coefficient
| tensorflow.metrics.true_negatives | 13,605 |
import tensorflow as tf
train = dataset['train'].map(
load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test = dataset['test'].map(load_image_test)
train_dataset = train.cache().shuffle(1000).batch(batch_size).repeat()
train_dataset = train_dataset.prefetch(
buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test.batch(batch_size)
config = hparams_config.get_efficientdet_config('efficientdet-d0')
config.heads = ['segmentation']
model = efficientdet_keras.EfficientDetNet(config=config)
model.build((1, 512, 512, 3))
model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
val_subsplits = 5
val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
model.fit(
train_dataset,
epochs=20,
steps_per_epoch=steps_per_epoch,
validation_steps=val_steps,
validation_data=test_dataset,
callbacks=[])
model.save_weights('./test/segmentation')
| tensorflow.keras.losses.SparseCategoricalCrossentropy | 13,606 |
import tensorflow as tf
Return:
A tuple of `SparseTensor` (neighbors, weights, types).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`.
"""
sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
tf.SparseTensor(*sp_returns[6:])
def get_sorted_full_neighbor(nodes, edge_types):
"""
Args:
| tensorflow.SparseTensor | 13,607 |
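The triple unpacking above works because a `tf.SparseTensor` is fully described by three tensors: indices, values, and a dense shape. A minimal self-contained sketch of that constructor with toy values (not taken from the graph library):

```python
import tensorflow as tf

indices = [[0, 0], [1, 2]]                        # positions of the non-zero entries
values = tf.constant([10, 20], dtype=tf.int64)    # e.g. neighbor ids, toy values
dense_shape = [2, 4]                              # nodes x max neighbors (assumed)
neighbors = tf.SparseTensor(indices, values, dense_shape)
```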
from tensorflow.python.client import device_lib
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from tensorflow.python.lib.io import file_io
import argparse
from tensorflow.python.client import device_lib
print("CHECK GPU USAGE!")
print(device_lib.list_local_devices())
K.tensorflow_backend._get_available_gpus()
img_height = 300 # Height of the model input images
img_width = 300 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
| tensorflow.python.client.device_lib.list_local_devices | 13,608 |
import tensorflow as tf
inputs_ = encoder_inputs_
inputs_ = tf.nn.convolution(inputs_, filter=filter_, padding='VALID')
inputs.append(inputs_)
encoder_inputs_ = tf.concat(inputs, axis=2)
# if encoder.convolution_activation.lower() == 'relu':
encoder_inputs_ = tf.nn.relu(encoder_inputs_)
if encoder.maxout_stride:
if encoder.binary:
raise NotImplementedError
stride = encoder.maxout_stride
k = tf.to_int32(tf.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler
pad = tf.zeros([batch_size, k, tf.shape(encoder_inputs_)[2]])
encoder_inputs_ = tf.concat([encoder_inputs_, pad], axis=1)
encoder_inputs_ = tf.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX',
padding='VALID', strides=[stride])
encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride))
if encoder.highway_layers:
x = encoder_inputs_
for j in range(encoder.highway_layers):
size = x.shape[2].value
with tf.variable_scope('highway_{}'.format(j + 1)):
g = tf.layers.dense(x, size, activation=tf.nn.sigmoid, use_bias=True, name='g')
| tensorflow.ceil | 13,609 |
import tensorflow as tf
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
| tensorflow.layers.dense | 13,610 |
import tensorflow as tf
z2 = tf.where(x2 > self.epsilon, z2, x2)
ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj))
return z2, tf.math.reduce_sum(ldj, axis=[1,2,3])
def _inverse(self, x1, z2, **kwargs):
params = self.parameterizer(x1)
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
x2 = tf.where(z2 > self.epsilon, x2, z2)
ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj))
return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])
def half_gaussianize(x, log_sigmas, inverse=tf.constant(False)):
if inverse:
| tensorflow.constant | 13,611 |
import tensorflow as tf
mask = tf.cast(tf.not_equal(target, tf.zeros_like(target)), dtype=tf.float32)
out = cross_entropy * mask
return out
@layer
def sigmoid_cross_entropy_layer(tensor, target, **opts):
out = tf.nn.sigmoid_cross_entropy_with_logits(logits=tensor, labels=target)
return out
@layer
def mean_loss_by_example_layer(tensor, sequence_length, **opts):
loss = tf.div(
tf.reduce_sum(tensor, axis=1),
tf.cast(sequence_length, dtype=tf.float32)
)
out = tf.reduce_mean(loss)
tf.summary.scalar('cost', out)
return out
@layer
def conv1d_layer(tensor, dilation_rate=1, **opts):
raise NotImplementedError
@layer
def residual_layer(tensor, **opts):
raise NotImplementedError
| tensorflow.cast | 13,612 |
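`mean_loss_by_example_layer` above sums the per-token loss over time, divides by each sequence's true length, and only then averages over the batch, so short sequences are not under-weighted. A minimal sketch with toy values:

```python
import tensorflow as tf

token_loss = tf.constant([[0.5, 0.5, 0.0],
                          [1.0, 0.0, 0.0]])        # [batch, time], toy values
sequence_length = tf.constant([2, 1])              # true lengths before padding
per_example = tf.reduce_sum(token_loss, axis=1) / tf.cast(sequence_length, tf.float32)
mean_loss = tf.reduce_mean(per_example)            # (1.0/2 + 1.0/1) / 2 = 0.75
```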
import tensorflow as tf
w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes], initializer=initializer,
regularizer=regularizer)
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(1.0))
self.fc3 = tf.matmul(self.fc2, w) + b
# Calculate Mean cross-entropy loss
with tf.name_scope("loss"):
self.predictions = tf.argmax(self.fc3, 1, name="predictions")
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.loss = tf.reduce_mean(losses) + sum(regularization_losses)
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| tensorflow.nn.softmax_cross_entropy_with_logits | 13,613 |
import tensorflow as tf
W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
b7 = utils.bias_variable([4096], name="b7")
conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
relu7 = tf.nn.relu(conv7, name="relu7")
if FLAGS.debug:
utils.add_activation_summary(relu7)
| tensorflow.nn.relu | 13,614 |
import tensorflow as tf
# Support for versions of TensorFlow before 0.7.0
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return deconv, w, biases
else:
return deconv
| tensorflow.nn.bias_add | 13,615 |
import tensorflow as tf
l1=tf.nn.relu(l1)
out = tf.matmul(l1, self.w2)+self.b2
| tensorflow.matmul | 13,616 |
import tensorflow as tf
if self.dale_ratio:
new_state = (1-self.alpha) * state \
+ self.alpha * (
tf.matmul(
tf.nn.relu(state),
tf.matmul(
tf.abs(self.W_rec) * self.rec_Connectivity,
self.Dale_rec, name="in_1"),
transpose_b=True, name="1")
+ tf.matmul(
rnn_in,
tf.abs(self.W_in) * self.input_Connectivity,
transpose_b=True, name="2")
+ self.b_rec)\
+ np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\
* tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
else:
new_state = ((1-self.alpha) * state) \
+ self.alpha * (
tf.matmul(
tf.nn.relu(state),
self.W_rec * self.rec_Connectivity,
| tensorflow.abs | 13,617 |
import tensorflow as tf
Args:
x: A `Tensor` or `CompositeTensor`.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
A `Tensor`. Has the same type as `x`.
Raises:
TypeError: If the type of `x` is not supported.
"""
with tf.compat.v1.name_scope(name, 'max'):
return _min_and_max(x, reduce_instance_dims, name)[1]
def _min_and_max(x: common_types.TensorType,
reduce_instance_dims: bool = True,
name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes the min and max of the values of a `Tensor` or `CompositeTensor`.
In the case of a `CompositeTensor`, missing values will be filled in the return
value: for floats NaN is used, and for other dtypes the min is used.
| tensorflow.compat.v1.name_scope | 13,618 |
import tensorflow as tf
weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
weighted_average.append(weighted_average_)
weighted_average = tf.concat(weighted_average, axis=1)
weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
# Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
wp = get_variable('Wp', [state_size, state_size])
vp = get_variable('vp', [state_size, 1])
pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
| tensorflow.expand_dims | 13,619 |
import tensorflow as tf
hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator))
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| tensorflow.estimator.EvalSpec | 13,620 |
import tensorflow as tf
# Remove episode indices.
num_episodes = tf.count_nonzero(
input_tensor=tf.gather(params=self.terminal_memory, indices=indices),
axis=0,
dtype=util.tf_dtype('int')
)
num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)
assignment = tf.assign(
ref=self.episode_indices[:self.episode_count - num_episodes],
value=self.episode_indices[num_episodes: self.episode_count]
)
# Decrement episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)
# Assign new observations.
with tf.control_dependencies(control_inputs=(assignment,)):
assignments = list()
for name in sorted(states):
assignments.append(tf.scatter_update(
ref=self.states_memory[name],
indices=indices,
updates=states[name]
))
for name in sorted(internals):
assignments.append(tf.scatter_update(
| tensorflow.control_dependencies | 13,621 |
import tensorflow as tf
observ_shape = self._parse_shape(self._batch_env.observation_space)
print(observ_shape)
observ_dtype = self._parse_dtype(self._batch_env.observation_space)
print(observ_dtype)
action_shape = self._parse_shape(self._batch_env.action_space)
print(action_shape)
action_dtype = self._parse_dtype(self._batch_env.action_space)
print(action_dtype)
with tf.variable_scope('env_temporary'):
self._observ = tf.Variable(
lambda: tf.zeros(batch_dims + observ_shape, observ_dtype),
name='observ', trainable=False)
self._action = tf.Variable(
lambda: tf.zeros(batch_dims + action_shape, action_dtype),
name='action', trainable=False)
self._reward = tf.Variable(
lambda: tf.zeros(batch_dims, tf.float32),
name='reward', trainable=False)
self._done = tf.Variable(
lambda: tf.cast(tf.ones(batch_dims), tf.bool),
| tensorflow.zeros | 13,622 |
import tensorflow as tf
def testSliceVariable(self):
test_dir = self._TestDir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = tf.train.Saver({"first": v1, "second": v2})
tf.initialize_all_variables().run()
| tensorflow.Variable | 13,623 |
import tensorflow as tf
to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
return to_tf
def _softmax_layer(self, bottom, name):
if name == 'rpn_cls_prob_reshape':
input_shape = tf.shape(bottom)
# Use of -1 in tf.reshape(): -1 marks a dimension whose size is unknown, letting TensorFlow infer it from the original shape and the other dimensions
# Each row holds the foreground/background scores of one anchor: first the first anchor type for every point, then the second anchor type for every point, and so on
bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
return tf.reshape(reshaped_score, input_shape) # [1,none,none,2]
return tf.nn.softmax(bottom, name=name)
def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name):
rois, rpn_scores = tf.py_func(proposal_top_layer,
[rpn_cls_prob, rpn_bbox_pred, self._im_info,
| tensorflow.reshape | 13,624 |
import tensorflow as tf
num_users = params["num_users"]
num_items = params["num_items"]
users = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=num_users)
items = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=num_items)
if is_training:
valid_point_mask = tf.cast(tf.random_uniform(
[batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool)
labels = tf.cast(tf.random_uniform(
[batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.VALID_POINT_MASK: valid_point_mask,
}, labels
| tensorflow.random_uniform | 13,625 |
import tensorflow as tf
normalizers_copy_parameters = normalizers_copy.parameters(trainable=False, non_trainable=True)
copy_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_copy_parameters, normalizers_parameters)])
| tensorflow.assign | 13,626 |
from tensorflow.python.ops import math_ops
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
| tensorflow.python.ops.math_ops.cast | 13,627 |
import tensorflow as tf
import time
from itertools import product, combinations
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from plotting import newfig, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
np.random.seed(1234)
tf.set_random_seed(1234)
class PhysicsInformedNN:
from setup_PINN_ns import __init__
from initialize_PINN_ns import initialize_NN
from xavier_init_ns import xavier_init
from def_NN_ns import neural_net
from def_Net_NS import net_NS
from func_call_ns import callback
| tensorflow.set_random_seed | 13,628 |
import tensorflow as tf
# iterations will make the summary writer only flush the data to storage
# once per loop.
with (tf.contrib.summary.create_file_writer(
params['model_dir'],
| tensorflow.contrib.summary.create_file_writer | 13,629 |
import tensorflow as tf
col_layers[c].append(self.top_layer)
col_layer_sizes[c].append(self.top_size)
catdim = 3 if self.data_format == 'NHWC' else 1
self.top_layer = tf.concat([layers[-1] for layers in col_layers], catdim)
self.top_size = sum([sizes[-1] for sizes in col_layer_sizes])
return self.top_layer
def residual(self, nout, net, scale=1.0):
inlayer = self.top_layer
net(self)
self.conv(nout, 1, 1, activation=None)
self.top_layer = tf.nn.relu(inlayer + scale * self.top_layer)
def spatial_mean(self, keep_dims=False):
name = 'spatial_mean' + str(self.counts['spatial_mean'])
self.counts['spatial_mean'] += 1
axes = [1, 2] if self.data_format == 'NHWC' else [2, 3]
self.top_layer = tf.reduce_mean(
self.top_layer, axes, keep_dims=keep_dims, name=name)
return self.top_layer
def dropout(self, keep_prob=0.5, input_layer=None):
| tensorflow.nn.relu | 13,630 |
import tensorflow as tf
self._train_op = tf.get_collection_ref('train_op')[0]
self._lr = tf.get_collection_ref('lr')[0]
self._new_lr = tf.get_collection_ref('new_lr')[0]
self._lr_update = tf.get_collection_ref('lr_update')[0]
rnn_params = tf.get_collection_ref('rnn_params')
if self._cell and rnn_params:
params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
self._cell,
self._cell.params_to_canonical,
self._cell.canonical_to_params,
rnn_params,
base_variable_scope='Model/RNN')
| tensorflow.contrib.cudnn_rnn.RNNParamsSaveable | 13,631 |
import tensorflow as tf
class ModelOwner:
LEARNING_RATE = 0.1
ITERATIONS = 60000 // 30
def __init__(self, player_name):
self.player_name = player_name
with tf.device(tfe.get_config().get_player(player_name).device_name):
self._initialize_weights()
def _initialize_weights(self):
with tf.name_scope('parameters'):
self.w0 = tf.Variable(tf.random_normal([28 * 28, 512]))
self.b0 = tf.Variable(tf.zeros([512]))
self.w1 = tf.Variable(tf.random_normal([512, 10]))
self.b1 = tf.Variable(tf.zeros([10]))
def _build_model(self, x, y):
w0 = self.w0.read_value()
b0 = self.b0.read_value()
w1 = self.w1.read_value()
b1 = self.b1.read_value()
params = (w0, b0, w1, b1)
| tensorflow.name_scope | 13,632 |
import tensorflow as tf
tgt1 = tf.gather(tgt, even)
tgt2 = tf.gather(tgt, odd)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
# loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small))
loss = tf.reduce_mean(loss)
| tensorflow.where | 13,633 |
import tensorflow as tf
def _scipy_pareto(self, concentration, scale):
# In scipy pareto is defined with scale = 1, so we need to scale.
return stats.pareto(concentration, scale=scale)
def testParetoShape(self):
scale = tf.constant([2.] * 5)
concentration = tf.constant([2.] * 5)
pareto = tfd.Pareto(concentration, scale)
self.assertEqual(self.evaluate(pareto.batch_shape_tensor()), (5,))
| tensorflow.constant | 13,634 |
import tensorflow as tf
return samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
| tensorflow.expand_dims | 13,635 |
import tensorflow as tf
output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
if output_dtype is None:
raise TypeError('Tensor type %r is not supported' % x.dtype)
if not reduce_instance_dims and isinstance(x, tf.RaggedTensor):
raise NotImplementedError(
'Elementwise mean_and_var does not support RaggedTensors.')
with tf.compat.v1.name_scope('mean_and_var'):
x = tf.cast(x, output_dtype)
x_count, x_mean, x_variance = (
tf_utils.reduce_batch_count_mean_and_var(x, reduce_instance_dims))
combine_inputs = _WeightedMeanAndVarAccumulator(
count=x_count,
mean=x_mean,
variance=x_variance,
| tensorflow.cast | 13,636 |
import tensorflow as tf
tfrecord_writer = tf.python_io.TFRecordWriter(record_filename)
| tensorflow.python_io.TFRecordWriter | 13,637 |
import tensorflow as tf
use_spectral_norm,
is_training,
None,
conv_dims=2)
def hw_flatten(x):
return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
# N = h * w
s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]
beta = tf.nn.softmax(s, axis=-1) # attention map
| tensorflow.reshape | 13,638 |
from tensorflow.python.ops import array_ops
quantizer: `Quantizer` used to quantize tensors.
**kwargs: Additional keyword arguments to be passed to the keras layer.
"""
super(VitisGlobalAveragePooling2D, self).__init__(**kwargs)
def build(self, input_shape):
super(VitisGlobalAveragePooling2D, self).build(input_shape)
def call(self, inputs):
outputs = super(VitisGlobalAveragePooling2D, self).call(inputs)
# Simulate DPU behavior of AvgPooling
input_shape = array_ops.shape(inputs)
rescale_factor = _get_avgpool_scale(input_shape[1], input_shape[2])
if rescale_factor != 1.0:
outputs *= rescale_factor
return outputs
@register_keras_serializable(package='Vitis', name='AveragePooling2D')
class VitisAveragePooling2D(tf.keras.layers.AveragePooling2D):
"""Vitis version of AveragePooling2D layer.
| tensorflow.python.ops.array_ops.shape | 13,639 |
import tensorflow as tf
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
| tensorflow.tensordot | 13,640 |
import tensorflow as tf
sample_grid = tf.tile(
tf.linspace(float(z_near), float(z_far), depth), [width * height])
z_t = tf.reshape(sample_grid, [height, width, depth])
z_t = tf.transpose(z_t, [2, 0, 1])
z_t = 1 / z_t
d_t = 1 / z_t
x_t /= z_t
y_t /= z_t
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
d_t_flat = tf.reshape(d_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0)
return grid
def _transform(theta, input_dim, out_size, z_near, z_far):
with tf.variable_scope('_transform'):
num_batch = input_dim.get_shape().as_list()[0]
num_channels = input_dim.get_shape().as_list()[4]
| tensorflow.reshape | 13,641 |
import tensorflow as tf
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
shape=[2],
initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
return F, KL | tensorflow.concat | 13,642 |
from tensorflow.python.framework import constant_op
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
"""Tests only dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
| tensorflow.python.framework.constant_op.constant | 13,643 |
import tensorflow as tf
conv, [1, 1, max_chars-width+1, 1],
[1, 1, 1, 1], 'VALID')
# activation
conv = activation(conv)
conv = tf.squeeze(conv, squeeze_dims=[2])
convolutions.append(conv)
return tf.concat(convolutions, 2)
embedding = make_convolutions(self.char_embedding)
# for highway and projection layers
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
| tensorflow.concat | 13,644 |
import tensorflow as tf
self.k = 16
self.g_dim = 256
self.c = 10
self.vf_hidden_size = 128 # for value function network
self.alpha = 0.5 # for build loss
self.batch_processor = FeudalBatchProcessor(self.c)
self.build_model() # build feudal policy model
with tf.name_scope('local_grad'):
grads = tf.gradients(self.loss, self.var_list)
grads, _ = tf.clip_by_global_norm(grads, 40)
with tf.name_scope('sync'):  # synchronize the worker net with the global net
with tf.name_scope('pull'):  # fetch the global parameters and copy them to the local net
self.pull_params_op = tf.group(*[v1.assign(v2)
for v1, v2 in zip(self.var_list, globalAC.var_list)])
| tensorflow.name_scope | 13,645 |
import tensorflow as tf
max_iter=max_iter,
model_type=model_type,
opt=opt)
self.rel_maxnorm_mult = rel_maxnorm_mult
def _create_model(self, train_triples):
# Count unique items to determine embedding matrix sizes
entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
rel_cnt = len(set(train_triples[:,1]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
# Embedding variables for all entities and relationship types
entity_embedding_shape = [entity_cnt, self.embedding_size]
# Relationship embeddings will be stored in flattened format to make
# applying maxnorm constraints easier
rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)
if self.maxnorm is not None:
# Ensure maxnorm constraints are initially satisfied
entity_init = dense_maxnorm(entity_init, self.maxnorm)
rel_init = dense_maxnorm(rel_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Reshape rel_embed into square D x D matrices
rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))
# Reshape head_embed and tail_embed to be suitable for the matrix multiplication
| tensorflow.truncated_normal | 13,646 |
import tensorflow as tf
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.H)
for t in range(self.T):
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x[:,t,:], context]), state=[c, h])
logits = self._decode_lstm(x[:,t,:], h, context, dropout=self.dropout, reuse=(t!=0))
loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=captions_out[:, t]) * mask[:, t])
if self.alpha_c > 0:
alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L)
alphas_all = tf.reduce_sum(alphas, 1) # (N, L)
| tensorflow.variable_scope | 13,647 |
import tensorflow as tf
'weight_decay', 1e-5, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'mse_weight', 1., 'The weight of the MSE loss term.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')#1e-3
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'warmup_learning_rate', 0.00001,
| tensorflow.app.flags.DEFINE_float | 13,648 |
import tensorflow as tf
class Linear(object) :
def __init__(self,name,input_dim,output_dim,stddev=0.02) :
with tf.variable_scope(name) :
self.w = tf.get_variable('w',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(0.0))
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b
else :
return tf.matmul(input_var,w)+b
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormLinear(object):
def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) :
with tf.variable_scope(name) :
self.v = tf.get_variable('v',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
| tensorflow.shape | 13,649 |
import tensorflow as tf
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_defun(self):
self._benchmark_eager_train(
"eager_train", MockIterator, device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device("/device:CPU:0"):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
"eager_train_dataset_with_defun",
make_iterator,
device_and_data_format(),
defun=True)
if __name__ == "__main__":
tf.enable_eager_execution()
| tensorflow.data.Dataset.from_tensors | 13,650 |
import tensorflow as tf
# pylint: enable=g-bad-import-order
def _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size):
"""Generate a fake batch of SNLI data for testing."""
with tf.device("cpu:0"):
labels = tf.random_uniform([batch_size], minval=1, maxval=4, dtype=tf.int64)
prem = tf.random_uniform(
(sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
prem_trans = tf.constant(np.array(
[[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,
2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,
3, 2, 2]] * batch_size, dtype=np.int64).T)
hypo = tf.random_uniform(
| tensorflow.random_uniform | 13,651 |
import tensorflow as tf
Args:
xs: 4-D `tensor` [batch_size, height, width, channels], input
Returns:
a `float` decov loss
"""
with tf.name_scope(name):
x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
m = tf.reduce_mean(x, 0, True)
z = tf.expand_dims(x - m, 2)
corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
corr_frob_sqr = tf.reduce_sum(tf.square(corr))
corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
return loss
def center_loss(features, label, alpha, num_classes, name='center_loss'):
"""Center loss based on the paper "A Discriminative Feature Learning Approach
for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf)
| tensorflow.transpose | 13,652 |
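The decov loss above is the DeCov penalty: build the batch covariance of the activations, then penalize its off-diagonal mass as 0.5 * (||C||_F^2 - ||diag(C)||^2). A NumPy sketch of the same computation with assumed shapes:

```python
import numpy as np

x = np.random.randn(8, 4)                               # [batch, features], toy shape
z = x - x.mean(axis=0, keepdims=True)                   # center per feature
corr = (z[:, :, None] * z[:, None, :]).mean(axis=0)     # batch covariance, [features, features]
loss = 0.5 * ((corr ** 2).sum() - (np.diag(corr) ** 2).sum())
```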
from tensorflow.python.framework import ops
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `ignore_mask` is not `None` and its shape doesn't match
`predictions`, or if `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('precision', k, class_id=class_id)
with ops.name_scope(name, default_name,
(predictions, labels, ignore_mask, weights)) as scope:
_, top_k_idx = nn.top_k(predictions, k)
return _streaming_sparse_precision_at_k(
top_k_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
ignore_mask=ignore_mask,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
| tensorflow.python.framework.ops.name_scope | 13,653 |
import tensorflow as tf
pos_inputs_ = tf.nn.embedding_lookup(pos_embeddings, pos_inputs_)
pos_inputs_ = tf.tile(tf.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1])
encoder_inputs_ = tf.concat([encoder_inputs_, pos_inputs_], axis=2)
if other_inputs is not None:
encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
if encoder.use_dropout:
noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
noise_shape=noise_shape)
size = tf.shape(encoder_inputs_)[2]
noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,
noise_shape=noise_shape)
if encoder.input_layers:
for j, layer_size in enumerate(encoder.input_layers):
if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu':
activation = tf.nn.relu
else:
activation = tf.tanh
| tensorflow.shape | 13,654 |
import tensorflow as tf
raise NotImplementedError(nl_type)
def update_params(self, kwargs):
"""Update the class attributes with kwargs."""
if kwargs is not None:
for k, v in kwargs.iteritems():
setattr(self, k, v)
def symmetric_weights(self, w, name):
"""Apply symmetric weight sharing."""
conv_w_t = tf.transpose(w, (2, 3, 0, 1))
conv_w_symm = 0.5 * (conv_w_t + tf.transpose(conv_w_t, (1, 0, 2, 3)))
conv_w = tf.transpose(conv_w_symm, (2, 3, 0, 1), name=name)
return conv_w
def prepare_tensors(self):
""" Prepare recurrent/forward weight matrices.
(np.prod([h, w, k]) / 2) - k params in the surround filter
"""
# FEEDFORWARD AND FEEDBACK KERNELS
lower_feats = self.in_k
for idx, (higher_feats, ff_dhw, fb_dhw) in enumerate(
zip(self.ff_conv_k,
self.ff_conv_dhw,
| tensorflow.transpose | 13,655 |
import tensorflow as tf
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
| tensorflow.FixedLenFeature | 13,656 |
import tensorflow as tf
padding=padding, kernel_initializer=init, name='tr_conv' + id, use_bias=use_bias)
# Traditional U-Net
def build_Unet_Arch(self, input_data, name="Unet_Arch"):
self.base_number_of_features = 32
with tf.variable_scope(name):
# Encoder definition
o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1')
o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1')
o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2')
o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2')
o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3')
o_mp3 = tf.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3')
o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4')
o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4')
o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5')
# Decoder definition
o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1')
o_me1 = tf.concat([o_d1, o_c4], 3) # Skip connection
o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2')
o_me2 = tf.concat([o_d2, o_c3], 3) # Skip connection
o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3')
o_me3 = tf.concat([o_d3, o_c2], 3) # Skip connection
o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4')
o_me4 = tf.concat([o_d4, o_c1], 3) # Skip connection
logits = tf.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation = None)
prediction = tf.nn.softmax(logits, name = name + '_softmax')
| tensorflow.layers.max_pooling2d | 13,657 |
import tensorflow as tf
else:
var_list = None
with tf.device('/cpu:0'):
saver = tf.train.Saver(var_list=var_list, save_relative_paths=True)
saver.restore(self.sess, checkpoint_path)
| tensorflow.train.Saver | 13,658 |
import tensorflow as tf
reset: Whether to reset the datasets.
Returns:
data: a batch of data from either the train or test set.
"""
with datasets.unlocked:
datasets.train = datasets.train.make_one_shot_iterator()
datasets.test = datasets.test.make_one_shot_iterator()
data = tf.cond(
tf.equal(phase, 'train'),
datasets.train.get_next,
datasets.test.get_next)
if not isinstance(data, dict):
data = {'data': data}
if 'length' not in data:
example = data[list(data.keys())[0]]
data['length'] = (
tf.zeros((tf.shape(example)[0],), tf.int32) + tf.shape(example)[1])
| tensorflow.equal | 13,659 |
import tensorflow as tf
y_pow: int, power to which the predictions should be raised; useful if the model diverges, e.g. y_pow=2
num_ratings: number of ratings to use, typically num_classes of the model
batch_size: batch_size of the training or validation ops
eps: a float, prevents divide by zero
name: Optional scope/name for op_scope.
Returns:
A tensor with the kappa loss.
"""
with tf.name_scope(name):
labels = tf.to_float(labels)
repeat_op = tf.to_float(
tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))
repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)
pred_ = predictions**y_pow
try:
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
except Exception:
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
| tensorflow.range | 13,660 |
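The `tf.range`/`tf.tile` block above builds the quadratic disagreement weights of the kappa loss: weights[i, j] = (i - j)^2 / (num_ratings - 1)^2, so predictions far from the true rating are penalized more. A NumPy sketch of that matrix:

```python
import numpy as np

num_ratings = 5                                   # assumed value
idx = np.arange(num_ratings)
weights = (idx[:, None] - idx[None, :]) ** 2 / float((num_ratings - 1) ** 2)
# weights[0, 4] == 1.0, weights[i, i] == 0.0
```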
import tensorflow as tf
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(_):
return tf.constant(1)
output = f(tf.constant([1]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
| tensorflow.train.Coordinator | 13,661 |
import tensorflow as tf
def _add_global_avg_pool(self, X, in_w, in_h, in_ch):
X = tf.nn.relu(X)
| tensorflow.nn.relu | 13,662 |
import tensorflow as tf
def valid_inference(self,images):
images=tf.cast(images,tf.float32)/255.0
l1 = tf.matmul(images, self.w1)+self.b1
l1=tf.nn.relu(l1)
l2 = tf.matmul(l1, self.w2)+self.b2
l2=tf.nn.relu(l2)
l3=tf.matmul(l2, self.w3)+self.b3
l3=tf.nn.relu(l3)
out=tf.matmul(l3, self.w4)+self.b4
return out
def softmax_loss(self,predicts,labels):
predicts=tf.nn.softmax(predicts)
labels=tf.one_hot(labels,classnum)
loss=-tf.reduce_sum(labels*tf.log(predicts))
return loss
def optimer(self,loss,lr=0.001):
train_step=tf.train.GradientDescentOptimizer(lr).minimize(loss)
return train_step
path=r'C:\JC\test\train_model.ckpt'
image,label=getinputs(r'C:\JC\tfrecord\64_shuffle/train.tfrecords')
test_image,test_label=getinputs(r'C:\JC\tfrecord\64_shuffle/test.tfrecords')
valid_image,valid_label= getinputs(r'C:\JC\tfrecord\64_shuffle\validation.tfrecords')
batch_image,batch_label=get_batch(image,label,trainnum,0)
| tensorflow.one_hot | 13,663 |
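`softmax_loss` above builds the cross-entropy by hand from `tf.one_hot`, a softmax, and a log. A minimal sketch of the same quantity next to the fused op, which avoids log(0) issues; the logits and a `classnum` of 3 are toy assumptions:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])
labels = tf.one_hot([0], depth=3)
manual = -tf.reduce_sum(labels * tf.math.log(tf.nn.softmax(logits)))
fused = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
# manual and fused agree here, but the fused op is the numerically safer choice
```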
import tensorflow as tf
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 16
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
| tensorflow.random_uniform | 13,664 |
import tensorflow as tf
default_value=-1)
vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
value_dtype=tf.int64,
default_value=-1)
vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
x_t = tf.gather(x, l)
x_t_len = tf.strings.length(x_t)
x_t = tf.string_split([x_t], delimiter='').values
z_t = tf.gather(y, m)
z_t_len = tf.strings.length(z_t)
z_t = tf.string_split([z_t], delimiter='').values
for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
u = tf.string_join(x_t[i:i + self._p], '')
vx_keys, r = tf.cond(
| tensorflow.strings.length | 13,665 |
import tensorflow as tf
[self.timesteps], dtype=tf.float32)))
if self.lesion_omega:
setattr(
self,
'omega_%s' % layer,
tf.constant(0.))
if self.lesion_kappa:
setattr(
self,
'kappa_%s' % layer,
| tensorflow.constant | 13,666 |
import tensorflow as tf
shape = py_utils.static_or_dynamic_shape(x)
encode_params, decode_params = stage.get_params()
encoded_x = stage.encode(x, encode_params)
# Get all values out of TensorFlow as Python constants. This is a trivial
# example of communication happening outside of TensorFlow.
with self.session(graph=server_graph):
x, decode_params, encoded_x, shape = self.evaluate_tf_py_list(
[x, decode_params, encoded_x, shape])
client_graph = tf.Graph()
with client_graph.as_default():
decoded_x = stage.decode(encoded_x, decode_params, shape=shape)
with self.session(graph=client_graph):
decoded_x = self.evaluate(decoded_x)
return TestData(x, encoded_x, decoded_x)
if is_adaptive_stage(stage):
return _adaptive_one_to_many_encode_decode(state)
| tensorflow.Graph | 13,667 |
import tensorflow as tf
logits=real_logits, labels=tf.ones_like(real_logits)))
fake_logits = discriminator_fn(fake_data)
if isinstance(fake_logits, (list, tuple)):
fake_logits = fake_logits[0]
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=fake_logits, labels=tf.zeros_like(fake_logits)))
d_loss = real_loss + fake_loss
if mode == "min_fake":
g_loss = - fake_loss
| tensorflow.zeros_like | 13,668 |
from tensorflow.contrib import metrics as metrics_lib
metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_with_threshold(
metrics_lib.streaming_precision, threshold)
# Recall for positive examples.
metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_with_threshold(
metrics_lib.streaming_recall, threshold)
return metrics
# TODO(zakaria): support weights.
def _targets_streaming_mean(unused_predictions, targets):
return metrics_lib.streaming_mean(targets)
def _predictions_streaming_mean(predictions, unused_targets):
return metrics_lib.streaming_mean(predictions)
def _streaming_with_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, targets):
return streaming_metrics_fn(predictions=math_ops.to_float(
| tensorflow.contrib.metrics.streaming_mean | 13,669 |
import tensorflow as tf
degree_l = tf.convert_to_tensor(value=degree_l)
order_m = tf.convert_to_tensor(value=order_m)
x = tf.convert_to_tensor(value=x)
pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x)
return tf.where(
tf.equal(degree_l, order_m), pmm,
_evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm))
def _spherical_harmonics_normalization(l, m, var_type=tf.float64):
l = tf.cast(l, dtype=var_type)
m = tf.cast(m, dtype=var_type)
numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m))
denominator = 4.0 * np.pi * factorial(l + tf.abs(m))
return tf.sqrt(numerator / denominator)
def _evaluate_spherical_harmonics_branch(degree,
order,
theta,
phi,
sign_order,
var_type=tf.float64):
sqrt_2 = tf.constant(1.41421356237, dtype=var_type)
order_float = tf.cast(order, dtype=var_type)
tmp = sqrt_2 * _spherical_harmonics_normalization(
degree, order, var_type) * evaluate_legendre_polynomial(
| tensorflow.abs | 13,670 |
import tensorflow as tf
with tf.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter):
self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space, self.action_space,
self.n_envs, self.n_steps + 1,
self.n_envs * (self.n_steps + 1), reuse=True,
**self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.done_ph = tf.placeholder(tf.float32, [self.n_batch]) # dones
self.reward_ph = tf.placeholder(tf.float32, [self.n_batch]) # rewards, not returns
self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act]) # mu's
self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])
self.learning_rate_ph = tf.placeholder(tf.float32, [])
eps = 1e-6
| tensorflow.placeholder | 13,671 |
import tensorflow as tf
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = tf.shape(L_flat)[0]
zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old TF behavior.
L_flat = tf.concat(1, [zeros, L_flat])
except TypeError:
# New TF behavior
L_flat = tf.concat([zeros, L_flat], 1)
| tensorflow.concat | 13,672 |
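The try/except above guards the argument-order change in `tf.concat`: before TF 1.0 the axis came first (`tf.concat(1, [a, b])`), afterwards the tensor list comes first. A minimal sketch of the current order:

```python
import tensorflow as tf

a = tf.zeros([2, 1])
b = tf.ones([2, 3])
c = tf.concat([a, b], 1)   # post-1.0 argument order, result shape [2, 4]
```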
import tensorflow as tf
if not self.lm_file:
elmo_module = hub.Module("https://tfhub.dev/google/elmo/2")
lm_embeddings = elmo_module(
inputs={"tokens": tokens, "sequence_len": text_len},
signature="tokens", as_dict=True)
word_emb = lm_embeddings["word_emb"] # [num_sentences, max_sentence_length, 512]
lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1),
lm_embeddings["lstm_outputs1"],
lm_embeddings["lstm_outputs2"]], -1) # [num_sentences, max_sentence_length, 1024, 3]
lm_emb_size = util.shape(lm_emb, 2)
lm_num_layers = util.shape(lm_emb, 3)
with tf.variable_scope("lm_aggregation"):
self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0)))
| tensorflow.concat | 13,673 |
import tensorflow as tf
# Overall connectivity and Dale's law matrices
# ------------------------------------------------
# Recurrent Dale's law weight matrix:
self.Dale_rec = tf.get_variable('Dale_rec', [N_rec, N_rec],
initializer=tf.constant_initializer(self.dale_rec),
trainable=False)
# Output Dale's law weight matrix:
self.Dale_out = tf.get_variable('Dale_out', [N_rec, N_rec],
initializer=tf.constant_initializer(self.dale_out),
trainable=False)
# Connectivity weight matrices:
self.input_Connectivity = tf.get_variable('input_Connectivity', [N_rec, N_in],
initializer=tf.constant_initializer(
self.input_connectivity_mask),
trainable=False)
self.rec_Connectivity = tf.get_variable('rec_Connectivity', [N_rec, N_rec],
initializer=tf.constant_initializer(
self.recurrent_connectivity_mask),
trainable=False)
self.output_Connectivity = tf.get_variable('output_Connectivity', [N_out, N_rec],
initializer=tf.constant_initializer(
self.output_connectivity_mask),
trainable=False)
# ------------------------------------------------
# Network loss
| tensorflow.constant_initializer | 13,674 |
import tensorflow as tf
while global_step < FLAGS.train_step:
steps_and_files = {}
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
| tensorflow.gfile.ListDirectory | 13,675 |
import tensorflow as tf
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
| tensorflow.cast | 13,676 |
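The block above is a pairwise hinge on prediction gaps: whenever the predicted gap between two targets is smaller than the true gap (after aligning signs), the difference is penalized, and `cstr_pct` reports the fraction of violated pairs. A minimal sketch with toy values:

```python
import tensorflow as tf

tgt_dif = tf.constant([1.0, -2.0, 0.5])            # true gaps, toy values
pred_dif = tf.constant([0.5, -2.5, 1.0])           # predicted gaps
geq = tgt_dif > 0
tgt_posi = tf.where(geq, tgt_dif, -tgt_dif)        # |true gap|
pred_posi = tf.where(geq, pred_dif, -pred_dif)     # predicted gap, sign-aligned
loss = tf.maximum(0.0, tgt_posi - pred_posi)       # [0.5, 0.0, 0.0]
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.size(loss), tf.float32)
```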
import tensorflow as tf
output_t = linear([cell_output] + [context_t], options.gen_hidden_size, True)
with tf.variable_scope('output_projection'):
w = tf.get_variable('w', [options.gen_hidden_size, vocab.vocab_size+1], dtype=tf.float32)
b = tf.get_variable('b', [vocab.vocab_size +1], dtype=tf.float32)
# vocab_scores is the vocabulary distribution before applying softmax.
# Each entry on the list corresponds to one decoder step
vocab_score_t = tf.nn.xw_plus_b(output_t, w, b) # apply the linear layer
| tensorflow.get_variable | 13,677 |
from tensorflow.python.framework import ops
# -- Register operations ------------------------------------------------------
bilateral_slice = _hdrnet.bilateral_slice
bilateral_slice_apply = _hdrnet.bilateral_slice_apply
# ----------- Register gradients ----------------------------------------------
@ops.RegisterGradient('BilateralSlice')
def _bilateral_slice_grad(op, grad):
grid_tensor = op.inputs[0]
guide_tensor = op.inputs[1]
return _hdrnet.bilateral_slice_grad(grid_tensor, guide_tensor, grad)
| tensorflow.python.framework.ops.RegisterGradient | 13,678 |
import tensorflow as tf
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model val loss:', total_loss)
##exit(0)
######################### debug #########################
slbo_n_stages = nslbo
print (f"each task will do nslbo = {nslbo}")
for param in model.parameters():
param.invalidate()
all_task_parameter = []
while (not test and TASK_NUM < ITERS) or (test and TEST_TASK_NUM < ITERS):
# first task or maxstep, update the model. Otherwise, revert the model
logger.info('Sync model from lazymodel')
tf.get_default_session().run(sync_model_from_lazymodel)
taskres = {}
if 'goal_velocity' not in taskres.keys():
taskres['goal_velocity'] = []
if not test and inittask == 'none':
slbo_n_stages = nslbo
elif not test and TASK_NUM == 0:
slbo_n_stages = initnslbo
elif not test and TASK_NUM > 0:
slbo_n_stages = nslbo
time_start = time.time()
trpo_warmup = []
trpo_slbo = []
surprisal = []
| tensorflow.get_default_session | 13,679 |
import tensorflow as tf
W = tf.get_variable('W', [state_size, input_size_y])
b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))
rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b
yy = tf.reshape(y, [-1, input_size_y])  # batch_size*num_steps; when you define a placeholder in TensorFlow, the shape of the input fed during the session should match the shape of the placeholder
"Mean squared error loss"
loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))
"Adding regularization"
if lambda_l2_reg > 0 :
cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name)])
Predict_l2 = tf.nn.l2_loss(W) #+ tf.nn.l2_loss(b)
total_loss = tf.reduce_sum(loss + lambda_l2_reg* tf.reduce_sum(cell_l2+Predict_l2) )
| tensorflow.reshape | 13,680 |
import tensorflow as tf
Returns:
ids: the encoded integer ids.
tokens: the encoded string.
"""
words = tf.sparse.to_dense(tf.strings.split([text]), default_value='')[0]
num_words = tf.size(words)
ids_ta = tf.TensorArray(tf.int32, 0, dynamic_size=True)
def _WordsToIds(i, words, ids_ta):
encoded_ids = self._EncodeToIds(BOW_STR + words[i])
ids_ta = ids_ta.scatter(
tf.range(ids_ta.size(),
| tensorflow.TensorArray | 13,681 |
import tensorflow as tf
w_c: [1,1, attention_vec_size]
coverage: [batch_size, passage_len]
'''
with variable_scope.variable_scope("Attention"):
# Equation (11) in the paper
state_features = linear(decoder_state, attention_vec_size, True) # [batch_size, attention_vec_size]
state_features = tf.expand_dims(state_features, 1) # [batch_size, 1, attention_vec_size]
all_features = encoder_features + state_features # [batch_size,passage_len,attention_vec_size]
if use_coverage and coverage is not None:
coverage_features = tf.expand_dims(coverage, axis=-1) * w_c # [batch_size, passage_len, attention_vec_size]
all_features += coverage_features
e = tf.reduce_sum(v * tf.tanh(all_features), axis=-1) # [batch_size, passage_len]
| tensorflow.expand_dims | 13,682 |
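A minimal sketch of the broadcasting trick above: tf.expand_dims inserts a length-1 axis so the per-state vector can be added at every passage position (hypothetical shapes):
import tensorflow as tf
encoder_features = tf.zeros([4, 50, 128])            # [batch, passage_len, attn_size]
state_features = tf.zeros([4, 128])                  # [batch, attn_size]
state_features = tf.expand_dims(state_features, 1)   # [batch, 1, attn_size]
all_features = encoder_features + state_features     # broadcasts over passage_len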
import tensorflow as tf
x = (x - u) * tf.rsqrt(s + e)
if g is not None and b is not None:
x = x*g + b
return x
def norm(x, scope, axis=[-1]):
with tf.variable_scope(scope):
n_state = shape_list(x)[-1]
g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
return _norm(x, g, b, axis=axis)
| tensorflow.variable_scope | 13,683 |
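A short sketch of how tf.variable_scope namespaces the g/b variables created in norm(), assuming TF 1.x variable-sharing semantics:
import tensorflow as tf
with tf.variable_scope("layer1"):
    g = tf.get_variable("g", [256], initializer=tf.constant_initializer(1))
print(g.name)                               # -> "layer1/g:0"
with tf.variable_scope("layer1", reuse=True):
    g_again = tf.get_variable("g", [256])   # returns the same variable object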
import tensorflow as tf
features=tf.parse_single_example(serialized_example,
features={
'label':tf.FixedLenFeature([], tf.int64),
'img_raw' : tf.FixedLenFeature([], tf.string),
| tensorflow.FixedLenFeature | 13,684 |
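A runnable sketch of the tf.FixedLenFeature parsing spec above (TF 1.x); serialized_example is assumed to hold one serialized tf.train.Example, e.g. from a TFRecord reader:
import tensorflow as tf
serialized_example = tf.placeholder(tf.string, shape=[])   # assumed input record
features = tf.parse_single_example(
    serialized_example,
    features={
        'label': tf.FixedLenFeature([], tf.int64),      # scalar int label
        'img_raw': tf.FixedLenFeature([], tf.string),   # encoded image bytes
    })
image = tf.decode_raw(features['img_raw'], tf.uint8)
label = tf.cast(features['label'], tf.int32)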
import tensorflow as tf
# inputs = tf.unstack(inputs, num=self.num_steps, axis=1)
# outputs, state = tf.nn.static_rnn(cell, inputs,
# initial_state=self._initial_state)
outputs = []
with tf.variable_scope("RNN"):
for time_step in range(self.num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
| tensorflow.variable_scope | 13,685 |
import tensorflow as tf
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
| tensorflow.random_normal | 13,686 |
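A minimal sketch of tf.random_normal as an initial value for attention parameters, mirroring the snippet with hypothetical sizes:
import tensorflow as tf
hidden_size, attention_size = 128, 64
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))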
import tensorflow as tf
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(self.preds_by_word,
[-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
self.preds_by_word.get_shape())
self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
class WordAvgModel(BaseModel): #formerly SimpleModel
"""A bag of word /predictions/."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
| tensorflow.reduce_sum | 13,687 |
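A tiny standalone example of the tf.reduce_sum reductions used above: axis 0 collapses the per-word dimension, axis 1 with keepdims preserves a column for broadcasting.
import tensorflow as tf
preds_by_word = tf.constant([[0.2, 0.8],
                             [0.6, 0.4]])                   # [num_words, vocab]
probs = tf.reduce_sum(preds_by_word, 0)                     # -> [0.8, 1.2], shape [vocab]
row_sums = tf.reduce_sum(preds_by_word, 1, keepdims=True)   # shape [num_words, 1]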
import tensorflow as tf
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
| tensorflow.variable_scope | 13,688 |
import tensorflow as tf
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, tf.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape, self.evaluate(
tf.shape(dist.sample())))
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
self.evaluate(tf.shape(dist_copy.sample())))
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
# Note: we cannot easily test all distributions since each requires
| tensorflow.random_normal | 13,689 |
import tensorflow as tf
"""
loss = None
with tf.name_scope(name, "click_softmax_cross_entropy",[output]):
label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)
loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)
return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)
def click_loglikelihood(self, labels, propensity,train_output, name=None):
| tensorflow.reduce_sum | 13,690 |
import tensorflow as tf
beta = tf.train.polynomial_decay(beta_start, self.global_step,
| tensorflow.train.polynomial_decay | 13,691 |
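A minimal sketch of tf.train.polynomial_decay; the original call is truncated, so the decay_steps, end value, and power below are illustrative only:
import tensorflow as tf
global_step = tf.train.get_or_create_global_step()
beta = tf.train.polynomial_decay(
    learning_rate=1.0,        # starting value (beta_start in the snippet)
    global_step=global_step,
    decay_steps=100000,       # hypothetical
    end_learning_rate=0.1,    # hypothetical
    power=1.0)                # power=1.0 gives a linear schedule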
import tensorflow as tf
self.assertTrue(gfile.Exists(s4))
class SaveRestoreWithVariableNameMap(tf.test.TestCase):
def testNonReshape(self):
save_path = os.path.join(self.get_temp_dir(), "basics")
with self.test_session() as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
| tensorflow.Variable | 13,692 |
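A compact sketch of the tf.Variable plus name-mapped Saver pattern exercised by this test (TF 1.x; the checkpoint path is hypothetical):
import tensorflow as tf
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
saver = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "/tmp/name_mapped_ckpt")   # hypothetical path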
import tensorflow as tf
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
| tensorflow.logging.info | 13,693 |
import tensorflow as tf
if with_hessian == True: #if true, it also evaluates the Hessian
hess = hessian(grads, tvars) #Hessian matrix
res = tf.subtract(an, Y) #residual error
#---------------------------Initialize evaluation metrics----------------------------------------------------
e_len = len(epoch_sample)
acc_train = [] #store train accuracy for each epoch
acc_test = [] #store test accuracy for each epoch
if actL == 'sigmoid': #accuracy score for binary class classification
Yp = tf.greater(an , 0.5)
accuracy = tf.reduce_mean(tf.cast(tf.equal(Yp, tf.equal(Y,1.0)), "float"))
elif actL == 'esp' or actL == 'relu': #r2 score
norm= tf.reduce_mean( tf.squared_difference(Y,tf.reduce_mean(Y)) )
accuracy = 1 - tf.divide( tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax': #accuracy score for multiclass classification
Yp = tf.sigmoid(betan*hn)
correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
accuracy= tf.reduce_mean(tf.cast(correct, "float"))
| tensorflow.equal | 13,694 |
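A small self-contained example of the tf.equal-based accuracy computation used above:
import tensorflow as tf
preds = tf.constant([True, False, True])               # thresholded predictions
labels = tf.constant([1.0, 0.0, 0.0])
correct = tf.equal(preds, tf.equal(labels, 1.0))        # elementwise boolean match
accuracy = tf.reduce_mean(tf.cast(correct, "float"))    # -> 2/3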
import tensorflow as tf
with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
with ops.colocate_with(mean):
new_mean = tf.assign_sub(
mean,
tf.check_numerics(
decay * (mean - cur_mean), "NaN in moving mean."))
with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
with ops.colocate_with(var):
new_var = tf.assign_sub(
var,
tf.check_numerics(decay * (var - cur_var),
"NaN in moving variance."))
with tf.name_scope(name, "IncrementTime", [step]):
with ops.colocate_with(step):
new_step = tf.assign_add(step, 1.)
used_var += 0. * new_mean * new_var * new_step
used_var += epsilon
return used_mean, used_var
def convnet(input_,
dim_in,
dim_hid,
| tensorflow.name_scope | 13,695 |
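A minimal sketch of the moving-average idiom above: tf.name_scope only groups ops for the graph/TensorBoard, while tf.assign_sub performs the in-place update.
import tensorflow as tf
mean = tf.Variable(0.0, trainable=False)
cur_mean, decay = tf.constant(1.0), 0.1
with tf.name_scope("AssignMovingAvg"):
    # mean <- mean - decay*(mean - cur_mean) == (1-decay)*mean + decay*cur_mean
    new_mean = tf.assign_sub(mean, decay * (mean - cur_mean))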
import tensorflow as tf
## End new version
if self._normalize_cols:
logits_vec = logits_vec - tf.math.reduce_logsumexp(
logits_vec, axis=0)[None]
relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1)
### Metrics
global_step = tf.compat.v1.train.get_or_create_global_step()
orig_indices = tf.range(
self._sample_batch_size, dtype=relabel_indices.dtype)
with tf.name_scope("relabelling"):
# How often are the originally commanded goals most optimal?
opt_indices = tf.argmax(logits_vec, axis=1)
orig_is_opt = opt_indices == orig_indices
| tensorflow.compat.v1.train.get_or_create_global_step | 13,696 |
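A tiny sketch of tf.compat.v1.train.get_or_create_global_step, which returns the existing global-step variable or creates one if none exists:
import tensorflow as tf
global_step = tf.compat.v1.train.get_or_create_global_step()
increment = tf.compat.v1.assign_add(global_step, 1)   # typically run once per train step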
import tensorflow as tf
for output in sorted(outputs_to_scales_to_logits):
scales_to_logits = outputs_to_scales_to_logits[output]
logits = _resize_bilinear(
scales_to_logits[MERGED_LOGITS_SCOPE],
tf.shape(images)[1:3],
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits), 4))
if add_flipped_images:
scales_to_logits_reversed = (
outputs_to_scales_to_logits_reversed[output])
logits_reversed = _resize_bilinear(
tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]),
tf.shape(images)[1:3],
scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype)
outputs_to_predictions[output].append(
tf.expand_dims(tf.nn.softmax(logits_reversed), 4))
for output in sorted(outputs_to_predictions):
predictions = outputs_to_predictions[output]
# Compute average prediction across different scales and flipped images.
predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
outputs_to_predictions[output] = tf.argmax(predictions, 3, output_type=tf.dtypes.int32)
outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions)
| tensorflow.reverse_v2 | 13,697 |
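A minimal sketch of tf.reverse_v2, which flips a tensor along the listed axes; here axis 2 (width), mirroring how the flipped-image logits are reversed back above:
import tensorflow as tf
logits = tf.zeros([1, 4, 6, 21])            # [batch, height, width, classes]
flipped_back = tf.reverse_v2(logits, [2])   # reverse along the width dimension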
import tensorflow as tf
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
filed_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
| tensorflow.logging.info | 13,698 |
import tensorflow as tf
mnist_eager.train(model, optimizer, dataset,
step_counter=tf.train.get_or_create_global_step())
| tensorflow.train.get_or_create_global_step | 13,699 |