seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
a `float` `scalar`, KL divergence.
"""
if num_classes == 2:
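# Binary case: KL(q||p) = CE(q, p) - H(q); both terms are expressed via sigmoid cross-entropy below.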
q = tf.nn.sigmoid(q_logits)
p = tf.nn.sigmoid(p_logits)
kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
else:
q = tf.nn.softmax(q_logits)
p = tf.nn.softmax(p_logits)
kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
kl.get_shape().assert_has_rank(2)
weights.get_shape().assert_has_rank(1)
loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')
return loss
| tensorflow.log | 5,800 |
import tensorflow as tf
# TODO: check that all i in indices have ndim n-1
# TODO: support broadcasting for numpy arrays with np.broadcast_to()
#indices = tf.pack(list(indices), axis=len(indices)-1)
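# tf.pack is the pre-1.0 name of tf.stack; stacking along the last axis yields one coordinate tuple per element, as tf.gather_nd expects.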
indices = tf.pack(list(indices), axis=-1)
# indices = tf.Print(indices, [indices], 'indices', summarize=100)
return tf.gather_nd(t, indices)
else:
raise NotImplementedError('index {} with {}'.format(t, indices))
| tensorflow.gather_nd | 5,801 |
import tensorflow as tf
l = layer
if use_bn:
l = batch_norm_conv(l, b_train=bn_phaze, scope='bn')
l = act_func(l)
l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1], dilation=dilation,
non_linear_fn=None, bias=use_bias)
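# DenseNet-style growth: concatenate the new feature maps with the block input along the channel axis (NHWC).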
l = tf.concat([l, layer], 3)
return l
def add_residual_layer(layer, filter_dims, act_func=tf.nn.relu, scope='residual_layer',
use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
with tf.variable_scope(scope):
| tensorflow.concat | 5,802 |
import tensorflow as tf
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
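# Note the pre-TF-1.0 tf.concat signature (axis first): encoder outputs are reshaped into (batch, T, output_size) attention states.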
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
| tensorflow.nn.rnn | 5,803 |
import tensorflow as tf
xent_mat *= class_weights
return tf.reduce_sum(xent_mat, sumd)
def _SafeNegEntropy(probs, batch_size, eps=0.0001):
"""Computes negative entropy in a way that will not overflow."""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
entropy = tf.mul(probs, tf.log(adjusted_probs))
return tf.reduce_sum(entropy) / batch_size
| tensorflow.reduce_sum | 5,804 |
import tensorflow as tf
# loss of value function
vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r))
entropy = - tf.reduce_sum(prob_tf * log_prob_tf)
| tensorflow.square | 5,805 |
import tensorflow as tf
placeholders = {
'batch': tf.placeholder(tf.int32, name='batch'),
'batch_neg': tf.placeholder(tf.int32, name='batch_neg'),
'batch_node': tf.placeholder(tf.int32, name='batch_node'),
'adj_min_batch': tf.placeholder(tf.float32, name='adj_min_batch'),
'sim_min_batch': tf.placeholder(tf.float32, name='sim_min_batch'),
'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'),
'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'),
'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'),
'degrees': tf.placeholder(tf.int32),
'dropout': tf.placeholder_with_default(0., shape=()),
}
| tensorflow.placeholder | 5,806 |
import tensorflow as tf
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
u = tf.contrib.layers.conv2d_transpose(layer_input,filters,f_size,stride=stride,padding=padding)
if dropout_rate:
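# Note: tf.contrib.layers.dropout takes keep_prob, so dropout_rate is effectively the probability of keeping a unit here.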
u = tf.contrib.layers.dropout(u,keep_prob=dropout_rate)
u = tf.contrib.layers.batch_norm(u)
u = tf.nn.relu(u)
| tensorflow.contrib.layers.conv2d_transpose | 5,807 |
import tensorflow as tf
def net_U0(self, x):
lambda_1 = self.lambda_1
lambda_2 = tf.exp(self.lambda_2)
U = self.neural_net(x, self.weights, self.biases)
U_x = self.fwd_gradients_0(U, x)
U_xx = self.fwd_gradients_0(U_x, x)
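# F is the PDE right-hand side (Burgers-like: -lambda_1*U*U_x + lambda_2*U_xx), stepped with implicit Runge-Kutta weights IRK_alpha.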
F = -lambda_1*U*U_x + lambda_2*U_xx
U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T)
return U0
def net_U1(self, x):
lambda_1 = self.lambda_1
lambda_2 = tf.exp(self.lambda_2)
U = self.neural_net(x, self.weights, self.biases)
U_x = self.fwd_gradients_1(U, x)
U_xx = self.fwd_gradients_1(U_x, x)
F = -lambda_1*U*U_x + lambda_2*U_xx
U1 = U + self.dt*tf.matmul(F, (self.IRK_beta - self.IRK_alpha).T)
return U1
def callback(self, loss):
print('Loss:', loss)
def train(self, nIter):
| tensorflow.exp | 5,808 |
import tensorflow as tf
'lr_decay_factors', '1, 0.6, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model/resnet50',#None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
'model_scope', 'xdet_resnet',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'Whether to ignore missing variables when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
'Whether we will train on the cloud (the pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").')
tf.app.flags.DEFINE_string(
'cloud_checkpoint_path', 'resnet50/model.ckpt',
'The path to a checkpoint from which to fine-tune.')
| tensorflow.app.flags.DEFINE_string | 5,809 |
import tensorflow as tf
alpha: A scalar, slope of negative section (default=`0.`).
max_value: Saturation threshold.
Returns
-------
A tensor.
"""
if alpha != 0.:
negative_part = tf.nn.relu(-x)
x = tf.nn.relu(x)
if max_value is not None:
max_value = _to_tensor(max_value, x.dtype.base_dtype)
zero = _to_tensor(0., x.dtype.base_dtype)
x = tf.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns 0. if x < -2.5, 1. if x > 2.5.
In -2.5 <= x <= 2.5, returns 0.2 * x + 0.5.
| tensorflow.clip_by_value | 5,810 |
from tensorflow.python.ops import math_ops
iou = math_ops.div(cm_diag, denominator)
return math_ops.reduce_mean(iou, name=name)
| tensorflow.python.ops.math_ops.reduce_mean | 5,811 |
import tensorflow as tf
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
with self.assertRaisesOpError("uninitialized value v0"):
sess.run(v0)
| tensorflow.Variable | 5,812 |
import tensorflow as tf
sequence_length=nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
# transpose it back
output = tf.transpose(output, perm=[1, 0, 2])
return output
| tensorflow.transpose | 5,813 |
import tensorflow as tf
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
| tensorflow.random_normal | 5,814 |
import tensorflow as tf
'''
y_pairs = tf.reshape(output, [-1,2]) # fold: 1 x n -> [n/2 x 2]
pos_scores, neg_scores = tf.split(1, 2, y_pairs) # separate pairs
hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores)
| tensorflow.split | 5,815 |
import tensorflow as tf
n_batch_train = n_batch*n_gpu
n_updates_total = (n_train//n_batch_train)*n_iter
X_train = tf.placeholder(tf.int32, [n_batch_train, 2, n_ctx, 2])
M_train = tf.placeholder(tf.float32, [n_batch_train, 2, n_ctx])
X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
M = tf.placeholder(tf.float32, [None, 2, n_ctx])
Y_train = tf.placeholder(tf.int32, [n_batch_train])
Y = tf.placeholder(tf.int32, [None])
train, logits, clf_losses, lm_losses = mgpu_train(X_train, M_train, Y_train)
clf_loss = tf.reduce_mean(clf_losses)
params = find_trainable_variables('model')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer())
shapes = json.load(open('model/params_shapes.json'))
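# Each np.prod(shape) is one flattened parameter's length; the cumulative sums mark split points in the concatenated weight vector.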
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load('model/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
init_params[0] = init_params[0][:n_ctx]
init_params[0] = np.concatenate([init_params[1], (np.random.randn(n_special, n_embd)*0.02).astype(np.float32), init_params[0]], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
| tensorflow.ConfigProto | 5,816 |
import tensorflow as tf
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
# pylint: disable=unused-variable,invalid-name
def testDynamicAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with tf.variable_scope(
"root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = tf.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
| tensorflow.nn.rnn_cell.MultiRNNCell | 5,817 |
import tensorflow.contrib as contrib
train_y_2 = to_categorical(train_y_2, n_class_2)
test_y_2 = to_categorical(test_y_2, n_class_2)
return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2
def apply_cross_stitch(input1, input2):
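# Cross-stitch unit (Misra et al., 2016): a learned linear mixing of the two task-specific activations, initialized to the identity.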
input1_reshaped = contrib.layers.flatten(input1)
input2_reshaped = contrib.layers.flatten(input2)
input = tf.concat((input1_reshaped, input2_reshaped), axis=1)
# initialize with identity matrix
cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=tf.float32,
| tensorflow.contrib.layers.flatten | 5,818 |
import tensorflow as tf
updates=actions[name]
))
assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))
assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))
# Add episode indices.
| tensorflow.scatter_update | 5,819 |
import tensorflow as tf
for variable, value in zip(all_vars, cog):
vstar = self.get_slot(variable, "vstar")
vstar.load(value, client.model.sess)
# get old gradient
_, gprev = client.get_grads()
# Find g_t - F'(old)
gdiff = [g1 - g2 for g1, g2 in zip(avg_gradient, gprev)]
with client.model.graph.as_default():
all_vars = tf.trainable_variables()
for variable, grad in zip(all_vars, gdiff):
gold = self.get_slot(variable, "gold")
gold.load(grad, client.model.sess)
| tensorflow.trainable_variables | 5,820 |
import tensorflow as tf
import importlib
import subprocess
tf.enable_eager_execution()
from entropy_model import EntropyBottleneck
| tensorflow.enable_eager_execution | 5,821 |
import tensorflow as tf
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
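# The hard-negative mask implements online hard-example mining alongside the positive anchors.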
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
return multi_box_loss
| tensorflow.reduce_mean | 5,822 |
import tensorflow as tf
self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_') # input next state for agent1
self.R = tf.placeholder(tf.float32, [None, ], name='R') # input Reward
| tensorflow.placeholder | 5,823 |
import tensorflow as tf
loss = tf.losses.softmax_cross_entropy(labels, outputs)
| tensorflow.losses.softmax_cross_entropy | 5,824 |
import tensorflow as tf
:return: [batch_size, window_size] boolean tensor mask.
"""
return tf.logical_not(tf.sequence_mask(before_padding, maxlen=window_size))
def _right_mask(after_padding, window_size):
"""Same as above, but for right-padded vectors."""
return tf.sequence_mask(window_size - after_padding, maxlen=window_size)
class WindowFeatures(object):
"""Helper for dealing with window-like raw features.
In particular, this class generates concatenated "dflux_dt" values, and has a masking
| tensorflow.sequence_mask | 5,825 |
import tensorflow as tf
weights: 1-D `float` tensor with shape [num_timesteps * batch_size].
Elements should be 1.0 only at the ends of sequences.
num_classes: an `int`, the number of training classes
Returns:
a `float` `scalar`, KL divergence.
"""
if num_classes == 2:
q = tf.nn.sigmoid(q_logits)
p = tf.nn.sigmoid(p_logits)
kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
else:
q = tf.nn.softmax(q_logits)
p = tf.nn.softmax(p_logits)
kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
| tensorflow.nn.sigmoid_cross_entropy_with_logits | 5,826 |
import tensorflow as tf
x = tf.matmul(x, V)
| tensorflow.matmul | 5,827 |
import tensorflow as tf
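# Feature spec for a serialized TFRecord example -- presumably the dict passed to tf.parse_single_example (the enclosing call is truncated in this snippet).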
features={
'label':tf.FixedLenFeature([], tf.int64),
'img_raw' : tf.FixedLenFeature([], tf.string),
})
| tensorflow.FixedLenFeature | 5,828 |
import tensorflow as tf
if padding == "PARTIAL":
with tf.variable_scope('mask'):
_, h, w, _ = input.get_shape().as_list()
| tensorflow.variable_scope | 5,829 |
import tensorflow as tf
decoder_use_sum_merge=model_options.decoder_use_sum_merge,
decoder_filters=model_options.decoder_filters,
decoder_output_is_logits=model_options.decoder_output_is_logits,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm,
use_bounded_activation=model_options.use_bounded_activation)
outputs_to_logits = {}
for output in sorted(model_options.outputs_to_num_classes):
if model_options.decoder_output_is_logits:
outputs_to_logits[output] = tf.identity(features,
name=output)
else:
outputs_to_logits[output] = get_branch_logits(
features,
model_options.outputs_to_num_classes[output],
model_options.atrous_rates,
weight_decay=weight_decay,
reuse=reuse,
scope_suffix=output)
return outputs_to_logits
| tensorflow.identity | 5,830 |
import tensorflow as tf
self.is_training = tf.placeholder(tf.bool)
initializer = tf.contrib.layers.variance_scaling_initializer()
# Embedding Lookup 16
with tf.device('/cpu:0'), tf.name_scope("embedding"):
if use_he_uniform:
self.embedding_W = tf.get_variable(name='lookup_W', shape=[num_quantized_chars, embedding_size],
initializer=tf.contrib.layers.variance_scaling_initializer())
else:
self.embedding_W = tf.Variable(tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0),name="embedding_W")
self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size], initializer=initializer)
embedded_tags = tf.nn.embedding_lookup(W_tags, self.input_tags)
embedded_tags_expanded = tf.expand_dims(embedded_tags, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
W_deps = tf.get_variable("embed_W_deps", [deps_vocab_size, embedding_size], initializer=initializer)
embedded_deps = tf.nn.embedding_lookup(W_deps, self.input_deps)
embedded_deps_expanded = tf.expand_dims(embedded_deps, -1)
with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
W_head = tf.get_variable("embed_W_head", [num_quantized_chars, embedding_size], initializer=initializer)
embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
embedded_head_expanded = tf.expand_dims(embedded_head, -1)
| tensorflow.name_scope | 5,831 |
import tensorflow as tf
weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
| tensorflow.variable_scope | 5,832 |
import tensorflow as tf
# use the TPU version of RunConfig
config = tf.contrib.tpu.RunConfig(
| tensorflow.contrib.tpu.RunConfig | 5,833 |
import tensorflow as tf
A = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]]
B = [1, 1]
top_k = tf.nn.top_k(A, 2)
in_top_k = tf.nn.in_top_k(A, B, 1)
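# in_top_k reports, per row, whether target index B[i] is among the top-1 logits of A[i]; top_k returns the values and indices themselves.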
| tensorflow.nn.top_k | 5,834 |
import tensorflow as tf
coord.request_stop()
coord.join(threads)
def predict_time(loop=100):
feed_dict={
testnum:1
}
with tf.Session(config=config) as sess:
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
tf.train.Saver().restore(sess,path)
total=0.0
for i in range(loop):
a = datetime.now()
accuracy_np = sess.run([accuracy],feed_dict=feed_dict)
b = datetime.now()
c = (b - a).microseconds
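# Note: .microseconds keeps only the sub-second component; .total_seconds() would be safer if a run can exceed one second.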
total+=c
print('predict_time(ms): ',total/(loop*1000))
coord.request_stop()
coord.join(threads)
| tensorflow.train.Saver | 5,835 |
import tensorflow as tf
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
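# Epsilon-greedy: with probability eps take a uniformly random action, otherwise keep the greedy (deterministic) one.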
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
| tensorflow.where | 5,836 |
import tensorflow as tf
with tf.name_scope('AccumGradOptimizer'):
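# Accumulate per-step gradients into slot variables, then apply and clear them once every self._niter steps.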
ops = []
for s, gv in zip(slots, grads_and_vars):
g, v = gv
ops.append(s.assign_add(g))
update_counter = tf.assign_add(counter, 1, name='update_counter')
update_slot_op = tf.group(update_counter, *ops, name='update_slot')
def update_grad():
update_op = self._opt.apply_gradients(slots_and_vars)
with tf.control_dependencies([update_op]):
clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
return tf.group(*clear_ops, name='update_grad')
pred = tf.equal(tf.mod(counter, self._niter), 0)
with tf.control_dependencies([update_slot_op]):
if name is None:
name = 'cond_update_grad'
op = tf.cond(pred, update_grad, tf.no_op, name=name).op
return op
if __name__ == '__main__':
# run it with "python -m tensorpack.tfutils.optimizer"
x = tf.get_variable('x', shape=[6])
cost = tf.reduce_sum(tf.abs(x), name='cost')
opt = tf.train.GradientDescentOptimizer(0.01)
opt = AccumGradOptimizer(opt, 5)
| tensorflow.mod | 5,837 |
import tensorflow as tf
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''rnn_outputs is 3-D, so reshape it to 2-D for the matmul,
then reshape the result back to [batch_size, num_steps, num_classes].'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) +b, \
shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)
y_as_list = tf.unstack(y, num=num_steps, axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
'''Train the network.'''
def train_rnn(num_epochs, num_steps, state_size=4, verbose=True):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
training_losses = []
for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
training_loss = 0
| tensorflow.train.AdagradOptimizer | 5,838 |
import tensorflow as tf
src_mask = tf.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=tf.float32)
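# Outer product of the sequence mask with itself gives a (batch, src_len, src_len) pairwise mask.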
src_mask = tf.einsum('ij,ik->ijk', src_mask, src_mask)
attn_loss *= tf.to_float(src_mask) # don't take padding words into account
attn_loss = tf.norm(attn_loss) / tf.to_float(batch_size)
xent_loss += reconstruction_attn_weight * attn_loss
attention_weights = [attention_weights, reconstructed_weights]
| tensorflow.to_float | 5,839 |
import tensorflow as tf
for var in tf.all_variables():
print(var)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
| tensorflow.shape | 5,840 |
import tensorflow as tf
batch_size = tf.shape(X)[0]
noise_shape = (batch_size, 1, 1, 1)
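# floor(keep_prob + U[0,1)) equals 1 with probability keep_prob -- the standard inverted-dropout mask trick.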
random_tensor = keep_prob + tf.random_uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.floor(random_tensor)
X = (X / keep_prob) * binary_tensor
return X
def _do_conv(self, X, w, h, in_ch, out_ch, filter_size=1, no_relu=False, no_reg=False, is_train=False):
W = self._make_var('W', (filter_size, filter_size, in_ch, out_ch), no_reg=no_reg)
if not no_relu:
X = tf.nn.relu(X)
X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME')
X = self._add_batch_norm(X, out_ch, is_train=is_train)
X = tf.reshape(X, (-1, w, h, out_ch)) # Sanity shape check
return X
def _do_separable_conv(self, X, w, h, ch, filter_size=3, stride=1, ch_mul=1,
no_batch_norm=False, W_d=None, W_p=None, is_train=False):
if W_d is None:
W_d = self._make_var('W_d', (filter_size, filter_size, ch, ch_mul))
if W_p is None:
W_p = self._make_var('W_p', (1, 1, ch_mul * ch, ch))
| tensorflow.nn.conv2d | 5,841 |
import tensorflow as tf
v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
value_loss = 0.5 * tf.reduce_mean(((value_fn - v_backup) ** 2)*self.weight_ph)
#value_for_priority = tf.reduce_mean((value_fn - v_backup) ** 2,1)
regularizervf = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope='model/values_fn')
all_trainable_weights_vf = tf_util.get_trainable_vars('model/values_fn')
regularization_penalty_vf = tf.contrib.layers.apply_regularization(regularizervf, all_trainable_weights_vf)
if self.n_step:
values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf + qf1_loss_n + qf2_loss_n
else:
values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf
# Policy train op
# (has to be separate from value train op, because min_qf_pi appears in policy_loss)
policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
values_params = tf_util.get_trainable_vars('model/values_fn')
source_params = tf_util.get_trainable_vars("model/values_fn/vf")
target_params = tf_util.get_trainable_vars("target/values_fn/vf")
# Polyak averaging for target variables
self.target_update_op = [
tf.assign(target, (1 - self.tau) * target + self.tau * source)
for target, source in zip(target_params, source_params)
]
| tensorflow.train.AdamOptimizer | 5,842 |
import tensorflow as tf
"""Computes listwise softmax loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
Returns:
(tf.Tensor) A single value tensor containing the loss.
"""
loss = None
with tf.name_scope(name, "click_softmax_cross_entropy",[output]):
label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)
loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)
return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)
def click_loglikelihood(self, labels, propensity,train_output, name=None):
"""Computes listwise softmax loss with propensity weighting.
Args:
output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
relevant example.
propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
name: A string used as the name for this variable scope.
| tensorflow.reduce_sum | 5,843 |
import tensorflow as tf
inputs = tf.nn.relu(inputs, name="first_relu")
#print("temp cnn output shape:", inputs.shape)
inputs = tf.squeeze(inputs, axis=2)
#print("squeeze shape", inputs.shape)
| tensorflow.squeeze | 5,844 |
import tensorflow as tf
init_op=tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
scaffold_fn=tf.train.Scaffold(init_op=init_op)
| tensorflow.train.Scaffold | 5,845 |
import tensorflow as tf
shuffle=True)
def placeholder_like(x, name=None):
return tf.placeholder(shape=x.shape, dtype=tf.float32, name=name)
def random_sphere(shape):
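# Normalizing Gaussian draws to unit L2 norm gives points distributed uniformly on the sphere.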
n = tf.random_normal(shape=shape, dtype=tf.float32)
n = tf.reshape(n, shape=(int(shape[0]), -1))
n = tf.nn.l2_normalize(n, dim=1)
n = tf.reshape(n, shape)
return n
| tensorflow.random_normal | 5,846 |
import tensorflow as tf
masked_lm_log_probs = tf.reshape(
masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
)
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32
)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights,
)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights
| tensorflow.reshape | 5,847 |
import tensorflow as tf
if is_adaptive_stage(stage):
# The property should have keys matching those of state_update_tensors.
self.assertSameElements(stage.state_update_aggregation_modes.keys(),
test_data.state_update_tensors.keys())
for mode in six.itervalues(stage.state_update_aggregation_modes):
self.assertIn(mode, encoding_stage.StateAggregationMode)
for tensor in six.itervalues(test_data.initial_state):
self.assertTrue(tf.is_tensor(tensor))
for tensor in six.itervalues(test_data.state_update_tensors):
self.assertTrue(tf.is_tensor(tensor))
for tensor in six.itervalues(test_data.updated_state):
self.assertTrue(tf.is_tensor(tensor))
# The state related Tensors should have appropriate substrings in their
# names.
for tensor in six.itervalues(test_data.initial_state):
self.assertIn(encoding_stage.INITIAL_STATE_SCOPE_SUFFIX, tensor.name)
for tensor in six.itervalues(test_data.updated_state):
self.assertIn(encoding_stage.UPDATE_STATE_SCOPE_SUFFIX, tensor.name)
| tensorflow.is_tensor | 5,848 |
import tensorflow as tf
def good():
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
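# decode_and_crop_jpeg decodes only the sampled window from the JPEG bytes before the bicubic resize to 224x224.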
image = tf.image.decode_and_crop_jpeg(
byte, crop_window, channels=3, **JPEG_OPT)
image = uint8_resize_bicubic(image, [224, 224])
return image
def bad():
| tensorflow.image.decode_and_crop_jpeg | 5,849 |
from tensorflow.python.platform import googletest
ig = integrated_gradients.IntegratedGradients(graph, sess, y[0], x)
mask = ig.GetMask(x_value=x_input_val[0], feed_dict={},
x_baseline=x_baseline_val[0], x_steps=1000)
# Verify the result.
self.assertAlmostEqual(expected_val, mask.sum(), places=3)
if __name__ == '__main__':
googletest.main()
| tensorflow.python.platform.googletest.main | 5,850 |
import tensorflow as tf
options = self.options
input_shape = tf.shape(encoder_states)
batch_size = input_shape[0]
passage_len = input_shape[1]
with variable_scope.variable_scope("attention_decoder"):
encoder_features = tf.expand_dims(encoder_states, axis=2) # now is shape [batch_size, passage_len, 1, encoder_dim]
W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size])
self.W_h = W_h
encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME") # [batch_size, passage_len, 1, attention_vec_size]
encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size])
return encoder_features
def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t,
encoder_states, encoder_features, passage_word_idx, passage_mask):
options = self.options
with variable_scope.variable_scope("attention_decoder"):
v = variable_scope.get_variable("v", [options.attention_vec_size])
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
| tensorflow.reshape | 5,851 |
import tensorflow as tf
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
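# Importance-sampling weights from prioritized replay rescale the squared TD error.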
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
| tensorflow.variable_scope | 5,852 |
import tensorflow as tf
fc_weight = tf.get_variable("weights", initializer=initial_value[0])
fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=initial_value[1])
if use_bias:
output = tf.add(tf.matmul(flat_input, fc_weight), fc_bias)
else:
output = tf.matmul(flat_input, fc_weight)
| tensorflow.matmul | 5,853 |
from tensorflow.python.framework import ops as _ops
Returns:
The created Operation.
"""
result = _op_def_lib.apply_op("Old", name=name)
return result
_ops.RegisterShape("Old")(None)
_resource_create_op_outputs = [""]
def resource_create_op(resource, name=None):
r"""TODO: add doc.
Args:
| tensorflow.python.framework.ops.RegisterShape | 5,854 |
import tensorflow as tf
# Select logits to prob function
self.logits_to_prob = tf.nn.softmax
if self.hparams.logits_to_prob == 'sigmoid':
self.logits_to_prob = sigmoid_prob
self.output = self.ranking_model(self.max_candidate_num, scope='ranking_model')
pad_removed_output = self.remove_padding_for_metric_eval(self.docid_inputs, self.output)
reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels)) # reshape from [max_candidate_num, ?] to [?, max_candidate_num]
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_labels, pad_removed_output, None)
tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['eval'])
if not forward_only:
# Build model
self.rank_list_size = exp_settings['train_list_cutoff']
train_output = self.ranking_model(self.rank_list_size, scope='ranking_model')
self.propensity = self.DenoisingNet(self.rank_list_size, forward_only)
train_labels = self.labels[:self.rank_list_size]
print('Loss Function is ' + self.hparams.loss_func)
# Select loss function
self.loss_func = None
| tensorflow.summary.scalar | 5,855 |
from tensorflow.python.framework import ops
guide_tensor = op.inputs[1]
input_tensor = op.inputs[2]
has_offset = op.get_attr('has_offset')
return _hdrnet.bilateral_slice_apply_grad(
grid_tensor, guide_tensor, input_tensor, grad, has_offset=has_offset)
# ----------- Register Shape inference ----------------------------------------
@ops.RegisterShape('BilateralSlice')
def _bilateral_slice_shape(op):
input_tensor = op.inputs[0]
guide_tensor = op.inputs[1]
return [guide_tensor.get_shape().concatenate(input_tensor.get_shape()[-1])]
@ops.RegisterShape('BilateralSliceApply')
| tensorflow.python.framework.ops.RegisterShape | 5,856 |
from tensorflow.python.ops.rnn_cell_impl import _Linear
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
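# Gate biases start at 1.0 so the reset/update gates are initially open, which eases early training.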
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.rnn_cell_impl._Linear | 5,857 |
import tensorflow as tf
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
if mask is not None:
mask = tf.equal(mask, tf.ones_like(mask))
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
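# Padded positions get ~ -2**32 so they contribute (near) zero probability after the softmax.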
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
| tensorflow.expand_dims | 5,858 |
import tensorflow as tf
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
if self.has_input:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 1)
if len(features["inputs"].shape) < 5:
features["inputs"] = tf.expand_dims(features["inputs"], 4)
# Expand the inputs in to the beam size.
features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
s = common_layers.shape_list(features["inputs"])
features["inputs"] = tf.reshape(features["inputs"],
[s[0] * s[1], s[2], s[3], s[4]])
target_modality = self._problem_hparams.target_modality
| tensorflow.expand_dims | 5,859 |
from tensorflow.python.ops import array_ops
def benchmarkTfRNNLSTMBlockCellTraining(self):
test_configs = self._GetTestConfig()
for config_name, config in test_configs.items():
num_layers = config["num_layers"]
num_units = config["num_units"]
batch_size = config["batch_size"]
seq_length = config["seq_length"]
with ops.Graph().as_default(), ops.device("/device:GPU:0"):
inputs = seq_length * [
array_ops.zeros([batch_size, num_units], dtypes.float32)
]
cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units) # pylint: disable=cell-var-from-loop
multi_cell = rnn_cell.MultiRNNCell(
[cell() for _ in range(num_layers)])
outputs, final_state = core_rnn.static_rnn(
multi_cell, inputs, dtype=dtypes.float32)
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
| tensorflow.python.ops.array_ops.zeros | 5,860 |
import tensorflow as tf
generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1])
# Tensorboard visualization
tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss)
tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss)
tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss)
tf.summary.scalar(name='Generator Loss', tensor=generator_loss)
tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss)
tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent)
tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution)
tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label)
tf.summary.histogram(name='Real Categorical Distribution', values=categorial_distribution)
tf.summary.image(name='Input Images', tensor=input_images, max_outputs=10)
tf.summary.image(name='Generated Images', tensor=generated_images, max_outputs=10)
summary_op = tf.summary.merge_all()
# Saving the model
saver = tf.train.Saver()
step = 0
with tf.Session() as sess:
| tensorflow.summary.histogram | 5,861 |
from tensorflow.python.ops import array_ops
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.pack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
| tensorflow.python.ops.array_ops.zeros | 5,862 |
import tensorflow as tf
image_resizer_fn(tensor_dict[fields.InputDataFields.image], None)[0],
tf.uint8)
if fields.InputDataFields.image_additional_channels in tensor_dict:
channels = tensor_dict[fields.InputDataFields.image_additional_channels]
tensor_dict[fields.InputDataFields.image] = tf.concat(
[tensor_dict[fields.InputDataFields.image], channels], axis=2)
# Apply data augmentation ops.
if data_augmentation_fn is not None:
| tensorflow.concat | 5,863 |
import tensorflow as tf
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if task_name != "sts-b":
probabilities = tf.nn.softmax(logits, axis=-1)
predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
| tensorflow.nn.bias_add | 5,864 |
import tensorflow as tf
-------
The selected tensor.
"""
if condition.dtype != tf.bool:
condition = tf.cast(condition, 'bool')
if not callable(then_expression):
def then_expression_fn():
| tensorflow.cast | 5,865 |
import tensorflow as tf
self.b1 = tf.Variable(tf.zeros([10]))
def _build_model(self, x, y):
w0 = self.w0.read_value()
b0 = self.b0.read_value()
w1 = self.w1.read_value()
b1 = self.b1.read_value()
params = (w0, b0, w1, b1)
layer0 = tf.matmul(x, w0) + b0
layer1 = tf.nn.sigmoid(layer0)
layer2 = tf.matmul(layer1, w1) + b1
predictions = layer2
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=predictions, labels=y))
grads = tf.gradients(ys=loss, xs=params)
return predictions, loss, grads
def build_training_model(self, x, y):
"""
| tensorflow.nn.sigmoid | 5,866 |
import tensorflow as tf
def import_params(model_dir, model_name, params):
model_dir = os.path.abspath(model_dir)
p_name = os.path.join(model_dir, "params.json")
m_name = os.path.join(model_dir, model_name + ".json")
if not tf.gfile.Exists(p_name) or not tf.gfile.Exists(m_name):
return params
with tf.gfile.Open(p_name) as fd:
tf.logging.info("Restoring hyper parameters from %s" % p_name)
json_str = fd.readline()
params.parse_json(json_str)
with tf.gfile.Open(m_name) as fd:
tf.logging.info("Restoring model parameters from %s" % m_name)
json_str = fd.readline()
| tensorflow.gfile.Open | 5,867 |
import tensorflow as tf
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
# print("w", w.get_shape())
| tensorflow.matmul | 5,868 |
import tensorflow as tf
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output /= tf.reduce_sum(
output, axis=len(output.get_shape()) - 1, keep_dims=True)
# manual computation of crossentropy
epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
output = tf.clip_by_value(output, epsilon, 1. - epsilon)
return -tf.reduce_sum(
target * tf.log(output), axis=len(output.get_shape()) - 1)
else:
try:
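# Fallback below presumably tolerates older TF builds whose signature rejects these keyword arguments (pattern kept from upstream Keras).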
return tf.nn.softmax_cross_entropy_with_logits(
labels=target, logits=output)
except TypeError:
return tf.nn.softmax_cross_entropy_with_logits(
logits=output, labels=target)
def sparse_categorical_crossentropy(output, target, from_logits=False):
"""Categorical crossentropy between an output tensor
and a target tensor, where the target is an integer tensor.
"""
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
| tensorflow.nn.softmax_cross_entropy_with_logits | 5,869 |
import tensorflow as tf
depth = im.get_shape().as_list()[1]
height = im.get_shape().as_list()[2]
width = im.get_shape().as_list()[3]
channels = im.get_shape().as_list()[4]
x = tf.to_float(x)
y = tf.to_float(y)
z = tf.to_float(z)
depth_f = tf.to_float(depth)
height_f = tf.to_float(height)
width_f = tf.to_float(width)
# Number of disparity interpolated.
| tensorflow.to_float | 5,870 |
import tensorflow as tf
grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad
# These are trust-region-adjusted gradients wrt f, i.e. the statistics of policy pi
grads_f = -grad / (self.n_envs * self.n_steps)
grads_policy = tf.gradients(f_i_, self.params, grads_f)
grads_q = tf.gradients(loss_q * self.q_coef, self.params)
grads = [gradient_add(g1, g2, param, verbose=self.verbose)
for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]
avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
| tensorflow.gradients | 5,871 |
import tensorflow as tf
weights = np.load(self.load_weights_path)
init_state_initializer = tf.constant_initializer(weights['init_state'])
W_in_initializer = tf.constant_initializer(weights['W_in'])
W_rec_initializer = tf.constant_initializer(weights['W_rec'])
W_out_initializer = tf.constant_initializer(weights['W_out'])
b_rec_initializer = tf.constant_initializer(weights['b_rec'])
b_out_initializer = tf.constant_initializer(weights['b_out'])
| tensorflow.constant_initializer | 5,872 |
import tensorflow as tf
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
| tensorflow.reduce_mean | 5,873 |
import tensorflow as tf
if add_bias:
bias = tf.Variable([0.0])
| tensorflow.Variable | 5,874 |
import tensorflow as tf
padding=self.padding,
name='resize_x_to_y')
resized = tf.nn.bias_add(
resized,
bias)
resized = self.ff_nl(resized)
return resized
elif mode == 'replicate_n_transpose':
resized = tf.image.resize_images(
x,
y_size[:-1],
kernel,
align_corners=False)
resized = tf.nn.conv3d_transpose(
value=resized,
filter=kernel,
output_shape=y_size,
strides=[1, 1, 1, 1, 1],
padding='SAME',
name='resize_x_to_y')
resized = tf.nn.bias_add(
resized,
bias)
resized = self.ff_nl(resized)
return resized
else:
| tensorflow.nn.conv3d_transpose | 5,875 |
from tensorflow.python.framework import ops
Returns:
If the default graph is being used to define a function, the
returned list of variables are those created inside the function
body so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
| tensorflow.python.framework.ops.get_default_graph | 5,876 |
import tensorflow as tf
gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)
| tensorflow.constant_initializer | 5,877 |
import tensorflow as tf
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.GRUCell(2)
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
dec_inp, enc_state, attn_states, cell, num_symbols=4,
embedding_size=2, output_size=3)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
| tensorflow.nn.rnn | 5,878 |
from tensorflow.python.ops import check_ops
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A tensor representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = _create_local('count', shape=[])
values = math_ops.to_float(values)
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if metrics_collections:
ops.add_to_collections(metrics_collections, value_tensor)
| tensorflow.python.ops.check_ops.assert_type | 5,879 |
import tensorflow as tf
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
| tensorflow.variable_scope | 5,880 |
import tensorflow as tf
tf.flags.DEFINE_integer("batch_size", "50", "batch size for training")
tf.flags.DEFINE_string("logs_dir", "/scratch1/ram095/nips20/logs_mnist128/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "/scratch1/ram095/nips20/paris_street", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = int(1e5 + 1)
NUM_OF_CLASSESS = 3
| tensorflow.flags.DEFINE_string | 5,881 |
import tensorflow as tf
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
| tensorflow.global_variables_initializer | 5,882 |
import tensorflow as tf
def import_ops(self):
"""Imports ops from collections."""
if self._is_training:
self._train_op = tf.get_collection_ref("train_op")[0]
self._lr = tf.get_collection_ref("lr")[0]
self._new_lr = tf.get_collection_ref("new_lr")[0]
| tensorflow.get_collection_ref | 5,883 |
import tensorflow as tf
if __name__ == "__main__":
tf.autograph.set_verbosity(0)
| tensorflow.autograph.set_verbosity | 5,884 |
from tensorflow.python.ops import math_ops
value_tensor, update_op = streaming_mean_squared_error(
predictions, labels, weights, None, None,
name or 'root_mean_squared_error')
root_mean_squared_error = math_ops.sqrt(value_tensor)
with ops.control_dependencies([update_op]):
update_op = math_ops.sqrt(update_op)
| tensorflow.python.ops.math_ops.sqrt | 5,885 |
import tensorflow as tf
return out
@layer
def recurrent_layer(tensor, cell=None, hidden_dims=128, sequence_length=None, decoder_fn=None,
activation=tf.nn.tanh, initializer=tf.orthogonal_initializer(), initial_state=None,
keep_prob=1.0,
return_final_state=False, return_next_cell_input=True, **opts):
if cell is None:
cell = tf.contrib.rnn.BasicRNNCell(hidden_dims, activation=activation)
# cell = tf.contrib.rnn.LSTMCell(hidden_dims, activation=activation)
if keep_prob < 1.0:
keep_prob = _global_keep_prob(keep_prob)
cell = tf.contrib.rnn.DropoutWrapper(cell, keep_prob, keep_prob)
if opts.get("name"):
tf.add_to_collection(opts.get("name"), cell)
| tensorflow.contrib.rnn.BasicRNNCell | 5,886 |
from tensorflow.python.ops import math_ops
if weight_tensor is None:
return math_ops.reduce_mean(loss_vec, name="loss")
else:
loss_vec = array_ops.reshape(loss_vec, shape=(-1,))
loss_vec = math_ops.mul(
loss_vec, array_ops.reshape(weight_tensor, shape=(-1,)))
return math_ops.div(
math_ops.reduce_sum(loss_vec),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
def _get_linear_vars(self):
if self._get_linear_feature_columns():
return ops.get_collection(self._linear_weight_collection)
| tensorflow.python.ops.math_ops.reduce_sum | 5,887 |
import tensorflow as tf
cur_batch_size = dynamic_image_shape[0]
# Get static shape of image.
# shape = (3,)
static_image_shape = params["generator_projection_dims"]
print_obj(
"minibatch_stddev", "static_image_shape", static_image_shape
)
# cur_batch_size must be divisible by or smaller than group_size.
divisbility_condition = tf.equal(
x=tf.mod(x=cur_batch_size, y=group_size),
y=0,
name="divisbility_condition"
)
less_than_condition = tf.less(
x=cur_batch_size, y=group_size, name="less_than_condition"
)
any_condition = tf.reduce_any(
input_tensor=[divisbility_condition, less_than_condition],
| tensorflow.mod | 5,888 |
import tensorflow as tf
E_init_args=None,
name='embedding',
):
if E_init_args is None:
E_init_args = {}
super(EmbeddingInputlayer, self).__init__(prev_layer=None, name=name)
logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
self.inputs = inputs
with tf.variable_scope(name):
embeddings = tf.get_variable(
name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
self.outputs = embed
self.all_layers = [self.outputs]
self.all_params = [embeddings]
self.all_drop = {}
class AverageEmbeddingInputlayer(Layer):
| tensorflow.get_variable | 5,889 |
import tensorflow as tf
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(1.0, tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
def summarize_features(features, num_shards=1):
with tf.name_scope("input_stats"):
for (k, v) in six.iteritems(features):
if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
_already_logged = set()
| tensorflow.name_scope | 5,890 |
import tensorflow as tf
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# batch norm updates
with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
train_op = tf.no_op(name='train_op')
saver = tf.train.Saver(tf.global_variables())
| tensorflow.control_dependencies | 5,891 |
import tensorflow as tf
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
modality = self._problem_hparams.target_modality
if modality.top_is_pointwise:
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
| tensorflow.concat | 5,892 |
import tensorflow.contrib.layers as layers
return out
def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False):
with tf.variable_scope(scope, reuse=reuse):
out = img_in
out = layers.flatten(out)
# stddev = 1/n, where n = number of inputs
gauss_initializer = initializers.xavier_initializer(uniform=False)
with tf.variable_scope("action_value"):
out = layers.fully_connected(
out,
| tensorflow.contrib.layers.flatten | 5,893 |
import tensorflow as tf
self._reward = tf.Variable(
lambda: tf.zeros(batch_dims, tf.float32),
name='reward', trainable=False)
self._done = tf.Variable(
lambda: tf.cast(tf.ones(batch_dims), tf.bool),
name='done', trainable=False)
def __getattr__(self, name):
| tensorflow.ones | 5,894 |
from tensorflow.python.platform import test
embeddings = ops.categorical_variable(
cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
sess.run(variables.global_variables_initializer())
emb1 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})
emb2 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})
self.assertEqual(emb1.shape, emb2.shape)
self.assertAllEqual(np.transpose(emb2, axes=[1, 0, 2]), emb1)
if __name__ == "__main__":
test.main()
| tensorflow.python.platform.test.main | 5,895 |
from tensorflow.python.training import moving_averages
if is_train:
# For training, do batch norm with batch mean & variance
# Update moving averages if training
(X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)
update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
with tf.control_dependencies([update_mean, update_variance]):
X = tf.identity(X)
else:
# For prediction, do batch norm with computed moving mean & variance from training
| tensorflow.python.training.moving_averages.assign_moving_average | 5,896 |
import tensorflow as tf
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
| tensorflow.ones | 5,897 |
import tensorflow as tf
pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation)
with tf.variable_scope('q1'):
q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
with tf.variable_scope('q2'):
q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
with tf.variable_scope('q1', reuse=True):
q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
elif nn_type == 'mlp_dropout':
| tensorflow.concat | 5,898 |
import tensorflow as tf
gradvars = []
with tf.device('/cpu:0'):
| tensorflow.device | 5,899 |