seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k)
---|---|---|
import tensorflow as tf
# Now try a restore with the sharded filename.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
| tensorflow.ConfigProto | 5,900 |
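The seed above is cut off inside the second device scope; for orientation only, a minimal self-contained sketch of the same tf.ConfigProto multi-CPU pattern (TF 1.x API assumed; the values and the name v1 are illustrative, not from the original) could look like:
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): pin two variables to two logical CPU devices.
with tf.Session(config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
    with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(222, name="v1")
    sess.run(tf.global_variables_initializer())
    print(sess.run([v0, v1]))  # [111, 222]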
import tensorflow as tf
dout_dx = sbnet_module.sparse_gather(
grad,
binCounts,
activeBlockIndices,
dynamic_bsize=bsize,
dynamic_bstride=bstride,
dynamic_boffset=boffset)
# return a list of gradients of output with respect to each input
if not doAdd:
# scatter blocks of zeroes over a base tensor of ones to compute a stamp-out gradient mask for dy_dybase
stamp_out_blocks = sbnet_module.sparse_scatter(
tf.zeros_like(blocksX),
binCounts,
activeBlockIndices,
tf.ones_like(grad),
dynamic_bsize=bsize,
dynamic_bstride=bstride,
dynamic_boffset=boffset,
add=False)
dy_dybase = grad * stamp_out_blocks
return [dout_dx, None, None, dy_dybase, None, None, None]
else:
# d(x+ybase)/dybase = 1, so just pass back grad as dout_dybase
return [dout_dx, None, None, grad, None, None, None]
| tensorflow.zeros_like | 5,901 |
import tensorflow as tf
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, do_serve):
"""Returns `model_fn` closure for TPUEstimator."""
| tensorflow.reduce_sum | 5,902 |
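For reference, a minimal sketch of the one-hot cross-entropy pattern used above (TF 1.x assumed; the logits and label values are made up, not from the original model):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): one-hot cross-entropy over the label dimension.
logits = tf.constant([[2.0, 0.5, -1.0], [0.1, 1.2, 0.3]])  # [batch, num_labels]
labels = tf.constant([0, 1])                               # integer class ids
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=3, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)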
from tensorflow.contrib.framework import tensor_util
name: An optional variable_scope name.
Returns:
mean_relative_error: A tensor representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels = tensor_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions, normalizer = tensor_util.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = math_ops.select(
math_ops.equal(normalizer, 0.0),
array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return streaming_mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
| tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions | 5,903 |
import tensorflow as tf
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
| tensorflow.metrics.accuracy | 5,904 |
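The record above is truncated before the metric tuple is finished, so here is a minimal hedged sketch of how tf.metrics.accuracy behaves on its own (TF 1.x assumed; the label and prediction values are illustrative):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): tf.metrics.accuracy returns (value_op, update_op)
# and accumulates its counts in local variables.
labels = tf.constant([1, 2, 4])
predictions = tf.constant([1, 2, 3])
accuracy, update_op = tf.metrics.accuracy(labels=labels, predictions=predictions)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # the metric counters are local variables
    sess.run(update_op)
    print(sess.run(accuracy))  # 2/3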
import tensorflow as tf
predict_nor = tf.nn.softmax(logit_nor)
predict_adv = tf.nn.softmax(logit_adv)
# Calculate entropy
argmax_y_onehot = tf.one_hot(tf.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1)
normalized_y_nonmaximal = tf.reduce_sum(predict * argmax_y_onehot, 1)
entropy = tf.reduce_sum(-tf.log(predict) * predict * argmax_y_onehot,1) / normalized_y_nonmaximal + tf.log(normalized_y_nonmaximal)
for k in range(1):
result_dict = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_for_attack_' + f1 + '.mat')
result_dict_median = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_median_for_attack_' + f1 + '.mat')
# e_mean = result_dict['mean_logits_' + f1] # 10X64
| tensorflow.log | 5,905 |
import tensorflow as tf
with tf.variable_scope('word_char_embedding'):
if self.config.fix_pretrained_vector:
self.pretrained_word_mat = tf.get_variable("word_emb_mat",
[self.vocab.word_size() - 2, self.vocab.word_embed_dim],
dtype=tf.float32,
initializer=tf.constant_initializer(
self.vocab.word_embeddings[2:],
dtype=tf.float32),
trainable=False)
self.word_pad_unk_mat = tf.get_variable("word_unk_pad",
[2, self.pretrained_word_mat.get_shape()[1]],
| tensorflow.constant_initializer | 5,906 |
import tensorflow as tf
def initial_state(self):
"""See base class."""
return {self.ITERATION_STATE_KEY: tf.constant(1, dtype=tf.int32)}
| tensorflow.constant | 5,907 |
import tensorflow as tf
ema = tf.train.ExponentialMovingAverage(decay=0.9)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(b_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
def add_dense_layer(layer, filter_dims, act_func=tf.nn.relu, scope='dense_layer',
use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
with tf.variable_scope(scope):
l = layer
| tensorflow.nn.batch_normalization | 5,908 |
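As a companion to the EMA-based wrapper above, a minimal sketch of the bare tf.nn.batch_normalization call (TF 1.x assumed; the shapes and epsilon are illustrative):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): normalize a batch with its own statistics.
x = tf.random_normal([8, 16])
batch_mean, batch_var = tf.nn.moments(x, axes=[0])
beta = tf.zeros([16])   # offset
gamma = tf.ones([16])   # scale
normed = tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, 1e-3)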
import tensorflow as tf
src_one_hot_labels = tf.one_hot(tf.cast(src_labels, tf.int64), num_classes)
src_logits, dst_logits = get_model_logits(src_features, finetune_features,
mode, num_classes,
target_num_classes)
loss, _, _ = get_final_loss(src_logits, src_one_hot_labels, dst_logits,
finetune_one_hot_labels, global_step,
loss_weights, inst_weights)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
src_train_op, _ = get_src_train_op(loss)
with tf.control_dependencies([src_train_op]):
target_avg_pool = get_logits(
target_features, mode, FLAGS.target_dataset, reuse=True)
target_logits = do_cls(
target_avg_pool, target_num_classes, name='final_target_dense')
is_prediction_correct = tf.equal(
tf.argmax(tf.identity(target_logits), axis=1),
tf.argmax(target_one_hot_labels, axis=1))
acc = tf.reduce_mean(tf.cast(is_prediction_correct, tf.float32))
entropy = loss_entropy + rl_entropy
log_prob = loss_log_prob + log_prob
train_op, _, _ = meta_train_op(acc, entropy, log_prob, rl_scope, params)
| tensorflow.control_dependencies | 5,909 |
import tensorflow as tf
w0 = self.w0.read_value()
b0 = self.b0.read_value()
w1 = self.w1.read_value()
b1 = self.b1.read_value()
params = (w0, b0, w1, b1)
layer0 = tf.matmul(x, w0) + b0
layer1 = tf.nn.sigmoid(layer0)
layer2 = tf.matmul(layer1, w1) + b1
predictions = layer2
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=predictions, labels=y))
| tensorflow.matmul | 5,910 |
import tensorflow as tf
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = tf.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = tf.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = tf.shape(matrix)
| tensorflow.shape | 5,911 |
import tensorflow as tf
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
self._DoPredictions(out_size, rnnout, self.weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
Get the cross-entropy for each example in the vector self._xent.
| tensorflow.nn.bidirectional_rnn | 5,912 |
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear | 5,913 |
import tensorflow as tf
raw_loss = losses_utils.weighted_surrogate_loss(
labels=tf.ones_like(signed_logits_difference),
| tensorflow.ones_like | 5,914 |
import tensorflow as tf
new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)
new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)
| tensorflow.square | 5,915 |
import tensorflow as tf
source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb]
target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]
| tensorflow.nn.dropout | 5,916 |
import tensorflow as tf
next_states = dict()
for name in sorted(self.states_memory):
next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)
next_internals = dict()
| tensorflow.gather | 5,917 |
import tensorflow as tf
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start Logistic Regression
print('Starting Training Over {} Sentences.'.format(len(texts_train)))
loss_vec = []
train_acc_all = []
| tensorflow.train.GradientDescentOptimizer | 5,918 |
import tensorflow as tf
from .ops import gmf, loss_fn, train_fn
np.random.seed(self.seed)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(self.seed)
self.user_id = tf.placeholder(shape=[None, ], dtype=tf.int32, name='user_id')
self.item_id = tf.placeholder(shape=[None, ], dtype=tf.int32, name='item_id')
self.labels = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='labels')
self.interaction = gmf(uid=self.user_id, iid=self.item_id, num_users=self.train_set.num_users,
num_items=self.train_set.num_items, emb_size=self.num_factors,
| tensorflow.set_random_seed | 5,919 |
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
# glimpse2 = tf.image.extract_glimpse(inputs, [glimpse_size2,glimpse_size2], self.prev_loc, centered=True, normalized=True)
# glimpse2 = tf.image.resize(glimpse2, [glimpse_size1,glimpse_size1])
# glimpse3 = tf.image.extract_glimpse(inputs, [glimpse_size3,glimpse_size3], self.prev_loc, centered=True, normalized=True)
# glimpse3 = tf.image.resize(glimpse3, [glimpse_size1,glimpse_size1])
# self.glimpses = tf.concat([glimpse1,glimpse2,glimpse3],axis=-1)
# Block 1
conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(self.inputs)
conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1a)
conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1b)
pool1 = MaxPool2D(pool_size=[2,2])(conv1c)
# Block 2
conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool1)
conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2a)
conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2b)
pool2 = MaxPool2D(pool_size=[2,2])(conv2c)
# Block 3
conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2)
| tensorflow.keras.layers.Conv2D | 5,920 |
import tensorflow as tf
path=r'C:\JC\test\train_model.ckpt'
image,label=getinputs(r'C:\JC\tfrecord\64_shuffle/train.tfrecords')
test_image,test_label=getinputs(r'C:\JC\tfrecord\64_shuffle/test.tfrecords')
valid_image,valid_label= getinputs(r'C:\JC\tfrecord\64_shuffle\validation.tfrecords')
batch_image,batch_label=get_batch(image,label,trainnum,0)
work=trainwork()
inf=work.inference(batch_image)
loss=work.softmax_loss(inf,batch_label)
opti=work.optimer(loss,learnrate)
test_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum)
test_inf=work.test_inference(test_image_batch)
test_labels=tf.one_hot(test_label_batch,classnum)
test_pre = tf.reshape(test_inf, [testnum, classnum])
correct_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
test_pre = tf.argmax(test_pre, 1)
test_true = tf.argmax(test_labels, 1)
valid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)
valid_inf=work.valid_inference(valid_image_batch)
valid_labels=tf.one_hot(valid_label_batch,classnum)
#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
valid_pre = tf.reshape(valid_inf, [validnum, classnum])
valid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))
valid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32))
valid_pre = tf.argmax(valid_pre, 1)
valid_true = tf.argmax(valid_labels, 1)
| tensorflow.argmax | 5,921 |
import tensorflow as tf
# nearest neighbor upsampling. The second option may introduce the "blocking
# effect" but is computationally efficient.
if model_options.prediction_with_upsampled_logits:
logits = _resize_bilinear(logits,
#tf.shape(images)[1:3],
tf.TensorShape([512,512]),
scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
predictions[output] = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
#predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits)
else:
argmax_results = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
argmax_results = tf.image.resize_nearest_neighbor(
tf.expand_dims(argmax_results, 3),
tf.shape(images)[1:3],
align_corners=True,
name='resize_prediction')
predictions[output] = tf.squeeze(argmax_results, 3)
#predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear(
# tf.nn.softmax(logits),
# tf.shape(images)[1:3],
# align_corners=True,
# name='resize_prob')
return predictions
| tensorflow.expand_dims | 5,922 |
import tensorflow as tf
avg_norm_g = avg_norm(grad)
avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
avg_norm_adj = tf.reduce_mean(tf.abs(adj))
grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad
# These are trust-region adjusted gradients wrt f, i.e. statistics of policy pi
grads_f = -grad / (self.n_envs * self.n_steps)
grads_policy = tf.gradients(f_i_, self.params, grads_f)
grads_q = tf.gradients(loss_q * self.q_coef, self.params)
grads = [gradient_add(g1, g2, param, verbose=self.verbose)
for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]
avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
norm_grads_q = tf.global_norm(grads_q)
| tensorflow.gradients | 5,923 |
import tensorflow as tf
gn_grads_, gn_grads_true_, v_grads_, v_grads_true_ = sess.run(
[gn_grads, gn_grads_true, v_grads, v_grads_true])
np.testing.assert_array_equal(gn_grads_, gn_grads_true_)
np.testing.assert_array_equal(v_grads_, v_grads_true_)
def test_get_train_op(self):
"""Tests get_train_op.
"""
var = tf.Variable(0.)
loss = tf.nn.l2_loss(var)
train_op = opt.get_train_op(loss)
self.assertTrue(tf.contrib.framework.is_tensor(train_op))
if __name__ == "__main__":
tf.test.main()
| tensorflow.Variable | 5,924 |
import tensorflow as tf
output = tf.log(output)
output_shape = output.get_shape()
targets = cast(flatten(target), 'int64')
logits = tf.reshape(output, [-1, int(output_shape[-1])])
try:
res = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
except TypeError:
res = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=targets)
if len(output_shape) == 3:
| tensorflow.nn.sparse_softmax_cross_entropy_with_logits | 5,925 |
import tensorflow as tf
def random_crop_and_resize(images, ratio=0.8):
b, h, w, c = images.get_shape().as_list()
ch, cw = map(lambda x: int(x * ratio), (h, w))
crop = tf.random_crop(images, size=[b, ch, cw, 3])
crop = tf.image.resize(crop, [h, w])
return crop
def random_apply(fn, image, prob=1.):
b, *_ = image.get_shape().as_list()
chance = tf.less(tf.random_uniform([b], 0, 1.0), prob)
return tf.where(chance, fn(image), tf.identity(image))
def color_distortion(image, s=1.0):
lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image
x = tf.image.random_brightness(x, max_delta=0.8*s)
x = tf.image.random_contrast(x, lower=lower, upper=upper)
x = tf.image.random_saturation(x, lower=lower, upper=upper)
x = tf.image.random_hue(x, max_delta=0.2*s)
x = tf.clip_by_value(x, 0, 1)
return x
| tensorflow.identity | 5,926 |
import tensorflow.contrib.graph_editor as ge
bwd_inputs = [t for op in bwd_ops for t in op.inputs]
# list of tensors in forward graph that is in input to bwd graph
ts_filtered = list(set(bwd_inputs).intersection(ts_all))
debug_print("Using tensors %s", ts_filtered)
# try two slightly different ways of getting bottleneck tensors
# to checkpoint
for ts in [ts_filtered, ts_all]:
# get all bottlenecks in the graph
bottleneck_ts = []
for t in ts:
b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
# check that there are not shortcuts
b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):
bottleneck_ts.append(t) # we have a bottleneck!
else:
debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))
# success? or try again without filtering?
if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found!
break
| tensorflow.contrib.graph_editor.get_forward_walk_ops | 5,927 |
import tensorflow as tf
cur_weights = tf.slice(weights_tensors[tf.cast(rand_horizon, tf.int32)], [0, 0], [epi_len, new_w])
# cur_weights = tf.slice(weights_tensors, [tf.cast(rand_horizon, tf.int32), 0, 0], [1, epi_len, new_w])
horizon_pred = tf.matmul(pred, cur_weights)
horizon_tgt = tf.matmul(tgt, cur_weights)
return horizon_pred, horizon_tgt
def contra_traj_lossV2(pred, tgt, horizon=9):
# Step-wise contrastive loss
horizon_pred = horizon_sumV1(pred, horizon)
horizon_tgt = horizon_sumV1(tgt, horizon)
pred1, pred2 = tf.split(horizon_pred, 2, axis=0)
tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small)))
loss = tf.reduce_mean(loss)
return loss
# random horizon
def contra_traj_lossV3(pred, tgt, horizon=12):
# Step-wise contrastive loss
horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
| tensorflow.cast | 5,928 |
import tensorflow as tf
# last_pred_mse = tf.metrics.mean_squared_error(score_map, targets,
# weights=1.0 / tf.cast(cur_batch_size, tf.float32),
# name='last_pred_mse')
# filtering out all invisible keypoints may work better for this task
# all_visible = tf.logical_and(key_v>0, isvalid>0)
# targets_list = [tf.boolean_mask(targets_list[ind], all_visible) for ind in list(range(len(targets_list)))]
# pred_outputs = [tf.boolean_mask(pred_outputs[ind], all_visible, name='boolean_mask_{}'.format(ind)) for ind in list(range(len(pred_outputs)))]
all_visible = tf.expand_dims(tf.expand_dims(tf.cast(tf.logical_and(key_v>0, isvalid>0), tf.float32), axis=-1), axis=-1)
targets_list = [targets_list[ind] * all_visible for ind in list(range(len(targets_list)))]
pred_outputs = [pred_outputs[ind] * all_visible for ind in list(range(len(pred_outputs)))]
sq_diff = tf.reduce_sum(tf.squared_difference(targets, pred_outputs[-1]), axis=-1)
last_pred_mse = tf.metrics.mean_absolute_error(sq_diff, tf.zeros_like(sq_diff), name='last_pred_mse')
metrics = {'normalized_error': ne_mertric, 'last_pred_mse':last_pred_mse}
predictions = {'normalized_error': ne_mertric[1]}
ne_mertric = tf.identity(ne_mertric[1], name='ne_mertric')
base_learning_rate = params['learning_rate']
mse_loss_list = []
if params['use_ohkm']:
base_learning_rate = 1. * base_learning_rate
for pred_ind in list(range(len(pred_outputs) - 1)):
mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind],
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
| tensorflow.zeros_like | 5,929 |
import tensorflow as tf
schema = schema_inference.infer_feature_schema(tensors, graph)
expected_schema = schema_utils.schema_from_feature_spec(
feature_spec, domains)
self.assertEqual(schema, expected_schema)
def test_infer_feature_schema_bad_rank(self):
with tf.compat.v1.Graph().as_default() as graph:
tensors = {
'a': tf.compat.v1.placeholder(tf.float32, ()),
}
with self.assertRaises(ValueError):
schema_inference.infer_feature_schema(tensors, graph)
def test_bucketization_annotation(self):
# TODO(b/132098015): Schema annotations aren't yet supported in OSS builds.
# pylint: disable=g-import-not-at-top
try:
| tensorflow.compat.v1.placeholder | 5,930 |
import tensorflow as tf
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
| tensorflow.global_variables_initializer | 5,931 |
import tensorflow as tf
self.d_loss_reals.append(self.d_loss_real)
self.d_loss_fakes.append(self.d_loss_fake)
self.d_loss_classes.append(self.d_loss_class)
self.d_losses.append(self.d_loss)
self.predictions = self.end_points_D_val['predictions']
def _get_vars_semi_supervised(self):
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if var.name.startswith('d_')]
g_vars = [var for var in t_vars if var.name.startswith('g_')]
for x in d_vars:
assert x not in g_vars
for x in g_vars:
assert x not in d_vars
| tensorflow.trainable_variables | 5,932 |
import tensorflow as tf
input_size=config.hidden_size,
dropout=1 - config.keep_prob if is_training else 0)
params_size_t = self._cell.params_size()
self._rnn_params = tf.get_variable(
"lstm_params",
initializer=tf.random_uniform(
[params_size_t], -config.init_scale, config.init_scale),
validate_shape=False)
c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
outputs = tf.transpose(outputs, [1, 0, 2])
outputs = tf.reshape(outputs, [-1, config.hidden_size])
return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
def _get_lstm_cell(self, config, is_training):
if config.rnn_mode == BASIC:
return tf.contrib.rnn.BasicLSTMCell(
config.hidden_size, forget_bias=0.0, state_is_tuple=True,
reuse=not is_training)
if config.rnn_mode == BLOCK:
| tensorflow.contrib.rnn.LSTMStateTuple | 5,933 |
import tensorflow as tf
return out
def deconv2d(x, dim=(32, [3, 3], [1, 1]), pad='SAME', scope="deconv2d", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)):
num_filters, filter_size, stride = dim
xs = x.get_shape().as_list()
if pad=='SAME':
target_shape = [tf.shape(x)[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
else:
target_shape = [tf.shape(x)[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
with tf.variable_scope(scope):
V = tf.get_variable("V", shape=list(filter_size) + [num_filters, int(x.get_shape()[-1])], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
g = tf.get_variable("g", shape=[num_filters], dtype=tf.float32, initializer=tf.constant_initializer(1.), trainable=True)
b = tf.get_variable("b", shape=[num_filters], dtype=tf.float32, initializer=bias_initializer, trainable=True)
def maybe_avg(v):
if ema is not None and not init:
v = tf.cond(training, lambda: v, lambda: ema.average(v))
return v
| tensorflow.shape | 5,934 |
import tensorflow as tf
@staticmethod
def _relu(name, x):
with tf.variable_scope(name):
return tf.nn.relu(x)
| tensorflow.variable_scope | 5,935 |
import tensorflow as tf
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'batch_size', 12,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.56, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
'neg_threshold', 0.4, 'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_float(
'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
| tensorflow.app.flags.DEFINE_float | 5,936 |
import tensorflow as tf
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
| tensorflow.constant_initializer | 5,937 |
import tensorflow as tf
return self.predicted
def res_block_3_layers(self, bottom, channel_list, name, change_dimension = False):
if (change_dimension):
block_conv_input = self.conv_layer(bottom = bottom, kernal_size = 1, in_channels = bottom.get_shape().as_list()[-1],
out_channels = channel_list[2], stride = 1, name = name + "_branch1")
else:
block_conv_input = bottom
input_filter = bottom.get_shape().as_list()[-1]
block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a")
block_norm_1 = tf.layers.batch_normalization(inputs=block_conv_1, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
block_relu_1 = tf.nn.relu(block_norm_1)
block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b")
block_norm_2 = tf.layers.batch_normalization(inputs=block_conv_2, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
block_relu_2 = tf.nn.relu(block_norm_2)
block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c")
block_res = tf.add(block_conv_input, block_conv_3)
relu = tf.nn.relu(block_res)
return relu
| tensorflow.layers.batch_normalization | 5,938 |
import tensorflow as tf
B = [1, 1]
top_k = tf.nn.top_k(A, 2)
in_top_k = tf.nn.in_top_k(A, B, 1)
| tensorflow.nn.in_top_k | 5,939 |
import tensorflow as tf
with tf.variable_scope(name) :
self.v = tf.get_variable('v',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
| tensorflow.random_normal_initializer | 5,940 |
import tensorflow as tf
pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
| tensorflow.where | 5,941 |
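A minimal sketch of the tf.where selection pattern used in the pairwise loss above (TF 1.x assumed; the tensors are toy values):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): elementwise selection with tf.where,
# the same trick used above to order each (tgt, pred) pair.
tgt1 = tf.constant([1.0, 0.2])
tgt2 = tf.constant([0.5, 0.9])
geq = tgt1 - tgt2 > 0                  # boolean mask, one entry per pair
tgt_larg = tf.where(geq, tgt1, tgt2)   # [1.0, 0.9]
tgt_small = tf.where(geq, tgt2, tgt1)  # [0.5, 0.2]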
import tensorflow as tf
weights=mix_kernels,
strides=[1, 1, 1, 1, 1],
symmetric_weights=self.symmetric_gate_weights,
dilations=self.hgru_dilations[layer_idx])
with tf.variable_scope(
'%s/g2_bn' % var_scope,
reuse=self.scope_reuse) as scope:
g2_intermediate = tf.contrib.layers.batch_norm(
inputs=g2_intermediate + mix_bias,
| tensorflow.variable_scope | 5,942 |
import tensorflow as tf
end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]],axis = -1),1, bias = False, name = "end_pointer"), -1)
self.logits = [mask_logits(start_logits, mask = self.c_mask),
mask_logits(end_logits, mask = self.c_mask)]
logits1, logits2 = [l for l in self.logits]
outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
tf.expand_dims(tf.nn.softmax(logits2), axis=1))
outer = tf.matrix_band_part(outer, 0, config.ans_limit)
self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits1, labels=self.y1)
losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits2, labels=self.y2)
self.loss = tf.reduce_mean(losses + losses2)
if config.l2_norm is not None:
variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
self.loss += l2_loss
if config.decay is not None:
self.var_ema = tf.train.ExponentialMovingAverage(config.decay)
ema_op = self.var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
| tensorflow.nn.sparse_softmax_cross_entropy_with_logits | 5,943 |
import tensorflow as tf
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grads = optimizer.compute_gradients(loss_val, var_list=var_list)
if FLAGS.debug:
# print(len(var_list))
for grad, var in grads:
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def train_z(loss,Z):
return tf.gradients(ys = loss, xs = Z)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")
# pred_annotation, logits = inference(image, keep_probability,z)
# tf.summary.image("input_image", image, max_outputs=2)
# tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
# tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
# labels=tf.squeeze(annotation, squeeze_dims=[3]),
# name="entropy")))
mask_ = tf.ones([FLAGS.batch_size,64,64,3])
mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])
| tensorflow.placeholder | 5,944 |
import tensorflow as tf
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
| tensorflow.python_io.TFRecordWriter | 5,945 |
import tensorflow as tf
for a in attn_dists:
covloss = tf.reduce_sum(tf.minimum(a, coverage), [1]) # calculate the coverage loss for this step
covlosses.append(covloss)
coverage += a # update the coverage vector
coverage_loss = _mask_and_avg(covlosses, loss_weights)
return coverage_loss
# values: [batch_size, step_size, vocab_size]
# answers: [batch_size, step_size]
def _mask_and_accuracy(values, answers, loss_weights):
values = tf.argmax(values,axis=2)
x = tf.cast(values, dtype=tf.int32)
y = tf.cast(answers, dtype=tf.int32)
res = tf.equal(x, y)
res = tf.cast(res, dtype=tf.float32)
res = tf.multiply(res, loss_weights)
return tf.reduce_sum(res)
| tensorflow.multiply | 5,946 |
import tensorflow as tf
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
| tensorflow.reshape | 5,947 |
import tensorflow as tf
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
n_in = np.prod(shape[0])
lamb = 1.0 / n_in
with tf.name_scope(name):
return random_matrix(shape, tt_rank=tt_rank, stddev=np.sqrt(lamb),
dtype=dtype)
| tensorflow.name_scope | 5,948 |
import tensorflow as tf
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
| tensorflow.FixedLenFeature | 5,949 |
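A minimal sketch showing how a tf.FixedLenFeature spec like the one above is typically consumed (TF 1.x assumed; the feature names follow the snippet, but the fixed length 4 is an illustrative stand-in for max_predictions_per_seq):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): parse a serialized tf.Example with a fixed-length spec.
name_to_features = {
    "masked_lm_ids": tf.FixedLenFeature([4], tf.int64),
    "masked_lm_weights": tf.FixedLenFeature([4], tf.float32),
    "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
}
def _decode_record(serialized_example):
    # Returns a dict of dense tensors keyed by feature name.
    return tf.parse_single_example(serialized_example, name_to_features)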
import tensorflow as tf
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
with self.test_session() as sess:
v = tf.Variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
tf.initialize_all_variables().run()
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
save = tf.train.Saver({"v": v}, max_to_keep=2,
keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
# Wait till 0.7 second have elapsed so s1 will be old enough to keep.
time.sleep((time.time() + 0.7) - start_time)
| tensorflow.initialize_all_variables | 5,950 |
import tensorflow as tf
return tf.to_float(safe_rew_labels)
return safe_rew_labels
# Pre-processor layers to remove observation from observation dict returned by
# goal-conditioned point-mass environment.
@gin.configurable
def extract_obs_merge_w_ac_layer():
def f(layer_input):
return tf.keras.layers.concatenate(
[layer_input[0]['observation'], layer_input[1]], axis=1)
return tf.keras.layers.Lambda(f)
# HACK: inputs to concatenate have to be in list (not tuple) format
# see "tensorflow_core/python/keras/layers/merge.py", line 378
@gin.configurable
def merge_obs_w_ac_layer():
| tensorflow.keras.layers.concatenate | 5,951 |
import tensorflow as tf
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
| tensorflow.flags.DEFINE_string | 5,952 |
import tensorflow as tf
alpha: A scalar, slope of positive section.
Returns
-------
A tensor.
"""
res = tf.nn.elu(x)
if alpha == 1:
return res
else:
return tf.where(x > 0, res, alpha * res)
def relu(x, alpha=0., max_value=None):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Parameters
----------
x: A tensor or variable.
| tensorflow.where | 5,953 |
import tensorflow as tf
return masks, tf.group(prune_ops)
def __calc_prune_ratio_dyn(self, prune_ratio_fnl):
"""Calculate the dynamic pruning ratio.
Args:
* prune_ratio_fnl: final pruning ratio
Returns:
* prune_ratio_dyn: dynamic pruning ratio
"""
idx_iter_beg = int(self.nb_iters_train * FLAGS.ws_iter_ratio_beg)
idx_iter_end = int(self.nb_iters_train * FLAGS.ws_iter_ratio_end)
base = tf.cast(self.global_step - idx_iter_beg, tf.float32) / (idx_iter_end - idx_iter_beg)
base = tf.minimum(1.0, tf.maximum(0.0, base))
prune_ratio_dyn = prune_ratio_fnl * (1.0 - tf.pow(1.0 - base, FLAGS.ws_prune_ratio_exp))
return prune_ratio_dyn
def __calc_grads_pruned(self, grads_origin):
"""Calculate the mask-pruned gradients.
Args:
* grads_origin: list of original gradients
Returns:
* grads_pruned: list of mask-pruned gradients
| tensorflow.cast | 5,954 |
import tensorflow as tf
Returns:
`input_fn` for `Estimator` in PREDICT mode.
"""
def _predict_input_fn(params=None):
"""Decodes serialized tf.Examples and returns `ServingInputReceiver`.
Args:
params: Parameter dictionary passed from the estimator.
Returns:
`ServingInputReceiver`.
"""
del params
example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example')
num_classes = config_util.get_number_of_classes(model_config)
model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
model_config, is_training=False).preprocess
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
transform_fn = functools.partial(
transform_input_data, model_preprocess_fn=model_preprocess_fn,
image_resizer_fn=image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=None)
| tensorflow.placeholder | 5,955 |
import tensorflow as tf
if params['optimizer'] == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'] == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
elif params['optimizer'] == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(learning_rate)
elif params['optimizer'] == 'adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate)
elif params['optimizer'] == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'] == 'lars':
optimizer = tf.contrib.opt.LARSOptimizer(
learning_rate,
momentum=params['momentum'],
weight_decay=params['lars_weight_decay'],
skip_list=['batch_normalization', 'bias'])
else:
raise ValueError('Unsupported optimizer type %s.' % params['optimizer'])
return optimizer
def remove_variables(variables, resnet_depth=50):
"""Removes low-level variables from the input.
| tensorflow.contrib.opt.LARSOptimizer | 5,956 |
import tensorflow as tf
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
mobilenet_v1.mobilenet_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(3217920, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
| tensorflow.random_uniform | 5,957 |
import tensorflow as tf
# Create graph
sess = tf.Session()
# Create tensors
# Create data to feed in
x_vals = np.array([1., 3., 5., 7., 9.])
x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)
# Multiplication
prod = tf.mul(x_data, m)
for x_val in x_vals:
print(sess.run(prod, feed_dict={x_data: x_val}))
| tensorflow.placeholder | 5,958 |
import tensorflow as tf
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
return graph_manager
def _save_tf_model(self):
ckpt_dir = '/opt/ml/output/data/checkpoint'
model_dir = '/opt/ml/model'
import tensorflow as tf # importing tensorflow here so that MXNet docker image is compatible with this file.
# Re-initialize from the checkpoint so that the latest model weights are loaded.
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'})
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'})
# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer()) # initialize the checkpoint.
# This is the node that will accept the input.
input_nodes = tf.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \
'network_0/observation/observation:0')
# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
'network_1/ppo_head_0/policy')
# Save the model as a servable model.
| tensorflow.train.init_from_checkpoint | 5,959 |
import tensorflow as tf
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
| tensorflow.contrib.tpu.TPUEstimatorSpec | 5,960 |
import tensorflow as tf
op = tf.cond(pred, update_grad, tf.no_op, name=name).op
return op
if __name__ == '__main__':
# run it with "python -m tensorpack.tfutils.optimizer"
x = tf.get_variable('x', shape=[6])
cost = tf.reduce_sum(tf.abs(x), name='cost')
opt = tf.train.GradientDescentOptimizer(0.01)
opt = AccumGradOptimizer(opt, 5)
min_op = opt.minimize(cost)
sess = tf.Session()
| tensorflow.get_variable | 5,961 |
import tensorflow as tf
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
| tensorflow.shape | 5,962 |
from tensorflow.python.framework import ops
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
| tensorflow.python.framework.ops.RegisterShape | 5,963 |
import tensorflow as tf
if transpose:
q = tf.nn.conv2d(p, w, strides, 'VALID', data_format='NCHW', use_cudnn_on_gpu=True)
else:
q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)
# Allocate output tensor.
if use_var:
y = sbnet_module.sparse_scatter_var(
q,
indices.bin_counts,
indices.active_block_indices,
x,
dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),
dynamic_bstride=tf.constant(block_params.bstrides, dtype=tf.int32),
dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
add=False,
transpose=transpose,
atomic=atomic)
else:
y = sbnet_module.sparse_scatter(
q,
indices.bin_counts,
indices.active_block_indices,
x,
| tensorflow.constant | 5,964 |
import tensorflow as tf
# crop for lsun 96
rgb = tf.image.random_crop(rgb,size=[h,w,c])
# crop for patch training
crop_h = h//self.crop_factor
crop_w = w//self.crop_factor
rgb = tf.image.random_crop(rgb,size=[crop_h,crop_w,c])
# cast, bit conversion, compress domain, center
rgb = tf.cast(rgb, tf.float32)
if n_bits < 8:
rgb = tf.floor(rgb/(2**(8-n_bits)))
rgb = rgb/(n_bins) - 0.5
return rgb
def post_process(self, rgb, add_dequantization_noise=True):
n_bits = config.model.data.n_bits
n_bins = 2**n_bits
rgb_out = rgb
# discretization noise
| tensorflow.floor | 5,965 |
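A minimal numeric sketch of the tf.floor bit-depth reduction above (TF 1.x assumed; n_bits=5 and the pixel values are illustrative, not from the original config):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): quantize 8-bit pixel values down to n_bits levels
# and rescale to roughly [-0.5, 0.5], mirroring the preprocessing above.
n_bits = 5
n_bins = 2 ** n_bits
rgb = tf.constant([0., 37., 255.])
rgb = tf.floor(rgb / 2 ** (8 - n_bits))  # -> [0., 4., 31.]
rgb = rgb / n_bins - 0.5                 # -> [-0.5, -0.375, 0.46875]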
import tensorflow as tf
latest_dir = '%s/checkpoint_latest' % args.model_dir
best_dir = '%s/checkpoint_best' % args.model_dir
if tf.train.get_checkpoint_state(latest_dir) and args.restore == "last":
print("Reading model parameters from %s" % latest_dir)
model.latest_saver.restore(sess, tf.train.latest_checkpoint(latest_dir))
else:
if tf.train.get_checkpoint_state(best_dir) and args.restore == "best":
print('Reading model parameters from %s' % best_dir)
model.best_saver.restore(sess, tf.train.latest_checkpoint(best_dir))
else:
print("Created model with fresh parameters.")
global_variable = [gv for gv in tf.global_variables() if args.name in gv.name]
sess.run(tf.variables_initializer(global_variable))
return model
def main(args):
if args.debug:
debug()
if args.cuda:
| tensorflow.global_variables | 5,966 |
import tensorflow as tf
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
| tensorflow.reshape | 5,967 |
import tensorflow as tf
length_1 = tf.reshape(box1[3 + 0], [1])
height_1 = tf.reshape(box1[3 + 2], [1])
width_1 = tf.reshape(box1[3 + 1], [1])
| tensorflow.reshape | 5,968 |
import tensorflow as tf
with tf.variable_scope(name):
# tf.squeeze() returns a tensor with all size-1 dimensions of the original input removed
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
# Get the normalized coordinates of bboxes
bottom_shape = tf.shape(bottom)
height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
# Dividing the rois by width and height gives their positions on the feature map
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
# Won't be backpropagated to rois anyway, but to save time
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
# 'roi_pooling_size', 7
pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
# Crop the regions of the feature map corresponding to the rois, then resize them to 14x14 (pre_pool_size)
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")
| tensorflow.slice | 5,969 |
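A minimal sketch of the tf.slice pattern used above to pull single coordinate columns out of the rois tensor (TF 1.x assumed; the rois values are toy numbers):
import tensorflow as tf
# Minimal sketch (assumed TF 1.x): tf.slice(input, begin, size), where a size of -1
# means "to the end of that dimension" -- the pattern used above to extract one column.
rois = tf.constant([[0., 10., 20., 30., 40.],
                    [1., 12., 22., 32., 42.]])
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1]), [1])  # column 0 -> shape [2]
x1 = tf.slice(rois, [0, 1], [-1, 1])                          # column 1 -> shape [2, 1]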
import tensorflow as tf
| tensorflow.shape | 5,970 |
import tensorflow as tf
train_op=train_op,
host_call=host_call,
training_hooks=[restore_hook, saver_hook])
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
training_chief_hooks=[restore_hook, saver_hook])
| tensorflow.estimator.EstimatorSpec | 5,971 |
import tensorflow as tf
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
| tensorflow.add_n | 5,972 |
import tensorflow as tf
source_target_pairs = cleaner_en_xx.clean_en_xx_pairs(source_target_pairs)
for source, target in source_target_pairs:
source_resfile.write(source)
source_resfile.write("\n")
target_resfile.write(target)
target_resfile.write("\n")
def compile_data(tmp_dir, datasets, filename, datatypes_to_clean=None):
"""Concatenates all `datasets` and saves to `filename`."""
datatypes_to_clean = datatypes_to_clean or []
filename = os.path.join(tmp_dir, filename)
lang1_fname = filename + ".lang1"
lang2_fname = filename + ".lang2"
if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname,
lang2_fname)
return filename
with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile:
with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile:
for dataset in datasets:
url = dataset[0]
compressed_filename = os.path.basename(url)
compressed_filepath = os.path.join(tmp_dir, compressed_filename)
if url.startswith("http"):
generator_utils.maybe_download(tmp_dir, compressed_filename, url)
if compressed_filename.endswith(".zip"):
zipfile.ZipFile(os.path.join(compressed_filepath),
| tensorflow.gfile.Exists | 5,973 |
import tensorflow as tf
# prevent Keras from using up all gpu memory
if tf.executing_eagerly():
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
else:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
def pytest_generate_tests(metafunc):
# This function generates the list of tests for pytest, based
| tensorflow.ConfigProto | 5,974 |
import tensorflow as tf
# Borrowed from: https://gist.github.com/rockt/f4f9df5674f3da6a32786bcf9fbb6a88
batch_size, max_length, hidden_size = tf.unstack(tf.shape(outputs))
index = tf.range(0, batch_size) * max_length + (tf.cast(length, tf.int32) - 1)
flat = tf.reshape(outputs, [-1, hidden_size])
relevant = tf.gather(flat, index)
return relevant
| tensorflow.reshape | 5,975 |
import tensorflow as tf
Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
integral.
"""
def F(m_bound, name=None):
with tf.name_scope(name, "argus_integral_phalf_primitive"):
a = tf.minimum(m_bound, m0)
x = 1 - tf.pow(a / m0, 2)
primitive = -0.5 * m0 * m0 * (tf.exp(c * x) * tf.sqrt(x) / c + 0.5 / tf.pow(-c, 1.5) * tf.sqrt(pi) * tf.erf(gradsafe_sqrt(-c * x)))
# We have to safeguard the sqrt, because otherwise the analytic
# derivative blows up for x = 0
return primitive
area = tf.sub(F(m_high, name="F2"), F(m_low, name="F1"), name="argus_integral_phalf")
return area
| tensorflow.pow | 5,976 |
import tensorflow as tf
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
input_var = tf.reshape(input_var,[-1,dims])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=0)
t = tf.matmul(input_var,v_norm)
mu,var = tf.nn.moments(t,axes=[0])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
return tf.matmul(input_var,w)+self.b
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
| tensorflow.is_nan | 5,977 |
import tensorflow as tf
coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2
sampled = ImageSample('warp', [image, coor], borderMode='constant')
return sampled
with argscope([Conv2D, FullyConnected], nl=tf.nn.relu):
with tf.variable_scope('STN1'):
sampled1 = get_stn(image)
with tf.variable_scope('STN2'):
sampled2 = get_stn(image)
| tensorflow.variable_scope | 5,978 |
import tensorflow as tf
parallel_iterations=1)
rnn_outputs = \
tf.scan(
self.output_step_scan,
rnn_states,
initializer=tf.zeros([self.N_batch, self.N_out]),
parallel_iterations= 1)
return tf.transpose(rnn_outputs, [1, 0, 2]), tf.unstack(rnn_states)
# fix spectral radius of recurrent matrix
def initial_W(self):
# added gamma distributed initial weights as in pycog
| tensorflow.transpose | 5,979 |
import tensorflow as tf
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(
masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
)
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32
)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights,
)
masked_lm_mean_loss = tf.metrics.mean(
| tensorflow.reshape | 5,980 |
import tensorflow as tf
Tout=[tf.float32, tf.float32])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])
| tensorflow.reshape | 5,981 |
import tensorflow as tf
# ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , index , Error (only be used for softmax output)
L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \
+ tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below
# L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2]))
| tensorflow.contrib.layers.l2_regularizer | 5,982 |
import tensorflow as tf
if self.has_input:
    inputs_old = features["inputs"]
    features["inputs"] = tf.expand_dims(features["inputs"], 1)
    if len(features["inputs"].shape) < 5:
        features["inputs"] = tf.expand_dims(features["inputs"], 4)
    # Expand the inputs in to the beam size.
    features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
    s = common_layers.shape_list(features["inputs"])
    features["inputs"] = tf.reshape(features["inputs"],
        [s[0] * s[1], s[2], s[3], s[4]])
target_modality = self._problem_hparams.target_modality
vocab_size = target_modality.top_dimensionality
# Setting decode length to input length + decode_length
decode_length = tf.constant(decode_length)
if "partial_targets" not in features:
| tensorflow.reshape | 5,983 |
import tensorflow as tf
weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
| tensorflow.constant | 5,984 |
import tensorflow as tf
def conv2d(x, shape, name, bias=False, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        W = weight_variable(shape)
        h = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        if bias:
            b = bias_variable([shape[-1]])
            h = h + b
| tensorflow.nn.conv2d | 5,985 |
import tensorflow as tf
        keep_dims=True,
        shift=shift,
        name="batch_norm_ss")
    mean, variance = tf.nn.normalize_moments(counts,
        shifted_sum_x,
        shifted_sum_x2,
        shift,
        name="normalize_moments")
    second_moment = variance + tf.square(mean)
    return mean, variance, second_moment
def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
        tf.identity(self._moving_second_moment),
| tensorflow.square | 5,986 |
import tensorflow as tf
self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
    name="letor_features") # the letor features for the documents
self.labels = [] # the labels for the documents (e.g., clicks)
self.types=[]
for i in range(self.max_candidate_num):
    self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],
        name="docid_input{0}".format(i)))
    self.labels.append(tf.placeholder(tf.float32, shape=[None],
        name="label{0}".format(i)))
    self.types.append(tf.placeholder(tf.float32, shape=[None],
        name="type{0}".format(i)))
self.global_step = tf.Variable(0, trainable=False)
# Select logits to prob function
self.logits_to_prob = tf.nn.softmax
if self.hparams.logits_to_prob == 'sigmoid':
    self.logits_to_prob = sigmoid_prob
self.output = self.ranking_model(self.max_candidate_num, scope='ranking_model')
pad_removed_output = self.remove_padding_for_metric_eval(self.docid_inputs, self.output)
reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels)) # reshape from [max_candidate_num, ?] to [?, max_candidate_num]
| tensorflow.Variable | 5,987 |
import tensorflow as tf
    with self._graph.as_default():
        self._sess = tf.Session(config=self._config)
        tf.saved_model.load(
            self._sess, [tag_constants.SERVING], self._model_path)
        self._image_tensor = self._sess.graph.get_tensor_by_name(
            'serving_default_input_1:0')
        self._output_tensor = self._sess.graph.get_tensor_by_name(
            'StatefulPartitionedCall:0')
        self._boxes = tf.placeholder(
            tf.float32, shape=(None, None, None, 4))
        self._scores = tf.placeholder(
            tf.float32, shape=(None, None, self._num_classes))
        self._boxes_predi, self._scores_predi, self._classes_predi,\
            self._valid_detections_predi = \
            tf.image.combined_non_max_suppression(
                boxes=self._boxes, scores=self._scores,
                max_output_size_per_class=50, max_total_size=50,
                iou_threshold=0.45, score_threshold=self._score_threshold)
        self._label_map = self._load_labelmap(self._label_file)
def _load_labelmap(self, label_file):
| tensorflow.placeholder | 5,988 |
import tensorflow as tf
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
| tensorflow.logging.info | 5,989 |
import tensorflow as tf
    img_h2 = lrelu(conv2d(img_h1, nf2, d_h=ns2, d_w=ns2, name='h2_conv'))
    img_h3 = lrelu(conv2d(img_h2, nf3, d_h=ns3, d_w=ns3, name='h3_conv'))
    print(img_h3.get_shape())
    img_h4 = lrelu(linear(tf.nn.dropout(tf.reshape(img_h3, [self.batch_size, -1]), keep_prob), featsize, 'h4_lin'))
    img_z = lrelu(linear(tf.nn.dropout(img_h4, keep_prob), featsize, 'hz_lin'))
    return img_h0, img_h1, img_h2, img_h3, img_h4, img_z
with tf.variable_scope("conv") as scope:
    srcimg_h0, srcimg_h1, srcimg_h2, srcimg_h3, srcimg_h4, srcimg_z = encode(srcimg)
    scope.reuse_variables()
    tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg)
    tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx)
with tf.variable_scope("translate") as scope:
| tensorflow.variable_scope | 5,990 |
from tensorflow.python.framework import ops
    features = self._get_feature_dict(features)
    logits = self._logits(features, is_training=True)
    if self._enable_centered_bias:
        centered_bias_step = [self._centered_bias_step(targets, features)]
    else:
        centered_bias_step = []
    with ops.control_dependencies(centered_bias_step):
        loss = self._target_column.loss(logits, targets, features)
    logging_ops.scalar_summary("loss", loss)
    linear_train_step = self._linear_model.get_train_step(loss)
    dnn_train_step = (self._dnn_model.get_train_step(loss)
        if self._dnn_model else [])
    with ops.control_dependencies(linear_train_step + dnn_train_step):
        with ops.get_default_graph().colocate_with(global_step):
            return state_ops.assign_add(global_step, 1).op, loss
def _get_eval_ops(self, features, targets, metrics=None):
    raise NotImplementedError
def _get_predict_ops(self, features):
    """See base class."""
    features = self._get_feature_dict(features)
    logits = self._logits(features)
    return self._target_column.logits_to_predictions(logits, proba=True)
def _get_feature_ops_from_example(self, examples_batch):
| tensorflow.python.framework.ops.control_dependencies | 5,991 |
from tensorflow.python.ops import math_ops
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index],
tn[tf_index] + fp[tf_index] + kepsilon,
name)
| tensorflow.python.ops.math_ops.cast | 5,992 |
import tensorflow as tf
else:
    fvar = (
        (eKff - tf.trace(Li_eKuffu_Lit))[:, None] +
        tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
        tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
        fmean ** 2 +
        tf.matrix_diag_part(e_related_to_mean)
    )
return fmean, fvar
| tensorflow.matrix_diag_part | 5,993 |
import tensorflow as tf
offset = [stride[0]/2, stride[1]/2]
features_width = tf.cast(features_width, tf.int32)
features_height = tf.cast(features_height, tf.int32)
scales = tf.convert_to_tensor(scales, dtype=tf.float32)
ratios = tf.convert_to_tensor(ratios, dtype=tf.float32)
offset = tf.convert_to_tensor(offset, dtype=tf.float32)
scales_grid, ratios_grid = tf.meshgrid(scales,
ratios)
scales_grid = tf.reshape(scales_grid, [-1, 1])
ratios_grid = tf.reshape(ratios_grid, [-1, 1])
ratio_sqrts = tf.sqrt(ratios_grid)
heights = scales_grid / ratio_sqrts * base_size[1]
widths = scales_grid * ratio_sqrts * base_size[0]
x_centers = tf.cast(tf.range(features_width), tf.float32)
x_centers = x_centers * stride[1]
y_centers = tf.cast(tf.range(features_height), tf.float32)
| tensorflow.reshape | 5,994 |
import tensorflow as tf
    # Downsample with factorized reduction
    downsample_no = 0
    while w > w_out or h > h_out:
        downsample_no += 1
        with tf.variable_scope('downsample_{}x'.format(downsample_no)):
            X = tf.nn.relu(X)
            X = self._add_factorized_reduction(X, w, h, ch, ch_out, is_train=is_train)
            ch = ch_out
            w >>= 1
            h >>= 1
    # If channel counts finally don't match, convert channel counts with 1x1 conv
    if ch != ch_out:
        with tf.variable_scope('convert_conv'):
            X = self._do_conv(X, w, h, ch, ch_out, filter_size=1, is_train=is_train)
    X = tf.reshape(X, (-1, w_out, h_out, ch_out)) # Sanity shape check
    return X
def _add_fully_connected(self, X, in_shape, out_ch, no_reg=False):
    ch = np.prod(in_shape)
    X = tf.reshape(X, (-1, ch))
    W = self._make_var('W', (ch, out_ch), no_reg=no_reg)
    X = tf.matmul(X, W)
    X = tf.reshape(X, (-1, out_ch)) # Sanity shape check
    return X
| tensorflow.variable_scope | 5,995 |
import tensorflow as tf
# add mask for glabels and cls_pred here
glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))
predictions = {
    'classes': tf.argmax(cls_pred, axis=-1),
    'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
    'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]) }
if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.)
#cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
| tensorflow.reshape | 5,996 |
import tensorflow as tf
Returns:
  a scalar tensor representing the correlation loss value.
"""
with tf.name_scope(name):
    source_samples -= tf.reduce_mean(source_samples, 0)
    target_samples -= tf.reduce_mean(target_samples, 0)
    source_samples = tf.nn.l2_normalize(source_samples, 1)
    target_samples = tf.nn.l2_normalize(target_samples, 1)
    source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
    target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
    corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight
assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
with tf.control_dependencies([assert_op]):
    tag = 'Correlation Loss'
    barrier = tf.no_op(tag)
return corr_loss
| tensorflow.square | 5,997 |
import tensorflow as tf
    norm_grads = None
    if self.max_grad_norm is not None:
        grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)
    grads = list(zip(grads, self.params))
with tf.variable_scope("input_info", reuse=False):
    tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
    tf.summary.scalar('advantage', tf.reduce_mean(adv))
    tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))
    if self.full_tensorboard_log:
        tf.summary.histogram('rewards', self.reward_ph)
        tf.summary.histogram('learning_rate', self.learning_rate)
        tf.summary.histogram('advantage', adv)
        tf.summary.histogram('action_probability', self.mu_ph)
        if tf_util.is_image(self.observation_space):
            tf.summary.image('observation', train_model.obs_ph)
| tensorflow.reduce_mean | 5,998 |
import tensorflow as tf
source_length = encoder_input_length[0]
target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))
true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1
src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len))
mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1))
monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2)
    / (true_trg_len**2 + true_src_len**2))
monotonous = tf.to_float(monotonous < monotonicity_dist)
non_monotonous = (1 - monotonous) * mask
attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)
if monotonicity_decay:
    decay = tf.stop_gradient(0.5 ** (tf.to_float(global_step) / monotonicity_decay))
else:
    decay = 1.0
xent_loss += monotonicity_weight * decay * attn_loss
losses = [xent_loss, reinforce_loss, baseline_loss_]
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
| tensorflow.stop_gradient | 5,999 |