seed (string, 25-2.89k chars) | seed_api (string, 14-102 chars) | index (int64, 0-14.8k)
---|---|---
import tensorflow as tf
def _get_offset_array_tf(shape):
    """
    Computes the offset array used to upsample indices with TensorFlow.
    :param shape: [list] Window shape.
    """
    center = [(ss - 1) // 2 for ss in shape]
    axes = [tf.range(-cc, ss - cc, dtype=tf.int32) for cc, ss in zip(center, shape)]
    # Broadcast and match dimension.
    if len(shape) > 1:
        for jj in range(len(shape)):
            for ii in range(len(shape) + 1):
                if ii != jj:
                    axes[jj] = tf.expand_dims(axes[jj], ii)
| tensorflow.range | 11,400 |
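A minimal graph-mode sketch (TF 1.x, illustrative values) of what the snippet above computes for a single dimension: a window of size `ss` yields offsets centered on `(ss - 1) // 2`.
import tensorflow as tf
ss = 5
cc = (ss - 1) // 2                                   # center index = 2
offsets = tf.range(-cc, ss - cc, dtype=tf.int32)     # [-2, -1, 0, 1, 2]
with tf.Session() as sess:
    print(sess.run(offsets))                         # [-2 -1  0  1  2]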
from tensorflow.contrib.util import make_tensor_proto
# Set request objects using the tf-serving `CopyFrom` setter method
request.model_spec.name = '0'
request.model_spec.signature_name = 'serving_default'
# This is correct (default constant).
request.inputs['input'].CopyFrom(make_tensor_proto(input_data,
shape=input_data.shape))
# Boiler-Plate
| tensorflow.contrib.util.make_tensor_proto | 11,401 |
import tensorflow as tf
def initial_layer_binned(
initial_layer_features: tf.Tensor,
cutoff_data: CutoffData,
band: str,
soft_onehot: Nonlinearity = Nonlinearity.SIGMOID
):
batch_size, twice_window_size, channels = map(int, initial_layer_features.shape)
nonlinearity = nonlinearity_fcn(soft_onehot)
if channels == 3:
scales = cutoff_data.dflux_dt_dflux_dtime_scales(band)
cutoffs = cutoff_data.dflux_dt_dflux_dtime_cutoffs(band)
cutoffs_batch_window = tf.expand_dims(tf.expand_dims(cutoffs, 0), 0)
scales_batch_window = tf.expand_dims(
tf.expand_dims(tf.expand_dims(scales, 0), 0), -1
)
init_layer_per_cutoff = tf.expand_dims(initial_layer_features, -1)
graph_typecheck.assert_shape(
cutoffs_batch_window, [1, 1, channels, cutoff_data.embedding_size]
)
graph_typecheck.assert_shape(scales_batch_window, [1, 1, channels, 1])
graph_typecheck.assert_shape(
init_layer_per_cutoff, [batch_size, twice_window_size, channels, 1]
)
result = nonlinearity(
(init_layer_per_cutoff - cutoffs_batch_window) / scales_batch_window
)
return graph_typecheck.assert_shape(
result, [batch_size, twice_window_size, channels, cutoff_data.embedding_size]
| tensorflow.expand_dims | 11,402 |
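A hedged illustration of the broadcasting trick above (shapes only, values are made up): `tf.expand_dims` adds size-1 axes so per-channel cutoffs can be subtracted from a [batch, window, channels] tensor in one broadcasted op.
import tensorflow as tf
x = tf.zeros([4, 10, 3])                                       # [batch, window, channels]
cutoffs = tf.constant([[0.1, 0.5], [0.2, 0.6], [0.3, 0.7]])    # [channels, embedding_size]
cutoffs_bw = tf.expand_dims(tf.expand_dims(cutoffs, 0), 0)     # [1, 1, 3, 2]
x_per_cutoff = tf.expand_dims(x, -1)                           # [4, 10, 3, 1]
shifted = x_per_cutoff - cutoffs_bw                            # broadcasts to [4, 10, 3, 2]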
import tensorflow as tf
class MobilenetV1Test(tf.test.TestCase):
  def testBuildClassificationNetwork(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith(
        'MobilenetV1/Logits/SpatialSqueeze'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Predictions' in end_points)
| tensorflow.random_uniform | 11,403 |
import tensorflow as tf
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu,nameScope="layerTest1")
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None,nameScope="layerTest2")
sess = tf.Session()
# The `with`/name scopes above are optional; you can add them or leave them out, but the line below is required.
# It creates a folder called `logs` under the current directory and saves the graph information into it,
# so after running this code a `logs` folder will have been created.
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: # tensorflow version < 0.12
writer = tf.train.SummaryWriter('logs/', sess.graph)
else: # tensorflow version >= 0.12
writer = tf.summary.FileWriter("logs/", sess.graph)
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init)
| tensorflow.summary.FileWriter | 11,404 |
import tensorflow as tf
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
    # outputs = activation_fn(outputs)
    outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
| tensorflow.nn.leaky_relu | 11,405 |
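A quick hedged check of the op used above: tf.nn.leaky_relu keeps positive values unchanged and scales negative values by `alpha`.
import tensorflow as tf
x = tf.constant([-2.0, 0.0, 3.0])
y = tf.nn.leaky_relu(x, alpha=0.2)   # -> [-0.4, 0.0, 3.0]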
import tensorflow as tf
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))
| tensorflow.gfile.MakeDirs | 11,406 |
import tensorflow as tf
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
train_distribute=strategy
)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={'batch_size':FLAGS.train_batch_size}
)
if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    if not tf.gfile.Exists(train_file) or not FLAGS.data_converted:
        file_based_convert_examples_to_features(
            train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
| tensorflow.gfile.Exists | 11,407 |
from tensorflow.python.framework import ops
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return [tensor_shape.TensorShape(return_dims)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
merged_shape = tensor_shape.unknown_shape()
for input_ in op.inputs:
merged_shape = merged_shape.merge_with(input_.get_shape())
return [merged_shape]
@ops.RegisterShape("Select")
def _SelectShape(op):
# All three inputs must have the same shape.
return [op.inputs[0].get_shape()
.merge_with(op.inputs[1].get_shape())
.merge_with(op.inputs[2].get_shape())]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
"""Common shape function for arg-reduction ops."""
dimension_shape = op.inputs[1].get_shape()
dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
input_shape = op.inputs[0].get_shape()
| tensorflow.python.framework.ops.RegisterShape | 11,408 |
import tensorflow as tf
nrof_preprocess_threads = 4
image_size = (args.image_size, args.image_size)
eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
# Load the model
input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
facenet.load_model(args.model, input_map=input_map)
# Get output tensor
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
#
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
args.use_flipped_images, args.use_fixed_image_standardization, args.warmup_steps, args.max_steps)
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization,
warmup_steps, max_steps):
# Run forward pass to calculate embeddings
| tensorflow.get_default_graph | 11,409 |
import tensorflow as tf
tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg)
tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx)
with tf.variable_scope("translate") as scope:
trans_h0 = lrelu(linear(tf.nn.dropout(tf.concat([srcimg_z, tgtctx_z], 1), keep_prob), featsize, 'trans_h0'))
trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z')
self.translated_z = trans_z
s_h, s_w = self.output_height, self.output_width
s_h0, s_h1, s_h2, s_h3 = \
| tensorflow.nn.dropout | 11,410 |
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test

class OpsTest(test.TestCase):
  """Ops tests."""

  def test_softmax_classifier(self):
    with self.cached_session() as session:
      features = array_ops.placeholder(dtypes.float32, [None, 3])
      labels = array_ops.placeholder(dtypes.float32, [None, 2])
      weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
      biases = constant_op.constant([0.2, 0.3])
      class_weight = constant_op.constant([0.1, 0.9])
      prediction, loss = ops.softmax_classifier(features, labels, weights,
                                                biases, class_weight)
      self.assertEqual(prediction.get_shape()[1], 2)
      self.assertEqual(loss.get_shape(), [])
      value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]})
      self.assertAllClose(value, 0.55180627)
| tensorflow.python.ops.array_ops.placeholder | 11,411 |
import tensorflow as tf
# fc1
with tf.variable_scope('fc1'):
w = tf.get_variable('w', [self.flatten.get_shape()[1], 2048], initializer=he_normal,
regularizer=regularizer)
b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
out = tf.matmul(self.flatten, w) + b
self.fc1 = tf.nn.relu(out)
# fc2
with tf.variable_scope('fc2'):
w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048], initializer=he_normal,
regularizer=regularizer)
b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
out = tf.matmul(self.fc1, w) + b
self.fc2 = tf.nn.relu(out)
# fc3
with tf.variable_scope('fc3'):
w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes], initializer=initializer,
| tensorflow.variable_scope | 11,412 |
import tensorflow as tf
fields.InputDataFields.groundtruth_classes:
tf.placeholder(tf.int32, [None, 3]),
| tensorflow.placeholder | 11,413 |
import tensorflow as tf
Waa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Waa")
Wsa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Wsa")
| tensorflow.placeholder | 11,414 |
import tensorflow as tf
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if clip:
    log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))
else:
    log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
| tensorflow.nn.log_softmax | 11,415 |
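For context, a small sketch (TF 1.x): tf.nn.log_softmax is the numerically stable form of log(softmax(x)); the clipped branch above guards the same failure mode by hand.
import tensorflow as tf
logits = tf.constant([[2.0, 0.5]])
log_probs = tf.nn.log_softmax(logits, axis=-1)             # stable
log_probs_naive = tf.log(tf.nn.softmax(logits, axis=-1))   # can underflow for extreme logits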
from tensorflow.python.ops import variable_scope as vs
# hoisted upward to the outer most graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
| tensorflow.python.ops.variable_scope._get_default_variable_store | 11,416 |
import tensorflow as tf
r3det_dcl = build_whole_network.DetectionNetworkR3DetDCL(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
else:
shortside_len = cfgs.IMG_SHORT_SIDE_LEN
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
| tensorflow.random_shuffle | 11,417 |
import tensorflow as tf
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
| tensorflow.where | 11,418 |
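A small hedged example of the masking idiom above: tf.where picks `scores` where the mask is True and a large negative padding elsewhere, so the later softmax assigns roughly zero weight to padded positions.
import tensorflow as tf
scores = tf.constant([[1.0, 2.0, 3.0]])
mask = tf.constant([[True, True, False]])
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
masked = tf.where(mask, scores, paddings)
weights = tf.nn.softmax(masked)   # third position gets ~0 probability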
import tensorflow as tf
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
x1 = tf.minimum(tf.maximum(
x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
x2 = tf.minimum(tf.maximum(
x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
| tensorflow.maximum | 11,419 |
import tensorflow as tf
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
| tensorflow.control_dependencies | 11,420 |
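A minimal hedged sketch of the pattern started above (TF 1.x, illustrative names): ops created inside tf.control_dependencies only run after `train_op`, which is how EMA-style updates are typically sequenced after the gradient step.
import tensorflow as tf
x = tf.Variable(0.0)
train_op = tf.assign_add(x, 1.0)
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
    read_after_update = tf.identity(x)   # evaluates x only after train_op has run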
import tensorflow as tf
init_lr = self._params.init_learning_rate
total_steps = self._total_steps
linear_warmup = (
warmup_lr + global_step / warmup_steps * (init_lr - warmup_lr))
cosine_learning_rate = (
init_lr * (tf.cos(np.pi * (global_step - warmup_steps) /
(total_steps - warmup_steps)) + 1.0) / 2.0)
learning_rate = tf.where(global_step < warmup_steps, linear_warmup,
cosine_learning_rate)
return learning_rate
| tensorflow.cos | 11,421 |
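A quick sanity check of the schedule above (plain NumPy, illustrative numbers): at `global_step == warmup_steps` the cosine term is cos(0) = 1, so the rate equals init_lr; at `global_step == total_steps` it is cos(pi) = -1, so the rate decays to 0.
import numpy as np
init_lr, warmup_steps, total_steps = 0.1, 500, 10000
def cosine_lr(step):
    return init_lr * (np.cos(np.pi * (step - warmup_steps) / (total_steps - warmup_steps)) + 1.0) / 2.0
print(cosine_lr(500))     # 0.1
print(cosine_lr(10000))   # ~0.0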
import tensorflow as tf
if mode == tf.estimator.ModeKeys.TRAIN:
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in graph.trainable_variables])
lr = learning_rate.learning_rate_schedule(hparams)
tf.summary.scalar("learning_rate", lr)
mtf_lr = mtf.import_tf_tensor(
mesh, tf.convert_to_tensor(lr, dtype=tf.float32), mtf.Shape([]))
optimizer = mtf.optimize.make_optimizer(hparams, mtf_lr)
update_ops = []
for grad, var in zip(var_grads, graph.trainable_variables):
update_ops.extend(optimizer.apply_grad(grad, var))
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_loss = lowering.export_to_tf_tensor(loss)
tf_loss = tf.to_float(tf_loss)
if logits and mode != tf.estimator.ModeKeys.TRAIN:
tf_logits = lowering.export_to_tf_tensor(logits)
if mode == tf.estimator.ModeKeys.TRAIN:
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
# tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
saver = tf.train.Saver(
tf.global_variables(),
| tensorflow.to_float | 11,422 |
import tensorflow as tf
# resize y-z
squeeze_b_x = tf.reshape(input_tensor, [-1, y_size, z_size, c_size], name='reshape_bx')
resize_b_x = tf.compat.v1.image.resize_bilinear(squeeze_b_x, [y_size_new, z_size_new], align_corners=align)
resume_b_x = tf.reshape(resize_b_x, [-1, x_size, y_size_new, z_size_new, c_size], name='resume_bx')
# Reorient
| tensorflow.reshape | 11,423 |
import tensorflow as tf
:returns: Tensor of same shape as expanded_tensor, but with `value_if_masked` filled
in masked dimensions.
"""
mask_shape = list(map(int, self.mask.shape))
graph_typecheck.assert_shape(expanded_tensor, mask_shape + expected_extra_dims)
value_if_masked = expanded_tensor.dtype.as_numpy_dtype(value_if_masked)
if_masked_tensor = tf.fill(expanded_tensor.shape, value_if_masked)
mask = self.mask
for i in range(2, 2 + len(expected_extra_dims)):
mask = tf.expand_dims(mask, axis=i)
mask = tf.tile(mask, [1, 1] + expected_extra_dims)
return tf.where(mask, expanded_tensor, if_masked_tensor)
def initial_layer(
window_feature: WindowFeatures, *, clip_magnitude=10.0, include_flux_and_time=False
) -> tf.Tensor:
features = tf.expand_dims(window_feature.dflux_dt(clip_magnitude=clip_magnitude), 2)
if include_flux_and_time:
dflux = tf.expand_dims(window_feature.dflux, 2)
dtime = tf.expand_dims(window_feature.dtime, 2)
| tensorflow.tile | 11,424 |
import tensorflow as tf
return final_loss, cstr_pct
def contra_traj_lossV9(pred, tgt, horizon=12, margin=1):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
    # tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., margin - pred_posi_dif)
| tensorflow.reshape | 11,425 |
import tensorflow as tf
tf.constant(np.array([4, 4], np.int32))
}
def fake_image_resizer_fn(image, masks=None):
    resized_image = tf.image.resize_images(image, [8, 8])
    results = [resized_image]
    if masks is not None:
        resized_masks = tf.transpose(
            tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
            [2, 0, 1])
        results.append(resized_masks)
    results.append(tf.shape(resized_image))
    return results
num_classes = 3
input_transformation_fn = functools.partial(
| tensorflow.transpose | 11,426 |
import tensorflow as tf
# AC net
def build_anet(self, state_in, name, reuse=False):
    reg = tf.contrib.layers.l2_regularizer(1e-3)
    with tf.variable_scope(name, reuse=reuse):
        layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
        layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
        mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
        # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
        sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
        sigma = tf.clip_by_value(sigma, 0.0, 1.0)
        norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
    params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
    return norm_dist, params

def build_cnet(self, state_in, name, reuse=False):
    reg = tf.contrib.layers.l2_regularizer(1e-3)
    with tf.variable_scope(name, reuse=reuse):
        layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
| tensorflow.clip_by_value | 11,427 |
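A hedged note on the clip above: tf.clip_by_value bounds the learned sigma elementwise, keeping the Normal policy's scale inside [0, 1].
import tensorflow as tf
sigma = tf.constant([-0.3, 0.4, 1.7])
clipped = tf.clip_by_value(sigma, 0.0, 1.0)   # -> [0.0, 0.4, 1.0]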
import tensorflow as tf
X = self.conv('DZ2', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('DZ3', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
X = self.conv('DZ4', X, 512, 1, 1)
X = tf.nn.leaky_relu(X, 0.2)
| tensorflow.nn.leaky_relu | 11,428 |
import tensorflow as tf
strides=self.strides,
padding='SAME',
data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
return tf.nn.bias_add(
| tensorflow.is_nan | 11,429 |
import tensorflow as tf
training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=training_decoder,
impute_finished=True,
maximum_iterations=tf.reduce_max(seq_lens),
)
self.Y_hat = training_decoder_output.rnn_output
out_decoder2 = tf.reshape(self.Y_hat, [tf.shape(self.Y_hat)[0], -1, n_mels])
dec = conv1d_banks(out_decoder2, K=decoder_num_banks, is_training=self.training)
dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same")
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME")
dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME")
dec = tf.layers.batch_normalization(dec, training=self.training)
dec = tf.layers.dense(dec, embed_size // 2)
for i in range(4):
dec = highwaynet(
dec, num_units=embed_size // 2, scope="decoder-highwaynet-{}".format(i)
| tensorflow.layers.max_pooling1d | 11,430 |
import tensorflow as tf
conditional_entropy_model = SymmetricConditional()
checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform,
hyper_encoder=hyper_encoder,
hyper_decoder=hyper_decoder,
estimator=entropy_bottleneck)
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])
print("Entropy Decoder (Hyper)")
def loop_hyper_deocder(z):
z = tf.expand_dims(z, 0)
loc, scale = hyper_decoder(z)
return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32),
parallel_iterations=1, back_prop=False)
lower_bound = 1e-9# TODO
scales = tf.maximum(scales, lower_bound)
print("Hyper Decoder")
ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape)
print("Entropy Decoder")
def loop_synthesis(element):
y = tf.expand_dims(element[0], 0)
x_coori = tf.expand_dims(element[1], 0)
| tensorflow.squeeze | 11,431 |
import tensorflow as tf
lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64),
tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0)
| tensorflow.constant | 11,432 |
import tensorflow as tf
return intprod(var_shape(x))
def flatgrad(loss, var_list, clip_norm=None):
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
    return tf.concat(axis=0, values=[
| tensorflow.gradients | 11,433 |
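A minimal sketch of the flatgrad building block (TF 1.x, illustrative values): tf.gradients returns one tensor per variable, and clip_by_norm bounds each one before they are flattened and concatenated.
import tensorflow as tf
v = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(tf.square(v))
grads = tf.gradients(loss, [v])                                # [2*v] = [[6.0, 8.0]]
clipped = [tf.clip_by_norm(g, clip_norm=5.0) for g in grads]   # norm 10 rescaled to norm 5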
from tensorflow.python.ops import array_ops
new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
x = array_ops.reshape(x, shape=new_shape)
| tensorflow.python.ops.array_ops.reshape | 11,434 |
import tensorflow as tf
var_shape = var.get_shape().as_list()
if var_shape == saved_shapes[name]:
restored.append(var)
else:
shape_conflicts.add(name)
found_names -= shape_conflicts
return (restored, sorted(found_names),
sorted(missing_names), sorted(shape_conflicts))
def load(self, checkpoint_path, flexible_restore=True):
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
if checkpoint_path is None:
raise ValueError('Checkpoint directory is empty.')
if flexible_restore:
var_list, found, missing, conflicts = self._checkpoint_var_search(
checkpoint_path)
tf.logging.info('Restoring variables: \n\t{}'.format(
'\n\t'.join(found)))
if len(missing) > 0:
tf.logging.info('Variables not found in checkpoint: \n\t{}'.format(
'\n\t'.join(missing)))
| tensorflow.train.latest_checkpoint | 11,435 |
import tensorflow as tf
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
| tensorflow.shape | 11,436 |
import tensorflow as tf
predicted_symbol = tf.case([
(use_target, target),
(tf.logical_not(feed_argmax), softmax)],
default=argmax) # default case is useful for beam-search
predicted_symbol.set_shape([None])
predicted_symbol = tf.stop_gradient(predicted_symbol)
input_ = embed(predicted_symbol)
pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])
samples = samples.write(time, predicted_symbol)
| tensorflow.stop_gradient | 11,437 |
import tensorflow as tf
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
| tensorflow.identity | 11,438 |
import tensorflow as tf
# In each sequence, column index 0 to N_INPUTS - 1 are features, and column index N_INPUTS to SEQ_LEN are labels
N_OUTPUTS = 1
N_INPUTS = SEQ_LEN - N_OUTPUTS
LSTM_SIZE = 3 # number of hidden layers in each of the LSTM cells
# Read data and convert to needed format
def read_dataset(filename, mode, batch_size):
    def _input_fn():
        # Provide the ability to decode a CSV
        def decode_csv(line):
            # all_data is a list of scalar tensors
            all_data = tf.decode_csv(line, record_defaults=DEFAULTS)
            inputs = all_data[:len(all_data) - N_OUTPUTS]  # first N_INPUTS values
            labels = all_data[len(all_data) - N_OUTPUTS:]  # last N_OUTPUTS values
            # Convert each list of rank R tensors to one rank R+1 tensor
            inputs = tf.stack(inputs, axis=0)
            labels = tf.stack(labels, axis=0)
            # Convert input R+1 tensor into a feature dictionary of one R+1 tensor
            features = {TIMESERIES_COL: inputs}
| tensorflow.decode_csv | 11,439 |
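A hedged, self-contained version of the decode step above (TF 1.x, illustrative defaults): tf.decode_csv splits one CSV line into scalar tensors, using record_defaults to infer column types.
import tensorflow as tf
DEFAULTS = [[0.0], [0.0], [0.0]]                         # three float columns
line = tf.constant("1.0,2.0,3.0")
cols = tf.decode_csv(line, record_defaults=DEFAULTS)     # list of three scalar tensors
inputs = tf.stack(cols[:-1], axis=0)                     # features
labels = tf.stack(cols[-1:], axis=0)                     # label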
import tensorflow as tf
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder(dtype=tf.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
x = tf.placeholder(dtype=tf.int32, shape=[1])
# None would fire an exception were it actually executed.
| tensorflow.placeholder | 11,440 |
import tensorflow as tf
optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
fcos = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
| tensorflow.name_scope | 11,441 |
import tensorflow as tf
sample["dist_targets"].append(text_encoder.EOS_ID)
yield sample
def generate_samples(self, data_dir, tmp_dir, dataset_split):
data_path = self.source_data_files(dataset_split)
assert tf.gfile.Exists(data_path)
return text_problems.text2text_distill_iterator(data_path + "inputs",
data_path + "gold",
data_path + "prediction")
| tensorflow.gfile.Exists | 11,442 |
import tensorflow as tf
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if lengths is None:
if input_ids is not None:
lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1)
else:
lengths = tf.convert_to_tensor([slen] * bs, tf.int32)
# mask = input_ids != self.pad_index
| tensorflow.not_equal | 11,443 |
import tensorflow as tf
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("network_parameters"):
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
| tensorflow.layers.dense | 11,444 |
import tensorflow as tf
tf.flags.DEFINE_boolean('cross_replica_sync', True, '')
# Summary and Save & load checkpoints.
tf.flags.DEFINE_integer('summary_verbosity', 0,
"""Verbosity level for summary ops. Pass 0 to disable
both summaries and checkpoints.""")
tf.flags.DEFINE_integer('save_summaries_steps', 0,
"""How often to save summaries for trained models.
Pass 0 to disable summaries.""")
tf.flags.DEFINE_integer('save_model_secs', 0,
"""How often to save trained models. Pass 0 to disable
checkpoints""")
| tensorflow.flags.DEFINE_integer | 11,445 |
import tensorflow as tf
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--max_train_step', type=int, default=50000, help='the maximum training step')
parser.add_argument('--model_path', type=str, default='', help='the path of checkpoint file')
args = parser.parse_args()
def model():
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')
    with tf.variable_scope('layer1'):
        w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())
        b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))
        h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
| tensorflow.placeholder | 11,446 |
import tensorflow as tf
with tf.name_scope('BoundingBoxTransform/decode'):
(roi_width, roi_height,
roi_urx, roi_ury) = get_width_upright(roi)
dx, dy, dw, dh = tf.split(deltas, 4, axis=1)
if variances is None:
variances = [1., 1.]
pred_ur_x = dx * roi_width * variances[0] + roi_urx
pred_ur_y = dy * roi_height * variances[0] + roi_ury
pred_w = tf.exp(dw * variances[1]) * roi_width
pred_h = tf.exp(dh * variances[1]) * roi_height
bbox_x1 = pred_ur_x - 0.5 * pred_w
bbox_y1 = pred_ur_y - 0.5 * pred_h
# This -1. extra is different from reference implementation.
bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.
bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.
bboxes = tf.concat(
| tensorflow.exp | 11,447 |
import tensorflow as tf
else:
assert tf.get_variable_scope().reuse is False
| tensorflow.get_variable_scope | 11,448 |
import tensorflow as tf
training for benchmarking""")
tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device')
tf.flags.DEFINE_integer('num_batches', 100,
'number of batches to run, excluding warmup')
tf.flags.DEFINE_integer('num_warmup_batches', None,
'number of batches to run before timing')
tf.flags.DEFINE_integer('autotune_threshold', None,
'The autotune threshold for the models')
tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
tf.flags.DEFINE_integer('display_every', 10,
"""Number of local steps after which progress is printed
out""")
tf.flags.DEFINE_string('data_dir', None, """Path to dataset in TFRecord format
(aka Example protobufs). If not specified,
synthetic data will be used.""")
tf.flags.DEFINE_string('data_name', None,
| tensorflow.flags.DEFINE_integer | 11,449 |
from tensorflow.contrib.boosted_trees.proto import learner_pb2
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
regressor.fit(input_fn=_train_input_fn, steps=15)
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
| tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig | 11,450 |
import tensorflow as tf
# argument prediction
# first encode decoded action template and teh true action template
choice = tf.floor(tf.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, tf.float32))
prediction_action_argmax = tf.stop_gradient(tf.argmax(self.predictions_action, 1))
predicted_action_templates_embedding = embedding(
input=prediction_action_argmax,
length=action_templates_vocabulary_length,
size=action_templates_embedding_size,
name='action_templates_embedding'
)
true_action_template_embedding = tf.gather(predicted_action_templates_embedding.embedding_table, actions_template)
predicted_action_templates_embedding = tf.stop_gradient(predicted_action_templates_embedding)
action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding
dialogue_state_action_template = tf.concat(
1,
[
dialogue_state,
action_templates_embedding
],
name='dialogue_state_action_template'
)
| tensorflow.gather | 11,451 |
import tensorflow as tf
return Y
def fwd_gradients_0(self, U, x):
    g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]
    return tf.gradients(g, self.dummy_x0_tf)[0]

def fwd_gradients_1(self, U, x):
    g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
| tensorflow.gradients | 11,452 |
import tensorflow as tf
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
    eval_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=False)
| tensorflow.logging.info | 11,453 |
import tensorflow as tf
if self.lesion_kappa:
setattr(
self,
'kappa_%s' % layer,
tf.constant(0.))
if self.reuse:
# Make the batchnorm variables
scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn']
| tensorflow.constant | 11,454 |
import tensorflow as tf
grad_norms_output = None
with self.test_session(use_gpu=True) as sess:
sess.run(tf.global_variables_initializer())
grad_norms_output = np.array([
| tensorflow.global_variables_initializer | 11,455 |
import tensorflow as tf
predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network.
labels: 2D tensor or array, [batch_size, num_classes] ground truth or target labels.
y_pow: int, power to which the predictions are raised; useful if the model diverges. e.g. y_pow=2
num_ratings: number of ratings to use, typically num_classes of the model
batch_size: batch size of the training or validation ops
eps: a float, prevents division by zero
name: Optional scope/name for op_scope.
Returns:
A tensor with the kappa loss.
"""
with tf.name_scope(name):
    labels = tf.to_float(labels)
    repeat_op = tf.to_float(
        tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))
    repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
    weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)
    pred_ = predictions**y_pow
    try:
        pred_norm = pred_ / \
            (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
    except Exception:
        pred_norm = pred_ / \
            (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
    hist_rater_a = tf.reduce_sum(pred_norm, 0)
    hist_rater_b = tf.reduce_sum(labels, 0)
    conf_mat = tf.matmul(tf.transpose(pred_norm), labels)
| tensorflow.transpose | 11,456 |
import tensorflow as tf
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
| tensorflow.trainable_variables | 11,457 |
import tensorflow as tf
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
def _shard_features(self, features): # pylint: disable=missing-docstring
sharded_features = dict()
for k, v in six.iteritems(features):
v = tf.convert_to_tensor(v)
v_shape = common_layers.shape_list(v)
if not v_shape:
v = tf.expand_dims(v, axis=-1)
v_shape = [1]
if v_shape == [1]:
v = tf.tile(v, [self._num_datashards])
| tensorflow.convert_to_tensor | 11,458 |
import tensorflow as tf
return pred_strings
def id2word(self, word_ids, name=None):
mapping_strings = self.load_word_data()
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_strings, name=name
)
word_strings = reverse_vocab_tags.lookup(tf.to_int64(word_ids))
return word_strings
def loss_layer(self, preds, ground_true, nwords, crf_params):
with tf.name_scope("CRF_log_likelihood"):
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
preds, ground_true, nwords, crf_params
)
| tensorflow.to_int64 | 11,459 |
import tensorflow as tf
scope.reuse_variables()
truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),
[self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
mean, var = tf.nn.moments(tgtimg_z, axes=[0])
print(var.get_shape())
# self.simloss /= tf.reduce_mean(var)
| tensorflow.concat | 11,460 |
import tensorflow as tf
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
| tensorflow.variable_scope | 11,461 |
import tensorflow as tf
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"maximal_learning_rate": self.maximal_learning_rate,
"step_size": self.step_size,
"scale_mode": self.scale_mode,
}
@tf.keras.utils.register_keras_serializable(package="Addons")
class TriangularCyclicalLearningRate(CyclicalLearningRate):
def __init__(
self,
initial_learning_rate,
maximal_learning_rate,
step_size,
scale_mode="cycle",
name="TriangularCyclicalLearningRate",
):
| tensorflow.keras.utils.register_keras_serializable | 11,462 |
import tensorflow as tf
h_conv3 = self.conv2d('h_conv3', h_conv2, W_conv3, 1, b_conv3)
h_conv3_flat = tf.reshape(h_conv3, [-1, 1600], 'h_conv3_flat')
h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_conv3_flat, W_fc1), b_fc1, 'h_fc1'))
readout = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, 'h_fc2')
return s, readout, h_fc1
def creat_optimizer(self, readout):
    action = tf.placeholder(tf.float32, [None, self.ACTIONS])
    y = tf.placeholder(tf.float32, [None])
    readout_action = tf.reduce_sum(tf.multiply(readout, action), reduction_indices=1)
    cost = tf.reduce_mean(tf.square(y - readout_action))
    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
    return train_step, y, action

# Take an initial state s_t at time t, then play one step of the game
def process_game(self, s_t):
    # Run the CNN to get the vector of Q-values
    read_out_t = self.sess.run(self.readout, feed_dict={self.s: [s_t]})[0]
    a_t = np.zeros([self.ACTIONS])
    action_index = 0
    if self.time % self.FRAME_PER_ACTION == 0:
        if random.random() <= self.epsilon:  # choose a random action
| tensorflow.multiply | 11,463 |
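A hedged illustration of the Q-value selection above: multiplying the Q vector by a one-hot action and summing the row recovers the Q-value of the chosen action.
import tensorflow as tf
readout = tf.constant([[1.0, 5.0, 2.0]])   # Q-values for 3 actions
action = tf.constant([[0.0, 1.0, 0.0]])    # one-hot chosen action
q_chosen = tf.reduce_sum(tf.multiply(readout, action), reduction_indices=1)   # -> [5.0]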
import tensorflow as tf
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i + 1, :],
                                               ATTENTION_SIZE, mask[:, 0:i + 1], softmax_stag=1, stag=stag,
                                               mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1
| tensorflow.expand_dims | 11,464 |
import tensorflow as tf
pi = act_limit * pi
with tf.variable_scope('q1'):
q1_in_ph = tf.concat([x, a], axis=-1)
q1_in_dim = q1_in_ph.shape.as_list()[1]
q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders()
q1, q1_reg = mlp_variational(q1_in_ph, q1_dropout_mask_phs, list(hidden_sizes) + [1],
activation, None, dropout_rate)
q1 = tf.squeeze(q1, axis=2)
with tf.variable_scope('q1', reuse=True):
q1_pi, q1_pi_reg = mlp_variational(tf.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs, list(hidden_sizes) + [1],
activation, None, dropout_rate)
q1_pi = tf.squeeze(q1_pi, axis=2)
with tf.variable_scope('q2'):
q2_in_ph = tf.concat([x, a], axis=-1)
q2_in_dim = q2_in_ph.shape.as_list()[1]
q2_dropout_mask_generator = DropoutMaskGenerator(q2_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
| tensorflow.squeeze | 11,465 |
import tensorflow as tf
items = relabelled_items
super(GoalRelabellingReplayBuffer, self)._add_batch(items)
@tf.function
def _soft_relabel(self, experience):
# experience.observation.shape = [B x T=2 x obs_dim+state_dim]
states, orig_tasks = self._task_distribution.split(
experience.observation[:, 0])
if self._task_distribution.tasks is None:
tasks = orig_tasks
else:
tasks = tf.constant(self._task_distribution.tasks, dtype=tf.float32)
next_states, _ = self._task_distribution.split(experience.observation[:, 1])
if self._candidate_task_type == "states":
candidate_tasks = self._task_distribution.state_to_task(states)
elif self._candidate_task_type == "next_states":
candidate_tasks = self._task_distribution.state_to_task(next_states)
else:
assert self._candidate_task_type == "tasks"
candidate_tasks = tasks
actions = experience.action[:, 0]
| tensorflow.constant | 11,466 |
import tensorflow as tf
rgb = tf.floor(rgb/(2**(8-n_bits)))
rgb = rgb/(n_bins) - 0.5
return rgb
def post_process(self, rgb, add_dequantization_noise=True):
    n_bits = config.model.data.n_bits
    n_bins = 2**n_bits
    rgb_out = rgb
    # discretization noise
    if add_dequantization_noise:
        shape = tf.shape(rgb_out)
        rgb_out += tf.random_uniform(shape=shape) * (1 / n_bins)
    return rgb_out
| tensorflow.random_uniform | 11,467 |
import tensorflow as tf
self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].dtype,
tf.float32)
self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].shape,
[4, 4, 5])
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np.concatenate((image, additional_channels), axis=2))
def test_use_multiclass_scores_when_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(image),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.multiclass_scores:
tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
| tensorflow.constant | 11,468 |
import tensorflow as tf
# Decrement episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)
# Assign new observations.
with tf.control_dependencies(control_inputs=(assignment,)):
assignments = list()
for name in sorted(states):
assignments.append(tf.scatter_update(
ref=self.states_memory[name],
indices=indices,
updates=states[name]
))
for name in sorted(internals):
assignments.append(tf.scatter_update(
ref=self.internals_memory[name],
indices=indices,
updates=internals[name]
))
for name in sorted(actions):
assignments.append(tf.scatter_update(
ref=self.actions_memory[name],
indices=indices,
updates=actions[name]
))
assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))
assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))
| tensorflow.scatter_update | 11,469 |
import tensorflow as tf
logits = clip_logits(logits, self.hparams)
logits = tf.expand_dims(logits, axis=1)
value = tf.layers.dense(x, self.distributional_value_size)
return {"target_policy": logits, "target_value": value}
| tensorflow.layers.dense | 11,470 |
import tensorflow as tf
except OSError as ex:
pass # ignore existing dir
with tf.Session() as sess:
graph_io.write_graph(sess.graph.as_graph_def(),
model_version_dir,
| tensorflow.Session | 11,471 |
import tensorflow as tf
with tf.variable_scope("target", reuse=False):
# Create the value network
_, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph,
create_qf=False, create_vf=True)
self.value_target = value_target
if self.n_step:
_,_,value_target_n = self.policy_tf.make_critics(self.processed_next_obs_ph_n,
create_qf=False, create_vf=True,reuse=True)
self.value_target_n = value_target_n
with tf.variable_scope("loss", reuse=False):
# Take the min of the two Q-Values (Double-Q Learning)
min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
# Target for Q value regression
q_backup = tf.stop_gradient(
self.rewards_ph +
(1 - self.terminals_ph) * self.gamma * self.value_target
)
# Compute Q-Function loss
# TODO: test with huber loss (it would avoid too high values)
qf1_loss = 0.5 * tf.reduce_mean(((q_backup - qf1) ** 2)*self.weight_ph)
qf1_loss_col = tf.reduce_mean(((q_backup - qf1) ** 2),1)
qf2_loss = 0.5 * tf.reduce_mean(((q_backup - qf2) ** 2)*self.weight_ph)
if self.n_step:
q_backup_n = tf.stop_gradient(
self.rewards_ph_n +
(1 - self.terminals_ph_n) *( self.gamma**self.n_step_length ) * self.value_target_n)
qf1_loss_n = 0.5 * tf.reduce_mean(((q_backup_n - qf1) ** 2)*self.weight_ph)
| tensorflow.stop_gradient | 11,472 |
import tensorflow as tf
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
| tensorflow.cond | 11,473 |
import tensorflow as tf
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 2].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 2])
fig_obj_count += 1
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
union = tf.reduce_sum(tf.math.sign(sdf_values))
iou = intersection / union
self.collisions.append(num_collisions)
self.intersections.append(intersection)
self.ious.append(iou)
return num_collisions, intersection, iou
def evaluate(self):
| tensorflow.nn.relu | 11,474 |
import tensorflow as tf
'image/object/area':
tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.io.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.io.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
| tensorflow.io.decode_image | 11,475 |
import tensorflow as tf
X = decoder_conf('out', X, self.args.num_classes, 1, 1, '', reuse_decoder, is_train, slope=1.0, stddev=0.02,
use_bias=False)
prediction = tf.nn.softmax(X, name = 'softmax')
| tensorflow.nn.softmax | 11,476 |
import tensorflow as tf
The same batch where cutout has been applied to the images.
"""
length, replace = FLAGS.cutout_length, 0.0
images, labels = batch['image'], batch['label']
num_channels = tf.shape(images)[3]
image_height, image_width = tf.shape(images)[1], tf.shape(images)[2]
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
| tensorflow.random.uniform | 11,477 |
import tensorflow as tf
'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
files = [tempfile.NamedTemporaryFile(prefix=c) for c in cases]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(tf.matching_files(f.name).eval(),
tf.compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
pos = files[0].name.find(cases[0])
pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
if __name__ == '__main__':
| tensorflow.matching_files | 11,478 |
from tensorflow.python.framework import constant_op
def _ranking_train_input_fn():
features = {
"a.f1": constant_op.constant([[3.], [0.3], [1.]]),
"a.f2": constant_op.constant([[0.1], [3.], [1.]]),
"b.f1": constant_op.constant([[13.], [0.4], [5.]]),
"b.f2": constant_op.constant([[1.], [3.], [0.01]]),
}
label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
def _infer_ranking_train_input_fn():
features = {
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
| tensorflow.python.framework.constant_op.constant | 11,479 |
import tensorflow as tf
Returns:
A tensor with the log loss.
"""
with tf.name_scope(name):
predictions = tf.to_float(predictions)
labels = tf.to_float(labels)
predictions = tf.clip_by_value(predictions, eps, 1 - eps)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
loss = -tf.reduce_mean(labels * tf.log(predictions))
return loss
| tensorflow.clip_by_value | 11,480 |
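A hedged numeric check of the epsilon clip above: without it, a prediction of exactly 0 would make log(predictions) = -inf and poison the mean.
import tensorflow as tf
eps = 1e-7
preds = tf.constant([[0.0, 1.0]])
labels = tf.constant([[0.0, 1.0]])
preds = tf.clip_by_value(preds, eps, 1 - eps)
loss = -tf.reduce_mean(labels * tf.log(preds))   # finite thanks to the clip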
import tensorflow as tf
start, stop = 0, start
if K.backend() == 'theano':
from theano import tensor as T
range_ = T.arange(start, stop)
else:
assert K.backend() == 'tensorflow'
import tensorflow as tf
range_ = tf.range(start, stop)
if dtype is not None:
range_ = K.cast(range_, dtype=dtype)
return range_
def ndim(a):
| tensorflow.range | 11,481 |
import tensorflow as tf
return out
def nin(x, num_units, **kwargs):
s = tf.shape(x)
sh = x.get_shape().as_list()
x = tf.reshape(x, [tf.reduce_prod(s[:-1]), sh[-1]])
x = dense(x, num_units, **kwargs)
return tf.reshape(x, [-1] + sh[1:-1] + [num_units])
| tensorflow.shape | 11,482 |
import tensorflow as tf
tf.nn.convolution(_t, self.w,
strides=self.strides, dilation_rate=self.dilates,
padding='VALID'),
self.b,name=name)
class Linear(object):
    def __init__(self, name, input_dim, output_dim, stddev=0.02):
        with tf.variable_scope(name):
            self.w = tf.get_variable('w', [input_dim, output_dim],
                                     initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b', [output_dim],
                                     initializer=tf.constant_initializer(0.0))

    def __call__(self, input_var, name=None, w=None, b=None, **kwargs):
        w = w if w is not None else self.w
        b = b if b is not None else self.b
        if input_var.shape.ndims > 2:
| tensorflow.random_normal_initializer | 11,483 |
import tensorflow as tf
gcut = tf.stop_gradient(self.g)
mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
dcos = dot / mag
manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
worker_loss = -tf.reduce_sum(worker_loss, axis=0)
Am = self.r - self.manager_vf
manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am))
Aw = (self.r + self.alpha * self.ri) - self.worker_vf
worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw))
entropy = -tf.reduce_sum(self.pi * self.log_pi)
beta = tf.train.polynomial_decay(beta_start, self.global_step,
end_learning_rate=beta_end,
decay_steps=decay_steps,
power=1)
| tensorflow.square | 11,484 |
import tensorflow as tf
logits = tf.convert_to_tensor(logits, name='logits')
labels = losses_utils.convert_and_cast(labels, 'labels', logits.dtype.base_dtype)
weights = losses_utils.convert_and_cast(weights, 'weights', logits.dtype.base_dtype)
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError('logits and labels must have the same shape (%s vs %s)' % (logits.get_shape(),
labels.get_shape()))
original_shape = labels.get_shape().as_list()
if labels.get_shape().ndims > 0:
original_shape[0] = -1
if labels.get_shape().ndims <= 1:
labels = tf.reshape(labels, [-1, 1])
logits = tf.reshape(logits, [-1, 1])
if weights.get_shape().ndims == 1:
# Weights has shape [batch_size]. Reshape to [batch_size, 1].
weights = tf.reshape(weights, [-1, 1])
if weights.get_shape().ndims == 0:
# Weights is a scalar. Change shape of weights to match logits.
weights *= tf.ones_like(logits)
return labels, logits, weights, original_shape
def _range_to_anchors_and_delta(precision_range, num_anchors, dtype):
"""Calculates anchor points from precision range.
Args:
| tensorflow.reshape | 11,485 |
import tensorflow as tf
# Using Multiple Devices
#----------------------------------
#
# This function gives us the ways to use
# multiple devices (executors) in TensorFlow.
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
# To find out where placement occurs, set 'log_device_placement'
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Runs the op.
print(sess.run(c))
# If we load a graph and want device placement to be forgotten,
| tensorflow.ConfigProto | 11,486 |
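A few other `tf.ConfigProto` options commonly combined with `log_device_placement` (a hedged TF 1.x sketch, not part of the recipe above):

```python
import tensorflow as tf

config = tf.ConfigProto(log_device_placement=True,  # print where each op is placed
                        allow_soft_placement=True)  # fall back when a requested device is unavailable
config.gpu_options.allow_growth = True              # claim GPU memory on demand instead of all at once
sess = tf.Session(config=config)
```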
import tensorflow as tf
import tensorflow_datasets as tfds  # needed for tfds.load below
dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
def normalize(input_image, input_mask):
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask -= 1
return input_image, input_mask
def load_image_train(datapoint):
"""Load images for training."""
input_image = tf.image.resize(datapoint['image'], (512, 512))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def load_image_test(datapoint):
input_image = tf.image.resize(datapoint['image'], (512, 512))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
| tensorflow.image.flip_left_right | 11,487 |
from tensorflow.python.ops import math_ops
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_positive = math_ops.logical_and(math_ops.equal(labels, 0),
| tensorflow.python.ops.math_ops.equal | 11,488 |
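The fragment above ends mid-call. A hedged, self-contained sketch of the same idea using the public `tf.equal`/`tf.logical_and` API (a false positive is a positive prediction on a negative label; values are illustrative):

```python
import tensorflow as tf

labels = tf.constant([0, 1, 0, 1])
predictions = tf.constant([1, 1, 0, 0])
is_false_positive = tf.logical_and(tf.equal(labels, 0), tf.equal(predictions, 1))
# -> [True, False, False, False]
```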
import tensorflow as tf
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# calculate accuracy
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
print ("done...")
print ("************")
| tensorflow.argmax | 11,489 |
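A tiny end-to-end check of the accuracy formula used above, with illustrative values:

```python
import tensorflow as tf

predictions = tf.constant([[0.1, 0.9], [0.8, 0.2]])
Y = tf.constant([[0., 1.], [0., 1.]])
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"))
with tf.Session() as sess:
    print(sess.run(accuracy))  # 0.5: the second example is misclassified
```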
import numpy as np
import tensorflow as tf
b_soft_no_learn = np.array(
[0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32)
b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches])
self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)
with tf.variable_scope("attention"):
self.w_attn_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size])
self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
self.v_attn = tf.get_variable("v", [self.lstm_size, 1])
def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):
"""Build the sampler ops and the log_prob ops."""
print ("-" * 80)
| tensorflow.get_variable | 11,490 |
import tensorflow as tf
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range
),
)
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer(),
)
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32
)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
| tensorflow.zeros_initializer | 11,491 |
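The snippet is cut off before the loss itself. A hedged sketch of how a masked-LM style loss is typically finished from `log_probs`, `one_hot_labels`, and `label_weights`; this mirrors the common pattern rather than the original file's exact code:

```python
import tensorflow as tf

def masked_lm_loss(log_probs, one_hot_labels, label_weights):
    # Negative log-likelihood of the true token at each predicted position.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    # Weighted mean that ignores the zero-padded prediction slots.
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
    return numerator / denominator
```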
import tensorflow as tf
return_logits=True)
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
def get_qa_outputs(FLAGS, features, is_training):
"""Loss for downstream span-extraction QA tasks such as SQuAD."""
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
cls_index = tf.reshape(features["cls_index"], [-1])
seq_len = tf.shape(inp)[0]
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
run_config = xlnet.create_run_config(is_training, True, FLAGS)
xlnet_model = xlnet.XLNetModel(
xlnet_config=xlnet_config,
| tensorflow.transpose | 11,492 |
import tensorflow as tf
sub = tf.subtract(in0, in1, "SUB")
# Cast or convert result to the output dtype.
if tf_output0_dtype == tf.string:
cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
out0 = tf.identity(cast0, "OUTPUT0")
out1 = tf.identity(cast1, "OUTPUT1")
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype,
output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
| tensorflow.identity | 11,493 |
import tensorflow as tf
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers; both layers have the same shape
lstm_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs; the cell states are internal to the LSTM cells and are not our focus here
outputs, _ = tf.nn.rnn(lstm_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
| tensorflow.nn.rnn | 11,494 |
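The snippet stops at the "Linear activation" comment. A hedged sketch of that final step with hypothetical weight names and a placeholder standing in for `lstm_last_output`:

```python
import tensorflow as tf

n_hidden, n_classes = 32, 6  # illustrative sizes
lstm_last_output = tf.placeholder(tf.float32, [None, n_hidden])  # stand-in for the value above
W_out = tf.Variable(tf.random_normal([n_hidden, n_classes]))
b_out = tf.Variable(tf.random_normal([n_classes]))
logits = tf.matmul(lstm_last_output, W_out) + b_out  # "many to one" classifier head
```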
import tensorflow as tf
output = f(tf.constant([1, 2]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
| tensorflow.train.start_queue_runners | 11,495 |
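For context, the usual TF 1.x boilerplate around `tf.train.start_queue_runners` pairs it with a `Coordinator` and shuts the threads down afterwards (a generic sketch, not the test case above):

```python
import tensorflow as tf

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        pass  # dequeue / training steps would go here
    finally:
        coord.request_stop()
        coord.join(threads)
```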
import tensorflow as tf
class_feature_map,
boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),
box_ind=get_box_indices(proposal_boxes),
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True)
class_predictions_with_background = tf.squeeze(
class_predictions_with_background, squeeze_dims=[1, 2])
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size * num_boxes, 1, total_classes])
| tensorflow.squeeze | 11,496 |
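`squeeze_dims` in the call above is the older alias for `axis`. A minimal illustration of what the squeeze does to the pooled feature map's singleton spatial dimensions (shapes are illustrative):

```python
import tensorflow as tf

pooled = tf.zeros([8, 1, 1, 10])        # e.g. [batch * boxes, 1, 1, classes]
flat = tf.squeeze(pooled, axis=[1, 2])  # -> [8, 10]
```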
from tensorflow.python.ops import math_ops
"""
with variable_scope.variable_scope(name, 'mean_iou', [predictions, labels]):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Local variable to accumulate the predictions in the confusion matrix.
cm_dtype = dtypes.int64 if weights is not None else dtypes.float64
total_cm = _create_local('total_confusion_matrix',
shape=[num_classes, num_classes], dtype=cm_dtype)
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
predictions_rank = predictions.get_shape().ndims
if predictions_rank > 1:
predictions = array_ops.reshape(predictions, [-1])
labels_rank = labels.get_shape().ndims
if labels_rank > 1:
labels = array_ops.reshape(labels, [-1])
| tensorflow.python.ops.math_ops.to_int64 | 11,497 |
import tensorflow as tf
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
| tensorflow.constant | 11,498 |
import tensorflow as tf
q,
indices.bin_counts,
indices.active_block_indices,
x,
dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),
dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),
dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
add=False,
transpose=transpose,
atomic=atomic)
return y
| tensorflow.constant | 11,499 |