Columns: seed (string, lengths 25 to 2.89k) · seed_api (string, lengths 14 to 102) · index (int64, 0 to 14.8k)
import tensorflow as tf

    local_device_protos = device_lib.list_local_devices()
    return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']

def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i)

def get_session():
    tf.reset_default_graph()
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=1,
        intra_op_parallelism_threads=1)
    session = tf.Session(config=tf_config)
    print("AVAILABLE GPUS: ", get_available_gpus())
    return session

def get_env(task, seed):
    env_id = task.env_id
    env = gym.make(env_id)
    set_global_seeds(seed)
tensorflow.reset_default_graph
7,500
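Note: a minimal, self-contained sketch (not a dataset row) of the `tf.reset_default_graph` pattern the seed above uses in TF 1.x: clear the default graph before building a fresh one so stale ops from a previous run cannot leak into the new session. All names below are illustrative.

import tensorflow as tf

tf.reset_default_graph()             # drop any ops left over from earlier runs
x = tf.placeholder(tf.float32, [None, 4], name="x")
y = tf.reduce_sum(x, axis=1)         # toy op so the graph is non-empty

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1., 2., 3., 4.]]}))  # [10.]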
import tensorflow as tf

    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply(
tensorflow.to_float
7,501
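Note: `tf.to_float` is a thin wrapper over `tf.cast` and was removed in TF 2.x. A hedged illustration of the equivalence, with an illustrative tensor:

import tensorflow as tf

labels = tf.constant([0, 1, 1])
a = tf.to_float(labels)            # TF 1.x spelling
b = tf.cast(labels, tf.float32)    # equivalent; works in TF 1.x and 2.x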
from tensorflow.python.framework import ops

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` numerator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly for integers in C).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.op_scope([x, y], name, "floordiv") as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    if dtype.is_floating:
      return floor(div(x, y), name=name)
    else:
      if not dtype.is_integer:
        raise TypeError("Expected floating point or integer, got %r" % dtype)
      return div(x, y, name=name)

_OverrideBinaryOperatorHelper(add, "add")
_OverrideBinaryOperatorHelper(sub, "sub")
_OverrideBinaryOperatorHelper(mul, "mul")
tensorflow.python.framework.ops.op_scope
7,502
import tensorflow as tf

dc_g_var = [var for var in all_variables if 'dc_g_' in var.name]
dc_c_var = [var for var in all_variables if 'dc_c_' in var.name]
en_var = [var for var in all_variables if 'e_' in var.name]

# Optimizers
autoencoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=beta1).minimize(autoencoder_loss)
discriminator_g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                                   beta1=beta1).minimize(dc_g_loss, var_list=dc_g_var)
discriminator_c_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                                   beta1=beta1).minimize(dc_c_loss, var_list=dc_c_var)
generator_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                             beta1=beta1).minimize(generator_loss, var_list=en_var)
supervised_encoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                                      beta1=beta1).minimize(supervised_encoder_loss, var_list=en_var)

init = tf.global_variables_initializer()

# Reshape images to display them
input_images = tf.reshape(x_input, [-1, 28, 28, 1])
generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1])
tensorflow.train.AdamOptimizer
7,503
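Note: for context, a minimal sketch of the `minimize` idiom the row above relies on: `tf.train.AdamOptimizer(...).minimize(loss, var_list=...)` computes gradients for only the listed variables and returns the update op. Variable names here are illustrative.

import tensorflow as tf

w = tf.get_variable("w", initializer=1.0)
loss = tf.square(w - 3.0)
train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss, var_list=[w])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(train_op)
    print(sess.run(w))  # approaches 3.0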
import tensorflow as tf

    reduce_instance_dims: By default collapses the batch and instance
        dimensions to arrive at a single scalar output. If False, only
        collapses the batch dimension and outputs a `Tensor` of the same
        shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` with the same type as `x`.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'min'):
    return _min_and_max(x, reduce_instance_dims, name)[0]


@common.log_api_use(common.ANALYZER_COLLECTION)
def max(  # pylint: disable=redefined-builtin
    x: common_types.TensorType,
    reduce_instance_dims: bool = True,
    name: Optional[str] = None) -> tf.Tensor:
  """Computes the maximum of the values of a `Tensor` over the whole dataset.
tensorflow.compat.v1.name_scope
7,504
import tensorflow as tf

    tf.zeros((0, initial_data.observations.shape[1]), tf.float64),
)

while len(all_new_data) < num_workers:
    # this line blocks the process until new data is available in the queue
    new_data = oq.get()
    print(f"Process {pid}: Main : received data {new_data}", flush=True)

    new_data = Dataset(
        query_points=tf.constant(new_data[0], dtype=tf.float64),
        observations=tf.constant(new_data[1], dtype=tf.float64),
    )
    all_new_data = all_new_data + new_data

# tell Trieste of new batch of observations
sync_bo.tell(all_new_data)
tensorflow.constant
7,505
import tensorflow as tf

    for ind in range(pred_heatmap.shape[0]):
        img = pred_heatmap[ind]
        img = img - img.min()
        img *= 255.0 / img.max()
        file_name = 'heatmap_{}_{}.jpg'.format(save_image_with_heatmap.counter, ind)
        imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
    return save_image_with_heatmap.counter

def get_keypoint(image, targets, predictions, heatmap_size, height, width, category,
                 clip_at_zero=True, data_format='channels_last', name=None):
    predictions = tf.reshape(predictions, [1, -1, heatmap_size * heatmap_size])

    pred_max = tf.reduce_max(predictions, axis=-1)
    pred_indices = tf.argmax(predictions, axis=-1)
    pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), \
                     tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)

    width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
    pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), \
                     pred_y * height / tf.cast(heatmap_size, tf.float32)

    if clip_at_zero:
        pred_x, pred_y = pred_x * tf.cast(pred_max > 0, tf.float32), pred_y * tf.cast(pred_max > 0, tf.float32)
        pred_x = pred_x * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (width / 2.)
        pred_y = pred_y * tf.cast(pred_max > 0, tf.float32) + tf.cast(pred_max <= 0, tf.float32) * (height / 2.)

    if config.PRED_DEBUG:
        pred_indices_ = tf.squeeze(pred_indices)
        image_ = tf.squeeze(image) * 255.
        pred_heatmap = tf.one_hot(pred_indices_, heatmap_size * heatmap_size,
                                  on_value=1., off_value=0., axis=-1, dtype=tf.float32)
        pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size])
        if data_format == 'channels_first':
tensorflow.cast
7,506
from tensorflow.python.framework import ops

ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)

@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
tensorflow.python.framework.ops.RegisterShape
7,507
import tensorflow as tf

    final_filters = common_layers.shape_list(layer)[-1]
    filters = hparams.hidden_size
    kernel = (4, 4)

    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
        layer_shape = common_layers.shape_list(layer)
        if hparams.full_latent_tower:
            rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits])
        else:
            rand = tf.random_uniform(layer_shape[:-3] + [1, 1, hparams.bottleneck_bits])
        d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
        z = tf.layers.dense(d, final_filters, name="unbottleneck")
        return layer + z, 0.0

    # Embed.
    x = tf.layers.dense(
        features["cur_target_frame"], filters, name="latent_embed",
tensorflow.random_uniform
7,508
import tensorflow as tf

data = tf.constant(data_raw, name='event_data', dtype=tf.float64)

# // --- Perform extended ML fit of composite PDF to toy data ---
# sum.fitTo(*data, "Extended");

# convert to tf constants, otherwise you'll get complaints about float32s...
constraint_tf = {}
for key in constraint.keys():
    low = constraint[key][0]
    high = constraint[key][1]
    constraint_tf[key] = (tf.constant(low, dtype=tf.float64),
                          tf.constant(high, dtype=tf.float64))

print("N.B.: using direct data entry")
likelihood = sum_pdf(data, nsig, sigmean, sigwidth, nbkg, m0, argpar,
                     constraint_tf['mes'][0], constraint_tf['mes'][1])
nll = tf.neg(tf.reduce_sum(tf.log(likelihood)), name="nll")

variables = tf.all_variables()
grads = tf.gradients(nll, variables)
tensorflow.constant
7,509
import tensorflow as tf

            # update start idx
            start += self.args.batch_size
            yield x_batch, y_batch
            if start >= self.train_data_len:
                return

    def train_tfdata_generator(self):
        with tf.device('/cpu:0'):
            while True:
                x_batch, y_batch = self.data_session.run(self.train_next_batch)
                yield x_batch, y_batch[:, :, :, 0]

    def train_h5_generator(self):
        start = 0
        idx = np.random.choice(self.train_data_len, self.train_data_len, replace=False)
tensorflow.device
7,510
import tensorflow as tf

        embedding.tensor_name = self.embedding_test.name
        embedding.sprite.image_path = './sprite.png'
        embedding.sprite.single_image_dim.extend([80, 80])
        embedding.metadata_path = './metadata.tsv'
        projector.visualize_embeddings(self.summary_writer, config)
        sess.run(tf.variables_initializer([self.embedding_test], name='init_embeddings'))

        # build sprite image
        ut.images_to_sprite(self.test_set, path=os.path.join(FLAGS.logdir, 'sprite.png'))
        ut.generate_tsv(len(self.test_set), tsv_path)
tensorflow.variables_initializer
7,511
import tensorflow as tf

               axes=1) * \
        tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt))
tensorflow.ones_like
7,512
import tensorflow as tf

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
tensorflow.control_dependencies
7,513
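Note: the two-line row above is the standard batch-norm training idiom in TF 1.x: the moving-average update ops live in the `UPDATE_OPS` collection and must be made explicit dependencies of the train op or they never run. A hedged, minimal sketch; the loss and optimizer are illustrative.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
h = tf.layers.batch_normalization(x, training=True)  # registers ops in UPDATE_OPS
loss = tf.reduce_mean(tf.square(h))

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # moving mean/variance are now updated on every training step
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)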
import tensorflow as tf

      probability of each label learned by the loss, if not provided.
    true_positives_lower_bound: Lower bound on the number of true positives
      given `labels` and `logits`. This is the same lower bound which is used
      in the loss expression to be optimized.
    false_positives_upper_bound: Upper bound on the number of false positives
      given `labels` and `logits`. This is the same upper bound which is used
      in the loss expression to be optimized.

  Raises:
    ValueError: If `surrogate_type` is not `xent` or `hinge`.
  """
  with tf.variable_scope(scope,
                         'precision_recall_auc',
                         [labels, logits, label_priors],
                         reuse=reuse):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)

    # Convert other inputs to tensors and standardize dtypes.
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor', logits.dtype)

    # Create Tensor of anchor points and distance between anchors.
    precision_values, delta = _range_to_anchors_and_delta(precision_range, num_anchors, logits.dtype)
tensorflow.variable_scope
7,514
import tensorflow as tf

def create_optimizer(learning_rate, params):
    """Creates an optimizer based on the specified flags."""
    if params['optimizer'] == 'momentum':
        optimizer = tf.train.MomentumOptimizer(
            learning_rate, momentum=params['momentum'])
    elif params['optimizer'] == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate)
    elif params['optimizer'] == 'adadelta':
        optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    elif params['optimizer'] == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(learning_rate)
    elif params['optimizer'] == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate, momentum=params['momentum'])
    elif params['optimizer'] == 'lars':
        optimizer = tf.contrib.opt.LARSOptimizer(
tensorflow.train.AdadeltaOptimizer
7,515
import tensorflow as tf

        # fc1
        with tf.variable_scope('fc1'):
            w = tf.get_variable('w', [self.flatten.get_shape()[1], 2048],
                                initializer=he_normal, regularizer=regularizer)
            b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
            out = tf.matmul(self.flatten, w) + b
            self.fc1 = tf.nn.relu(out)

        # fc2
        with tf.variable_scope('fc2'):
            w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048],
                                initializer=he_normal, regularizer=regularizer)
            b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
            out = tf.matmul(self.fc1, w) + b
            self.fc2 = tf.nn.relu(out)

        # fc3
        with tf.variable_scope('fc3'):
            w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes],
                                initializer=initializer, regularizer=regularizer)
            b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(1.0))
            self.fc3 = tf.matmul(self.fc2, w) + b

        # Calculate Mean cross-entropy loss
        with tf.name_scope("loss"):
            self.predictions = tf.argmax(self.fc3, 1, name="predictions")
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y)
            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            self.loss = tf.reduce_mean(losses) + sum(regularization_losses)
tensorflow.nn.relu
7,516
import tensorflow as tf

            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
            tf.summary.scalar('Loss/Entropy', -0.01 * tf.reduce_mean(pi.entropy()))
            tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
            tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
            tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
            self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # AC net
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim,
                                    initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params
tensorflow.contrib.layers.l2_regularizer
7,517
import tensorflow as tf

def concat_preprocess(dataset, training, pad_symbol=0):
    """Pre-processing function that concatenates input and target for LM."""
    del training

    def concat(features, targets):
        inp = features['inputs']
        pad = tf.expand_dims(tf.zeros_like(inp[0]) + pad_symbol, axis=0)
        concat = tf.concat([pad, inp, pad, targets], axis=0)
        # Note: we're updating existing features dictionary here, so make sure
        # it is not re-used in some other ways outside of this function.
        features['inputs'] = concat
        return features, concat
tensorflow.zeros_like
7,518
import tensorflow as tf

        used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
        cur_mean, cur_var = used_mean, used_var

        if bn_lag > 0.:
            used_var = stable_var(input_=input_, mean=used_mean, axes=axes)
            cur_var = used_var
            used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean))
            used_mean /= (1. - bn_lag**(step + 1))
            used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))
            used_var /= (1. - bn_lag**(step + 1))
        else:
tensorflow.stop_gradient
7,519
import tensorflow as tf

        # beta = self.printn('beta shape: ', beta)

        # ha(s): eta * (\varphi(s)^T * K^T * \Sigma^{-1} + W_{sa}) + wa(s))
        ha = tf.matmul(varphis, param_eta * tf.matmul(Kt, prec) + Wsa) + wa

        # hss(s): eta * (\varphi(s)^T * K^T * \Sigma^{-1} * K * \varphi(s))
        varphisKt = tf.matmul(varphis, Kt)
        hss = param_eta * tf.reduce_sum(tf.matmul(varphisKt, prec) * varphisKt, axis=1)

        Haa = param_eta * prec + Waa
        # Haa = 0.5 * (Haa + TT.transpose(Haa))
        HaaInv = tf.matrix_inverse(Haa)

        # The two terms 'term1' and 'term2' which come from normalizers of the
        # 1. Original policy distribution
        # 2. The distribution after completing the square
        sigma = tf.matrix_inverse(prec)
        term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma))
        if self.beta == 0:
            term2 = 0.5 * param_eta * tf.log(
                tf.matrix_determinant(2 * np.pi * param_eta * HaaInv))
        else:
            term2 = 0.5 * (param_eta + param_omega) * tf.log(
                tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv))
tensorflow.matrix_inverse
7,520
import tensorflow as tf

    perturbs = [tf.stop_gradient(d) for d in perturbs]
    perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length)
                for d in perturbs]
    vadv_logits = logits_from_embedding_fn(
        [emb + d for (emb, d) in zip(embedded, perturbs)])
    return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)


def _mask_by_length(t, length):
    maxlen = t.get_shape().as_list()[1]
    mask = tf.sequence_mask(length, maxlen=maxlen)
    mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)
    return t * mask


def _scale_l2(x, norm_length):
    alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12
    l2_norm = alpha * tf.sqrt(
        tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)
    x_unit = x / l2_norm
    return norm_length * x_unit


def _end_of_seq_mask(tokens, vocab_size):
    """Generate a mask for the EOS token (1.0 on EOS, 0.0 otherwise).

    Args:
        tokens: 1-D integer `Tensor` [num_timesteps*batch_size]. Each element
            is an id from the vocab.
        vocab_size: an `int`, vocabulary size of the problem

    Returns:
tensorflow.abs
7,521
import tensorflow as tf

        for layer in range(self.config["contextualization_layers"]):
            with tf.variable_scope("layer_{}".format(layer)):
                with tf.variable_scope("fw_cell"):
                    cell_fw = util.CustomLSTMCell(self.config["contextualization_size"],
                                                  num_sentences, self.lstm_dropout)
                with tf.variable_scope("bw_cell"):
                    cell_bw = util.CustomLSTMCell(self.config["contextualization_size"],
                                                  num_sentences, self.lstm_dropout)
                state_fw = tf.contrib.rnn.LSTMStateTuple(
                    tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
                    tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
                state_bw = tf.contrib.rnn.LSTMStateTuple(
                    tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
                    tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))

                (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=cell_fw,
                    cell_bw=cell_bw,
                    inputs=current_inputs,
                    sequence_length=text_len,
                    initial_state_fw=state_fw,
                    initial_state_bw=state_bw)

                text_outputs = tf.concat([fw_outputs, bw_outputs], 2)  # [num_sentences, max_sentence_length, emb]
tensorflow.nn.bidirectional_dynamic_rnn
7,522
import tensorflow as tf

    height_2 = tf.reshape(box2[3 + 2], [1])
    width_2 = tf.reshape(box2[3 + 1], [1])
tensorflow.reshape
7,523
import tensorflow as tf

        d.token_type_ids: tf.io.VarLenFeature(tf.int64),
        d.attention_mask: tf.io.VarLenFeature(tf.int64),
        d.labels: tf.io.VarLenFeature(tf.int64),
    }
tensorflow.io.VarLenFeature
7,524
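Note: a minimal sketch of how `tf.io.VarLenFeature` specs like those above are typically consumed: they parse ragged-length features out of serialized `tf.train.Example` records as `SparseTensor`s. The feature names here are illustrative, not from the dataset row.

import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "labels": tf.io.VarLenFeature(tf.int64),
}

def parse(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_spec)
    # VarLenFeature yields SparseTensors; densify when a dense shape is needed
    return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}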
import tensorflow as tf

        An operation that will update var_matrix when run in a Session
    '''
    selected_rows = tf.nn.embedding_lookup(var_matrix, indices)
    row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))
    scaling = maxnorm / tf.maximum(row_norms, maxnorm)
    scaled = selected_rows * tf.expand_dims(scaling, 1)
    return tf.scatter_update(var_matrix, indices, scaled)


def dense_maxnorm_update(var_matrix, maxnorm=1.0):
    '''Dense update operation that ensures all rows in var_matrix
    do not have a Euclidean norm greater than maxnorm. Rows that exceed
tensorflow.scatter_update
7,525
import tensorflow as tf
from atari_wrappers import *

def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
tensorflow.variable_scope
7,526
import tensorflow as tf

  Returns:
    A tensor with the cross entropy loss.
  """
  logits.get_shape().assert_is_compatible_with(labels.get_shape())
  with tf.name_scope(name):
    num_classes = labels.get_shape()[-1].value
    labels = tf.cast(labels, logits.dtype)

    if label_smoothing > 0:
tensorflow.name_scope
7,527
import tensorflow as tf

    # but only num_anchors terms are included in the Riemann sum, the
    # effective length of the integration interval is `delta` less than the
    # length of precision_range.
    scaled_loss = tf.div(per_label_loss,
                         precision_range[1] - precision_range[0] - delta,
                         name='AUC_Normalize')
    scaled_loss = tf.reshape(scaled_loss, original_shape)

    other_outputs = {
        'lambdas': lambdas_variable,
tensorflow.reshape
7,528
import tensorflow as tf

        weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
        weighted_average = []

        for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
            pos_ = pos + offset
            pos_ = tf.minimum(pos_, encoder_input_length - 1)
            pos_ = tf.maximum(pos_, 0)  # TODO: when pos is < 0, use <S> or </S>
            weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
            weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
            weighted_average.append(weighted_average_)
tensorflow.minimum
7,529
import tensorflow as tf

    dnn_output_size = rnn_output_size
    if do_dnn:
        last_layer = rnn_output
        last_layer_size = rnn_output_size
        for i, layer_size in enumerate(dnn_sizes):
            layer_name = 'dnn_{}'.format(i)
            with tf.variable_scope(layer_name):
                dnn_w = tf.get_variable('W', shape=[last_layer_size, layer_size],
                                        initializer=dnn_init, dtype=dtype)
                dnn_b = tf.get_variable('b', shape=[layer_size],
                                        initializer=tf.constant_initializer(0.0), dtype=dtype)
                projected = tf.nn.bias_add(tf.matmul(last_layer, dnn_w), dnn_b)
                # TODO: argument nonlinearity, change bias to 0.1 if relu
                if dnn_nonlin == 'tanh':
                    last_layer = tf.nn.tanh(projected)
                elif dnn_nonlin == 'sigmoid':
                    last_layer = tf.nn.sigmoid(projected)
                elif dnn_nonlin == 'relu':
                    last_layer = tf.nn.relu(projected)
                else:
                    raise NotImplementedError()

                if mode == 'train' and dnn_keep_prob < 1.0:
                    last_layer = tf.nn.dropout(last_layer, dnn_keep_prob)
                last_layer_size = layer_size

                print('{}: {}'.format(layer_name, last_layer.get_shape()))
tensorflow.nn.tanh
7,530
from tensorflow.python.framework import tensor_util

        check_ops.assert_non_negative(x)], x)
    return x

  def _introspect_ndims(self, ndims):
    """Helper to establish some properties of input ndims args."""
    if self._is_all_constant_helper(ndims):
      return (tensor_util.constant_value(ndims),
              tensor_util.constant_value(ndims) == 0)
    return None, math_ops.equal(ndims, 0)
tensorflow.python.framework.tensor_util.constant_value
7,531
import tensorflow as tf

    tf.logging.info("*** Input Files ***")
tensorflow.logging.info
7,532
import tensorflow as tf

class SchemaInferenceTest(test_case.TransformTestCase):

  # pylint: disable=g-long-lambda
  @test_case.named_parameters(
      dict(
          testcase_name='fixed_len_int',
          make_tensors_fn=lambda: {
              'x': tf.compat.v1.placeholder(tf.int64, (None,))},
          feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}),
      dict(
          testcase_name='fixed_len_string',
          make_tensors_fn=lambda: {
              'x': tf.compat.v1.placeholder(tf.string, (None,))},
          feature_spec={'x': tf.io.FixedLenFeature([], tf.string)}),
tensorflow.compat.v1.placeholder
7,533
import tensorflow as tf

            operation = tf.assign(perturbed_var, var)
            perturb_ops.append(operation)
        assert len(perturb_ops) == len(all_vars)
        return tf.group(*perturb_ops)

    # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
    # of the network and measures the effect of that perturbation in action space. If the perturbation
    # is too big, reduce scale of perturbation, otherwise increase.
    with tf.variable_scope("adaptive_model", reuse=False):
        adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
    perturb_for_adaption = perturb_vars(original_scope="model",
                                        perturbed_scope="adaptive_model/model")
    kl_loss = tf.reduce_sum(
        tf.nn.softmax(policy.q_values) *
        (tf.log(tf.nn.softmax(policy.q_values)) - tf.log(tf.nn.softmax(adaptive_policy.q_values))),
        axis=-1)
    mean_kl = tf.reduce_mean(kl_loss)

    def update_scale():
        """
        update the scale expression

        :return: (TensorFlow Tensor) the updated scale expression
        """
        with tf.control_dependencies([perturb_for_adaption]):
            update_scale_expr = tf.cond(
                mean_kl < param_noise_threshold,
                lambda: param_noise_scale.assign(param_noise_scale * 1.01),
                lambda: param_noise_scale.assign(param_noise_scale / 1.01),
            )
        return update_scale_expr
tensorflow.reduce_mean
7,534
import tensorflow as tf

        tf.logging.info(message.format(config.logdir))
        tf.gfile.MakeDirs(config.logdir)
        config_path = os.path.join(config.logdir, 'config.yaml')
        with tf.gfile.GFile(config_path, 'w') as file_:
            yaml.dump(config, file_, yaml.Dumper,
                      allow_unicode=True, default_flow_style=False)
    else:
        message = (
            'Start a new run without storing summaries and checkpoints since no '
            'logging directory was specified.')
        tf.logging.info(message)
    return config


def load_config(logdir):
    """Load a configuration from the log directory.

    Args:
        logdir: The logging directory containing the configuration file.

    Raises:
        IOError: The logging directory does not contain a configuration file.
tensorflow.logging.info
7,535
import tensorflow as tf

iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
tensorflow.add_to_collection
7,536
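Note: for context, a hedged sketch of the initializable-iterator pattern behind the row above: registering the initializer in a graph collection lets later consumers of the graph run it without holding a Python reference to the iterator. Names and the toy dataset are illustrative.

import tensorflow as tf

dataset = tf.data.Dataset.range(10).batch(2)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
next_batch = iterator.get_next()

with tf.Session() as sess:
    # run every initializer registered in the collection
    sess.run(tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS))
    print(sess.run(next_batch))  # [0 1]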
from tensorflow.python.ops import variable_scope

        sampled_words = []
        self.encoder_features = encoder_features

        with variable_scope.variable_scope("attention_decoder"):
            # Get the weight vectors v and W_c (W_c is for coverage)
            v = variable_scope.get_variable("v", [options.attention_vec_size])
            v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
            w_c = None
            if options.use_coverage:
                with variable_scope.variable_scope("coverage"):
                    w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
                    w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)

            # For each step, dec_input => lstm_output => vocab_score
            wordidx_t = decoder_inputs[0]  # [batch_size] int32
            for i in range(options.max_answer_len):
                if mode_gen in ('ce_train', 'loss',):
                    wordidx_t = decoder_inputs[i]  # the wordidx_t must come from decoder_inputs for phrase model
tensorflow.python.ops.variable_scope.variable_scope
7,537
import tensorflow as tf

        dep_selection, dep_org_idx, sl_dep, rep_dep_mask,
        rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec
):
    # data for self-attention
    rep_map_dp = dropout(rep_map, keep_prob, is_train)
    rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)
    rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)

    # mask generation
    dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
    head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])

    if direction is None:
        direct_mask = tf.not_equal(head_idxs, dep_idxs)  # [bs, slh, sld]
    else:
        if direction == 'forward':
            direct_mask = tf.greater(head_idxs, dep_idxs)  # [bs, slh, sld]
        else:
tensorflow.expand_dims
7,538
import tensorflow as tf

    answer_batch_unstack = tf.unstack(answer_batch, axis=1)

    # initialize all the variables
    state_t_1 = init_state
    context_t_1 = tf.zeros([batch_size, encoder_dim])
    coverage_t_1 = None

    # store variables from each time-step
tensorflow.zeros
7,539
from tensorflow.python.framework import ops

    grouped_args = collections.OrderedDict()
    resource_handles = []
    # Check that the set of arguments is the same across all the scheduled ops.
    for op in batch:
        if set(op.args.keys()) != arg_keys:
            raise ValueError("Mismatching arguments: %s, %s.", op.args, arg_keys)
        for key in arg_keys:
            grouped_args.setdefault(key, []).append(op.args[key])
        resource_handles.append(op.resource_handle)
    # Move all the inputs to the op device in one RPC.
    grouped_args = collections.OrderedDict(
        (k, _move_tensors(v, resource_handles[0].device))
        for k, v in sorted(grouped_args.items()))
    with ops.device(resource_handles[0].device):
        return batch[0].op(resource_handles, stamp, **grouped_args)


def run_handler_scheduled_ops(per_handler_ops, stamp, worker_device):
    """Given a dictionary of ops for each handler, runs them in batch."""
    batched_ops = collections.OrderedDict()
    # Group the ops by their batching_key. Ops that share the same batching key
    # can be executed together.
    for handler in per_handler_ops.keys():
        for op in per_handler_ops[handler]:
            key = (op.batching_key(), op.batch_runner_fn())
            batched_ops.setdefault(key, []).append(op)
tensorflow.python.framework.ops.device
7,540
import tensorflow as tf

def cw(X, y=None):
    D = tf.cast(tf.shape(X)[1], tf.float32)
    N = tf.cast(tf.shape(X)[0], tf.float32)
    if y is None:
        y = silverman_rule_of_thumb(N)

    K = 1 / (2 * D - 3)

    A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
    A = (1 / (N**2)) * tf.reduce_sum((1 / tf.sqrt(y + K * A1)))

    B1 = euclidean_norm_squared(X, axis=1)
    B = (2 / N) * tf.reduce_sum((1 / tf.sqrt(y + 0.5 + K * B1)))

    return (1 / tf.sqrt(1 + y)) + A - B


def cw_choose(z_dim: int):
    if z_dim == 1:
        return cw_1d
    elif z_dim == 2:
        return cw_2d
    elif z_dim >= 20:
        return cw
    else:
        raise ValueError('Not defined for this latent dimension')
tensorflow.sqrt
7,541
import tensorflow as tf

        negative = tmp * tf.sin(order_float * phi)
        return tf.where(tf.greater(sign_order, 0), positive, negative)


def evaluate_spherical_harmonics(
        degree_l: TensorLike,
        order_m: TensorLike,
        theta: TensorLike,
        phi: TensorLike,
        name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike:  # pylint: disable=line-too-long
    with tf.name_scope(name):
        degree_l = tf.convert_to_tensor(value=degree_l)
        order_m = tf.convert_to_tensor(value=order_m)
        theta = tf.convert_to_tensor(value=theta)
        phi = tf.convert_to_tensor(value=phi)

        var_type = theta.dtype
        sign_m = tf.math.sign(order_m)
        order_m = tf.abs(order_m)
        zeros = tf.zeros_like(order_m)
        result_m_zero = _spherical_harmonics_normalization(
            degree_l, zeros, var_type) * evaluate_legendre_polynomial(
                degree_l, zeros, tf.cos(theta))
        result_branch = _evaluate_spherical_harmonics_branch(
            degree_l, order_m, theta, phi, sign_m, var_type)
        return tf.where(tf.equal(order_m, zeros), result_m_zero, result_branch)
tensorflow.convert_to_tensor
7,542
from tensorflow.python.ops import math_ops

    n = math_ops.cast(global_step, dtypes.float32)
    decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))

    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean


def adaptive_clipping_fn(std_factor=2.,
                         decay=0.95,
                         static_max_norm=None,
                         global_step=None,
                         report_summary=False,
tensorflow.python.ops.math_ops.exp
7,543
import tensorflow as tf

        scope = tf.get_variable_scope()
        with tf.variable_scope(scope):
            if self._max_diffusion_step == 0:
                pass
            else:
                for support in self._supports:
                    x1 = tf.sparse_tensor_dense_matmul(support, x0)
                    x = self._concat(x, x1)
                    for _ in range(2, self._max_diffusion_step + 1):
                        x2 = 2 * tf.sparse_tensor_dense_matmul(support, x1) - x0
                        x = self._concat(x, x2)
                        x1, x0 = x2, x1

            num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
            x = tf.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
            x = tf.transpose(x, perm=[3, 1, 2, 0])  # (batch_size, num_nodes, input_size, order)
            x = tf.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

            weights = tf.get_variable(
                'weights', [input_size * num_matrices, output_size],
tensorflow.sparse_tensor_dense_matmul
7,544
import tensorflow as tf

            filter_size[1] - input_.get_shape().as_list()[2],
            input_.get_shape().as_list()[3]
        ])
        res = tf.concat(axis=1, values=[pad_1, res])
        res = tf.concat(axis=2, values=[pad_2, res])
    res = tf.nn.conv2d(
tensorflow.concat
7,545
import tensorflow as tf

        tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
        train_input_fn = input_fn_builder(
            input_files=input_files,
            max_seq_length=FLAGS.max_seq_length,
            max_predictions_per_seq=FLAGS.max_predictions_per_seq,
            is_training=True)
        estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)

    if FLAGS.do_eval:
        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

        eval_input_fn = input_fn_builder(
            input_files=input_files,
            max_seq_length=FLAGS.max_seq_length,
            max_predictions_per_seq=FLAGS.max_predictions_per_seq,
            is_training=False)
tensorflow.logging.info
7,546
import tensorflow as tf

    # split into train and test
    train_x, test_x, train_y, test_y = train_test_split(data_x, data_y,
                                                        test_size=0.3,
                                                        random_state=42)

    # define other non user input params
    # initialize l2_loss as zero
    l2_loss = tf.constant(0.0)
    # define sequence length
    sequence_length = data_x.shape[1]
    # define num_features
    num_feature = data_x.shape[2]

    # store the weights
tensorflow.constant
7,547
import tensorflow as tf

    else:
        key_name = "eval_accuracy"

    global_step, best_perf_global_step, best_perf = _best_trial_info()
    writer = tf.gfile.GFile(output_eval_file, "w")
    while global_step < FLAGS.train_step:
        steps_and_files = {}
        filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
tensorflow.gfile.GFile
7,548
import tensorflow as tf
import gpflow
from gpflow.ci_utils import ci_niter
from gpflow import set_trainable
from multiclass_classification import plot_from_samples, colors

gpflow.config.set_default_float(np.float64)
gpflow.config.set_default_jitter(1e-4)
gpflow.config.set_default_summary_fmt("notebook")
# convert to float64 for tfp to play nicely with gpflow in 64
f64 = gpflow.utilities.to_default_float

tf.random.set_seed(123)

# %matplotlib inline

# %% [markdown]
#
# In this notebook, we provide three examples:
#
# * [Example 1](#Example-1:-GP-regression): Sampling hyperparameters in Gaussian process regression
# * [Example 2](#Example-2:-Sparse-MC-for-multiclass-classification): Sparse Variational MC applied to the multiclass classification problem
# * [Example 3](#Example-3:-Fully-Bayesian-inference-for-generalized-GP-models-with-HMC): Full Bayesian inference for Gaussian process models
tensorflow.random.set_seed
7,549
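Note: a minimal reproducibility sketch for `tf.random.set_seed` (TF 2.x), separate from the notebook row above: the global seed combines with per-op seeds, so re-setting it makes the same sequence of sampling ops repeat. Purely illustrative values.

import tensorflow as tf

tf.random.set_seed(123)
a = tf.random.normal([2])   # deterministic given the global seed

tf.random.set_seed(123)
b = tf.random.normal([2])   # same values as `a`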
import tensorflow as tf

            res = sess.run(dec)
            self.assertEqual(3, len(res))
            self.assertEqual((2, 4), res[0].shape)

            res = sess.run([mem])
            self.assertEqual(1, len(res))
            self.assertEqual((2, 2), res[0].shape)

    def testEmbeddingRNNDecoder(self):
        with self.test_session() as sess:
            with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
                inp = [tf.constant(0.5, shape=[2, 2])] * 2
                cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
                _, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
                dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
                dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
                    dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
                sess.run([tf.global_variables_initializer()])

                res = sess.run(dec)
                self.assertEqual(3, len(res))
                self.assertEqual((2, 2), res[0].shape)
tensorflow.constant
7,550
import tensorflow as tf

    nodes_list = [nodes]
    adj_list = []
    for hop_edge_types in edge_types:
        neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
        next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
        next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
        next_values = weight.values
        next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
        next_shape = tf.cast(next_shape, tf.int64)
        next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
        next_adj = tf.sparse_reorder(next_adj)
        nodes_list.append(next_nodes)
        adj_list.append(next_adj)
        nodes = next_nodes
    return nodes_list, adj_list
tensorflow.cast
7,551
import tensorflow as tf

                outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
                    enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
                    embedding_size=2, feed_previous=True)
                outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
                    enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
                    embedding_size=2, feed_previous=True)
                res1 = sess.run(outputs_dict1["0"])
                res2 = sess.run(outputs_dict2["0"])
                res3 = sess.run(outputs_dict3["0"])
                self.assertAllClose(res1, res2)
                self.assertAllClose(res1, res3)

    def testSequenceLoss(self):
        with self.test_session() as sess:
            logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
            targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
            weights = [tf.constant(1.0, shape=[2]) for i in range(3)]

            average_loss_per_example = tf.nn.seq2seq.sequence_loss(
                logits, targets, weights,
                average_across_timesteps=True,
                average_across_batch=True)
            res = sess.run(average_loss_per_example)
            self.assertAllClose(1.60944, res)

            average_loss_per_sequence = tf.nn.seq2seq.sequence_loss(
                logits, targets, weights,
                average_across_timesteps=False,
tensorflow.constant
7,552
import tensorflow as tf

  def test_train_with_defun(self):
    train(defun=True)

  def test_evaluate_with_defun(self):
    evaluate(defun=True)


if __name__ == "__main__":
  tf.test.main()
tensorflow.test.main
7,553
import tensorflow as tf

        for state in states:
            dJr = tf.matmul(tf.nn.relu(state),
tensorflow.nn.relu
7,554
import tensorflow as tf

print(sess.run(D))

print('\nA+B=')
print(sess.run(A + B))

print('\nB-B=')
print(sess.run(B - B))

print('\nB*I=')
BI = tf.matmul(B, identity_matrix)
print(sess.run(BI))

print('\ntranspose(C)=')
print(sess.run(tf.transpose(C)))

print('\ntranspose(D)=')
print(sess.run(tf.transpose(D)))

print('\ninverse(D)=')
print(sess.run(tf.matrix_inverse(D)))

print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))

print('\ncholesky(D):')
print(sess.run(tf.cholesky(identity_matrix)))
tensorflow.transpose
7,555
import tensorflow as tf

        f = open(self.log_file, 'w')
        f.close()


def weight_variable(shape):
    return tf.get_variable('W', shape, initializer=tf.random_normal_initializer(0., 0.02))


def bias_variable(shape):
    return tf.get_variable('b', shape, initializer=tf.constant_initializer(0.))


def keep_prob(dropout, train):
    return tf.cond(train, lambda: tf.constant(dropout), lambda: tf.constant(1.))


def softmax_ce_with_logits(logits, labels):
    return tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)


def sigmoid_ce_with_logits(logits, labels):
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)


def sigmoid_kl_with_logits(logits, targets):
    assert isinstance(targets, float)
tensorflow.constant
7,556
import tensorflow as tf

    def _fill_problem_hparams_features(self, features):
        if features is not None:
            for k, v in six.iteritems(
                    problem_hparams_to_features(self._problem_hparams)):
                if k not in features:
                    features[k] = tf.constant(v, name=k)

    def infer(self,
              features=None,
              decode_length=50,
tensorflow.constant
7,557
import tensorflow as tf

        self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
                         self._subset(files, [1]))
        self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
                         self._subset(files, [0, 1, 3, 4]))
        self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
tensorflow.matching_files
7,558
import tensorflow as tf

    #   cumsum(decay_prods * sequence) / decay_prods
    # where decay_prods = reverse_cumprod(decay)
    #
    # One reason this hasn't been done is that multiplying then dividing again by
    # products of decays isn't ideal numerically, in particular if any of the
    # decays are zero it results in NaNs.
    with tf.name_scope(name, values=[sequence, decay, initial_value]):
        if sequence_lengths is not None:
            # Zero out sequence and decay beyond sequence_lengths.
            with tf.control_dependencies(
                    [tf.assert_equal(sequence.shape[0], decay.shape[0])]):
                mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
                                        dtype=sequence.dtype)
                mask = tf.transpose(mask)

            # Adding trailing dimensions to mask to allow for broadcasting.
            to_seq = mask.shape.dims + [1] * (sequence.shape.ndims - mask.shape.ndims)
            sequence *= tf.reshape(mask, to_seq)
            to_decay = mask.shape.dims + [1] * (decay.shape.ndims - mask.shape.ndims)
            decay *= tf.reshape(mask, to_decay)

        sequences = [sequence, decay]

        if reverse:
            sequences = [_reverse_seq(s, sequence_lengths) for s in sequences]

        summed = tf.scan(lambda a, x: x[0] + x[1] * a,
                         sequences,
tensorflow.transpose
7,559
import tensorflow as tf

                                               trainable=False)

        self._moving_variance = tf.subtract(self._moving_second_moment,
                                            tf.square(self._moving_mean),
                                            name="moving_variance")

        def build_batch_stats():
            """Builds the batch statistics calculation ops."""
            # Copy for better stability.
            # We use the moving mean as an estimate of the mean in order to perform
            # a more numerically stable calculation of the batch mean.
            shift = tf.add(self._moving_mean, 0)
            counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
                input_batch,
                reduction_indices,
                keep_dims=True,
                shift=shift,
                name="batch_norm_ss")

            mean, variance = tf.nn.normalize_moments(counts,
                                                     shifted_sum_x,
                                                     shifted_sum_x2,
                                                     shift,
                                                     name="normalize_moments")
            second_moment = variance + tf.square(mean)
tensorflow.nn.sufficient_statistics
7,560
import tensorflow as tf

    else:
        initial_state = dense(initial_state, cell_state_size, use_bias=True,
                              name='initial_state_projection', activation=activation_fn)

    if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
        initial_output = initial_state
    else:
        # Last layer's state is the right-most part. Output is the left-most part of an LSTM's state.
        initial_output = initial_state[:, -cell_output_size:]

    time = tf.constant(0, dtype=tf.int32, name='time')
    outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
    samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
    inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(
        tf.to_int64(tf.transpose(decoder_inputs)))
    states = tf.TensorArray(dtype=tf.float32, size=time_steps)
    weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
    attns = tf.TensorArray(dtype=tf.float32, size=time_steps)

    initial_symbol = inputs.read(0)  # first symbol is BOS
    initial_input = embed(initial_symbol)
    initial_pos = tf.zeros([batch_size], tf.float32)
    initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
    zero_context = tf.zeros(shape=tf.shape(attention_states[align_encoder_id][:, 0]))  # FIXME
tensorflow.TensorArray
7,561
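Note: a hedged, minimal `tf.TensorArray` sketch in the spirit of the decoder above: a write-per-step accumulator inside a loop, stacked into a dense tensor at the end. Everything here is illustrative, not the repo's code.

import tensorflow as tf

time_steps = 5
ta = tf.TensorArray(dtype=tf.float32, size=time_steps)

def body(t, ta):
    # write a length-3 vector filled with the step index at position t
    return t + 1, ta.write(t, tf.fill([3], tf.cast(t, tf.float32)))

_, ta = tf.while_loop(lambda t, _: t < time_steps, body, [0, ta])
stacked = ta.stack()   # shape [time_steps, 3]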
from tensorflow.python.platform import gfile

            self.assertFalse(gfile.Exists(s1))
            self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
            self.assertTrue(gfile.Exists(s3))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
            self.assertTrue(gfile.Exists(s2))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))

            # Adding s1 (s3 should now be deleted as oldest in list)
            s1 = save.save(sess, os.path.join(save_dir, "s1"))
            self.assertEqual([s2, s1], save.last_checkpoints)
            self.assertFalse(gfile.Exists(s3))
            self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
            self.assertTrue(gfile.Exists(s2))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
            self.assertTrue(gfile.Exists(s1))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))

            # Exercise the second helper.

            # Adding s2 again (old s2 is removed first, then new s2 appended)
            s2 = save2.save(sess, os.path.join(save_dir, "s2"))
            self.assertEqual([s3, s2], save2.last_checkpoints)
            # Created by the first helper.
            self.assertTrue(gfile.Exists(s1))
            self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
            # Deleted by the first helper.
            self.assertFalse(gfile.Exists(s3))
            self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
tensorflow.python.platform.gfile.Exists
7,562
from tensorflow.python.ops import init_ops

        loss = control_flow_ops.with_dependencies(list(update_ops), loss)

    # Learning rate variable, with possible decay.
    lr = None
    if learning_rate is not None:
        if (isinstance(learning_rate, ops.Tensor) and
                learning_rate.get_shape().ndims == 0):
            lr = learning_rate
        elif isinstance(learning_rate, float):
            if learning_rate < 0.0:
                raise ValueError("Invalid learning_rate %s.", learning_rate)
            lr = vs.get_variable(
                "learning_rate", [],
                trainable=False,
                initializer=init_ops.constant_initializer(learning_rate))
        else:
            raise ValueError("Learning rate should be 0d Tensor or float. "
                             "Got %s of type %s" % (str(learning_rate),
                                                    str(type(learning_rate))))

    if summaries is None:
        summaries = ["loss", "learning_rate", "global_gradient_norm"]
    else:
        for summ in summaries:
            if summ not in OPTIMIZER_SUMMARIES:
                raise ValueError("Summaries should be one of [%s], you provided %s." %
                                 (", ".join(OPTIMIZER_SUMMARIES), summ))

    if learning_rate is not None and learning_rate_decay_fn is not None:
        if global_step is None:
tensorflow.python.ops.init_ops.constant_initializer
7,563
import tensorflow as tf

    if use_bias:
        b = tf.get_variable(
            "b", [hidden], initializer=tf.constant_initializer(0.))
        res = tf.nn.bias_add(res, b)
    res = tf.reshape(res, out_shape)
    return res
tensorflow.nn.bias_add
7,564
import tensorflow as tf

        params = self.parameterizer(x1)
        mus, log_sigmas = params[:, :, :, 0::2], params[:, :, :, 1::2]
        x2, ildj = gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
        return x2, ildj


def log_gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):
    """
    Standardize log normal random variable x using mus and log_sigmas.
    """
    if inverse:
tensorflow.constant
7,565
import tensorflow as tf

                          create_session=True))
    # pylint: enable=g-long-lambda
    def test_infer_feature_schema(self, make_tensors_fn, feature_spec,
                                  domains=None, create_session=False):
        with tf.compat.v1.Graph().as_default() as graph:
            tensors = make_tensors_fn()

        if create_session:
            with tf.compat.v1.Session(graph=graph) as session:
                schema = schema_inference.infer_feature_schema(tensors, graph, session)
        else:
tensorflow.compat.v1.Graph
7,566
import tensorflow as tf

def zeros(shape, dtype=K.floatx()):
    """Return all-zeros tensor of given shape and type."""
    # As of Keras version 1.1.0, Keras zeros() requires integer values
    # in shape (e.g. calling np.zeros() with the Theano backend) and
    # thus can't be called with tensor values. This version avoids the
    # issue by using the backend zeros() instead.
    if K.backend() == 'theano':
        from theano import tensor as T
        return T.zeros(shape, dtype)
    else:
        assert K.backend() == 'tensorflow'
        import tensorflow as tf
        return tf.zeros(shape, dtype)


def values(value, shape, dtype=K.floatx()):
    """Return tensor of given shape and type filled with given value."""
    return value * ones(shape, dtype)  # or zeros() + ?


def meshgrid(i, j, indexing='ij'):
    """Return matrices broadcasting indices on a 2d grid.

    This is a partial backend-independent version of TensorFlow meshgrid()
tensorflow.zeros
7,567
from tensorflow.python.framework import ops

                            common_shapes.unchanged_shape_with_rank(2))


@ops.RegisterShape("InTopK")
def _InTopKShape(op):
  """Shape function for InTopK op."""
tensorflow.python.framework.ops.RegisterShape
7,568
import tensorflow as tf

            self.assertAllClose(v_true.eval(), v_false.eval())

        def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
            cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
            return tf.nn.seq2seq.embedding_rnn_seq2seq(
                enc_inp, dec_inp, cell, num_encoder_symbols,
                num_decoder_symbols, embedding_size=2,
                feed_previous=feed_previous)

        def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
            cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
            return tf.nn.seq2seq.embedding_rnn_seq2seq(
                enc_inp, dec_inp, cell, num_encoder_symbols,
                num_decoder_symbols, embedding_size=2,
                feed_previous=feed_previous)

        def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
            cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
            return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
                enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
tensorflow.nn.rnn_cell.BasicLSTMCell
7,569
import tensorflow as tf

        if self.inputs.get_shape().ndims != 2:
            raise Exception("The input dimension must be rank 2, please reshape or flatten it")

        n_in = int(self.inputs.get_shape()[-1])
        with tf.variable_scope(name):
            W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init,
                                dtype=LayersConfig.tf_dtype, **W_init_args)
            if b_init is not None:
                try:
                    b = tf.get_variable(name='b', shape=(n_units), initializer=b_init,
                                        dtype=LayersConfig.tf_dtype, **b_init_args)
                except Exception:  # If initializer is a constant, do not specify shape.
                    b = tf.get_variable(name='b', initializer=b_init,
                                        dtype=LayersConfig.tf_dtype, **b_init_args)
                self.outputs = act(tf.matmul(self.inputs, W) + b)
            else:
                self.outputs = act(tf.matmul(self.inputs, W))

        self.all_layers.append(self.outputs)
        if b_init is not None:
            self.all_params.extend([W, b])
        else:
            self.all_params.append(W)


class ReconLayer(DenseLayer):
tensorflow.matmul
7,570
import tensorflow as tf

        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        label_ids = tf.reshape(label_ids, [-1])
        label_weights = tf.reshape(label_weights, [-1])

        one_hot_labels = tf.one_hot(
            label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
tensorflow.one_hot
7,571
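Note: a small sketch of the one-hot cross-entropy pattern the row above uses, with illustrative shapes: `tf.one_hot` turns integer ids into a dense target distribution that is dotted with log-probabilities to get a per-example loss.

import tensorflow as tf

label_ids = tf.constant([2, 0])                  # [batch]
log_probs = tf.math.log(tf.constant(
    [[0.1, 0.2, 0.7],
     [0.8, 0.1, 0.1]]))                          # [batch, vocab]

one_hot = tf.one_hot(label_ids, depth=3, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot * log_probs, axis=-1)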
import tensorflow as tf

        inputs = tf.identity(inputs, 'initial_conv')

        # We do not include batch normalization or activation functions in V2
        # for the initial conv1 because the first ResNet unit will perform these
        # for both the shortcut and non-shortcut paths as part of the first
        # block's projection. Cf. Appendix of [2].
        if self.resnet_version == 1:
            inputs = batch_norm(inputs, training, self.data_format)
            inputs = tf.nn.relu(inputs)

        if self.first_pool_size:
            inputs = tf.layers.max_pooling2d(
                inputs=inputs, pool_size=self.first_pool_size,
                strides=self.first_pool_stride, padding='SAME',
                data_format=self.data_format)
            inputs = tf.identity(inputs, 'initial_max_pool')
tensorflow.nn.relu
7,572
import tensorflow as tf

def cnn_bi_lstm_model(x, amp_factor, bil_lstm_win_size, num_classes):
    logits = cnn_model(x, amp_factor=amp_factor)
    logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256 * amp_factor])

    forward_cell = tf.nn.rnn_cell.LSTMCell(128)
    backward_cell = tf.nn.rnn_cell.LSTMCell(128)
    encoder_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
        forward_cell, backward_cell, logits,
tensorflow.nn.rnn_cell.LSTMCell
7,573
import tensorflow as tf

    Returns:
      A dictionary containing at least the following tensors.
        box_encodings: A float tensor of shape
          [batch_size, num_anchors, q, code_size] representing the location of
          the objects, where q is 1 or the number of classes.
        class_predictions_with_background: A float tensor of shape
          [batch_size, num_anchors, num_classes + 1] representing the class
          predictions for the proposals.
    """
    with tf.variable_scope(scope):
      return self._predict(image_features, num_predictions_per_location,
                           **params)

  # TODO: num_predictions_per_location could be moved to constructor.
  # This is currently only used by ConvolutionalBoxPredictor.
  @abstractmethod
  def _predict(self, image_features, num_predictions_per_location, **params):
    """Implementations must override this method.
tensorflow.variable_scope
7,574
import tensorflow as tf

            transformer_output = self.transparent_merger[0].FProp(
                theta.transparent_merger[0], outputs_list)
        else:
            transformer_output = []
            for i in range(p.num_transparent_outputs):
                merged_outputs = self.transparent_merger[i].FProp(
                    theta.transparent_merger[i], outputs_list)
                transformer_output.append(merged_outputs)
            if p.is_eval:
                transformer_output = tf.stack(transformer_output, 3)

        return transformer_output, paddings, src_segment_id
tensorflow.stack
7,575
import tensorflow as tf

        fields.InputDataFields.original_image].dtype, tf.uint8)
    self.assertAllEqual(transformed_inputs[
        fields.InputDataFields.original_image_spatial_shape], [4, 4])
    self.assertAllEqual(transformed_inputs[
        fields.InputDataFields.original_image].shape, [8, 8, 3])
    self.assertAllEqual(transformed_inputs[
        fields.InputDataFields.groundtruth_instance_masks].shape, [2, 8, 8])

  def test_applies_model_preprocess_fn_to_image_tensor(self):
    np_image = np.random.randint(256, size=(4, 4, 3))
    tensor_dict = {
        fields.InputDataFields.image: tf.constant(np_image),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }

    def fake_model_preprocessor_fn(image):
      return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))

    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=fake_model_preprocessor_fn,
tensorflow.constant
7,576
import tensorflow as tf

            reward_buffer[:, t+1:] *= y
            discounted_reward[:, t] = np.sum(reward_buffer[:, t:], 1)
    return np.reshape(discounted_reward, (batch_size * trace_length))


def make_cube(trace_length):
    cube = tf.Variable(tf.zeros([trace_length, trace_length, trace_length]))
    cube_ops = []

    for i in range(trace_length):
        cube_ops.append(cube[i, :(i+1), :(i+1)].assign(tf.ones([i+1, i+1])))

    return cube, cube_ops
tensorflow.ones
7,577
import tensorflow as tf

    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
        d = d.repeat()
        d = d.shuffle(buffer_size=100)

    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
tensorflow.constant
7,578
import tensorflow as tf

            pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1,
                                   output_type=tf.int32)
            correct = tf.equal(
                tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
                tf.cast(pred_label, tf.int32)
            )
            te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        except:
            te_accuracy = tf.constant(0.0)
            st_accuracy = tf.constant(0.0)

        try:
            st_accuracy = tf.reduce_mean(distillation_loss["src_f1_prob"])
            te_accuracy = tf.reduce_mean(distillation_loss["tgt_f1_prob"])
        except:
            te_accuracy = tf.constant(0.0)
            st_accuracy = tf.constant(0.0)

        return {
            "train": {
                "loss": loss,
                "logits": logits,
                "train_op": train_op,
tensorflow.reduce_mean
7,579
from tensorflow.python.training import summary_io

        # TODO(mdan): This line looks redundant.
        if self._summary_writer is None:
            self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)
tensorflow.python.training.summary_io.SummaryWriter
7,580
import tensorflow as tf

        # when the window size is non-zero, we concatenate consecutive encoder states
        # and map it to the right attention vector size.
        weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
        weighted_average = []

        for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
            pos_ = pos + offset
            pos_ = tf.minimum(pos_, encoder_input_length - 1)
            pos_ = tf.maximum(pos_, 0)  # TODO: when pos is < 0, use <S> or </S>
            weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
            weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
            weighted_average.append(weighted_average_)

        weighted_average = tf.concat(weighted_average, axis=1)
        weighted_average = dense(weighted_average, encoder.attn_size)
    elif pos is not None:
        weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
        weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
    else:
        # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
tensorflow.expand_dims
7,581
import tensorflow as tf

            tf.summary.scalar(name, value)

        summary_op = tf.summary.merge_all()
        return (summary_op, monitored_values)

    def _make_var(self, name, shape, dtype=None, no_reg=False,
                  initializer=None, init_constant=None, trainable=True):
        if initializer is None:
            if init_constant is not None:
                initializer = tf.constant_initializer(init_constant, dtype=tf.float32)
            else:
                initializer = tf.contrib.keras.initializers.he_normal()

        # Ensure that name is unique by shape too
        name += '-shape-{}'.format('x'.join([str(x) for x in shape]))

        var = tf.get_variable(name, shape=shape, dtype=dtype,
                              initializer=initializer, trainable=trainable)
tensorflow.constant_initializer
7,582
import tensorflow as tf

            for layer_id in range(self.lstm_num_layers):
                with tf.variable_scope("layer_{}".format(layer_id)):
                    w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
                    self.w_lstm.append(w)

            self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
            with tf.variable_scope("emb"):
                self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
            with tf.variable_scope("softmax"):
                self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
                b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),
                                  dtype=np.float32)
                self.b_soft = tf.get_variable(
                    "b", [1, self.num_branches],
                    initializer=tf.constant_initializer(b_init))
tensorflow.variable_scope
7,583
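A minimal sketch of how tf.variable_scope keeps per-layer weights distinct, as in the layer loop above; assumes TF 1.x:

import tensorflow as tf

# Scopes prefix variable names, so each layer gets its own 'w'.
for layer_id in range(2):
    with tf.variable_scope('layer_{}'.format(layer_id)):
        w = tf.get_variable('w', shape=[2, 2])
        print(w.name)  # layer_0/w:0, then layer_1/w:0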
from tensorflow.python.platform import gfile # Adding s1 (s3 should not be deleted because helper is unaware of it) s1 = save3.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save3.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) def testSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded") try: gfile.DeleteRecursively(save_dir) except OSError:
tensorflow.python.platform.gfile.Exists
7,584
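A minimal sketch of gfile.Exists, assuming TF 1.x; the path is illustrative. gfile mirrors os/path utilities but also understands remote filesystems such as GCS:

from tensorflow.python.platform import gfile

path = '/tmp/example.txt'  # illustrative path
with gfile.GFile(path, 'w') as f:
    f.write('hello')
print(gfile.Exists(path))  # True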
from tensorflow.python.framework import constant_op metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkTensorData(self): def _input_fn(): iris = test_data.prepare_iris_data_for_logistic_regression() features = {} for i in range(4): # The following shows how to provide the Tensor data for # RealValuedColumns. features.update({ str(i): array_ops.reshape( constant_op.constant( iris.data[:, i], dtype=dtypes.float32), (-1, 1)) }) # The following shows how to provide the SparseTensor data for # a SparseColumn. features['dummy_sparse_column'] = sparse_tensor.SparseTensor( values=('en', 'fr', 'zh'), indices=((0, 0), (0, 1), (60, 0)), dense_shape=(len(iris.target), 2)) labels = array_ops.reshape( constant_op.constant( iris.target, dtype=dtypes.int32), (-1, 1)) return features, labels iris = test_data.prepare_iris_data_for_logistic_regression()
tensorflow.python.framework.constant_op.constant
7,585
import tensorflow as tf loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator."""
tensorflow.contrib.tpu.TPUEstimatorSpec
7,586
import tensorflow as tf
    dst_paths.append(dst_path)

print("skipping %d files that already exist" % skipped)

global total
total = len(src_paths)

print("processing %d files" % total)

global start
start = time.time()

if a.workers == 1:
    with tf.Session() as sess:
        for src_path, dst_path in zip(src_paths, dst_paths):
            process(src_path, dst_path)
            complete()
else:
    # Materialize the pairs: zip() is a lazy iterator in Python 3 and
    # cannot be converted to a tensor by input_producer.
    queue = tf.train.input_producer(list(zip(src_paths, dst_paths)), shuffle=False, num_epochs=1)
    dequeue_op = queue.dequeue()

    def worker(coord):
        with sess.as_default():
            while not coord.should_stop():
                try:
                    src_path, dst_path = sess.run(dequeue_op)
                except tf.errors.OutOfRangeError:
                    coord.request_stop()
tensorflow.Session
7,587
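A minimal sketch of tf.Session, assuming TF 1.x; the graph is illustrative:

import tensorflow as tf

x = tf.constant(3.0)
y = x * 2.0

# A Session owns the runtime state and evaluates graph nodes on demand.
with tf.Session() as sess:
    print(sess.run(y))  # 6.0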
import tensorflow as tf max_outputs, iou_threshold, score_threshold,inputs): boxes, objects, classes = [], [], [] dtype = inputs[0].dtype for i, logits in enumerate(inputs): print(i,mask[i]) stride = strides[i] anchors = anchorlist[mask[i]] x_shape = tf.shape(logits) logits = tf.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5)) box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1) box_xy = tf.sigmoid(box_xy) obj = tf.sigmoid(obj) cls = tf.sigmoid(cls) anchors = anchors.astype(np.float32) grid_shape = x_shape[1:3] # print(grid_shape) grid_h, grid_w = grid_shape[0], grid_shape[1] # print(grid_h,tf.range(grid_h)) grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h)) grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
tensorflow.sigmoid
7,588
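A minimal sketch of tf.sigmoid, as applied to the objectness and class logits above; assumes TF 1.x:

import tensorflow as tf

logits = tf.constant([-2.0, 0.0, 2.0])

# Squash raw logits into (0, 1).
probs = tf.sigmoid(logits)

with tf.Session() as sess:
    print(sess.run(probs))  # ~[0.119 0.5 0.881]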
from tensorflow.python.ops import gen_math_ops """ return gen_math_ops._range(start, limit, delta, name=name)
tensorflow.python.ops.gen_math_ops._range
7,589
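The tagged _range is a generated internal op; tf.range is its public wrapper, exactly as the seed's thin function suggests. A minimal sketch, assuming TF 1.x:

import tensorflow as tf

r = tf.range(0, 10, 2)  # start, limit, delta

with tf.Session() as sess:
    print(sess.run(r))  # [0 2 4 6 8]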
import tensorflow as tf out += aux out = activation(out) if dropout > 0: out = tf.layers.dropout(out, rate=dropout, training=training) if sum(dim[2]) > 2: out = deconv2d(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out"%scope, training=training, ema=ema, init=init) else: out = conv2d(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out"%scope, training=training, ema=ema, init=init) h_stack1, h_stack2 = tf.split(out, 2, 3) sigmoid_out = tf.sigmoid(h_stack2) out = (h_stack1 * sigmoid_out) out_shp = out.get_shape().as_list() if out_shp[1:-1] < in_shp[1:-1]: x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1], strides=[1, dim[2][0], dim[2][1], 1], padding='SAME') elif out_shp[1:-1] > in_shp[1:-1]: warnings.warn( "The height and width of the output are larger than the input. There will be no residual connection.") residual = False
tensorflow.split
7,590
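A minimal sketch of tf.split on the channel axis, as with the gated h_stack1/h_stack2 above; assumes TF 1.x and an illustrative shape:

import tensorflow as tf

x = tf.ones([1, 4, 4, 8])

# Split the channel axis into two equal halves.
a, b = tf.split(x, 2, axis=3)

with tf.Session() as sess:
    print(sess.run(a).shape, sess.run(b).shape)  # (1, 4, 4, 4) twice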
import tensorflow as tf def testEmbeddingAttentionDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 cell = tf.nn.rnn_cell.GRUCell(2) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec, mem = tf.nn.seq2seq.embedding_attention_decoder( dec_inp, enc_state, attn_states, cell, num_symbols=4, embedding_size=2, output_size=3) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape)
tensorflow.nn.seq2seq.embedding_attention_decoder
7,591
import tensorflow as tf
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
fnegtive_mask = tf.cast(negtive_mask, tf.float32)
n_negtives = tf.reduce_sum(fnegtive_mask)

n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))

# hard negative mining for classification
predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
prob_for_negtives = tf.where(negtive_mask,
                             0. - predictions_for_bg,
                             # ignore all the positives
                             0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=n_neg_to_select)
selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1]

# # randomly select negative examples for classification
# selected_neg_mask = tf.random_uniform(tf.shape(gscores), minval=0, maxval=1.) < tf.where(
#     tf.greater(n_negtives, 0),
#     tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
#     tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
#     name='rand_select_negtive')

# include both the selected negatives and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))
tensorflow.nn.top_k
7,592
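A minimal sketch of tf.nn.top_k; the seed uses the smallest returned value as a threshold for hard negative mining. Assumes TF 1.x, scores illustrative:

import tensorflow as tf

scores = tf.constant([0.1, 0.9, 0.4, 0.7])

# top_k returns the k largest values and their indices.
values, indices = tf.nn.top_k(scores, k=2)

with tf.Session() as sess:
    print(sess.run(values), sess.run(indices))  # [0.9 0.7] [1 3]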
import tensorflow as tf direct_mask = tf.greater(sl_col, sl_row) # bl,bl direct_mask_tile = tf.tile( tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1]) # bs,bn,bl,bl rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1]) # bs,bn,bl,bl rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl]) # bs,bn,bl,bl rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2) attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl
tensorflow.expand_dims
7,593
import tensorflow as tf # regularizer = tf.contrib.layers.l2_regularizer(0.001) # reg = regularizer(embedding_variable) # loss += reg return loss def crf_decode_layer(self, logits, crf_params, nwords): with tf.name_scope("CRF_decode"): pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords) return pred_ids def compute_metrics(self, tags, pred_ids, num_tags, indices, nwords): weights = tf.sequence_mask(nwords) # metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids)
tensorflow.contrib.crf.crf_decode
7,594
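A minimal sketch of the tagged tf.contrib.crf.crf_decode signature as it exists in TF 1.x contrib; shapes are illustrative:

import tensorflow as tf

num_tags = 3
logits = tf.random_normal([2, 5, num_tags])     # [batch, time, tags]
trans = tf.random_normal([num_tags, num_tags])  # CRF transition params
lengths = tf.constant([5, 3])                   # valid length per example

# Viterbi decode: best tag sequence per example plus its unnormalized score.
pred_ids, best_score = tf.contrib.crf.crf_decode(logits, trans, lengths)

with tf.Session() as sess:
    print(sess.run(pred_ids).shape)  # (2, 5)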
import tensorflow as tf # convert to RGB array x *= 255 if K.image_dim_ordering() == 'th': x = x.transpose((1, 2, 0)) x = np.clip(x, 0, 255).astype('uint8') return x def _compute_gradients(tensor, var_list): grads = tf.gradients(tensor, var_list) return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)] def grad_cam(input_model, image, category_index, layer_name): nb_classes = 1000 target_layer = lambda x: target_category_loss(x, category_index, nb_classes) x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output) model = Model(inputs=input_model.input, outputs=x) #model.summary() loss = K.sum(model.output) conv_output = [l for l in model.layers if l.name == layer_name][0].output #is
tensorflow.zeros_like
7,595
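A minimal sketch of the None-gradient replacement pattern from _compute_gradients above, assuming TF 1.x; the variables are illustrative:

import tensorflow as tf

x = tf.Variable([1.0, 2.0])
unused = tf.Variable([3.0])
loss = tf.reduce_sum(x * x)

# tf.gradients returns None for variables the loss does not depend on;
# zeros_like substitutes a zero tensor of matching shape.
grads = tf.gradients(loss, [x, unused])
grads = [g if g is not None else tf.zeros_like(v)
         for v, g in zip([x, unused], grads)]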
import tensorflow as tf # Create tensors of pairwise differences for logits and labels, and # pairwise products of weights. These have shape # [batch_size, batch_size, num_labels]. logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1) labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1) weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1) signed_logits_difference = labels_difference * logits_difference
tensorflow.expand_dims
7,596
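A minimal sketch of the pairwise-difference trick above, assuming TF 1.x; a 1-D vector stands in for the logits/labels tensors:

import tensorflow as tf

v = tf.constant([1.0, 3.0, 6.0])

# Broadcasting a row ([1, n]) against a column ([n, 1]) yields the full
# pairwise-difference matrix: pairwise[i][j] = v[j] - v[i].
pairwise = tf.expand_dims(v, 0) - tf.expand_dims(v, 1)

with tf.Session() as sess:
    print(sess.run(pairwise))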
import tensorflow as tf layer4, weights4 = new_fc_layer(input=layer3, name="fc1", num_inputs=num_features, num_outputs=fc_size1) # print(layer4) with tf.variable_scope('fc2'): logits, weights5 = new_fc_layer(input=layer4, name="fc2", num_inputs=fc_size1, num_outputs=fc_size2)
tensorflow.variable_scope
7,597
import tensorflow as tf Returns ------- tf.Tensor: Of shape (n_test, n_support) """ rnorm_test = tf.rsqrt( tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7 rnorm_support = tf.rsqrt( tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7 test_normalized = test * rnorm_test support_normalized = support * rnorm_support # Transpose for mul
tensorflow.square
7,598
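A minimal sketch of the cosine-similarity computation above (row-normalize with rsqrt of squared row sums, then matmul); assumes TF 1.x, values illustrative:

import tensorflow as tf

test = tf.constant([[3.0, 4.0]])
support = tf.constant([[6.0, 8.0], [4.0, -3.0]])

rn_test = tf.rsqrt(tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7
rn_sup = tf.rsqrt(tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7
sims = tf.matmul(test * rn_test, support * rn_sup, transpose_b=True)

with tf.Session() as sess:
    print(sess.run(sims))  # ~[[1.0, 0.0]]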
import tensorflow as tf # logloss not being an upper bound on the indicator function. weighted_loss = weights * losses_utils.weighted_surrogate_loss( labels, logits, surrogate_type, positive_weights=lambdas, negative_weights=1.0) maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) lambda_term = lambdas * label_priors * (target_recall - 1.0) * maybe_log2
tensorflow.log
7,599
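A minimal sketch of tf.log, assuming TF 1.x; the seed multiplies by tf.log(2.0) to rescale its cross-entropy surrogate, the same nats-to-bits factor shown here:

import tensorflow as tf

# tf.log is the natural log; dividing by tf.log(2.0) converts nats to bits.
nats = tf.log(8.0)
bits = nats / tf.log(2.0)

with tf.Session() as sess:
    print(sess.run(bits))  # 3.0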