Columns in this dump:
seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, 0 to 14.8k
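Each row below is one record with exactly these three fields: a TensorFlow code snippet (seed), the fully qualified API it exercises (seed_api), and a running row index. As a minimal, hypothetical sketch of how such records could be inspected in Python — the JSON-lines file name and layout are assumptions, since this dump does not say how the rows are distributed:

import json

# Hypothetical file name; the dump does not state how the records are stored.
with open("seed_records.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # Each record carries the three columns described above.
        print(record["index"], record["seed_api"], len(record["seed"]))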
import tensorflow as tf

    w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
    w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
    b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
    net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
    with tf.variable_scope('l2'):
        net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
    with tf.variable_scope('q'):
        q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable)  # Q(s,a)
    return q

def learn(self, s, a, r, s_, ISW):
    _, abs_td = self.sess.run([self.train_op, self.abs_td], feed_dict={S: s, self.a: a, R: r, S_: s_, self.ISWeights: ISW})
    if self.t_replace_counter % self.t_replace_iter == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
    self.t_replace_counter += 1
tensorflow.layers.dense
4,700
import tensorflow as tf

    all_previous_modalities = []
    # Transform the input features
    for key, input_modality in six.iteritems(self._problem_hparams.input_modality):
        if key not in features:
            tf.logging.warning("Missing feature %s - ignoring." % key)
            continue
        do_reuse = input_modality.name in all_previous_modalities
        with tf.variable_scope(input_modality.name, reuse=do_reuse):
            log_info("Transforming feature '%s' with %s.bottom", key, input_modality.name)
tensorflow.logging.warning
4,701
import tensorflow as tf

with tarfile.open(compressed_filepath, mode) as corpus_tar:
    corpus_tar.extractall(tmp_dir)
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
for tsv_filename in filenames:
    if tsv_filename.endswith(".gz"):
        new_filename = tsv_filename.strip(".gz")
        generator_utils.gunzip_file(tsv_filename, new_filename)
        tsv_filename = new_filename
    with tf.gfile.Open(tsv_filename) as tsv_file:
        for line in tsv_file:
            if line and "\t" in line:
                parts = line.split("\t")
                source, target = parts[src_column], parts[trg_column]
                source, target = source.strip(), target.strip()
                if source and target:
                    lang1_resfile.write(source)
tensorflow.gfile.Open
4,702
import tensorflow as tf

q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked

# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = tf_util.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)

tf.summary.scalar("td_error", tf.reduce_mean(td_error))
tf.summary.scalar("loss", weighted_error)
tensorflow.stop_gradient
4,703
from tensorflow.python.framework import ops

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32` otherwise the
    return type is `quint8`.
  """
  with ops.op_scope([x], name, "Tanh") as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(x, name=name)


ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
4,704
import tensorflow as tf

a = 2
values = interpolated
inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0])
print(mtype, fig_obj_count, 0)

values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])
print(mtype, fig_obj_count, 1)

if mtype == 1:
tensorflow.nn.relu
4,705
import tensorflow as tf from six.moves import urllib import tensorflow as tf from datasets import dataset_utils tf.app.flags.DEFINE_integer('train_shards', 1000, 'Number of shards in training TFRecord files.') FLAGS = tf.app.flags.FLAGS # The URL where the CIFAR data can be downloaded.
tensorflow.app.flags.DEFINE_integer
4,706
import tensorflow as tf

idx_z1_y1_x0 = base_z1_y1 + x0_clip
idx_z1_y1_x1 = base_z1_y1 + x1_clip

# Use indices to lookup pixels in the flat image and restore
# channels dim
im_flat = tf.reshape(im, tf.stack([-1, channels]))
im_flat = tf.to_float(im_flat)
i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0)
i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1)
i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0)
i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1)
i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0)
i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1)
i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0)
i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)
tensorflow.gather
4,707
import tensorflow as tf

with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)):
    with tf.variable_scope("global"):
        self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)
        self.global_step = tf.get_variable("global_step", [], tf.int32,
                                           initializer=tf.constant_initializer(0, dtype=tf.int32),
                                           trainable=False)

with tf.device(worker_device):
    with tf.variable_scope("local"):
        self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)
        pi.global_step = self.global_step

    self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac")
    self.adv = tf.placeholder(tf.float32, [None], name="adv")
tensorflow.device
4,708
import tensorflow as tf

    all_logits = []
    all_top_1_ops = []
    all_top_5_ops = []
    enqueue_ops = []
    gpu_copy_stage_ops = []
    gpu_compute_stage_ops = []
    gpu_grad_stage_ops = []

    use_synthetic_gpu_images = (self.dataset is None)

    with tf.device(self.global_step_device):
        global_step = tf.contrib.framework.get_or_create_global_step()

    # Build the processing and model for the worker.
    with tf.device(self.cpu_device):
        nclass, images_splits, labels_splits = add_image_preprocessing(
            self.dataset, input_nchan, image_size, self.batch_size,
            len(self.devices), input_data_type, self.resize_method,
            not FLAGS.eval)

    update_ops = None
    staging_delta_ops = []
tensorflow.contrib.framework.get_or_create_global_step
4,709
from tensorflow.python.framework import ops

  merged_shape = merged_shape.merge_with(input_.get_shape())
  return [merged_shape]


@ops.RegisterShape("Select")
def _SelectShape(op):
  # All three inputs must have the same shape.
  return [op.inputs[0].get_shape()
          .merge_with(op.inputs[1].get_shape())
          .merge_with(op.inputs[2].get_shape())]


@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
  """Common shape function for arg-reduction ops."""
  dimension_shape = op.inputs[1].get_shape()
  dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is None:
    return [tensor_shape.unknown_shape()]
  elif input_shape.ndims <= 1:
    return [tensor_shape.scalar()]
tensorflow.python.framework.ops.RegisterShape
4,710
import tensorflow as tf

        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
tensorflow.contrib.tpu.TPUEstimatorSpec
4,711
import tensorflow as tf

def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name):
        # tf.squeeze() returns a tensor with all size-1 dimensions of the original input removed
        batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
        # Get the normalized coordinates of bboxes
        bottom_shape = tf.shape(bottom)
        height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
        width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
        # Dividing the rois by h and w gives their positions on the feature map
        x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
        y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
        x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
        y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
        # Won't be backpropagated to rois anyway, but to save time
        bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
        # 'roi_pooling_size', 7
        pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
        # Crop the parts of the feature map corresponding to the rois, then resize them to 14*14
tensorflow.slice
4,712
import tensorflow as tf

H_pool = tf.concat(pooled_outputs, 3)
# flatten it for fully connected layer
H_pool_flat = tf.reshape(H_pool, [-1, total_filters])

with tf.name_scope("dropout"):
    H_drop = tf.nn.dropout(H_pool_flat, keep_prob=keep_prob)

# Final (unnormalized) layer
with tf.name_scope("output"):
    W = tf.get_variable("W", shape=[total_filters, nb_classes],
                        initializer=tf.contrib.layers.xavier_initializer())
    # add final layer bias
    b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
    # calc l2 losses
    l2_loss += tf.nn.l2_loss(W)
    l2_loss += tf.nn.l2_loss(b)
    # do logit = W*X+b
    logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
    predictions = tf.nn.softmax(logit, name="predictions")

# calculate loss and optimizer
with tf.variable_scope("FCoptimize", reuse=None):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=Y)
tensorflow.constant
4,713
import tensorflow as tf x1_valid = tf.to_float( tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0)) y0_valid = tf.to_float( tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0)) y1_valid = tf.to_float( tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
tensorflow.greater_equal
4,714
import tensorflow as tf

    # Add leading zero element to each element in the L_flat. We use this zero
    # element when gathering L_flat into a lower triangular matrix L.
    nb_rows = tf.shape(L_flat)[0]
    zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
    try:
        # Old TF behavior.
        L_flat = tf.concat(1, [zeros, L_flat])
    except TypeError:
        # New TF behavior
        L_flat = tf.concat([zeros, L_flat], 1)

    # Finally, process each element of the batch.
    def fn(a, x):
        x_ = tf.gather(x, diag_mask)
        return x_

    P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
else:
    raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert P is not None
assert K.ndim(P) == 3

# Combine a, mu and P into a scalar (over the batches). What we compute here is
# -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
# TensorFlow handles vector * P slightly suboptimal, hence we convert the vectors to
# 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
tensorflow.gather
4,715
import tensorflow as tf dec, num_units=embed_size // 2, scope="decoder-highwaynet-{}".format(i) ) with tf.variable_scope("decoder-gru", reuse=False): cell = tf.contrib.rnn.GRUCell(embed_size // 2) cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2) outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
tensorflow.contrib.rnn.GRUCell
4,716
import tensorflow as tf

            except tf.errors.OutOfRangeError:
                break
        print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1))
        return summary


class PPO_HC(PPO):
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params


class PPO_LSTM(Base):
    def __init__(self, env, summary_dir='./', gpu=False):
tensorflow.layers.dense
4,717
import tensorflow as tf

        image = tf.image.draw_bounding_boxes(image, boxes)  # draw the ground-truth boxes on the image
        return tf.summary.image('ground_truth', image)

    def _add_act_summary(self, tensor):
        tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
        tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                          tf.nn.zero_fraction(tensor))

    def _add_score_summary(self, key, tensor):
        tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)

    def _add_train_summary(self, var):
        tf.summary.histogram('TRAIN/' + var.op.name, var)

    # Custom Layers #
    def _reshape_layer(self, bottom, num_dim, name):
        input_shape = tf.shape(bottom)
        with tf.variable_scope(name):
            # change the channel to the caffe format
tensorflow.summary.histogram
4,718
import tensorflow as tf super(DeepLIFTRescale, self).__init__(T, X, session, keras_learning_phase, Y_shape) def get_symbolic_attribution(self): return [g * (x - b) for g, x, b in zip( tf.gradients(ys=self.T, xs=self.X), self.X if self.has_multiple_inputs else [self.X], self.baseline if self.has_multiple_inputs else [self.baseline])]
tensorflow.gradients
4,719
import tensorflow as tf

        result[k] = preds
        losses.append(loss)
    return (result, losses)


""" Test RNN graph multi layer """
def test_rnn_kstep_layer(test_data_x, test_data_y, preds, rnn_outputs, g, checkpoint,
                         input_prob, output_prob, state_prob, num_test, kstep=3):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        result = {}
        "read the trained graph"
        g['saver'].restore(sess, checkpoint)
        losses = []
        for step_num in range(kstep):
            k = step_num + 1
            for index, (X, Y, YP, S) in enumerate(gen_batch_kstep_layer(test_data_x[k:], test_data_y[k:], preds[:-1], rnn_outputs)):
                feed_dict = {g['x']: np.dstack((X, YP)), g['y']: Y, g['init_state']: S,
                             g['batch_size']: num_test, g['input_prob']: input_prob,
                             g['output_prob']: output_prob, g['state_prob']: state_prob}
                preds, rnn_outputs = sess.run([g['preds'], g['final_state']], feed_dict)
tensorflow.global_variables_initializer
4,720
import tensorflow as tf

    def __init__(self):
        self.mse = tf.losses.MeanSquaredError()

    def __call__(self, y_pred, target, target_weight):
        batch_size = y_pred.shape[0]
        num_of_joints = y_pred.shape[-1]
        pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))
        heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)
        gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))
        heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)
        loss = 0.0
        for i in range(num_of_joints):
            heatmap_pred = tf.squeeze(heatmap_pred_list[i])
            heatmap_gt = tf.squeeze(heatmap_gt_list[i])
tensorflow.split
4,721
import tensorflow as tf

    ValueError: If `surrogate_type` is not `xent` or `hinge`.
  """
  with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):
    # Convert inputs to tensors and standardize dtypes.
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)

    # Create tensors of pairwise differences for logits and labels, and
    # pairwise products of weights. These have shape
    # [batch_size, batch_size, num_labels].
    logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
    labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)
    weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)

    signed_logits_difference = labels_difference * logits_difference
    raw_loss = losses_utils.weighted_surrogate_loss(
        labels=tf.ones_like(signed_logits_difference),
        logits=signed_logits_difference,
        surrogate_type=surrogate_type)
    weighted_loss = weights_product * raw_loss
tensorflow.expand_dims
4,722
import tensorflow as tf

  output_layer = model.get_sequence_output()
  hidden_size = output_layer.shape[-1].value

  output_weights = tf.get_variable(
      "output_weights", [hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
    logits = tf.reduce_sum(tf.multiply(output_layer, output_weights), -1)
    logits = tf.add(logits, output_bias)
tensorflow.zeros_initializer
4,723
import tensorflow as tf Returns: A preprocessed image. """ image = _aspect_preserving_resize(image, resize_side) image = _central_crop([image], output_height, output_width)[0] image.set_shape([output_height, output_width, 3]) image = tf.to_float(image) return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) def preprocess_image(image, output_height, output_width, is_training=False, resize_side_min=_RESIZE_SIDE_MIN,
tensorflow.to_float
4,724
import tensorflow as tf

    actions = experience.action[:, 0]
    num_tasks = tasks.shape[0]
    batch_size = states.shape[0]
    task_dim = tasks.shape[1]
    obs_dim = states.shape[1]
    action_dim = actions.shape[1]
    action_spec = self._actor.output_tensor_spec

    states_tiled = tf.tile(states[:, None], [1, num_tasks, 1])  # B x B x D
    states_tiled = tf.reshape(states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
    actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1])  # B x B x D
    actions_tiled = tf.reshape(actions_tiled, [batch_size * num_tasks, action_dim])  # B*B x D
    tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1])  # B x B x D
    tasks_tiled = tf.reshape(tasks_tiled, [batch_size * num_tasks, task_dim])  # B*B x D

    next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])
    next_states_tiled = tf.reshape(next_states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
    next_relabelled_obs = self._task_distribution.combine(next_states_tiled, tasks_tiled)

    sampled_actions_tiled = self._actor(
        next_relabelled_obs, step_type=(), network_state=())[0].sample()
    critic_input = (next_relabelled_obs, sampled_actions_tiled)
    q_vals, _ = self._critic(critic_input, training=False)
    q_vals_vec = tf.reshape(q_vals, (batch_size, num_tasks))
tensorflow.reshape
4,725
import tensorflow as tf pairwise_iou = isu.points_mask_pairwise_iou(masks1=masks1, masks2=masks2) expected_iou = tf.constant([0, 1, 0.4, 0.2], dtype=tf.float32) self.assertAllClose(pairwise_iou.numpy(), expected_iou.numpy()) if __name__ == '__main__': tf.test.main()
tensorflow.test.main
4,726
from tensorflow.python.ops import math_ops Args: required_size: number or tf.Tensor specifying required array capacity. growth_factor: optional number or tf.Tensor specifying the growth factor between subsequent allocations. Returns: tf.Tensor with dtype=int32 giving the next array size. """ exponent = math_ops.ceil( math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(math_ops.cast(growth_factor, dtypes.float32))) return math_ops.cast(math_ops.ceil(growth_factor ** exponent), dtypes.int32) def streaming_concat(values, axis=0, max_size=None, metrics_collections=None, updates_collections=None, name=None): """Concatenate values along an axis across batches. The function `streaming_concat` creates two local variables, `array` and
tensorflow.python.ops.math_ops.ceil
4,727
import tensorflow as tf

        w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())
        b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))
        h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    with tf.variable_scope('layer2'):
        w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())
        b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))
        h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
    with tf.variable_scope('layer3'):
        w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())
        b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))
        y = tf.matmul(h2, w3) + b3
    # losses
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))
    # optimizer
    optimizer = tf.train.GradientDescentOptimizer(args.lr)
    # define one-step train ops
    train_op = optimizer.minimize(cross_entropy)
    return x, y, gt, train_op


if __name__ == "__main__":
    max_train_step = args.max_train_step
    batch_size = args.batch_size
    mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
    x, y, gt, train_op = model()
tensorflow.nn.softmax_cross_entropy_with_logits
4,728
import tensorflow as tf Args: predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network . labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels. eps: a constant to set upper or lower limit for labels, smoothening factor name: Optional scope/name for op_scope. Returns: A tensor with the log loss. """ with tf.name_scope(name): predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = tf.to_float(predictions) labels = tf.to_float(labels) losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply( (1 - labels), tf.log(1 - predictions + eps)) return tf.losses.compute_weighted_loss(losses, weights) def kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'): """Define a kappa loss, Its a continuous differentiable approximation of discrete kappa loss.
tensorflow.to_float
4,729
import tensorflow as tf def build_graph(self, image, label): image = self.image_preprocess(image) assert self.data_format in ['NCHW', 'NHWC'] if self.data_format == 'NCHW': image = tf.transpose(image, [0, 3, 1, 2]) logits = self.get_logits(image, label) loss = ImageNetModel.compute_loss_and_error( logits, label, label_smoothing=self.label_smoothing)
tensorflow.transpose
4,730
import tensorflow as tf ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool") pooled_outputs.append(pooled) with tf.name_scope("preFc"): # combine all pooled outputs total_filters = num_filter * len(filter_list) # concat all the pooled weights H_pool = tf.concat(pooled_outputs, 3) #flatten it for fully connected layer H_pool_flat = tf.reshape(H_pool, [-1, total_filters])
tensorflow.name_scope
4,731
import tensorflow as tf with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
tensorflow.name_scope
4,732
import tensorflow as tf

def merge_prob_dist_for_one_step(self, vocab_dist, attn_dist, p_gen, passage_word_idx, passage_mask=None):
    '''
    max_phrase_size: an input placeholder indicating the maximum phrase size inside this batch
    vocab_dist: [batch_size, vsize]
    attn_dist: [batch_size, passage_length]
    p_gen: [batch_size, 1]
    passage_word_idx: [batch_size, passage_length]
    passage_mask: [batch_size, passage_length]
    '''
    input_shape = tf.shape(vocab_dist)
    batch_size = input_shape[0]
    vsize = input_shape[1]
    passage_length = tf.shape(passage_word_idx)[1]
    with tf.variable_scope('final_distribution'):
        vocab_dist = p_gen * vocab_dist
        attn_dist = (1.0 - p_gen) * attn_dist

        # Concatenate some zeros to each vocabulary dist, to hold the probabilities for phrases
        extended_vsize = vsize
        if self.max_phrase_size is not None:
            extended_vsize += self.max_phrase_size
            extra_zeros = tf.zeros((batch_size, self.max_phrase_size))
            vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1)  # [batch_size, extended_vsize]
        if self.options.add_first_word_prob_for_phrase:  # add prob of the first word to each phrase
            attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words, self.phrase_starts,
                                                           vocab_dist, attn_dist)
tensorflow.shape
4,733
from tensorflow.python.ops import array_ops logging_ops.scalar_summary( ["centered_bias_%d" % cb for cb in range( self._target_column.num_label_columns)], array_ops.reshape(centered_bias, [-1])) return centered_bias def _centered_bias_step(self, targets, features): centered_bias = ops.get_collection(self._centered_bias_weight_collection) batch_size = array_ops.shape(targets)[0] logits = array_ops.reshape( array_ops.tile(centered_bias[0], [batch_size]), [batch_size, self._target_column.num_label_columns]) loss = self._target_column.loss(logits, targets, features) # Learn central bias by an optimizer. 0.1 is a convervative lr for a single # variable. return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)
tensorflow.python.ops.array_ops.shape
4,734
import tensorflow as tf expected_cdf[x < 1] = 0. expected_cdf[x > 10] = 1. self.assertAllClose(expected_cdf, qdist.cdf(x).eval()) def test_sampling_from_batch_of_normals(self): batch_shape = (2,) with self.test_session(): qdist = distributions.QuantizedDistribution( base_dist_cls=distributions.Normal, lower_cutoff=0., upper_cutoff=None, mu=tf.zeros( batch_shape, dtype=tf.float32), sigma=tf.ones( batch_shape, dtype=tf.float32)) samps = qdist.sample_n(n=5000, seed=42) samps_v = samps.eval() # With lower_cutoff = 0, the interval j=0 is (-infty, 0], which holds 1/2 # of the mass of the normals. # rtol chosen to be 2x as large as necessary to pass. self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03) # The interval j=1 is (0, 1], which is from the mean to one standard # deviation out. This should contain 0.6827 / 2 of the mass.
tensorflow.ones
4,735
import tensorflow as tf token_type_ids=token_type_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with tf.variable_scope("encoder"): # Run the stacked transformer. # `sequence_output` shape = [batch_size, seq_length, hidden_size]. self.all_encoder_layers = transformer_model( input_tensor=self.embedding_output, attention_mask=input_mask, hidden_size=config.hidden_size, num_hidden_layers=config.num_hidden_layers,
tensorflow.variable_scope
4,736
import tensorflow as tf with tf.variable_scope("FCoptimize", reuse=None): loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logit, labels=Y) + l2_reg_lambda * l2_loss) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
tensorflow.train.AdamOptimizer
4,737
import tensorflow as tf def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
tensorflow.get_collection_ref
4,738
import tensorflow as tf same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates] same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
tensorflow.expand_dims
4,739
import tensorflow as tf return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=logits)) self.images_real = tf.placeholder(tf.float32,[None,self.image_size,self.image_size,self.input_dim + self.output_dim])
tensorflow.placeholder
4,740
import tensorflow as tf

with tf.variable_scope('self_attention'):  # @2.self-attention in block
    # mask generation
    sl_indices = tf.range(block_len, dtype=tf.int32)
    sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)
    if direction == 'forward':
        direct_mask = tf.greater(sl_row, sl_col)  # bl,bl
    else:
        direct_mask = tf.greater(sl_col, sl_row)  # bl,bl
    direct_mask_tile = tf.tile(
        tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1])  # bs,bn,bl,bl
    rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1])  # bs,bn,bl,bl
    rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl])  # bs,bn,bl,bl
    rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
    attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask')  # bs,bn,bl,bl

    # attention
    f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
    dependent_head = linear(
        rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False, wd, keep_prob, is_train)  # bs,bn,bl,2vec
    dependent, head = tf.split(dependent_head, 2, 3)
    dependent_etd = tf.expand_dims(dependent, 2)  # bs,bn,1,bl,vec
    head_etd = tf.expand_dims(head, 3)  # bs,bn,bl,1,vec
    logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0)  # bs,bn,bl,bl,vec
    logits_masked = exp_mask_for_high_rank(logits, attn_mask)
    attn_score = tf.nn.softmax(logits_masked, 3)  # bs,bn,bl,bl,vec
    attn_score = mask_for_high_rank(attn_score, attn_mask)  # bs,bn,bl,bl,vec
    self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3)  # bs,bn,bl,vec
tensorflow.logical_and
4,741
import tensorflow as tf tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.') tf.app.flags.DEFINE_string('train_data_path', '', 'Filepattern for training data.') tf.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data') tf.app.flags.DEFINE_string('train_dir', '',
tensorflow.app.flags.DEFINE_string
4,742
import tensorflow as tf if FLAGS.job_name == 'ps': log_fn('Running parameter server %s' % self.task_index) self.server.join() return with tf.Graph().as_default(): if FLAGS.eval: self._eval_cnn() else: self._benchmark_cnn()
tensorflow.Graph
4,743
import tensorflow as tf t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.data.experimental.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d
tensorflow.data.TFRecordDataset
4,744
import tensorflow as tf def get_regression_loss( FLAGS, features, is_training): """Loss for downstream regression tasks.""" bsz_per_core = tf.shape(features["input_ids"])[0] inp = tf.transpose(features["input_ids"], [1, 0]) seg_id = tf.transpose(features["segment_ids"], [1, 0]) inp_mask = tf.transpose(features["input_mask"], [1, 0]) label = tf.reshape(features["label_ids"], [bsz_per_core])
tensorflow.shape
4,745
import tensorflow as tf name="conv2d"): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) # print("c", w.get_shape()) conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) return conv class batch_norm(object): def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"): with tf.variable_scope(name): self.epsilon = epsilon self.momentum = momentum self.name = name def __call__(self, x): return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, is_training=tftrain, scope=self.name)
tensorflow.variable_scope
4,746
import tensorflow as tf return safe_get(name, list(shape), initializer=conv_initializer, dtype=tf.float32) def init_fc_weights_snn(shape, name=None): weights = np.random.normal(scale=np.sqrt(1.0/shape[0]), size=shape).astype('f') return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32) def init_conv_weights_snn(shape, name=None): weights = np.random.normal(scale=np.sqrt(1.0/(shape[0]*shape[1]*shape[2])), size=shape).astype('f')
tensorflow.constant_initializer
4,747
import tensorflow as tf def __init__(self, hparams=None): ClassifierBase.__init__(self, hparams) with tf.variable_scope(self.variable_scope): encoder_hparams = utils.dict_fetch( hparams, Conv1DEncoder.default_hparams()) self._encoder = Conv1DEncoder(hparams=encoder_hparams)
tensorflow.variable_scope
4,748
import tensorflow as tf start_top_log_probs, start_top_index = tf.nn.top_k( start_log_probs, k=FLAGS.start_n_top) start_index = tf.one_hot(start_top_index, depth=seq_len, axis=-1, dtype=tf.float32) start_features = tf.einsum("lbh,bkl->bkh", output, start_index) end_input = tf.tile(output[:, :, None], [1, 1, FLAGS.start_n_top, 1]) start_features = tf.tile(start_features[None],
tensorflow.einsum
4,749
import tensorflow as tf gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth') with tf.variable_scope('layer1'): w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer()) b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0)) h1 = tf.nn.relu(tf.matmul(x, w1) + b1) with tf.variable_scope('layer2'): w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer()) b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0)) h2 = tf.nn.relu(tf.matmul(h1, w2) + b2) with tf.variable_scope('layer3'):
tensorflow.variable_scope
4,750
import tensorflow as tf # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas: return output, scores return output
tensorflow.matmul
4,751
import tensorflow as tf kernel_h, kernel_w = kernel_size stride_h, stride_w = stride outputs = tf.nn.max_pool(inputs, ksize=[1, kernel_h, kernel_w, 1],
tensorflow.nn.max_pool
4,752
import tensorflow as tf - **`pred`** is the prediction, a Tensor of shape `[batch_size]` \ and type `tf.int64`. For binary classification, the standard \ sigmoid function is used for prediction, and the class labels are \ `{0, 1}`. """ logits = self._encoder(inputs, sequence_length, dtype, mode) num_classes = self._hparams.num_classes is_binary = num_classes == 1 is_binary = is_binary or (num_classes <= 0 and logits.shape[1] == 1) if is_binary: pred = tf.greater(logits, 0) logits = tf.reshape(logits, [-1]) else: pred = tf.argmax(logits, 1) pred = tf.cast(tf.reshape(pred, [-1]), tf.int64) self._built = True return logits, pred @property def trainable_variables(self): """The list of trainable variables of the module. """
tensorflow.reshape
4,753
import tensorflow as tf batch_size = tf.placeholder(tf.int32, [], name='batch_size') x = tf.placeholder(tf.float32, [None, num_steps, input_size_x], name='x') y = tf.placeholder(tf.float32, [None, num_steps, input_size_y], name='y') input_prob = tf.placeholder(tf.float32, name='input_prob') state_prob = tf.placeholder(tf.float32,name='state_prob') output_prob = tf.placeholder(tf.float32,name='output_prob') rnn_inputs = x """Define a single cell with variational dropout""" def get_a_cell(state_size,input_prob,state_prob,num_input):
tensorflow.placeholder
4,754
import tensorflow as tf p.decoder.target_seq_len = 5 p.encoder.input_shape = input_shape p.input = tig.TestInputGenerator.Params() p.input.target_max_length = 5 p.input.source_shape = input_shape p.input.target_shape = [2, 5] p.name = 'test_mdl' return p def testMakeDecoderTheta(self): # Test that decoder theta returns a copy of theta.decoder without changes. with self.session(use_gpu=False, graph=tf.Graph()): tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None) mdl.BProp() self.assertEqual(decoder_theta, mdl.theta.decoder) def testFProp(self): with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams()
tensorflow.set_random_seed
4,755
import tensorflow as tf """ x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1) x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1) all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) return intersect_heights * intersect_widths
tensorflow.transpose
4,756
import tensorflow as tf def combined_static_and_dynamic_shape(tensor): """Returns a list containing static and dynamic values for the dimensions. Returns a list of static and dynamic values for shape dimensions. This is useful to preserve static shapes when available in reshape operation. Args: tensor: A tensor of any type. Returns: A list of size tensor.shape.ndims containing integers or a scalar tensor. """ static_tensor_shape = tensor.shape.as_list() dynamic_tensor_shape = tf.shape(tensor) combined_shape = [] for index, dim in enumerate(static_tensor_shape): if dim is not None: combined_shape.append(dim) else: combined_shape.append(dynamic_tensor_shape[index]) return combined_shape def static_or_dynamic_map_fn(fn, elems, dtype=None, parallel_iterations=32, back_prop=True): """Runs map_fn as a (static) for loop when possible.
tensorflow.shape
4,757
import tensorflow as tf Returns: A logits Tensor with shape [<batch_size>, self.num_classes]. """ with self._model_variable_scope(): if self.data_format == 'channels_first': # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). # This provides a large performance boost on GPU. See # https://www.tensorflow.org/performance/performance_guide#data_formats inputs = tf.transpose(inputs, [0, 3, 1, 2]) inputs = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) inputs = tf.identity(inputs, 'initial_conv') # We do not include batch normalization or activation functions in V2 # for the initial conv1 because the first ResNet unit will perform these # for both the shortcut and non-shortcut paths as part of the first
tensorflow.transpose
4,758
import tensorflow as tf "char_mat", initializer=tf.constant(char_mat, dtype=tf.float32)) self.c_mask = tf.cast(self.c, tf.bool) self.q_mask = tf.cast(self.q, tf.bool) self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1) self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1) if opt: # we have to hardcode the max batch size here! use the batch size from the generator as this will be used for PG N, CL = config.batch_size if not self.demo else config.batch_size, config.char_limit
tensorflow.cast
4,759
import tensorflow as tf output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with tf.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 tf.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples:
tensorflow.logging.info
4,760
import tensorflow as tf self.w3=tf.get_variable('w3', [3072,512],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w4=tf.get_variable('w4', [512,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.b1 = tf.get_variable('b1', [2048],initializer=tf.constant_initializer(0.0)) self.b2 = tf.get_variable('b2', [3072],initializer=tf.constant_initializer(0.0)) self.b3 = tf.get_variable('b3', [512],initializer=tf.constant_initializer(0.0)) self.b4 = tf.get_variable('b4', [classnum],initializer=tf.constant_initializer(0.0)) def inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) l2 = tf.matmul(l1, self.w2)+self.b2 l2=tf.nn.relu(l2) l3=tf.matmul(l2, self.w3)+self.b3 l3=tf.nn.relu(l3) out=tf.matmul(l3, self.w4)+self.b4 return out def test_inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) l2 = tf.matmul(l1, self.w2)+self.b2 l2=tf.nn.relu(l2) l3=tf.matmul(l2, self.w3)+self.b3 l3=tf.nn.relu(l3)
tensorflow.matmul
4,761
import tensorflow as tf """Run cnn in benchmark mode. When forward_only on, it forwards CNN.""" (enqueue_ops, fetches) = self._build_model() main_fetch_group = tf.group(*fetches) execution_barrier = None if self.job_name and not FLAGS.cross_replica_sync: execution_barrier = self.add_sync_queues_and_barrier( 'execution_barrier_', []) global_step = tf.contrib.framework.get_global_step() with tf.device(self.global_step_device): with tf.control_dependencies([main_fetch_group]): inc_global_step = global_step.assign_add(1) fetches.append(inc_global_step) if self.job_name and FLAGS.cross_replica_sync: # Block all replicas until all replicas are ready for next step. fetches.append(self.add_sync_queues_and_barrier( 'sync_queues_step_end_', [main_fetch_group])) variable_mgr_post_init_ops = self.variable_mgr.get_post_init_ops()
tensorflow.control_dependencies
4,762
import tensorflow as tf state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape) hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape)
tensorflow.nn.dropout
4,763
import tensorflow as tf self.data_format = data_format if( data_format =='NCHW' ): self.strides = [1, 1, d_h, d_w] else: self.strides = [1, d_h, d_w, 1] def __call__(self,input_var,name=None,**xargs): shapes = tf.shape(input_var) if( self.data_format == 'NCHW' ): shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]]) else: shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]]) return tf.nn.bias_add( tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes,
tensorflow.shape
4,764
import tensorflow as tf .. code-block:: python is_train = session.run(global_mode_train()) # is_train == True is_train = session.run( global_mode_train() feed_dict={tf.global_mode(): tf.estimator.ModeKeys.PREDICT}) # is_train == False """ mode = global_mode() return tf.equal(mode, tf.estimator.ModeKeys.TRAIN) def global_mode_eval(): """Returns a bool Tensor indicating whether the global mode is EVAL. """ mode = global_mode() return tf.equal(mode, tf.estimator.ModeKeys.EVAL) def global_mode_predict():
tensorflow.equal
4,765
import tensorflow as tf 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))) nh = np.prod([v.value for v in c3.get_shape()[1:]]) h3 = tf.reshape(c3, [-1, nh]) pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) l1 = tf.layers.dense(inputs=pre_s,
tensorflow.reshape
4,766
import tensorflow as tf sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'labsheets', 'CIFAR10')) import cifar10 as cf FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('data-dir', os.getcwd() + '/dataset/', 'Directory where the dataset will be stored and checkpoint. (default: %(default)s)') tf.app.flags.DEFINE_integer('max-steps', 10000, 'Number of mini-batches to train on. (default: %(default)d)') tf.app.flags.DEFINE_integer('log-frequency', 10, 'Number of steps between logging results to the console and saving summaries (default: %(default)d)') tf.app.flags.DEFINE_integer('save-model', 1000, 'Number of steps between model saves (default: %(default)d)') # Optimisation hyperparameters
tensorflow.app.flags.DEFINE_integer
4,767
import tensorflow as tf if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tensorflow.distribute.cluster_resolver.TPUClusterResolver
4,768
import tensorflow as tf saver1 = tf.train.Saver({"v1": v1}, name="saver1") tf.add_to_collection("savers", saver0) tf.add_to_collection("savers", saver1) tf.initialize_all_variables().run()
tensorflow.add_to_collection
4,769
import tensorflow as tf grad, = tf.gradients(loss, x)
tensorflow.gradients
4,770
import tensorflow as tf

        with tf.variable_scope('loss_' + self._tag):
            # RPN, class loss
            rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
            rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
            # get the indices of foreground and background anchors
            rpn_select = tf.where(tf.not_equal(rpn_label, -1))
            rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
            rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
            rpn_cross_entropy = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

            # RPN, bbox loss
tensorflow.gather
4,771
import tensorflow as tf def contra_step_lossV2(pred, tgt): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2)
tensorflow.split
4,772
import tensorflow as tf cached_dir = FLAGS.cached_dir if not cached_dir: cached_dir = FLAGS.output_dir train_file = os.path.join(cached_dir, task_name + "_train.tf_record") if not tf.gfile.Exists(train_file): classifier_utils.file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file, task_name)
tensorflow.gfile.Exists
4,773
import tensorflow as tf d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all if mask is not None: mask = tf.equal(mask, tf.ones_like(mask)) key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T]
tensorflow.ones_like
4,774
import tensorflow as tf # (gamma (Dale) or normal (non-Dale) initialization) self.W_rec = \ tf.get_variable( 'W_rec', [N_rec, N_rec], initializer=W_rec_initializer, trainable=self.W_rec_train) # Output weight matrix: # (uniform initialization as in pycog) self.W_out = tf.get_variable('W_out', [N_out, N_rec], initializer=W_out_initializer, trainable=self.W_out_train) # Recurrent bias: self.b_rec = tf.get_variable('b_rec', [N_rec], initializer=b_rec_initializer, trainable=self.b_rec_train) # Output bias: self.b_out = tf.get_variable('b_out', [N_out], initializer=b_out_initializer, trainable=self.b_out_train) # ------------------------------------------------ # Non-trainable variables: # Overall connectivity and Dale's law matrices # ------------------------------------------------ # Recurrent Dale's law weight matrix: self.Dale_rec = tf.get_variable('Dale_rec', [N_rec, N_rec], initializer=tf.constant_initializer(self.dale_rec),
tensorflow.get_variable
4,775
import tensorflow as tf if '_' in self.ent_coef: init_value = float(self.ent_coef.split('_')[1]) assert init_value > 0., "The initial value of ent_coef must be greater than 0" self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32, initializer=np.log(init_value).astype(np.float32)) self.ent_coef = tf.exp(self.log_ent_coef) else: # Force conversion to float # this will throw an error if a malformed string (different from 'auto') # is passed self.ent_coef = float(self.ent_coef) with tf.variable_scope("target", reuse=False): # Create the value network _, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph, create_qf=False, create_vf=True) self.value_target = value_target if self.n_step: _,_,value_target_n = self.policy_tf.make_critics(self.processed_next_obs_ph_n, create_qf=False, create_vf=True,reuse=True) self.value_target_n = value_target_n with tf.variable_scope("loss", reuse=False): # Take the min of the two Q-Values (Double-Q Learning) min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
tensorflow.variable_scope
4,776
import tensorflow as tf # Compare against the true gradient computed by the tape with tf.GradientTape() as tape:
tensorflow.GradientTape
4,777
import tensorflow as tf } p.input_space_id = problem.SpaceID.IMAGE p.target_space_id = problem.SpaceID.IMAGE NetworkOutput = collections.namedtuple( "NetworkOutput", "policy, value, action_postprocessing") # TODO(koz4k): Translate it to T2TModel or remove. def feed_forward_gaussian_fun(action_space, config, observations): """Feed-forward Gaussian.""" if not isinstance(action_space, gym.spaces.box.Box): raise ValueError("Expecting continuous action space.") mean_weights_initializer = tf.initializers.variance_scaling( scale=config.init_mean_factor) logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10) flat_observations = tf.reshape(observations, [ tf.shape(observations)[0], tf.shape(observations)[1], functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)]) with tf.variable_scope("network_parameters"): with tf.variable_scope("policy"): x = flat_observations for size in config.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) mean = tf.layers.dense( x, action_space.shape[0], activation=tf.tanh,
tensorflow.initializers.variance_scaling
4,778
from tensorflow.contrib import layers with variable_scope.variable_scope( parent_scope + "/logits", values=[net], partitioner=hidden_layer_partitioner) as scope: logits = layers.fully_connected( net, head.logits_dimension, activation_fn=None,
tensorflow.contrib.layers.fully_connected
4,779
import tensorflow as tf with tf.variable_scope('soft_replacement'): self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] self.sess = tf.Session() self.sess.run(tf.global_variables_initializer()) current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") train_log_dir = 'logs/' + current_time
tensorflow.global_variables_initializer
4,780
import tensorflow as tf with tf.variable_scope("stats") as scope: for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i]) tf.summary.scalar("before_loss", model.before_loss) tf.summary.scalar("after_loss", model.after_loss) if hparams.predict_linear: tf.summary.scalar("linear_loss", model.linear_loss) for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) tf.summary.scalar("regularization_loss", model.regularization_loss) tf.summary.scalar("stop_token_loss", model.stop_token_loss) tf.summary.scalar("loss", model.loss) tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [tf.norm(grad) for grad in model.gradients] tf.summary.histogram("gradient_norm", gradient_norms)
tensorflow.summary.histogram
4,781
import tensorflow as tf """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0]
tensorflow.contrib.cudnn_rnn.RNNParamsSaveable
4,782
from tensorflow.python.layers import pooling as pooling_layers input_layer = self.top_layer else: self.top_size = num_channels_in name = 'apool' + str(self.counts['apool']) self.counts['apool'] += 1 pool = pooling_layers.average_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name)
tensorflow.python.layers.pooling.average_pooling2d
4,783
import tensorflow as tf

def pointer(inputs, state, hidden, mask, scope="pointer"):
    with tf.variable_scope(scope):
        u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [1, tf.shape(inputs)[1], 1]), inputs], axis=2)  # [N,PL,2d]
        s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope="s0"))
        s = dense(s0, 1, use_bias=False, scope="s")
        s1 = softmax_mask(tf.squeeze(s, [2]), mask)  # [N,PL]
        a = tf.expand_dims(tf.nn.softmax(s1), axis=2)  # [N,PL,1]
        res = tf.reduce_sum(a * inputs, axis=1)
        return res, s1


# attention_sum probability
def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"):
    with tf.variable_scope(scope):
        d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
        s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0"))
        s = dense(s0, 1, use_bias=False, scope="s")
        s1 = softmax_mask(tf.squeeze(s, [2]), mask)
        a = tf.expand_dims(tf.nn.softmax(s1), axis=2)
        res = tf.reduce_sum(a * memory, axis=1)
        return res


def dot_attention(inputs, memory, mask, hidden, keep_prob=1.0, is_train=None, scope="dot_attention"):
    with tf.variable_scope(scope):
tensorflow.variable_scope
4,784
import tensorflow as tf 'Wether we will train on cloud.') tf.app.flags.DEFINE_boolean(
tensorflow.app.flags.DEFINE_boolean
4,785
import tensorflow as tf label_ids = features["label_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) if use_tpu: def tpu_scaffold():
tensorflow.trainable_variables
4,786
import tensorflow as tf output_weight = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02) ) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer() ) with tf.variable_scope("loss"): if is_training: output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) output_layer = tf.reshape(output_layer, [-1, hidden_size]) logits = tf.matmul(output_layer, output_weight, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11]) log_probs = tf.nn.log_softmax(logits, axis=-1) # labels = tf.cast(labels,dtype=tf.float32) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_sum(per_example_loss) return (loss, per_example_loss, logits) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
tensorflow.reshape
4,787
import tensorflow as tf to the length of each input Returns: A tensor of shape [T, B] that contains the loss per example, per time step. """ with tf.name_scope("cross_entropy_sequence_loss"): losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets) loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0])) losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0]) return losses def dice_loss(predictions, targets, weights=1., name='dice_loss'):
tensorflow.to_int32
4,788
import tensorflow as tf model_lam=3e-4, activation=output_activation, name="Out") x = out_layer(x) regularization += out_layer.regularization return x, regularization def mlp_dropout(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None, dropout_rate=0): for h in hidden_sizes[:-1]: x = tf.layers.dense(x, units=h, activation=activation) x = tf.layers.dropout(x, rate=dropout_rate, training=True) x = tf.layers.dropout(x, rate=dropout_rate, training=True) return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation) def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None): for h in hidden_sizes[:-1]: x = tf.layers.dense(x, units=h, activation=activation) return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation) def get_vars(scope):
tensorflow.layers.dropout
4,789
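tf.layers.dropout zeroes a fraction rate of the units and is only active when training=True; hard-coding training=True, as above, keeps dropout on at inference time (the usual Monte-Carlo-dropout trick). A minimal sketch:

import tensorflow as tf  # TF 1.x

x = tf.ones([1, 10])
# Roughly half the units are zeroed; the rest are scaled by 1/(1 - rate).
y = tf.layers.dropout(x, rate=0.5, training=True)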
from tensorflow.python.ops import math_ops target = target[self.name] if isinstance(target, dict) else target loss_unweighted = self._loss_fn(logits, target) weight_tensor = self.get_weight_tensor(features) if weight_tensor is None: return math_ops.reduce_mean(loss_unweighted, name="loss") else: loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,)) loss_weighted = math_ops.mul( loss_unweighted, array_ops.reshape(weight_tensor, shape=(-1,))) return math_ops.div( math_ops.reduce_sum(loss_weighted), math_ops.to_float(math_ops.reduce_sum(weight_tensor)), name="loss") class _RegressionTargetColumn(_TargetColumn): """_TargetColumn for regression.""" def __init__(self, loss_fn, label_name, weight_column_name, target_dimension): super(_RegressionTargetColumn, self).__init__( loss_fn=loss_fn, num_label_columns=target_dimension, label_name=label_name, weight_column_name=weight_column_name)
tensorflow.python.ops.math_ops.reduce_sum
4,790
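The weighted loss above works out to sum(weight * loss) / sum(weight). A small sketch using the same internal math_ops module (the public tf.reduce_sum / tf.div wrap the same ops); the numbers are made up:

import tensorflow as tf
from tensorflow.python.ops import math_ops  # internal home of tf.reduce_sum, tf.div, ...

losses = tf.constant([1.0, 2.0, 3.0])
weights = tf.constant([1.0, 0.0, 1.0])

weighted_mean = math_ops.div(
    math_ops.reduce_sum(losses * weights),
    math_ops.to_float(math_ops.reduce_sum(weights)))  # (1 + 3) / 2 = 2.0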
import tensorflow as tf # print('vnet scope T/R', is_train, reuse_unet) encoderscope = 'unet_enc' decoderscope = 'unet_dec' reuse_encoder = reuse_unet reuse_decoder = reuse_unet print([encoderscope, ' ', decoderscope]) # ===============================================================================ENCODER with tf.variable_scope(encoderscope) as scope: if reuse_encoder: scope.reuse_variables() with tf.variable_scope('color_encoder'): X = encoder_conf('eI', I[:, :, :, :-1], 96, 5, 1, norm, reuse_encoder, is_train, self.args.dropout) # 128 > 124 X0 = encoder_conf('d0', X, 96, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 124 > 62 @2 X = encoder_conf('e1', X0, 128, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 62 > 60 X_EARLY = X X1 = encoder_conf('d1', X, 128, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 60 > 30 @4 X = encoder_conf('e2', X1, 256, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 30 > 28 X2 = encoder_conf('d2', X, 256, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 28 > 14 @8 X = encoder_conf('e3', X2, 512, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 14 > 12 X_MIDDLE = X # ===============================================================================DECODER
tensorflow.variable_scope
4,791
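The encoder above wraps its layers in tf.variable_scope and calls scope.reuse_variables() when the same weights should be shared across calls. A minimal sketch of that reuse pattern (layer sizes and names are illustrative):

import tensorflow as tf  # TF 1.x

def encoder(x, reuse):
    with tf.variable_scope('unet_enc') as scope:
        if reuse:
            scope.reuse_variables()  # second call reuses the same kernel/bias
        return tf.layers.dense(x, 4, name='e1')

x = tf.placeholder(tf.float32, [None, 8])
a = encoder(x, reuse=False)  # creates unet_enc/e1/kernel
b = encoder(x, reuse=True)   # reuses it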
from tensorflow.python.ops import variable_scope update_op: An operation that updates the underlying variables appropriately. Raises: ValueError: If `labels` and `predictions` are of different sizes, or if `weights` is the wrong size, or if either `metrics_collections` or `updates_collections` are not a `list` or `tuple`. """ with variable_scope.variable_scope(name, 'pearson_r', [predictions, labels]): predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) cov, update_cov = streaming_covariance( predictions, labels, weights=weights, name='covariance') var_predictions, update_var_predictions = streaming_covariance(
tensorflow.python.ops.variable_scope.variable_scope
4,792
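variable_scope.variable_scope is the internal module behind tf.variable_scope; the metric uses the (name, default_name, values) form so its state lands under one scope even when name is None. A sketch:

import tensorflow as tf
from tensorflow.python.ops import variable_scope  # internal alias of tf.variable_scope

x = tf.constant([1.0, 2.0])
# name=None falls back to default_name='pearson_r'; values documents the inputs.
with variable_scope.variable_scope(None, 'pearson_r', [x]) as scope:
    total = tf.get_variable('total', shape=[], initializer=tf.zeros_initializer())
    print(scope.name)  # 'pearson_r'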
import tensorflow as tf # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = tf.group(*update_target_expr) # Create callable functions train = U.function( inputs=[ obs_t_input,
tensorflow.group
4,793
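tf.group bundles the per-variable assign ops into one op, so a single session.run updates the whole target network. A minimal sketch with one variable per network (names are made up):

import tensorflow as tf  # TF 1.x

q_vars = [tf.get_variable('q/w', initializer=1.0)]
target_vars = [tf.get_variable('target_q/w', initializer=0.0)]

update_target = tf.group(*[t.assign(s) for t, s in zip(target_vars, q_vars)])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_target)  # copies q -> target_q in one call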
import tensorflow as tf for bi in range(b): with tf.variable_scope('block_{}'.format(bi)): idx1 = cell_arch[bi][0] op1 = cell_arch[bi][1] idx2 = cell_arch[bi][2] op2 = cell_arch[bi][3] with tf.variable_scope('X1'): X1 = self._add_op_dynamic(cell_inputs, blocks, idx1, op1, w, h, block_ch, is_train=is_train) X1 = self._add_drop_path(X1, drop_path_keep_prob) with tf.variable_scope('X2'): X2 = self._add_op_dynamic(cell_inputs, blocks, idx2, op2, w, h, block_ch, is_train=is_train) X2 = self._add_drop_path(X2, drop_path_keep_prob) X = tf.add_n([X1, X2]) blocks.append(X) (X, comb_ch) = self._combine_cell_blocks_dynamic(cell_inputs, blocks, cell_arch, w, h, block_ch, is_train) X = tf.reshape(X, (-1, w, h, comb_ch)) # Sanity shape check layers.append((X, w, h, comb_ch)) def _add_static_cell(self, cell_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train=False, is_reduction=False): b = CELL_NUM_BLOCKS # Calibrate inputs as necessary to last input layer's dimensions and add them to hidden states cell_inputs = [layers[-2] if len(layers) > 1 else layers[-1], layers[-1]]
tensorflow.add_n
4,794
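tf.add_n sums a list of same-shaped tensors, which is how the two block outputs X1 and X2 are merged above. A tiny sketch:

import tensorflow as tf  # TF 1.x

x1 = tf.ones([2, 3])
x2 = tf.fill([2, 3], 2.0)
x = tf.add_n([x1, x2])  # elementwise sum; every entry equals 3.0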
import tensorflow as tf shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples,seq_length], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
tensorflow.constant
4,795
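The eval input_fn above materialises every feature as an in-memory tf.constant before batching. A small sketch of that pattern with made-up data:

import tensorflow as tf  # TF 1.x

num_examples, seq_length = 4, 6
all_input_ids = [[0] * seq_length for _ in range(num_examples)]

d = tf.data.Dataset.from_tensor_slices({
    "input_ids": tf.constant(all_input_ids,
                             shape=[num_examples, seq_length],
                             dtype=tf.int32),
})
d = d.batch(batch_size=2, drop_remainder=True)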
import tensorflow as tf "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range), ) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer() ) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1] )
tensorflow.reduce_mean
4,796
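The next-sentence loss averages the per-example cross-entropy over the batch with tf.reduce_mean. A minimal numeric sketch:

import tensorflow as tf  # TF 1.x

log_probs = tf.log(tf.constant([[0.9, 0.1], [0.2, 0.8]]))
labels = tf.constant([0, 1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)

per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)  # [-log 0.9, -log 0.8]
loss = tf.reduce_mean(per_example_loss)                                 # batch average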
import tensorflow as tf opt=None): self.embedding_size = embedding_size self.maxnorm = maxnorm self.batch_pos_cnt = batch_pos_cnt self.max_iter = max_iter self.model_type = model_type self.add_bias = add_bias if opt is None: opt = tf.train.AdagradOptimizer(1.0) self.opt = opt self.sess = None self.train_step = None self.post_step = None self.graph = tf.Graph() with self.graph.as_default(): self.head_input = tf.placeholder(tf.int32, shape=[None])
tensorflow.train.AdagradOptimizer
4,797
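tf.train.AdagradOptimizer(1.0) is the fallback optimizer when none is passed in. A minimal sketch of building it and running one step (the toy loss is made up):

import tensorflow as tf  # TF 1.x

w = tf.get_variable('w', initializer=5.0)
loss = tf.square(w)

opt = tf.train.AdagradOptimizer(1.0)  # learning_rate=1.0, as in the snippet
train_step = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_step)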
import tensorflow as tf def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor
tensorflow.range
4,798
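gather_indexes converts per-example positions into indices over a flattened [batch*seq_len, width] tensor by adding a per-row offset built with tf.range. A small numeric sketch:

import tensorflow as tf  # TF 1.x

batch_size, seq_length = 2, 5
positions = tf.constant([[1, 3], [0, 2]])

flat_offsets = tf.reshape(
    tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])  # [[0], [5]]
flat_positions = tf.reshape(positions + flat_offsets, [-1])
# flat_positions -> [1, 3, 5, 7]: row 1's positions are shifted by seq_length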
import tensorflow as tf
    Returns:
      A tuple containing:
      - Input tensor of the restored model.
      - Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - new session of the restored model.
    """
    model_dir = tempfile.mkdtemp()
    saver.save(sess, model_dir)
    # Reset the graph.
    tf.reset_default_graph()
    x, prediction, output_class = self.buildModel(lstm_layer, is_dynamic_rnn)
    new_sess = tf.compat.v1.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess
  def getInferenceResult(self, x, output_class, sess):
    """Get inference result given input tensor and output tensor.
    Args:
      x: The input tensor.
      output_class: The output tensor.
tensorflow.reset_default_graph
4,799
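tf.reset_default_graph() discards the current default graph so the restored model can be rebuilt from scratch without name collisions, which is what the helper above does before calling buildModel again. A minimal sketch:

import tensorflow as tf  # TF 1.x

w = tf.get_variable('w', initializer=1.0)
tf.reset_default_graph()  # the old graph (and 'w') is gone

# A new variable with the same name no longer collides.
w2 = tf.get_variable('w', initializer=2.0)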