query (string, 9 – 9.05k chars) | document (string, 10 – 222k chars) | negatives (list, 19 – 20 items) | metadata (dict) |
---|---|---|---|
Create a new branch that merges the clusters of two or more branches that belong to different clusters.
|
def create_merge_branch(self, cid, merge_child, occluded_parents, fathers, line_num, point_list):
    saved_parent_nodes = []
    for oparent in occluded_parents:
        parent_node = [pnode for pnode in fathers if pnode.flat_children == oparent][0]
        parent_node.cluster_id = cid  # TODO: questionable cluster-id reassignment?
        if parent_node not in saved_parent_nodes:
            saved_parent_nodes.append(parent_node)
    child_points = None
    for node_points in point_list:  # get the children in Point class form
        try:  # a row of Points exposes flat_value per point ...
            flat_children = [cpoint.flat_value for cpoint in node_points]
        except TypeError:  # ... a single Point is not iterable
            flat_children = node_points.flat_value
        if merge_child == flat_children:
            child_points = node_points
            break
    if isinstance(merge_child, list):
        merge_child = tuple(merge_child)  # branch keys must be hashable
    merge_node = Node(cid, merge_child, saved_parent_nodes, line_num, child_points)
    Graph.add_node(cid, merge_child, merge_node)  # register the new branch
    for par in saved_parent_nodes:  # close the merged parent branches
        bra_id = par.branch_id
        if bra_id not in Graph.closed_keys:
            Graph.closed_keys.append(bra_id)
    return merge_node
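
For context, a minimal sketch of the Node and Graph interfaces this method appears to rely on. Only the attribute and method names used above are inferred from the code; everything else (constructor layout, defaults) is hypothetical.

class Node:
    # Hypothetical reconstruction; field names inferred from the usage above.
    def __init__(self, cluster_id, branch_id, parents, line_num, points):
        self.cluster_id = cluster_id
        self.branch_id = branch_id
        self.parents = parents            # parent Node(s); a list for merge nodes
        self.line_num = line_num
        self.points = points              # Point row backing this node
        self.flat_children = branch_id    # flat value(s) used for lookups

class Graph:
    # Hypothetical class-level registry matching the Graph.add_node /
    # Graph.closed_keys / Graph.cluster_dict usage above.
    cluster_dict = {}        # cluster_id -> {branch_id: [Node, ...]}
    closed_keys = []         # branch ids closed by a split or merge
    lame_duck_parents = []

    @classmethod
    def add_node(cls, cid, branch_id, node):
        cls.cluster_dict.setdefault(cid, {}).setdefault(branch_id, []).append(node)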
|
[
"def merge(c1, c2):\n global number_of_effective_clusters, all_clusters\n number_of_effective_clusters += 1\n new_cluster = Cluster(number_of_effective_clusters)\n new_cluster.guest_ids = list(set(c1.guest_ids + c2.guest_ids))\n new_cluster.mean = get_mean(new_cluster.guest_ids)\n all_clusters.append(new_cluster)\n\n for i, o in enumerate(all_clusters):\n if o.id == c1.id:\n del all_clusters[i]\n\n for i, o in enumerate(all_clusters):\n if o.id == c2.id:\n del all_clusters[i]",
"def create_split_branch(self, cid, child_father, father_node, line_num, point_list, lame_parent=0):\n\n child, parent = child_father\n cid_father = father_node.cluster_id\n bid_father = father_node.branch_id\n\n # create the new branch key from the flat child value\n if type(child)==int: # if its an int\n new_branch_id = child\n else:\n new_branch_id = tuple(child) # keys must be hashable\n\n # pull out points from diagonal clustered point row\n for point_class_list in point_list:\n\n try:\n if child == point_class_list.flat_value:\n child_points = point_class_list\n\n except:\n if child == [x.flat_value for x in point_class_list]:\n child_points = point_class_list\n\n new_node = Node(cid, new_branch_id, father_node, line_num, child_points)\n\n if bid_father not in Graph.closed_keys:\n\n Graph.closed_keys.append(bid_father)\n\n if lame_parent == 1:\n if father_node not in Graph.lame_duck_parents:\n Graph.lame_duck_parents.append(father_node)\n\n # add the new orphan node\n Graph.cluster_dict[cid_father][new_branch_id] = [new_node]\n return new_node",
"def create_cluster_branch(self, orphan, line_num): # create cluster\n\n try:\n cluster_id = orphan.flat_value\n except:\n cluster_id = tuple([x.flat_value for x in orphan])\n\n # create a node instance and add it to the graph\n orphan_node = Node(cluster_id, cluster_id, cluster_id, line_num, orphan)\n Graph.cluster_dict[cluster_id][cluster_id] = [orphan_node]",
"def create_new_branch(self, newbranch):\n # self.update(self.branch)\n try:\n self.hg_branch(newbranch)\n return 'succes'\n except Exception as e:\n print(e)\n return 'failure'",
"def _create_branch(repo, from_branch, to_branch, dry_run, force, patches):\n env = os.environ.copy()\n\n if git.branch_exists(repo, to_branch, env):\n click.echo(f\"{to_branch} already exists, skipping...\")\n sys.exit(0)\n\n snap_basename = urlparse(repo)\n snap_basename = Path(snap_basename.path).name\n if snap_basename.endswith(\".git\"):\n snap_basename = snap_basename.rstrip(\".git\")\n sh.rm(\"-rf\", snap_basename)\n sh.git.clone(repo, branch=from_branch, _env=env)\n sh.git.config(\"user.email\", \"cdkbot@gmail.com\", _cwd=snap_basename)\n sh.git.config(\"user.name\", \"cdkbot\", _cwd=snap_basename)\n sh.git.checkout(\"-b\", to_branch, _cwd=snap_basename)\n\n snapcraft_fn = Path(snap_basename) / \"snapcraft.yaml\"\n snapcraft_fn_tpl = Path(snap_basename) / \"snapcraft.yaml.in\"\n if not snapcraft_fn_tpl.exists():\n click.echo(f\"{snapcraft_fn_tpl} not found\")\n sys.exit(1)\n\n # Apply patches\n patches_list = []\n if patches:\n patches_path = Path(patches)\n if patches_path.exists():\n click.echo(\"Patches found, applying.\")\n patches_map = yaml.safe_load(patches_path.read_text(encoding=\"utf8\"))\n # TODO: cleanup\n if \"all\" in patches_map:\n for patch_fn in patches_map[\"all\"]:\n patch_fn = Path(patch_fn).absolute()\n shared_path = str(Path(\"shared\") / patch_fn.parts[-1])\n sh.cp(str(patch_fn), str(shared_path), _cwd=snap_basename)\n patches_list.append(shared_path)\n sh.git.add(shared_path, _cwd=snap_basename)\n if to_branch.lstrip(\"v\") in patches_map:\n for patch_fn in patches_map[to_branch.lstrip(\"v\")]:\n patch_fn = Path(patch_fn).absolute()\n shared_path = str(Path(\"shared\") / patch_fn.parts[-1])\n sh.cp(str(patch_fn), str(shared_path), _cwd=snap_basename)\n patches_list.append(shared_path)\n sh.git.add(shared_path, _cwd=snap_basename)\n\n snapcraft_yml = snapcraft_fn_tpl.read_text()\n snapcraft_yml = _render(\n snapcraft_fn_tpl,\n {\"snap_version\": to_branch.lstrip(\"v\"), \"patches\": patches_list},\n )\n snapcraft_fn.write_text(snapcraft_yml)\n if not dry_run:\n sh.git.add(\".\", _cwd=snap_basename)\n sh.git.commit(\"-m\", f\"Creating branch {to_branch}\", _cwd=snap_basename)\n sh.git.push(repo, to_branch, _cwd=snap_basename, _env=env)",
"def merge_cnvs_clusters(vcfs,temp_dir, output_directory, cluster_merge_slop=0):\n\n # Quality is lierally the sum of the previous VCF files.\n\n basenames = [os.path.basename(x) for x in vcfs]\n __bedtools_duplication_string__ = \"\"\"awk '{{if ($0 !~ /^#/ && $6 > 100){{ split($8,a,\";\"); split(a[2],b,\"=\");print $1,$2,$2+b[2],$3,FILENAME}}}}' {0} | grep duplication | tr ' ' '\\t' | sort -k 1,1 -k 2,2g | bedtools cluster -i - -d {1} > tmp_clusters_duplication.txt\"\"\" \n __bedtools_deletion_string__ = \"\"\"awk '{{if ($0 !~ /^#/ && $6 > 100){{ split($8,a,\";\"); split(a[2],b,\"=\");print $1,$2,$2+b[2],$3,FILENAME}}}}' {0} | grep deletion | tr ' ' '\\t' | sort -k 1,1 -k 2,2g | bedtools cluster -i - -d {1} > tmp_clusters_deletion.txt\"\"\" \n __bedtools_all__= \"\"\"awk '{{if ($0 !~ /^#/ && $6 > 100 ){{ split($8,a,\";\"); split(a[2],b,\"=\");print $1,$2,$2+b[2],$3,FILENAME}}}}' {0} |tr ' ' '\\t' | sort -k 1,1 -k 2,2g | bedtools cluster -i - -d {1} > tmp_clusters.txt\"\"\" \n bedtools_cmd = __bedtools_deletion_string__.format(\" \".join(vcfs), cluster_merge_slop)\n subprocess.check_call(bedtools_cmd, shell=True)\n bedtools_cmd = __bedtools_duplication_string__.format(\" \".join(vcfs), cluster_merge_slop)\n subprocess.check_call(bedtools_cmd, shell=True)\n bedtools_cmd = __bedtools_all__.format(\" \".join(vcfs), cluster_merge_slop)\n subprocess.check_call(bedtools_cmd, shell=True)\n all_vcfs = {}\n __vcf_sort__ =\"vcf-sort {0} | bgzip -c > {0}.gz && tabix -fp vcf {0}.gz\" \n for vcf_f in vcfs:\n vcf_sort_command = __vcf_sort__.format(vcf_f)\n subprocess.check_call(vcf_sort_command,shell=True)\n all_vcfs[vcf_f] = vcf.VCFSimple(vcf_f + \".gz\")\n # Ok now we have all the VCFs in this format.\n # Fix final files\n try:\n os.mkdir(os.path.join(output_directory,\"vcfs\"))\n except OSError:\n pass\n _process_clustering(\"tmp_clusters_duplication.txt\",all_vcfs,os.path.join(output_directory,\"vcfs\",\"duplications.vcf\"))\n _process_clustering(\"tmp_clusters_deletion.txt\",all_vcfs, os.path.join(output_directory, \"vcfs\",\"deletions.vcf\"))\n _process_clustering(\"tmp_clusters.txt\",all_vcfs, os.path.join(output_directory,\"vcfs\", \"all.vcf\"))",
"def merge(cluster1, cluster2):\n cluster1.extend(cluster2.g_num, cluster2.g_id, cluster2.g_ra, cluster2.g_dec, cluster2.g_z, cluster2.g_x, cluster2.g_y)",
"def merge_clusters(self, c1, c2, new_name=None):\n if new_name is None:\n new_name = c1\n\n new_clus = self.clu2elm_dict[c1] | self.clu2elm_dict[c2]\n del self.clu2elm_dict[c1]\n del self.clu2elm_dict[c2]\n\n self.clu2elm_dict[new_name] = new_clus\n self.from_clu2elm_dict(self.clu2elm_dict)\n return self",
"def test_branch_same():\n atom = ATOMClassifier(X10, y10, random_state=1)\n atom.branch = \"master\"\n assert atom.branch.name == \"master\"",
"def _merge_clusters(self, cluster_to_preference_set, jdist_table, cluster_a, cluster_b):\n preference_set_a = cluster_to_preference_set[cluster_a]\n del cluster_to_preference_set[cluster_a]\n preference_set_b = cluster_to_preference_set[cluster_b]\n del cluster_to_preference_set[cluster_b]\n jdist_table.remove_item(cluster_a)\n jdist_table.remove_item(cluster_b)\n\n if cluster_a < cluster_b:\n new_cluster = cluster_a + cluster_b\n else:\n new_cluster = cluster_b + cluster_a\n new_preference_set = preference_set_a & preference_set_b\n cluster_to_preference_set[new_cluster] = new_preference_set\n jdist_table.add_item(new_cluster, new_preference_set)",
"def test_create_branch():",
"def create_leaf_clusters(module, CHANGED_FLAG, task, msg):\n output = ''\n non_clustered_leafs = find_non_clustered_leafs(module, task, msg)\n non_clustered_leafs_count = 0\n mod = 'ospf'\n cli = pn_cli(module)\n clicopy = cli\n\n while non_clustered_leafs_count == 0:\n if len(non_clustered_leafs) == 0:\n non_clustered_leafs_count += 1\n else:\n node1 = non_clustered_leafs[0]\n non_clustered_leafs.remove(node1)\n\n cli = clicopy\n cli += ' switch %s lldp-show ' % node1\n cli += ' format sys-name no-show-headers '\n system_names = run_command(module, cli, task, msg).split()\n system_names = list(set(system_names))\n\n cli = clicopy\n cli += ' switch %s fabric-node-show ' % node1\n cli += ' format name no-show-headers '\n nodes_in_fabric = run_command(module, cli, task, msg).split()\n nodes_in_fabric = list(set(nodes_in_fabric))\n\n for system in system_names:\n if system not in nodes_in_fabric:\n system_names.remove(system)\n\n terminate_flag = 0\n node_count = 0\n while (node_count < len(system_names)) and (terminate_flag == 0):\n node2 = system_names[node_count]\n if node2 in non_clustered_leafs:\n # Cluster creation\n cluster_name = node1 + '-to-' + node2 + '-cluster'\n output1, CHANGED_FLAG = create_cluster(module, node2, name, node1, node2, mod, CHANGED_FLAG, task, msg)\n output += output1\n\n non_clustered_leafs.remove(node2)\n terminate_flag += 1\n\n node_count += 1\n\n return CHANGED_FLAG, output",
"def merge(self, branch_a, branch_b, **kwargs):\n raise NotImplementedError()",
"def merge_nodes(self, node1, node2, initial_commit_graph, df):\n\n new_commit_graph = copy.deepcopy(initial_commit_graph)\n\n # Etapes pour merger les nodes\n # 1. Get list of out connections with a dict\n # eg. {node3 : 5, node4 : 6}\n # 2. Get list of in connections with a dict\n # 3. Merge nodes\n\n # 1 and 2\n\n connections = {}\n\n index = list(df.index)\n new_node_row = []\n\n for column in df.columns:\n if df.at[node1, column] == 1 or df.at[node2, column] == 1:\n new_node_row.append(1)\n for neighbor in index:\n if df.at[neighbor, column] == 1 and neighbor not in [node1, node2]:\n if neighbor not in connections:\n connections[neighbor] = 1\n else:\n connections[neighbor] += 1\n else:\n new_node_row.append(0)\n\n new_node_row = [new_node_row]\n\n\n '''\n for neighbor in initial_commit_graph.adj[node1]:\n if neighbor != node2:\n if neighbor not in connections:\n connections[neighbor] = initial_commit_graph.edges[node1, neighbor]['number_modifications_same_commit']\n else:\n connections[neighbor] += initial_commit_graph.edges[node1, neighbor]['number_modifications_same_commit']\n \n for neighbor in initial_commit_graph.adj[node2]:\n if neighbor != node1:\n if neighbor not in connections:\n connections[neighbor] = initial_commit_graph.edges[node2, neighbor]['number_modifications_same_commit']\n else:\n connections[neighbor] += initial_commit_graph.edges[node2, neighbor]['number_modifications_same_commit']\n '''\n\n\n new_commit_graph.remove_node(node1)\n new_commit_graph.remove_node(node2)\n\n new_node = f'{node1}:{node2}'\n new_commit_graph.add_node(new_node)\n\n new_row = pd.DataFrame(new_node_row, columns=list(df.columns), index=[new_node])\n new_df = df.drop(labels=[node1, node2])\n new_df = new_df.append(new_row)\n\n for neighbor, num_mod in connections.items():\n new_commit_graph.add_edge(new_node, neighbor)\n new_commit_graph.edges[new_node, neighbor]['number_modifications_same_commit'] = num_mod\n\n \n return new_commit_graph, new_df",
"def create_cluster(module, switch, name, node1, node2, mod, CHANGED_FLAG, task, msg):\n cli = pn_cli(module)\n clicopy = cli\n\n if mod == 'l3-vrrp' or mod == 'l2-vrrp':\n spine_list = module.params['pn_spine_list']\n leaf_list = module.params['pn_leaf_list']\n\n cli += ' switch %s system-settings-show ' % node1\n cli += ' format auto-trunk '\n status = run_command(module, cli, task, msg).split()[1]\n if status != 'on':\n if (node1 in leaf_list and node2 in leaf_list) or \\\n (node1 in spine_list and node2 in spine_list):\n\n ports = get_ports(module, node1, node2, task, msg)\n trunk_name = node1 + '-' + node2 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node1, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n ports = get_ports(module, node2, node1, task, msg)\n trunk_name = node2 + '-' + node1 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node2, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n cli = clicopy\n\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = list(set(run_command(module, cli, task, msg).split()))\n if name not in cluster_list:\n cli = clicopy\n cli += ' switch %s cluster-create name %s ' % (switch, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n if 'Success' in run_command(module, cli, task, msg):\n CHANGED_FLAG.append(True)\n return ' %s: Created %s \\n' % (switch, name), CHANGED_FLAG\n return '', CHANGED_FLAG",
"def merge_similar_branches(branches, orientation_threshold=0.7, distance_threshold=8, branch_number_threshold=10):\n branches.sort(key=lambda x: -len(x))\n i = 0\n while i < len(branches) and len(branches[i]) > branch_number_threshold:\n i += 1\n\n while i < len(branches):\n j = i + 1\n while j < len(branches):\n br_1 = branches[i]\n br_2 = branches[j]\n if np.linalg.norm(br_1.center - br_2.center) < distance_threshold and \\\n sum(br_1.direction * br_2.direction) > orientation_threshold:\n br_1.merge(br_2)\n del branches[j]\n else:\n j += 1\n i += 1\n return branches",
"def join_clusters(self, tuple_cluster):\n c1 = tuple_cluster[0]\n c2 = tuple_cluster[1]\n self.clusters.remove(c1)\n self.clusters.remove(c2)\n self.clusters.append(c1 + c2)",
"def insert_binary_branch(self, idx, split):\n assert len(split)==2, 'Does not support more than 2 split, actual number={}'.format(len(split))\n # check if the inputs are valid\n left = split[0]\n right = split[1]\n tops = self._net_graph[idx[0]][idx[1]].top_idx\n assert set(range(len(tops)))-set(left)==set(right), 'The splitting does not form a partition of tops.'\n assert idx[0]>0, 'Cannot create new branches at the bottom.'\n\n # insert a new branch, keep track of the parameters.\n bottoms = self._net_graph[idx[0]-1]\n bottom_idx = [i for i in xrange(len(bottoms)) if idx[1] in bottoms[i].top_idx][0] # a singleton\n branch_idx = bottoms[bottom_idx].top_idx.index(idx[1])\n # create new blobs (columns) at the current layer\n # save original blob\n orig_blob = self._net_graph[idx[0]][idx[1]]\n top_idx = orig_blob.top_idx \n tasks = orig_blob.tasks\n # left column\n self._net_graph[idx[0]][idx[1]] = \\\n NetBlob(top_idx=[top_idx[i] for i in left], tasks=[tasks[i] for i in left])\n # right column\n self._net_graph[idx[0]].append(\n NetBlob(top_idx=[top_idx[i] for i in right], tasks=[tasks[i] for i in right]))\n right_idx = len(self._net_graph[idx[0]])-1\n # add a new branch at the bottom layer\n b_blobs = bottoms[bottom_idx]\n b_blobs.set_tasks(branch_idx=[branch_idx], tasks=[[t for i in left for t in tasks[i]]])\n b_blobs.add_top(top_idx=[right_idx], tasks=[[t for i in right for t in tasks[i]]])\n # log changes\n changes = {}\n # layer i\n # blobs\n changes[(idx[0], right_idx)] = (idx[0],idx[1])\n # branches\n for k1 in xrange(len(left)):\n changes[(idx[0], idx[1], k1)] = (idx[0], idx[1], left[k1])\n for k2 in xrange(len(right)):\n changes[(idx[0], right_idx, k2)] = (idx[0], idx[1], right[k2])\n # layer i-1\n # branches\n changes[(idx[0]-1, bottom_idx, b_blobs.num_tops()-1)] = (idx[0]-1, bottom_idx, branch_idx)\n\n # log the newly created branches. \n new_branches = [self.branch_name_at_i_j_k(idx[0]-1, bottom_idx, branch_idx), \n self.branch_name_at_i_j_k(idx[0]-1, bottom_idx, b_blobs.num_tops()-1)]\n\n return self.to_param_mapping(changes), new_branches",
"def create_clusters(self,skids,matching_scores,method):\r\n similarity = 1\r\n step_size = 0.01\r\n\r\n clusters = list(map(lambda x:[x], skids))\r\n all_clusters = [copy.deepcopy(clusters)]\r\n merges_at = [1]\r\n\r\n #print('Start clusters:',clusters)\r\n\r\n while similarity >= 0:\r\n #Find cluster that will be merged in this round\r\n #merge contains indices of c in clusters\r\n merge = {}\r\n for c1 in clusters:\r\n for c2 in clusters:\r\n #if clusters are identical\r\n if c1 == c2:\r\n continue\r\n all_similarities = []\r\n for neuronA in c1:\r\n #if c1 has already been merged to c2 in previous iteration\r\n #if clusters.index(c2) in merge:\r\n # if clusters.index(c1) in merge[clusters.index(c2)]:\r\n #print('!Skipped redundant merging:',c1,c2)\r\n # continue\r\n #merged = False\r\n for neuronB in c2:\r\n #if merged is True:\r\n # continue\r\n #Calculate average from both comparisons: A -> B and B -> A (will be different!!!!)\r\n avg_matching_score = (matching_scores[str(neuronA)+'-'+str(neuronB)] + matching_scores[str(neuronB)+'-'+str(neuronA)]) / 2\r\n all_similarities.append(avg_matching_score)\r\n\r\n\r\n #Important: for method 'max' (maximal distance), find pair of neurons for which the similarity is minimal\r\n # for method 'min' (minimal distance), find pair of neurons for which the similarity is maximal\r\n if (( method == 'avg' and (sum(all_similarities)/len(all_similarities)) >= similarity )\r\n or ( method == 'max' and min(all_similarities) >= similarity )\r\n or ( method == 'min' and max(all_similarities) >= similarity )):\r\n if clusters.index(c1) not in merge:\r\n merge[clusters.index(c1)] = []\r\n if clusters.index(c2) not in merge[clusters.index(c1)]:\r\n merge[clusters.index(c1)].append(clusters.index(c2))\r\n #merged = True\r\n\r\n if len(merge) != 0:\r\n #Check if multiple clusters need to be merged:\r\n #print('Merge:',merge)\r\n temp_to_be_merged = []\r\n for c1 in merge:\r\n #print('C1:',c1)\r\n exists = []\r\n for c2 in merge[c1]:\r\n for entry in temp_to_be_merged:\r\n if c1 in entry or c2 in entry:\r\n if temp_to_be_merged.index(entry) not in exists:\r\n exists.append(temp_to_be_merged.index(entry))\r\n\r\n #print('Exists:', exists)\r\n\r\n if len(exists) > 0:\r\n temp_to_be_merged[exists[0]].append(c1)\r\n temp_to_be_merged[exists[0]] += merge[c1]\r\n for entry in exists[1:]:\r\n temp_to_be_merged[exists[0]] += temp_to_be_merged[entry]\r\n temp_to_be_merged.remove(temp_to_be_merged[entry])\r\n else:\r\n to_append = [c1]\r\n to_append += merge[c1]\r\n temp_to_be_merged.append(to_append)\r\n\r\n #Make sure each cluster shows up only once in to_be_merged:\r\n to_be_merged = []\r\n for entry in temp_to_be_merged:\r\n to_be_merged.append(list(set(entry)))\r\n\r\n #print('Merging at similarity', similarity,':',to_be_merged,merge)\r\n\r\n temp_clusters = copy.deepcopy(clusters)\r\n\r\n #First merge clusters\r\n for entry in to_be_merged:\r\n for c in entry[1:]:\r\n temp_clusters[entry[0]] += copy.deepcopy(clusters[c])\r\n\r\n #Then delete\r\n for entry in to_be_merged:\r\n for c in entry[1:]:\r\n temp_clusters.remove(clusters[c])\r\n\r\n clusters = copy.deepcopy(temp_clusters)\r\n all_clusters.append(copy.deepcopy(temp_clusters))\r\n merges_at.append(similarity)\r\n\r\n\r\n #print(temp_clusters,'\\n')\r\n\r\n similarity -= step_size\r\n\r\n return all_clusters,merges_at"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
To use this, the node must already be part of a cluster. It is used in splits or merges to create a new branch and close the old ones. Do we create a fatherless node?
|
def create_split_branch(self, cid, child_father, father_node, line_num, point_list, lame_parent=0):
    child, parent = child_father
    cid_father = father_node.cluster_id
    bid_father = father_node.branch_id
    # create the new branch key from the flat child value
    if isinstance(child, int):
        new_branch_id = child
    else:
        new_branch_id = tuple(child)  # keys must be hashable
    # pull the matching points out of the diagonal clustered point row
    child_points = None
    for point_class_list in point_list:
        try:  # a single Point exposes flat_value directly ...
            if child == point_class_list.flat_value:
                child_points = point_class_list
        except AttributeError:  # ... a list of Points does not
            if child == [x.flat_value for x in point_class_list]:
                child_points = point_class_list
    new_node = Node(cid, new_branch_id, father_node, line_num, child_points)
    if bid_father not in Graph.closed_keys:  # close the father branch
        Graph.closed_keys.append(bid_father)
    if lame_parent == 1:
        if father_node not in Graph.lame_duck_parents:
            Graph.lame_duck_parents.append(father_node)
    # add the new orphan node
    Graph.cluster_dict[cid_father][new_branch_id] = [new_node]
    return new_node
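
Both branch constructors probe point_list with a try/except because a row can be either a single Point or a list of Points. A minimal sketch of the Point interface that makes that probing work (hypothetical; only the flat_value attribute is inferred from the code above):

class Point:
    # Hypothetical: only flat_value is inferred from the code above.
    def __init__(self, flat_value):
        self.flat_value = flat_value

# A single Point exposes .flat_value directly, so the try branch succeeds;
# a multi-point row is a plain list, so .flat_value raises AttributeError
# and the except branch compares against [x.flat_value for x in row] instead.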
|
[
"def create_cluster_branch(self, orphan, line_num): # create cluster\n\n try:\n cluster_id = orphan.flat_value\n except:\n cluster_id = tuple([x.flat_value for x in orphan])\n\n # create a node instance and add it to the graph\n orphan_node = Node(cluster_id, cluster_id, cluster_id, line_num, orphan)\n Graph.cluster_dict[cluster_id][cluster_id] = [orphan_node]",
"def create_leaf_clusters(module, CHANGED_FLAG, task, msg):\n output = ''\n non_clustered_leafs = find_non_clustered_leafs(module, task, msg)\n non_clustered_leafs_count = 0\n mod = 'ospf'\n cli = pn_cli(module)\n clicopy = cli\n\n while non_clustered_leafs_count == 0:\n if len(non_clustered_leafs) == 0:\n non_clustered_leafs_count += 1\n else:\n node1 = non_clustered_leafs[0]\n non_clustered_leafs.remove(node1)\n\n cli = clicopy\n cli += ' switch %s lldp-show ' % node1\n cli += ' format sys-name no-show-headers '\n system_names = run_command(module, cli, task, msg).split()\n system_names = list(set(system_names))\n\n cli = clicopy\n cli += ' switch %s fabric-node-show ' % node1\n cli += ' format name no-show-headers '\n nodes_in_fabric = run_command(module, cli, task, msg).split()\n nodes_in_fabric = list(set(nodes_in_fabric))\n\n for system in system_names:\n if system not in nodes_in_fabric:\n system_names.remove(system)\n\n terminate_flag = 0\n node_count = 0\n while (node_count < len(system_names)) and (terminate_flag == 0):\n node2 = system_names[node_count]\n if node2 in non_clustered_leafs:\n # Cluster creation\n cluster_name = node1 + '-to-' + node2 + '-cluster'\n output1, CHANGED_FLAG = create_cluster(module, node2, name, node1, node2, mod, CHANGED_FLAG, task, msg)\n output += output1\n\n non_clustered_leafs.remove(node2)\n terminate_flag += 1\n\n node_count += 1\n\n return CHANGED_FLAG, output",
"def test_cluster():\n g = Cluster(initialize_head=False)\n node = g.add_node()\n node2 = g.add_node()\n assert node.remaining_processes_alive()\n assert node2.remaining_processes_alive()\n g.remove_node(node2)\n g.remove_node(node)\n assert not any(n.any_processes_alive() for n in [node, node2])",
"def add_node_after_upgrade_neutron_ceph(self):\n self.env.revert_snapshot('upgrade_master_neutron_ceph')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[6:7])\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-07': ['controller']}\n )\n\n self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id,\n progress=60)\n self.fuel_web.stop_deployment_wait(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:7],\n timeout=8 * 60\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'sanity', 'smoke'])",
"def add_delete_node_after_upgrade_neutron_ceph(self):\n self.env.revert_snapshot('upgrade_master_neutron_ceph')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[6:7])\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller']},\n False, True\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-07': ['controller']}\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['ha', 'sanity', 'smoke'],\n should_fail=1)",
"def add_delete_node_after_upgrade_nova_cinder(self):\n self.env.revert_snapshot('upgrade_master_nova_cinder')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:4])\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-02': ['compute', 'cinder']},\n False, True\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-04': ['compute', 'cinder']}\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id, should_fail=1)",
"def upgrade_ha_one_controller_delete_node(self):\n if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):\n raise SkipTest()\n self.env.revert_snapshot('ceph_ha_one_controller_compact')\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.admin_actions.upgrade_master_node()\n\n self.fuel_web.assert_nodes_in_ready_state(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:3])\n self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)\n self.fuel_web.assert_nailgun_upgrade_migration()\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n with self.fuel_web.get_ssh_for_node('slave-03') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n nailgun_nodes = self.fuel_web.update_nodes(\n cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)\n task = self.fuel_web.deploy_cluster(cluster_id)\n self.fuel_web.assert_task_success(task)\n nodes = filter(lambda x: x[\"pending_deletion\"] is True, nailgun_nodes)\n try:\n wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,\n timeout=5 * 60)\n except TimeoutError:\n assert_true(len(self.fuel_web.client.list_nodes()) == 3,\n 'Node {0} is not discovered in timeout 10 *60'.format(\n nodes[0]))\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'],\n should_fail=1)\n self.env.make_snapshot(\"upgrade_ha_one_controller_delete_node\")",
"def add_node_after_upgrade_nova_cinder(self):\n self.env.revert_snapshot('upgrade_master_nova_cinder')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:4])\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-04': ['controller']}\n )\n\n self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id,\n progress=60)\n self.fuel_web.stop_deployment_wait(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:4],\n timeout=8 * 60\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id)\n\n self.env.make_snapshot('add_node_after_upgrade_nova_cinder')",
"def register_for_new_hierarchy_nodes(self):\n pass",
"def deploy_one_node(self):\n self.env.revert_snapshot(\"ready\")\n self.fuel_web.client.get_root()\n self.env.bootstrap_nodes(self.env.nodes().slaves[:1])\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE\n )\n logger.info('cluster is %s' % str(cluster_id))\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller']}\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.fuel_web.assert_cluster_ready(\n os_conn, smiles_count=4, networks_count=1, timeout=300)\n self.fuel_web.run_single_ostf_test(\n cluster_id=cluster_id, test_sets=['sanity'],\n test_name=('fuel_health.tests.sanity.test_sanity_identity'\n '.SanityIdentityTest.test_list_users'))",
"def _jude_need_cluster(self):\r\n pass",
"def create_vdb_node(self):\n\n # generate a new config and logging file but keep all data files\n self.copy_cassandra_files() \n self.save_config()\n self.generate_logging()\n\n\n # delete local and peer directories to rename cluster and peer list\n # TODO\n # error handling\n path = os.path.join(self.node_dir, \"data\", \"system\", \"local-*\")\n ret = execute_bash(self.config.connection, \"delete_dir\", path=path) \n self.fix_peers() \n\n # check if commit logs are formated and clean up if not\n self.check_commit_log()\n self.start_node()",
"def createClassifier(oldData, bestFeatureSplits, dmnName, decisionId = None,target = -1, clearTable = True, nextClassifer=False, prevDecId=None, createConnection=False):\n preDF = prepareData(oldData, targetClass=target)\n \n\n targetColumnName = preDF.columns[target]\n \n if nextClassifer==False:\n newDmnObject = xmlDmn(dmnName, decisionId)\n else:\n newDmnObject = xmlDmn(dmnName, decisionId=None, prevDecisionId=prevDecId)\n \n featuresForClf = list()\n print(\"Features for CLF : \")\n for column,value in bestFeatureSplits.items():\n if value > 0.02:\n featuresForClf.append(column)\n print(\"\\t-\",column,\":\",value)\n\n featuresForClf.append(targetColumnName)\n\n df = preDF[featuresForClf]\n dfTarget = df[df.columns[target]]\n dfData = df[df.columns.drop(df.columns[target])]\n dfFeature = df.columns.drop(df.columns[target])\n X_train, X_test, y_train, y_test = train_test_split(dfData, dfTarget, test_size=0.1)\n \n if int(percentage(2,len(X_train))) < 1:\n print(\"Minimum samples in leaf is less then 1\\nExit ...\")\n return\n else:\n minInLeafs = int(percentage(2,len(X_train)))\n \n clf = tree.DecisionTreeClassifier(min_samples_leaf=minInLeafs).fit(X_train,y_train)\n print(\"Accuaracy on the testing set : {:.3f}\".format(clf.score(X_test,y_test)))\n print(\"Minimum samples in leaf : \",int(percentage(2,len(X_train))))\n \n visualizeTree(clf,fileName=idGen(\"new_\"),features=dfFeature)\n\n featureNames = [dfFeature[i] for i in clf.tree_.feature] \n leafIds = clf.apply(dfData)\n leftChildren = clf.tree_.children_left\n rightChildren = clf.tree_.children_right\n decPath = clf.decision_path(dfData)\n threshold = clf.tree_.threshold\n leafImpurity = clf.tree_.impurity\n\n #Prepare features for dmn\n helperSet = set()\n for feature in featuresForClf[:-1]:\n helperSet.add(feature.split(\"_\")[0])\n featuresForDmn = list(helperSet)\n featuresForDmn.append(featuresForClf[-1])\n\n #decisionDictionary = defaultdict(list)\n df = df.rename(columns=lambda x: x.split(\"_\")[0])\n\n #Prepare columns type\n colType = dict()\n for element in featuresForDmn:\n if type(df.dtypes[element]) != np.dtype:\n colType[element] = \"object\"\n elif df.dtypes[element] == \"uint8\":\n colType[element] = \"object\"\n else:\n colType[element] = df.dtypes[element] \n \n if clearTable == True:\n newDmnObject.clearDecisionTable(decisionId)\n\n newDmnObject.generateTableColumns(featuresForDmn,colType)\n\n for i in tqdm(set(leafIds)): \n samplesInNode = decPath.getcol(i).copy()\n rows = samplesInNode.nonzero() [0]\n sampleId = rows[0] \n nodeIndex = decPath.indices[decPath.indptr[sampleId]:decPath.indptr[sampleId+1]]\n className = clf.classes_[np.argmax(clf.tree_.value[i])]\n inputOutput = defaultdict(dict) \n for value in featuresForDmn[:-1]: \n inputOutput[className][value] = {} \n for index, nodeId in enumerate(nodeIndex): \n nodeFeature = featureNames[nodeIndex[index-1]]\n nodeThreshold = threshold[nodeIndex[index-1]] \n if len(nodeFeature.split(\"_\"))>1:\n if nodeId in set(leftChildren):\n try:\n inputOutput[className][nodeFeature.split(\"_\")[0]][\"not\"].append(nodeFeature.split(\"_\")[1])\n except KeyError:\n inputOutput[className][nodeFeature.split(\"_\")[0]][\"not\"] = [nodeFeature.split(\"_\")[1]]\n \n if nodeId in set(rightChildren):\n try:\n\n inputOutput[className][nodeFeature.split(\"_\")[0]][\"is\"].append(nodeFeature.split(\"_\")[1])\n except KeyError:\n inputOutput[className][nodeFeature.split(\"_\")[0]][\"is\"] = [nodeFeature.split(\"_\")[1]]\n else: \n if nodeId in set(leftChildren):\n 
inputOutput[className][nodeFeature][\"<=\"] = nodeThreshold\n if nodeId in set(rightChildren):\n inputOutput[className][nodeFeature][\">\"] = nodeThreshold\n newDmnObject.generateTableRows(inputOutput.items(),calculateAnnotation(leafImpurity[i]))\n \n tableID = newDmnObject.decId\n\n if createConnection:\n newDmnObject.createConnectionForTables(prevDecId, tableID)\n \n newDmnObject.writeTree()\n newDF = selectDataForNextTable(leafIds,decPath,leafImpurity, oldData)\n\n return newDF, tableID",
"def maintain_leaf_nodes(self, node, action='add'):\n if action == 'add':\n self.leaf_nodes.append(node)\n elif action == 'remove':\n if node in self.leaf_nodes:\n self.leaf_nodes.remove(node)\n else:\n raise Exception('Unknown action requested')",
"def create_cluster(module, switch, name, node1, node2, mod, CHANGED_FLAG, task, msg):\n cli = pn_cli(module)\n clicopy = cli\n\n if mod == 'l3-vrrp' or mod == 'l2-vrrp':\n spine_list = module.params['pn_spine_list']\n leaf_list = module.params['pn_leaf_list']\n\n cli += ' switch %s system-settings-show ' % node1\n cli += ' format auto-trunk '\n status = run_command(module, cli, task, msg).split()[1]\n if status != 'on':\n if (node1 in leaf_list and node2 in leaf_list) or \\\n (node1 in spine_list and node2 in spine_list):\n\n ports = get_ports(module, node1, node2, task, msg)\n trunk_name = node1 + '-' + node2 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node1, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n ports = get_ports(module, node2, node1, task, msg)\n trunk_name = node2 + '-' + node1 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node2, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n cli = clicopy\n\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = list(set(run_command(module, cli, task, msg).split()))\n if name not in cluster_list:\n cli = clicopy\n cli += ' switch %s cluster-create name %s ' % (switch, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n if 'Success' in run_command(module, cli, task, msg):\n CHANGED_FLAG.append(True)\n return ' %s: Created %s \\n' % (switch, name), CHANGED_FLAG\n return '', CHANGED_FLAG",
"def test_create_node_fail_no_parent_provided(\n client: TestClient, superuser_token_headers: dict, db: Session\n) -> None:\n data = {\n \"node_type\": \"test_create_node\",\n \"name\": random_lower_string(),\n \"is_active\": True,\n }\n response = client.post(\n f\"{settings.API_V1_STR}/nodes/\",\n headers=superuser_token_headers,\n json=data,\n )\n assert response.status_code == 400\n content = response.json()\n assert content[\"detail\"] == \"Cannot create a node without a parent.\"",
"def test_change_attribute_when_npartitions_greater_than_clusters(self):\n\n treename = \"mytree\"\n filename = \"myfile.root\"\n ROOT.RDataFrame(100).Define(\"x\", \"rdfentry_\").Snapshot(treename, filename)\n\n df = Dask.RDataFrame(treename, filename, npartitions=10, daskclient=self.client)\n\n self.assertEqual(df._headnode.npartitions, 10)\n histo = df.Histo1D(\"x\")\n nentries = histo.GetEntries()\n\n self.assertEqual(nentries, 100)\n self.assertEqual(df._headnode.npartitions, 1)\n\n os.remove(filename)",
"def join_dqlite_master_node(info, master_ip):\n\n # The cluster we want to join may be either token-auth based or x509-auth based.\n # The way to identify the cluster type is to look for the \"admin_token\" in the info\n # we got back from the cluster we try to join.\n # In the case of token-auth we need to:\n # - create the known_tokens.csv file (if it does not exist) with the admin token\n # - turn on token-auth on kube-apiserver\n # - create the token based admin kubeconfig\n # - recreate the kubelet, proxy, scheduler, controller kubeconfigs for the new ca\n # - restart kubelite\n # In the case of x509-auth we need to:\n # - recreate the admin/client, kubelet, proxy, scheduler, controller kubeconfigs for the new ca\n # - restart kubelite\n\n hostname_override = info[\"hostname_override\"]\n store_cert(\"ca.crt\", info[\"ca\"])\n store_cert(\"ca.key\", info[\"ca_key\"])\n store_cert(\"serviceaccount.key\", info[\"service_account_key\"])\n\n if \"admin_token\" in info:\n # We try to join a cluster where token-auth is in place.\n rebuild_token_based_auth_configs(info)\n else:\n # We are joining a x509-auth based cluster\n rebuild_x509_auth_client_configs()\n\n update_apiserver(info.get(\"api_authz_mode\"), info.get(\"apiport\"))\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n store_callback_token(info[\"callback_token\"])\n\n if \"etcd_servers\" in info:\n set_arg(\"--etcd-servers\", info[\"etcd_servers\"], \"kube-apiserver\")\n if info.get(\"etcd_ca\"):\n store_cert(\"remote-etcd-ca.crt\", info[\"etcd_ca\"])\n set_arg(\"--etcd-cafile\", \"${SNAP_DATA}/certs/remote-etcd-ca.crt\", \"kube-apiserver\")\n if info.get(\"etcd_cert\"):\n store_cert(\"remote-etcd.crt\", info[\"etcd_cert\"])\n set_arg(\"--etcd-certfile\", \"${SNAP_DATA}/certs/remote-etcd.crt\", \"kube-apiserver\")\n if info.get(\"etcd_key\"):\n store_cert(\"remote-etcd.key\", info[\"etcd_key\"])\n set_arg(\"--etcd-keyfile\", \"${SNAP_DATA}/certs/remote-etcd.key\", \"kube-apiserver\")\n\n mark_no_dqlite()\n service(\"restart\", \"k8s-dqlite\")\n service(\"restart\", \"apiserver\")\n else:\n update_dqlite(info[\"cluster_cert\"], info[\"cluster_key\"], info[\"voters\"], hostname_override)\n\n # We want to update the local CNI yaml but we do not want to apply it.\n # The cni is applied already in the cluster we join\n try_initialise_cni_autodetect_for_clustering(master_ip, apply_cni=False)\n mark_no_cert_reissue()",
"def test_has_master_property(self):\n assert_true(self._cluster.has_master)\n self._cluster.master = None\n assert_false(self._cluster.has_master)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Take a list of nucleotide sequences, break them into all peptides with lengths in L, and expand degenerate bases after the peptides are generated, as needed.
|
import numpy as np
import skbio

def generateMersFromNT(seqList, L=[8, 9, 10, 11]):
    L = np.array(L)
    Lnt = L * 3  # peptide lengths in nucleotides
    mers = set()
    for i, seq in enumerate(seqList):
        print('Working on seq {} of {}'.format(i + 1, len(seqList)))
        dna = skbio.sequence.DNA(seq).degap()
        for l in Lnt:
            # slide a window of l nt; len(dna) - l + 1 start positions
            # (the original range had an extra -1, which skipped the last mer)
            for starti in range(0, len(dna) - l + 1):
                dnaMer = dna[starti:starti + l]
                if dnaMer.has_degenerates():
                    for ex_mer in dnaMer.expand_degenerates():
                        mers.add(str(ex_mer.translate()))
                else:
                    mers.add(str(dnaMer.translate()))
    # Filter out peptides with a stop codon
    mers = [p for p in mers if '*' not in p]
    return mers
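
A short usage sketch, assuming scikit-bio and numpy are installed; the input sequence is made up for illustration:

seqs = ['ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG']  # hypothetical nucleotide sequence
peptides = generateMersFromNT(seqs, L=[8, 9])
print(len(peptides), sorted(peptides)[:3])  # number of unique mers and a small sample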
|
[
"def dinucleotide(sequence):\n\tfrog = []\n\n\tfor i in range(0,(len(sequence)-1)):\n\t\tbp = sequence[i]\n\t\tbp_next = sequence[i+1]\n\t\tbp = bp.capitalize()\n\t\tbp_next = bp_next.capitalize()\n\n\t\tif bp == 'A':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([-1,-1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([-1,-1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([-1,-1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([-1,-1,1,1])\n\t\telif bp == 'C':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([-1,1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([-1,1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([-1,1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([-1,1,1,1])\n\t\telif bp == 'G':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([1,-1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([1,-1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([1,-1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([1,-1,1,1])\n\t\telif bp == 'T':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([1,1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([1,1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([1,1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([1,1,1,1])\n\tfrog = np.array(frog).flatten()\n\n\treturn frog",
"def decode(length, lehmer):\n result = [(lehmer % factorial(length - i)) // factorial(length - 1 - i) for i in range(length)]\n used = [False] * length\n for i in range(length):\n counter = 0\n for j in range(length):\n if not used[j]:\n counter += 1\n if counter == result[i] + 1:\n result[i] = j\n used[j] = True\n break\n return result",
"def permutations(L):\n if len(L) <= 1:\n yield L\n else:\n a = [L.pop(0)]\n for p in permutations(L):\n for i in range(len(p)+1):\n yield p[:i] + a + p[i:]",
"def motzkin_sequences(length, minwidth):\n if length == 0:\n yield ''\n else:\n # Either the first element of this one is a dot, or it isn't\n # If it is a dot, the number of possible sequences is equal\n # to the number of sequences of length 'length-1'\n # (that is, each of these sequences preceeded by a dot)\n dot_before_prev_possibilities = ('.' + seq\n for seq\n in motzkin_sequences(length-1,\n minwidth))\n for sequence in dot_before_prev_possibilities:\n yield sequence\n\n # If the first element is not a dot, it is an open parenthesis\n\n for length_inside_first_set in range(minwidth, length-2+1):\n # If the first element is an open parenthesis, we need to\n # place a matching, closed parenthesis after it.\n # There must be at least minwidth elements between\n # the first parenthesis and its partner, because we\n # need at least minwidth dots between these two parenthesis.\n #\n # There can be as many as length-2 spots between the\n # first and last element, since this would be representative\n # of having the first space be an open parenthesis and the last\n # spot being its partnered close parenthesis.\n #\n # So, let's allocate the parenthesis inside this pair\n length_after_first_set = length-2-length_inside_first_set\n stuff_inside_first_set = [sequence for sequence in\n motzkin_sequences\n (length_inside_first_set,\n minwidth)]\n stuff_after_first_set = [sequence for sequence in\n motzkin_sequences(\n length_after_first_set,\n minwidth)]\n for inside in stuff_inside_first_set:\n for after in stuff_after_first_set:\n sequence = '({inside}){after}'.format(\n inside=inside,\n after=after)\n yield sequence",
"def intersperse(main, *extras) :\n def isbase(char) :\n return get_ucd(char, 'gc').startswith(\"L\")\n\n res = []\n extras = list(extras)\n #extras.sort(cmp=lambda a,b : cmp(a[1], b[1]))\n for m in main :\n groups = []\n base = \"\"\n for v in groupby(m, lambda x:get_ucd(x, 'gc')[0]) :\n k = v[0]\n d = \"\".join(v[1])\n if k == \"L\" :\n if base : groups.extend((base, \"\"))\n for c in d[:-1] :\n groups.extend((c, \"\"))\n base = d[-1]\n elif k == \"M\" :\n base = base + d\n else :\n groups.extend((base, d))\n base = \"\"\n if base : groups.extend((base, \"\"))\n # groups is now 2n list where list[n] is base+dias, list[n+1] is punc separators\n for i in range(0, len(groups), 2) :\n dias = list(groups[i][1:])\n orders = [get_ucd(c, 'ccc') for c in dias]\n bases = list(zip(dias, orders))\n new = sorted(bases + extras, cmp=lambda a,b: cmp(a[1], b[1]))\n results = list(zip(*new))\n groups[i] = \"\".join([groups[i][0]] + list(results[0]))\n res.append(\"\".join(groups))\n return res",
"def base_combinations_recursive(polymer, polymer_len, combinations):\n # Base case - run out of character space for each 'polymer'\n if polymer_len == 0:\n combinations.append(polymer) # Complete polymer instance\n return # continue back to previous recursion's active for loop iteration\n\n # Starting with all that begin with 'A' ...\n for i in range(len(BASE)):\n # Next Base appended\n new_polymer = polymer + BASE[i] # we build upon a new instance\n base_combinations_recursive(new_polymer, (polymer_len - 1), combinations)\n\n return combinations",
"def seq_permutation(seq_len: int,\n charset: str = \"ATCG\") -> t.Iterable[str]:\n if seq_len <= 0:\n yield \"\"\n else:\n for seq in seq_permutation(seq_len-1, charset):\n for c in charset:\n yield seq + c",
"def _all_splits(seq):\n for index in range(1, len(seq)):\n yield (seq[0:index], seq[index:])",
"def generate_possible_freqL(pL,aL):",
"def gen_permutations(l):\n if len(l) == 0:\n return iter([])\n if len(l) == 1:\n return iter(gen_permutations_for_single_list(l[0]))\n if len(l) == 2:\n return iter(gen_permutations_for_two_lists(l[0], l[1]))\n return iter(gen_glue_two_generated_lists(gen_permutations_for_two_lists(l[0], list(gen_permutations(l[1:])))))",
"def split_subsequences(iterable, length=2, overlap=0, \r\n join_substr=True):\r\n isstring = isinstance(iterable, str) and join_substr\r\n it = iter(iterable)\r\n results = list(itertools.islice(it, length))\r\n while len(results) == length:\r\n yield ''.join(results) if isstring else results\r\n results = results[length - overlap:]\r\n results.extend(itertools.islice(it, length - overlap))\r\n if results:\r\n yield ''.join(results) if isstring else results",
"def expandIupac(seq):\n # http://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence\n d = {'A': 'A', 'C': 'C', 'B': 'CGT', 'D': 'AGT', 'G': 'G', \\\n 'H': 'ACT', 'K': 'GT', 'M': 'AC', 'N': 'GATC', 'S': 'CG', \\\n 'R': 'AG', 'T': 'T', 'W': 'AT', 'V': 'ACG', 'Y': 'CT', 'X': 'GATC'}\n seqs = []\n for i in product(*[d[j] for j in seq]):\n seqs.append(\"\".join(i))\n return seqs",
"def get_generous_result(lambs):\n # Gives 2*n sequence\n gen_result = []\n this_payment = 1\n while(sum(gen_result) <= lambs):\n gen_result.append(this_payment)\n this_payment *= 2\n\n return gen_result",
"def split_l2_dci_vlans(vlan_list):\n for i in range(len(vlan_list)):\n yield vlan_list[i:i+1]\n while True:\n yield vlan_list[-1::]",
"def bruteforce_generator(characters, min_length, max_length):\n for length in range(min_length, max_length + 1):\n product = itertools.product(characters, repeat=length)\n for current in product:\n yield ''.join(current)",
"def process_all(input_list, input_lengths):\n skip = 0\n pos = 0\n length = len(input_list)\n for r in range(0,64):\n for ipl in input_lengths:\n knot(input_list, pos, ipl)\n pos = new_pos(pos, ipl, skip, length)\n skip = (skip + 1 ) % length\n return input_list",
"def pd3sequence(max_level=int(1e5)):\n # The first special cases (for rings 0 and 1) are returned immediately\n yield 1\n yield 2\n\n # Length/number of elements of the last ring\n max_length = 6*max_level\n\n # `max_delta` is the highest difference of values between any 2 adjacent\n # tiles up to the `max_level`-th ring\n max_delta = 2*max_length + 5\n\n primes = set(primes_up_to(max_delta))\n\n for level in xrange(2, max_level+1):\n # Retrieve the current ring\n cur = ring(level)\n\n # Check possibly prime deltas for the first element of the current ring\n first_deltas = [cur.length-1, 2*cur.length + 5, cur.length+1]\n if all(d in primes for d in first_deltas):\n yield cur.first\n\n # Check possibly prime deltas for the last element of the current ring\n last_deltas = [cur.length+5, cur.length-1, 2*cur.length - 7]\n if all(d in primes for d in last_deltas):\n yield cur.last",
"def _splicegen(self, maxchars, stringlist):\n \n runningcount = 0\n tmpslice = []\n for i, item in enumerate(stringlist):\n runningcount += len(item)\n if runningcount <= int(maxchars):\n tmpslice.append(i)\n else:\n yield tmpslice\n tmpslice = [i]\n runningcount = len(item)\n yield(tmpslice)",
"def allPandigitals(N, base=1):\n\tret = []\n\tfor i in range(base,base+N):\n\t\tret += pandigitals(N, base)\n\treturn ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
API can limit the number of documents returned
|
def test_18_api_can_limit_the_number_of_documents(self):
    res = self.client.get('/documents?limit=1')
    data = json.loads(res.content)
    assert len(data['rows']) == 1
    assert data['rows'][0]['id'] == 2
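
For orientation, a minimal Django-style view that would pass this test and the offset test in the next row. This is entirely hypothetical: the real endpoint, model, and default ordering are not part of this dataset, and the '-id' ordering is assumed only because the test expects id 2 first.

from django.http import JsonResponse

def list_documents(request):
    # Hypothetical sketch; Document is an assumed model, not shown in the dataset.
    limit = int(request.GET.get('limit', 20))
    offset = int(request.GET.get('offset', 0))
    qs = Document.objects.order_by('-id')[offset:offset + limit]
    return JsonResponse({'rows': [{'id': d.id} for d in qs]})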
|
[
"def get_latest_documents(context, count=5):\n req = context.get('request')\n qs = Document.objects.published(req)[:count]\n return qs",
"def test_19_api_can_offset_the_number_of_documents(self):\n res = self.client.get('/documents?offset=1')\n assert json.loads(res.content)['rows'][0]['id'] == 1",
"def test_max_results_per_page(self):\r\n self.manager.create_api(self.Person, methods=['GET', 'POST'],\r\n max_results_per_page=15)\r\n for n in range(20):\r\n response = self.app.post('/api/person', data=dumps({}))\r\n assert 201 == response.status_code\r\n response = self.app.get('/api/person?results_per_page=20')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 15 == len(data['objects'])",
"def limit_queryset(self):\n self.queryset = self.queryset.limit(self.limit)",
"def test_get_document_all_using_get(self):\n pass",
"def test_list_all_posts__limit(self):\n repository.list_all_posts(30)",
"def test_limit(self):\n actual = self.view001(limit=10)['rows']\n expected = [{'key': 'julia{0:03d}'.format(x),\n 'id': 'julia{0:03d}'.format(x),\n 'value': 1} for x in range(10)]\n self.assertEqual(actual, expected)",
"def test_results_per_page(self):\r\n self.manager.create_api(self.Person, methods=['POST', 'GET'])\r\n for n in range(25):\r\n response = self.app.post('/api/person', data=dumps({}))\r\n assert 201 == response.status_code\r\n response = self.app.get('/api/person?results_per_page=20')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 20 == len(data['objects'])\r\n # Fall back to default number of results per page on bad requests.\r\n response = self.app.get('/api/person?results_per_page=-1')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 10 == len(data['objects'])\r\n # Only return max number of results per page.\r\n response = self.app.get('/api/person?results_per_page=30')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 25 == len(data['objects'])",
"def get_documents(file_type: str, args):\n\n # To debug if start and end of records works well in the loop for.\n # listTmp = [i for i in range(100)]\n # print(listTmp[start:end])\n\n # Getting valid page number and page length , by calling a function form utils.\n page, per_page, start, end = utils.get_valid_pagination_args(args)\n\n file_type = file_type.strip()\n\n mongo_documents = list(mongo.db.documents.find(\n {\"dataType\": file_type}).sort(\"name\", 1))\n total_records = len(mongo_documents)\n error = None\n documents = mongo_documents[start:end]\n for document in documents:\n link = None\n if document[\"format\"] == \"link\":\n\n link = document[\"link\"]\n\n clinicalCases = list(mongo.db.clinicalCases.find(\n {\"sourceId\": document[\"_id\"]}, {\"locationId\": 0}\n ))\n\n for case in clinicalCases:\n case.update({\"_id\": str(case[\"_id\"]),\n \"sourceId\": str(case[\"sourceId\"]),\n })\n try:\n for version in case[\"versions\"]:\n version.pop('locationId', None)\n except:\n pass\n\n document.update({\"_id\": str(document[\"_id\"]),\n \"link\": link,\n \"clinicalCases\": clinicalCases})\n\n data = {\n \"documents\": documents,\n \"totalRecords\": total_records,\n \"currentPage\": page,\n \"perPage\": per_page,\n \"error\": error,\n }\n\n return data",
"def paging_results(self):\n\n return 10",
"def paginate(docs, per_page=10):\n return [docs[i: i + per_page] for i in range(0, len(docs), per_page)]",
"def _get_limited_articles(response, limit):\n result = response['entries']\n if limit is not None:\n logging.info(f\"Completed. Loaded {min(limit, len(result))} articles with limit {limit}\")\n return result[0:min(limit, len(result))]\n else:\n logging.info(f\"Completed. Loaded {len(result)} articles without any limit\")\n return result",
"def getItems(maxItems=None):",
"def do_pagination(self, request, queryset):\n limit_max = getattr(settings, 'WAGTAILAPI_LIMIT_MAX', 20)\n\n try:\n offset = int(request.GET.get('offset', 0))\n assert offset >= 0\n except (ValueError, AssertionError):\n raise BadRequestError(\"offset must be a positive integer\")\n\n try:\n limit = int(request.GET.get('limit', min(20, limit_max)))\n\n if limit > limit_max:\n raise BadRequestError(\"limit cannot be higher than %d\" % limit_max)\n\n assert limit >= 0\n except (ValueError, AssertionError):\n raise BadRequestError(\"limit must be a positive integer\")\n\n start = offset\n stop = offset + limit\n\n return queryset[start:stop]",
"def withLimitedSearch(self, searchTerms, limit):\n\t\treturn 'search='+searchTerms+'&limit='+limit+'&'",
"def get_queryset(self):\n \ttweet_count = Tweet.objects.count()\n \tlimit = tweet_count - tweet_count % 3\n \treturn Tweet.objects.order_by('-created_at')[:limit]",
"def recent(self, userid, limit=DEFAULT_DOCUMENT_LIMIT):\n querySet = super(DocumentManager, self).get_query_set().filter(member__user__id=userid)\n return querySet[:limit], querySet.count() - limit",
"def get_10K_doc_raw(name, cik):\n company = Company(name, cik)\n # tree = company.get_all_filings(filing_type=\"10-K\")\n # docs = Company.get_documents(tree, no_of_documents=1)\n docs = company.get_10Ks(no_of_documents=1)\n return docs",
"def get_documents():\n\n DB_USER = app.config.get('DB_USER', 'postgres')\n DB_PASSWORD = app.config.get('DB_PASSWORD', 'dbpass')\n DB_NAME = app.config.get('DB_NAME', 'envirolens')\n\n DB.connect(\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASSWORD\n )\n\n if DB.cursor is None:\n return jsonify({'Error' : 'The connection could not be established'})\n\n document_ids = request.json.get('document_ids', None)\n\n # If the \"document_ids\" parameter was not set:\n if document_ids is None:\n return jsonify(\n {'Message' : 'You need to provide json with \"document_ids\" : [list of documents ids] value'}\n )\n\n statement = \"\"\"SELECT * FROM documents WHERE document_id IN %s;\"\"\"\n DB.cursor.execute(statement, (tuple(document_ids), )) \n\n # Enumerating the fields\n num_fields = len(DB.cursor.description)\n field_names = [i[0] for i in DB.cursor.description]\n documents = [{ field_names[i]: row[i] for i in range(num_fields) } for row in DB.cursor.fetchall()]\n \n # Cleaning the ouput:\n # - removing fulltext field\n # - slicing down the fulltext_cleaned field to 500 chars\n # - we return only the first 10 results\n for i in range(len(documents)):\n if documents[i]['fulltext_cleaned'] is not None:\n documents[i]['fulltext_cleaned'] = documents[i]['fulltext_cleaned'][:500]\n documents[i].pop('fulltext')\n\n DB.disconnect()\n\n return jsonify(documents[:10])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
API supports offsets for documents
|
def test_19_api_can_offset_the_number_of_documents(self):
    res = self.client.get('/documents?offset=1')
    assert json.loads(res.content)['rows'][0]['id'] == 1
|
[
"def test_offset():\n segmenter = NLTKSentencizer()\n text = ' This , text is... . Amazing !!'\n docs_chunks = segmenter.segment(np.stack([text, text]))\n for chunks in docs_chunks:\n assert len(chunks) - 1 == chunks[-1]['offset']",
"def docbyoffset(self, offset):\n # empty documents are not stored explicitly in MM format, so the index marks\n # them with a special offset, -1.\n if offset == -1:\n return []\n if isinstance(self.input, string_types):\n fin = utils.smart_open(self.input)\n else:\n fin = self.input\n\n fin.seek(offset) # works for gzip/bz2 input, too\n previd, document = -1, []\n for line in fin:\n docid, termid, val = line.split()\n if not self.transposed:\n termid, docid = docid, termid\n docid, termid, val = int(docid) - 1, int(termid) - 1, float(val) # -1 because matrix market indexes are 1-based => convert to 0-based\n assert previd <= docid, \"matrix columns must come in ascending order\"\n if docid != previd:\n if previd >= 0:\n return document\n previd = docid\n\n document.append((termid, val,)) # add another field to the current document\n return document",
"def latest_offsets(self):\n raise NotImplementedError",
"def test_options_offset(self):\n\n assert query.QueryOptions(offset=50).offset == 50",
"def test_read_with_offset_parameter(self, mock):\n mock.return_value = MagicMock(status_code=200, content=json.dumps([\n {'id': '3', 'name': 'Meg', 'score': '10'},\n {'id': '4', 'name': 'Chris', 'score': '42'},\n {'id': '5', 'name': 'Stewie', 'score': '72'}\n ]))\n\n client = SheetsuClient(**self.kwargs)\n response = client.read(offset=2)\n self.assertEqual(response, [\n {'id': '3', 'name': 'Meg', 'score': '10'},\n {'id': '4', 'name': 'Chris', 'score': '42'},\n {'id': '5', 'name': 'Stewie', 'score': '72'}\n ])",
"def offset_references(self, offset: int) -> None:\n self.stream_dict.offset_references(offset)\n self.object_number += offset",
"def update_offset(self, new_offset):\r\n self.offset = new_offset",
"def offset(self, value):\n self._offset = value",
"def _get_offset_param(params):\n offset = params.pop('offset', 0)\n return validate_integer(offset,\n 'offset',\n 0,\n constants.DB_MAX_INT)",
"def getOffset(self, index: int) -> int:\n ...",
"def earliest_offsets(self):\n raise NotImplementedError",
"def test_post_get_document_tag_field_position(self):\n pass",
"async def beginning_offsets(self, partitions):\n if self._client.api_version <= (0, 10, 0):\n raise UnsupportedVersionError(\n \"offsets_for_times API not supported\"\n f\" for cluster version {self._client.api_version}\"\n )\n offsets = await self._fetcher.beginning_offsets(\n partitions, self._request_timeout_ms)\n return offsets",
"def _getOffsets(self, mapped, magicbytes, start = None):\n if start is None:\n start = 0\n else:\n start -= len(magicbytes)\n\n start = mapped.find(magicbytes, start)\n while True:\n end = mapped.find(magicbytes, start + len(magicbytes))\n if end == -1:\n yield (start + len(magicbytes), mapped.size())\n raise StopIteration\n\n yield (start + len(magicbytes), end)\n start = end",
"def _yield_docs(db, prefix=None, batchsize=500):\n for r in db.iterview(\"_all_docs\", batch=batchsize, include_docs=True):\n _id = str(r.id)\n if prefix and not _id.startswith(prefix):\n continue\n yield dict(r.doc)",
"def _solr_before(request_params):\n del request_params['offset']\n request_params['cursor'] = '*'",
"def register_for_new_offset_events(self):\n pass",
"def register_for_deleted_offset_events(self):\n pass",
"def findOffset(self):\n recv = self.getUpdates(offset=self.offset, timeout=0, updateOffset=False)\n count = len(recv['result'])\n while count == 100:\n self.offset = recv['result'][-1]['update_id']\n recv = self.getUpdates(self.offset, timeout=0, updateOffset=False)\n count = len(recv['result'])\n if recv['result']:\n self.offset = recv['result'][-1]['update_id']\n self.offset += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
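A minimal sketch of a Django view that a test like test_19 above could exercise; the Document model, the myapp module path, and the 'rows' response key are all assumptions chosen only to match the shape the assertion expects.

from django.http import JsonResponse
from myapp.models import Document  # hypothetical app and model

def list_documents(request):
    # Parse ?offset=N defensively, falling back to 0 on missing or bad input.
    try:
        offset = max(int(request.GET.get('offset', 0)), 0)
    except ValueError:
        offset = 0
    docs = Document.objects.order_by('id')[offset:offset + 10]
    return JsonResponse({'rows': [{'id': d.id, 'title': d.title} for d in docs]})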
Api can get a particular document
|
def test_20_api_can_get_a_document(self):
res = self.client.get(
'/documents/1',
format='json'
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
assert json.loads(res.content)['id'] == 1
|
[
"def test_get_document_using_get(self):\n pass",
"def getDocumentById(self, request):\n R = Resource.objects.getResourceById(request)\n D = Document.objects.get(resource=R)\n return D",
"def retrieve_document(doc_id):\n\n db = config_db.get_db()\n success, output = db.get_documents_from_db([doc_id])\n if success:\n return jsonify({\n \"documents\" : output\n }), 200\n else:\n return jsonify(output), 400",
"def on_get(self, req, resp, identifier=None):\n\t\tif not identifier:\n\t\t\traise falcon.HTTPMethodNotAllowed({'PUT'})\n\n\t\tservice = DocumentService()\n\t\tdocument = service.get_document(identifier)\n\t\tif document:\n\t\t\tresp.body = json.dumps(document)\n\t\t\tresp.status = falcon.HTTP_200\n\t\telse:\n\t\t\tresp.status = falcon.HTTP_404",
"def test_get_document_all_using_get(self):\n pass",
"def test_document_details(self):\n\t\tcreate_document()\n\t\tdocument = Document.objects.get(id=1)\n\t\tc = Client()\n\t\tresponse = c.get(\"/documents/%s\" % str(document.id))\n\t\tself.assertEquals(response.status_code, 200)",
"def test_get_document_as_creator(self):\n response = self.clients['fs_user_1'].get(\n \"/api/documents/1\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def documents_get(self,\r\n document_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, CreateDocumentResponse.from_dictionary)",
"def test_restricted_user_can_retrieve_associated_document(self):\n user = create_test_user(\n permission_codenames=(PropositionDocumentPermission.view_associated,),\n dit_team=TeamFactory(),\n )\n investment_project = InvestmentProjectFactory(\n created_by=user,\n )\n proposition = PropositionFactory(\n investment_project=investment_project,\n )\n entity_document = PropositionDocument.objects.create(\n proposition_id=proposition.pk,\n original_filename='test.txt',\n created_by=user,\n )\n\n url = reverse(\n 'api-v3:investment:proposition:document-item',\n kwargs={\n 'proposition_pk': proposition.pk,\n 'project_pk': proposition.investment_project.pk,\n 'entity_document_pk': entity_document.pk,\n },\n )\n\n api_client = self.create_api_client(user=user)\n response = api_client.get(url)\n assert response.status_code == status.HTTP_200_OK\n assert response.data == {\n 'id': str(entity_document.pk),\n 'av_clean': None,\n 'created_by': {\n 'id': str(entity_document.created_by.pk),\n 'first_name': entity_document.created_by.first_name,\n 'last_name': entity_document.created_by.last_name,\n 'name': entity_document.created_by.name,\n },\n 'original_filename': 'test.txt',\n 'url': _get_document_url(entity_document.proposition, entity_document),\n 'status': UploadStatus.NOT_VIRUS_SCANNED,\n 'created_on': format_date_or_datetime(entity_document.created_on),\n 'uploaded_on': format_date_or_datetime(entity_document.document.uploaded_on),\n }",
"def get(self):\n\n doc_type = request.args.get(\"doc_type\", \"all\")\n start = int(request.args.get(\"start\", \"0\"))\n\n print(start, type(start))\n option = {\n \"start\": start\n }\n\n print(option)\n docs = get_doc_list( doc_type, option)\n\n return jsonify({\n \"errno\": 0,\n \"describe\": \"ok\",\n \"docs\": docs\n })",
"def getDoc(self, key):\n return self._docs.get('doc_' + key)",
"def get_doc_by_id(cls, doc_id):\n return cls.get_index().get(doc_id=doc_id)",
"def _getSpecificDoc(self,m, key, filter_type_doc=('doc', 'pdf', 'xls', 'ascii')):\n result = False\n description = \"\"\n release_item = m.group(1)\n document = m.group(2)\n version = m.group(3)\n task = m.group(4)\n cr = m.group(5)\n type_doc = m.group(6)\n project = m.group(7)\n instance = m.group(8)\n if type_doc in filter_type_doc:\n doc_name = re.sub(r\"(.*)\\.(.*)\", r\"\\1\", document)\n if key in doc_name:\n description, reference = self._getDescriptionDoc(document)\n self.dico_found[key, type_doc] = doc_name + \" issue \" + version\n result = True\n return result",
"def get_document(doc):\n try:\n doc = db.collection(\"parameters\").document(doc).get()\n return doc.to_dict()\n except Exception as e:\n error = f\"Error during 'firedb.get_document()' execution: {e}\"\n tg.send_error_message(error)",
"def check_document(self, index, doc_id, doc_type=\"data\"):\n resp = requests.head(\"%(url)s/%(index)s/%(type)s/%(id)s\" %\n {\"url\": self._url,\n \"index\": index,\n \"type\": doc_type,\n \"id\": doc_id})\n if resp.status_code == 200:\n return True\n elif resp.status_code == 404:\n return False\n else:\n self._check_response(resp, \"check the index at\")",
"def getDocument(self,documentListName, documentId, includeInactive = False, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/content/documentlists/{documentListName}/documents/{documentId}?includeInactive={includeInactive}&responseFields={responseFields}\", \"GET\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"documentId\", documentId);\r\n\t\turl.formatUrl(\"documentListName\", documentListName);\r\n\t\turl.formatUrl(\"includeInactive\", includeInactive);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).execute();\r\n\t\treturn self.client.result();",
"def get_doc(self, type_, name):\n if type_ == \"doxygen\":\n return self.doxydocs.get(name)\n if type_ == \"sphinx\":\n return self.sphinxdocs.get(name)",
"def single_document_details(request, id):\n document = Document.objects.get(id=id)\n return render(request, 'html/detail.html', {'document': document})",
"def get(self, no):\n article = get_a_article(no)\n if not article:\n api.abort(404)\n else:\n return article"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
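The retrieval contract test_20 asserts fits Django REST Framework's generic views in a few lines; this is a minimal sketch, assuming hypothetical Document and DocumentSerializer names.

from rest_framework.generics import RetrieveAPIView
from myapp.models import Document                  # hypothetical model
from myapp.serializers import DocumentSerializer   # hypothetical serializer

class DocumentDetail(RetrieveAPIView):
    # GET /documents/<pk> serializes one document and returns HTTP 200,
    # or HTTP 404 when no document with that primary key exists.
    queryset = Document.objects.all()
    serializer_class = DocumentSerializer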
Api can update a particular document
|
def test_21_api_can_update_document(self):
res = self.client.put(
'/documents/2',
{'title': 'new_name'},
format='json'
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
assert json.loads(res.content)['title'] == 'new_name'
|
[
"def test_update_document_using_put(self):\n pass",
"def update_document(self, doc_id, document):\n request = \"update \" + doc_id + \" \" + document\n response = self.__send_to_server(request)\n return response",
"def updateDocument(self,document, documentListName, documentId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/content/documentlists/{documentListName}/documents/{documentId}?responseFields={responseFields}\", \"PUT\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"documentId\", documentId);\r\n\t\turl.formatUrl(\"documentListName\", documentListName);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(document).execute();\r\n\t\treturn self.client.result();",
"def update_document(doc, data):\n try:\n db.collection(\"parameters\").document(doc).update(data)\n except Exception as e:\n error = f\"Error during 'firedb.update_document()' execution: {e}\"\n tg.send_error_message(error)",
"def update_document(self, document, doc_id, update_as_script):\n def _get_update_action(source, id_suffix=''):\n action = {'_id': doc_id + id_suffix, '_op_type': 'update'}\n if update_as_script:\n action.update(source)\n else:\n action['doc'] = source\n\n return action\n\n if self.plugin.requires_role_separation:\n user_doc = (self._remove_admin_fields(document)\n if update_as_script else document)\n actions = [_get_update_action(document, ADMIN_ID_SUFFIX),\n _get_update_action(user_doc, USER_ID_SUFFIX)]\n else:\n actions = [_get_update_action(document)]\n result = helpers.bulk(\n client=self.engine,\n index=self.index_name,\n doc_type=self.document_type,\n chunk_size=self.index_chunk_size,\n actions=actions)\n LOG.debug(\"Update result: %s\", result)",
"def _update(self, _filter, update_data, upsert, many):\n try : \n if (many == False) : \n self.collection.update_one(_filter,update_data,upsert=upsert)\n if (many == True):\n self.collection.update_many(_filter, update_data,upsert=upsert)\n except : \n print(\"ERROR : _update\")",
"def documents_update(self,\r\n document_id,\r\n request):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id,\r\n request=request)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json; charset=utf-8'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.patch(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, UpdateDocumentRequest.from_dictionary)",
"async def update(self, id, doc_fields):\n id = validate_experiment_id(id)\n doc_fields = validate_experiment_doc(\n pop_experiment_id(dict(doc_fields or ())))\n await self.ensure_indexes()\n if doc_fields:\n return await self._update(id, doc_fields)",
"def put(self, doc):\n if \"_id\" not in doc:\n doc[\"_id\"] = uuid.uuid4().hex\n response = self.server._PUT(self.name, doc[\"_id\"], json=doc)\n doc[\"_rev\"] = response.json()[\"rev\"]",
"def update(self, obj, id):",
"def update(self, doc, update_spec):\n \n doc = dict(doc.items() + update_spec.items())\n index = doc[\"ns\"]\n doc[\"_time\"] = doc[\"_id\"].generation_time\n\n service = self.getConnection()\n\n source = index.split(\".\")\n index_name = index.replace(\"_\",\"-\").replace(\".\",\"_\").lower()\n # Check index presence\n if index_name not in service.indexes:\n service.indexes.create(index_name) \n # Index the source document\n index = service.indexes[index_name]\n with index.attached_socket(sourcetype='json', source=source[0], host=\"abacus\") as sock:\n sock.send(dumps(doc, sort_keys=True)) \n print \"Updation successful\"\n if not doc:\n raise errors.EmptyDocsError(\n \"Cannot upsert an empty sequence of \"\n \"documents into Splunk\") \n return",
"def update_doc(coll, doc, doc_fragment, jpath):\n # modify the doc\n count = modify_doc(doc, doc_fragment, jpath)\n _do_save(coll, doc)\n return count",
"def update(self, collection_name: str, doc_filter: Dict, doc: Dict) -> None:\n if not self._get_collection(collection_name).update_one(doc_filter, {'$set': doc}).matched_count:\n raise FilterError('using the given filter found no document to update')",
"async def update_one(self, _id, **kwargs):\n collection = await self.get_collection()\n document = await collection.find_one_and_update(\n filter={'_id': _id},\n update={'$set': kwargs},\n return_document=ReturnDocument.AFTER\n )\n return document",
"def update(self, document, key, value):\n document[key] = value",
"def update_doc(self, doc_index, doc_type, query_body, data_body):\r\n actions = [{\r\n '_op_type': 'update',\r\n '_index': doc_index,\r\n '_type': doc_type,\r\n '_id': doc_id,\r\n 'doc': data_body} for doc_id in self.get_doc_id(doc_index, doc_type, query_body)]\r\n return helpers.bulk(client=self.es, actions=actions, stats_only=True, refresh=True)",
"def test_update_document(self, mock_index_loads):\n base_index = BaseIndex()\n base_index.document_dict = {\n 'user': {\n '123': {'email': 'test@email'}\n }\n }\n new_org_doc = {\n 'org_name': 'abc'\n }\n expected = {\n 'user': {\n '123': {'email': 'test@email'}\n },\n 'organization': {\n '222': {'org_name': 'abc'}\n }\n }\n base_index.update_document(table_name='organization', uid='222', document=new_org_doc)\n self.assertEqual(base_index.document_dict, expected)",
"def update(self, docs, table_name):\n return self.action('update', docs, table_name)",
"def post_update(self, path, data):\n request = self.factory.post(\n reverse('spaces:document_create', kwargs={\"path\": path}),\n data)\n request.user = self.author\n return views.DocUpdateView.as_view()(request, path=path)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
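test_21 sends a partial PUT body ({'title': 'new_name'}), so a handler has to accept partial updates; a minimal DRF sketch under the same assumed names as the previous example:

from rest_framework.generics import RetrieveUpdateAPIView
from myapp.models import Document
from myapp.serializers import DocumentSerializer

class DocumentDetailUpdate(RetrieveUpdateAPIView):
    queryset = Document.objects.all()
    serializer_class = DocumentSerializer

    def put(self, request, *args, **kwargs):
        # Delegate to partial_update() so a body containing only a title
        # validates instead of failing on other missing required fields.
        return self.partial_update(request, *args, **kwargs)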
Verifies proxy field label
|
def test_proxy_fields_label_entity(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
self.assert_util(
proxy.proxy_enable.get_input_label,
'Enable'
)
self.assert_util(
proxy.type.get_input_label,
'Proxy Type'
)
self.assert_util(
proxy.host.get_input_label,
'Host'
)
self.assert_util(
proxy.port.get_input_label,
'Port'
)
self.assert_util(
proxy.username.get_input_label,
'Username'
)
self.assert_util(
proxy.password.get_input_label,
'Password'
)
self.assert_util(
proxy.dns_enable.get_input_label,
'Reverse DNS resolution'
)
|
[
"def _check_prepopulated_fields_key(self, obj, field_name, label):\n\n try:\n field = obj.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return refer_to_missing_field(\n field=field_name, option=label, obj=obj, id=\"admin.E027\"\n )\n else:\n if isinstance(\n field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)\n ):\n return [\n checks.Error(\n \"The value of '%s' refers to '%s', which must not be a \"\n \"DateTimeField, a ForeignKey, a OneToOneField, or a \"\n \"ManyToManyField.\" % (label, field_name),\n obj=obj.__class__,\n id=\"admin.E028\",\n )\n ]\n else:\n return []",
"def test_field_label_name(self):\r\n form = SearchForm()\r\n self.assertTrue(form.fields['name'].label == 'Recherche')",
"def validate(self, field):",
"def _check_autocomplete_fields_item(self, obj, field_name, label):\n try:\n field = obj.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return refer_to_missing_field(\n field=field_name, option=label, obj=obj, id=\"admin.E037\"\n )\n else:\n if not field.many_to_many and not isinstance(field, models.ForeignKey):\n return must_be(\n \"a foreign key or a many-to-many field\",\n option=label,\n obj=obj,\n id=\"admin.E038\",\n )\n try:\n related_admin = obj.admin_site.get_model_admin(field.remote_field.model)\n except NotRegistered:\n return [\n checks.Error(\n 'An admin for model \"%s\" has to be registered '\n \"to be referenced by %s.autocomplete_fields.\"\n % (\n field.remote_field.model.__name__,\n type(obj).__name__,\n ),\n obj=obj.__class__,\n id=\"admin.E039\",\n )\n ]\n else:\n if not related_admin.search_fields:\n return [\n checks.Error(\n '%s must define \"search_fields\", because it\\'s '\n \"referenced by %s.autocomplete_fields.\"\n % (\n related_admin.__class__.__name__,\n type(obj).__name__,\n ),\n obj=obj.__class__,\n id=\"admin.E040\",\n )\n ]\n return []",
"def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.type.cancel_selected_value()\n self.assert_util(\n proxy.save,\n \"Proxy type can not be empty\",\n left_args={'expect_error': True}\n )",
"def _check_raw_id_fields_item(self, obj, field_name, label):\n\n try:\n field = obj.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return refer_to_missing_field(\n field=field_name, option=label, obj=obj, id=\"admin.E002\"\n )\n else:\n # Using attname is not supported.\n if field.name != field_name:\n return refer_to_missing_field(\n field=field_name,\n option=label,\n obj=obj,\n id=\"admin.E002\",\n )\n if not field.many_to_many and not isinstance(field, models.ForeignKey):\n return must_be(\n \"a foreign key or a many-to-many field\",\n option=label,\n obj=obj,\n id=\"admin.E003\",\n )\n else:\n return []",
"def test_account_form_username_label(self):\n form = AccountForm()\n self.assertTrue(\n form.fields['username'].label == 'Username (short name)')",
"def test_label_reverse_is_true(self):\n\n self.mock_lib.gwyfile_object_graphmodel_get.side_effect = (\n self._label_reverse)\n\n self.label_reverse = True\n\n meta = GwyGraphModel._get_meta(self.gwygraphmodel)\n self.assertIs(meta['label.reverse'], True)",
"def test_signup_form_username_label(self):\n form = SignupForm()\n self.assertTrue(\n form.fields['username'].label == 'Username (short name)')",
"def test_label_visible_is_true(self):\n\n self.mock_lib.gwyfile_object_graphmodel_get.side_effect = (\n self._label_visible)\n\n self.label_visible = True\n\n meta = GwyGraphModel._get_meta(self.gwygraphmodel)\n self.assertIs(meta['label.visible'], True)",
"def test_proxy_required_field_host(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\") \n self.assert_util(\n proxy.save,\n \"Proxy Host can not be empty\",\n left_args={'expect_error': True}\n )\n proxy.dns_enable.check()",
"def _is_label_valid(label):\n if label is None:\n return False\n\n return re.match(r\"^[a-zA-Z][a-zA-Z0-9_]*$\", label)",
"def _check_filter_item(self, obj, field_name, label):\n\n try:\n field = obj.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return refer_to_missing_field(\n field=field_name, option=label, obj=obj, id=\"admin.E019\"\n )\n else:\n if not field.many_to_many:\n return must_be(\n \"a many-to-many field\", option=label, obj=obj, id=\"admin.E020\"\n )\n elif not field.remote_field.through._meta.auto_created:\n return [\n checks.Error(\n f\"The value of '{label}' cannot include the ManyToManyField \"\n f\"'{field_name}', because that field manually specifies a \"\n f\"relationship model.\",\n obj=obj.__class__,\n id=\"admin.E013\",\n )\n ]\n else:\n return []",
"def test_name_field_is_unique(self):\n assert self.model._fields['name'].required is True",
"def test_label_reverse_is_false(self):\n\n self.mock_lib.gwyfile_object_graphmodel_get.side_effect = (\n self._label_reverse)\n\n self.label_reverse = False\n\n meta = GwyGraphModel._get_meta(self.gwygraphmodel)\n self.assertIs(meta['label.reverse'], False)",
"def test_is_failed_label(self):\n player = Player.objects.get(id=1)\n field_label = player._meta.get_field('is_failed').verbose_name\n self.assertEquals(field_label, 'Fail status')",
"def test_label_visible_is_false(self):\n\n self.mock_lib.gwyfile_object_graphmodel_get.side_effect = (\n self._label_visible)\n\n self.label_visible = False\n\n meta = GwyGraphModel._get_meta(self.gwygraphmodel)\n self.assertIs(meta['label.visible'], False)",
"def check_label(self):\r\n for count in self.ECGlabel:\r\n if self.ECGlabel[count] == self.lead_type:\r\n return True\r\n return False",
"def validate_label(data_inst):\n data_inst.mapValues(RegressionLabelChecker.test_numeric_data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies the default proxy configurations
|
def test_proxy_default_configs(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
self.assert_util(
proxy.proxy_enable.is_checked,
False
)
self.assert_util(
proxy.dns_enable.is_checked,
False
)
self.assert_util(
proxy.type.get_value,
"http"
)
self.assert_util(
proxy.host.get_value,
""
)
self.assert_util(
proxy.port.get_value,
""
)
self.assert_util(
proxy.username.get_value,
""
)
self.assert_util(
proxy.password.get_value,
""
)
|
[
"def test_proxy_work(self):\n utils.LazyPyraxProxy().cloudservers.should.be\\\n .equal(pyrax.cloudservers)",
"def checkproxies():\n puts(green('Checking proxy service available on all demo servers.'))\n demo_servers = list(env.roledefs.items())\n proxy_hosts = []\n for role_name, role in demo_servers:\n assert len(role['hosts'])==1, 'Multiple hosts found for role'\n host = role['hosts'][0]\n print('Checking role_name=', role_name, 'host=', host)\n # check if we proxy port is open on host\n proxy_port_open = False\n port = 3128 # squid3 default proxy port\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(3)\n result = sock.connect_ex((host, port))\n proxy_port_open = True if result == 0 else False\n sock.close()\n if proxy_port_open:\n puts(' - proxy port open on {} demoserver'.format(role_name))\n proxy_hosts.append(host)\n PROXY_LIST_value = ';'.join(host+':3128' for host in proxy_hosts)\n puts(blue('Use the following command to set the PROXY_LIST env var:\\n'))\n puts(blue(' export PROXY_LIST=\"' + PROXY_LIST_value + '\"'))\n return proxy_hosts",
"def test_proxy_backend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.save()\n self.assert_util(\n proxy.backend_conf_get.get_stanza(decrypt=True),\n {\n 'disabled': False,\n 'proxy_enabled': '1',\n 'proxy_port': '3285', \n 'proxy_rdns': '1',\n 'proxy_type': 'http',\n 'proxy_url': 'host',\n 'proxy_password': 'Password',\n 'proxy_username': 'Username'\n }\n )",
"def test_alter_proxy(self):\n # Test wrong proxy value\n self.assertRaises(Exception, utils.alter_proxy, 'boohoo') # noqa\n\n # Test when there is proxy value passed\n self.prepare_env()\n test_proxy = 'http://proxy.alternative.domain:8888'\n utils.alter_proxy(test_proxy)\n assert os.environ[\"HTTP_PROXY\"] == test_proxy\n assert os.environ[\"HTTPS_PROXY\"] == test_proxy",
"def test_proxy_frontend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n self.assert_util(proxy.save, True)",
"def test_proxy_required_field_host(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\") \n self.assert_util(\n proxy.save,\n \"Proxy Host can not be empty\",\n left_args={'expect_error': True}\n )\n proxy.dns_enable.check()",
"def verifyConfiguration(self):",
"def test_get_proxy_information_sans_proxy(self):\n self.configure_response()\n\n assert dump._get_proxy_information(self.response) is None",
"def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.type.cancel_selected_value()\n self.assert_util(\n proxy.save,\n \"Proxy type can not be empty\",\n left_args={'expect_error': True}\n )",
"def check_agent_proxy_settings(self):\n return self.__get_value(\"agentLevelParams/agentConfigParams/agent/use_system_proxy_settings\", True)",
"def can_make_proxy():\n return core.dependency_is_installed(\"voms-clients-cpp\") or server_is_installed()",
"def test_get_cloud_proxy(self):\n pass",
"def test_construct_from_properties_with_proxy_without_authentication(self):\n self.config.set('ConnectSDK', \"connect.api.proxy.uri\", \"http://proxy.example.org:3128\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertDefaults(communicator_config)\n self.assertIsNone(communicator_config.api_key_id)\n self.assertIsNone(communicator_config.secret_api_key)\n proxy_config = communicator_config.proxy_configuration\n self.assertIsNotNone(proxy_config)\n self.assertEqual(\"http\", proxy_config.scheme)\n self.assertEqual(\"proxy.example.org\", proxy_config.host)\n self.assertEqual(3128, proxy_config.port)\n self.assertIsNone(proxy_config.username)\n self.assertIsNone(proxy_config.password)",
"def test_update_cloud_proxy(self):\n pass",
"def supports_proxy(self):\n return # boolean",
"def test_construct_from_properties_with_proxy_authentication(self):\n self.config.set('ConnectSDK', \"connect.api.proxy.uri\", \"http://proxy.example.org:3128\")\n self.config.set('ConnectSDK', \"connect.api.proxy.username\", \"connect-username\")\n self.config.set('ConnectSDK', \"connect.api.proxy.password\", \"connect-password\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertDefaults(communicator_config)\n self.assertIsNone(communicator_config.api_key_id)\n self.assertIsNone(communicator_config.secret_api_key)\n proxy_config = communicator_config.proxy_configuration\n self.assertIsNotNone(proxy_config)\n self.assertEqual(\"http\", proxy_config.scheme)\n self.assertEqual(\"proxy.example.org\", proxy_config.host)\n self.assertEqual(3128, proxy_config.port)\n self.assertEqual(\"connect-username\", proxy_config.username)\n self.assertEqual(\"connect-password\", proxy_config.password)",
"def __checkProperties( self, requestedUserDN, requestedUserGroup ):\n credDict = self.getRemoteCredentials()\n gLogger.debug ( \"in credDict %s\" % credDict[ 'properties' ] )\n if Properties.FULL_DELEGATION in credDict[ 'properties' ]:\n return S_OK( False )\n if Properties.LIMITED_DELEGATION in credDict[ 'properties' ]:\n return S_OK( True )\n if Properties.PRIVATE_LIMITED_DELEGATION in credDict[ 'properties' ]:\n if credDict[ 'DN' ] != requestedUserDN:\n return S_ERROR( \"You are not allowed to download any proxy\" )\n if Properties.PRIVATE_LIMITED_DELEGATION in Registry.getPropertiesForGroup( requestedUserGroup ):\n return S_ERROR( \"You can't download proxies for that group\" )\n return S_OK( True )\n # Not authorized!\n return S_ERROR( \"You can't get proxies! Bad boy!\" )",
"def test_create_cloud_proxy(self):\n pass",
"def proxy_config(self) -> Optional['outputs.MustGatherSpecProxyConfig']:\n return pulumi.get(self, \"proxy_config\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies whether the host field in proxy is required and displays an error if left empty
|
def test_proxy_required_field_host(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
proxy.proxy_enable.check()
proxy.type.cancel_selected_value()
proxy.type.select("http")
proxy.dns_enable.check()
proxy.port.set_value("3285")
proxy.username.set_value("Username")
proxy.password.set_value("Password")
self.assert_util(
proxy.save,
"Proxy Host can not be empty",
left_args={'expect_error': True}
)
proxy.dns_enable.check()
|
[
"def test_proxy_host_valid_input(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.host.set_value(\"abc$$\")\n self.assert_util(\n proxy.save,\n \"Proxy Host should not have special characters\",\n left_args={'expect_error': True}\n )",
"def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.type.cancel_selected_value()\n self.assert_util(\n proxy.save,\n \"Proxy type can not be empty\",\n left_args={'expect_error': True}\n )",
"def _check_required_url_properties(self):\n if not self.host:\n raise MissingProperty('host')\n if not self.command_path:\n raise MissingProperty('command_path')",
"def validate_host(self):\n\n # Input Validation - Rock my regex ;-)\n re_hostname = re.compile(\"^[a-zA-Z0-9]+[a-zA-Z0-9-]*((([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6})?$\")\n re_ipaddr = re.compile(\"^((25[0-5]|2[0-4]\\d|[01]\\d\\d|\\d?\\d)\\.){3}(25[0-5]|2[0-4]\\d|[01]\\d\\d|\\d?\\d)$\")\n\n if self.server == None:\n end(UNKNOWN, \"You must supply a server hostname or ip address. \" \\\n + \"See --help for details\")\n\n if not re_hostname.match(self.server) and \\\n not re_ipaddr.match(self.server):\n end(UNKNOWN, \"Server given does not appear to be a valid \" \\\n + \"hostname or ip address\")",
"def test_proxy_frontend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n self.assert_util(proxy.save, True)",
"def test_proxy_host_field_length_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n host_value = \"a\" * 4097\n proxy.host.set_value(host_value)\n self.assert_util(\n proxy.save,\n \"Max host length is 4096\",\n left_args={'expect_error': True}\n )",
"def test_get_proxy_information_sans_proxy(self):\n self.configure_response()\n\n assert dump._get_proxy_information(self.response) is None",
"def requires_allowed_hosts_check(self) -> bool:\n return self.ab_internal_ql >= ALLOWED_HOST_THRESHOLD[\"ql\"]",
"def test_hostFromBlankSSLTransport(self):\n x = self.assertRaises(CertificateError,\n sslverify.Certificate.hostFromTransport,\n _MaybeSSLTransport())\n self.assertTrue(str(x).startswith(\"TLS\"))",
"def check(self, host):\n # TODO: Implement this. Be sure to return True only if it's possible to set it up.\n # Also, don't actually set it up just yet.\n raise Exception( \"Not implemented\" )",
"def verify_third_party_p4host(ctx, option_name, value):\n\t(res, error) = _verify_p4_host(value)\n\treturn (res, \"\", error)",
"def test_bad_hostname():\n pytest.xfail(\"Bad hostname.\")\n connect_to_dremio_flight_server_endpoint(\"badHostNamE\",\n \"32010\", \"dremio\", \"dremio123\", False, False, False)",
"async def test_validate_access_proxy(hass: HomeAssistant, provider) -> None:\n\n await async_setup_component(\n hass,\n \"http\",\n {\n \"http\": {\n CONF_TRUSTED_PROXIES: [\"192.168.128.0/31\", \"fd00::1\"],\n CONF_USE_X_FORWARDED_FOR: True,\n }\n },\n )\n provider.async_validate_access(ip_address(\"192.168.128.2\"))\n provider.async_validate_access(ip_address(\"fd00::2\"))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address(\"192.168.128.0\"))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address(\"192.168.128.1\"))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address(\"fd00::1\"))",
"def check_valid_hostnames(options, parser, hn_args=[]): # pylint: disable=dangerous-default-value, unused-argument\n try:\n for hname in hn_args:\n if getattr(options, hname):\n socket.gethostbyname(getattr(options, hname))\n except Exception: # pylint: disable=broad-except\n print('--{0} arg had invalid hostname: {1}'.format(hname, getattr(options, hname)))\n sys.exit(-1)",
"def _no_host(url):\n return not url.startswith('localhost') or not '.' in url",
"def test_https_bad_hostname(self):\n domain = inspect(\"wrong.host.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_bad_hostname)",
"def is_hostonly(self):\n return self.domain is None",
"def host_errors(self, hostname, details):\n if not hostname or not isinstance(hostname, basestring):\n return \"hostname_invalid\"\n if not isinstance(details, list):\n return \"host_details_not_list\"\n if len(details) != 3:\n return \"host_details_wrong_length\"\n if details[0] not in self.balancer.action_mapping:\n return \"host_action_invalid:%s\" % details[0]\n if not isinstance(details[1], dict):\n return \"host_kwargs_not_dict\"\n if not isinstance(details[2], bool):\n return \"host_match_subdomains_not_bool\"\n return None",
"def test_proxy_backend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.save()\n self.assert_util(\n proxy.backend_conf_get.get_stanza(decrypt=True),\n {\n 'disabled': False,\n 'proxy_enabled': '1',\n 'proxy_port': '3285', \n 'proxy_rdns': '1',\n 'proxy_type': 'http',\n 'proxy_url': 'host',\n 'proxy_password': 'Password',\n 'proxy_username': 'Username'\n }\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies that an error is displayed if the host contains special characters
|
def test_proxy_host_valid_input(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
proxy.host.set_value("abc$$")
self.assert_util(
proxy.save,
"Proxy Host should not have special characters",
left_args={'expect_error': True}
)
|
[
"def test_bad_hostname():\n pytest.xfail(\"Bad hostname.\")\n connect_to_dremio_flight_server_endpoint(\"badHostNamE\",\n \"32010\", \"dremio\", \"dremio123\", False, False, False)",
"def is_valid_host_name(hostname):\n if len(hostname) > 255:\n return False\n if hostname[0].isdigit(): return False\n if hostname[-1:] == \".\":\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n allowed = re.compile(\"(?!-)[A-Z\\d-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(x) for x in hostname.split(\".\"))",
"def test_many_chars_with_special(self):\n value = 'Madam, I\\'m Adam!!!'\n self.assertEqual(main(value), True)",
"def test_submit_with_bad_character_in_custom(self):\n url = u'http://www.python.org/'\n custom = 'my_link_bad_chars:##$#$%^$&%^**'\n response = self.client.post(reverse('submit'), {\n 'url': url, 'custom': custom})\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shortener/submit_failed.html')\n self.assertFormError(\n response, 'link_form', 'custom', u'Invalid character for encoding: _')\n self.assertNotIn('link', response.context)",
"def test_encoding_error(self):\n try:\n mark_safe(\"abcdefghijkl<p>mnὀp</p>qrstuwxyz\").encode(\"ascii\")\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n reporter = ExceptionReporter(None, exc_type, exc_value, tb)\n html = reporter.get_traceback_html()\n self.assertIn(\"<h2>Unicode error hint</h2>\", html)\n self.assertIn(\"The string that could not be encoded/decoded was: \", html)\n self.assertIn(\"<strong><p>mnὀp</p></strong>\", html)",
"def test_special_char__username(self):\n form = RegisterForm({'user_name':'foouser!!!',\n 'password':'bar',\n 'confirm_password':'bar',\n 'email':'j@j.com',\n 'confirm_email':'j@j.com',}\n )\n\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.non_field_errors(),\n [\"Username Contains Special Characters\"]\n )",
"def is_suspicious_str(s) -> bool:\n try:\n _ = s.encode('ascii')\n return True if s == '' else not ALPHANUMERIC_REGEXP.search(s)\n except UnicodeEncodeError:\n # Treat string with non-CJK_space characters as \"not suspicious\".\n # NOTE:\n # - \\u3000 is CJK whitespace\n # - There're other unicode whitespaces listed here: https://stackoverflow.com/a/37903645\n return b'\\\\u3000' in s.encode('unicode_escape')\n except:\n print(s)\n raise",
"def test_escape_badstr():\n inst = _encoder.TextEncoder('utf-8')\n with raises(RuntimeError):\n inst.escape(_test.badstr)",
"def test_handles_special_characters(self):\n chars = ['\"', \"'\", \"&\", \"|\", \"<\", \">\", \":\"]\n for special_char in chars:\n with self.subTest(special_char=special_char):\n with self.assertNotRaises(Exception):\n self.queryset.search('Beep' + special_char)",
"def test_invalidHostname(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"wrong-host.example.com\",\n u\"correct-host.example.com\",\n )\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, VerificationError)\n self.assertIsInstance(sErr, ConnectionClosed)",
"def test_three_chars_with_special(self):\n value = '*a#b$&a^'\n self.assertEqual(main(value), True)",
"def _validate_hostname(cls, hostname: str) -> bool:\n hostname_regex = re.compile(\n r\"(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\.?\")\n return bool(hostname_regex.fullmatch(hostname))",
"def test_content_badstr():\n inst = _encoder.TextEncoder('utf-8')\n with raises(RuntimeError):\n inst.content(_test.badstr)",
"def test_badUTF8AsIRI(self):\n urlWithBinary = 'http://xn--9ca.com/%00%FF/%C3%A9'\n uri = URL.fromText(urlWithBinary)\n iri = uri.asIRI()\n expectedIRI = ('http://\\N{LATIN SMALL LETTER E WITH ACUTE}.com/'\n '%00%FF/'\n '\\N{LATIN SMALL LETTER E WITH ACUTE}')\n actualIRI = iri.asText()\n self.assertEqual(actualIRI, expectedIRI,\n '%r != %r' % (actualIRI, expectedIRI))",
"def test_https_bad_hostname(self):\n domain = inspect(\"wrong.host.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_bad_hostname)",
"def test_escape(self):\n self.assertRaises(ValueError, escape, \"I am a string type. Not a unicode type.\")\n self.assertEqual(escape(u\"I am a unicode type.\"), u\"I%20am%20a%20unicode%20type.\")",
"def test_type_nonascii(self):\n self.assert_input(\n 'Failed to type Spanish string',\n u'Teclado (informática)')\n self.assert_input(\n 'Failed to type Russian string',\n u'Компьютерная клавиатура')",
"def test_host_dot_escape_in_re(self):\n def constructor(proto_host: str) -> bytes:\n return (f'<a href=\"{proto_host}\"></a>'\n f'<img src=\"{proto_host}\"/>'\n f'<link href=\"{proto_host}\"/>').encode()\n\n self.assert_modified_html(\n self.get_source(constructor, ['ya.ru', 'yazru.ru']),\n self.get_source(constructor, ['', 'yazru.ru'])\n )",
"def unicode_encode_error():\n try:\n '€'.encode('ascii')\n except UnicodeEncodeError:\n return \"can't encode this character to ascii\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
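The expected message suggests a character whitelist on the host field; a plausible validator is sketched below, assuming a rule the add-on framework may well implement differently.

import re
from typing import Optional

_HOST_RE = re.compile(r'^[A-Za-z0-9][A-Za-z0-9.\-]*$')  # letters, digits, dots, hyphens

def validate_proxy_host(host: str) -> Optional[str]:
    """Return an error message, or None when the host passes validation."""
    if not _HOST_RE.fullmatch(host):
        return 'Proxy Host should not have special characters'
    return None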
Verifies host field length validation
|
def test_proxy_host_field_length_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
host_value = "a" * 4097
proxy.host.set_value(host_value)
self.assert_util(
proxy.save,
"Max host length is 4096",
left_args={'expect_error': True}
)
|
[
"def check_hash_len(hash):\n if len(hash) != 40 + 1:\n raise ProtocolError(\"Invalid hash len!\")",
"def test_check_field_length(self):\n form = EditCouponForm(data={\n 'headline': 'This headline is over twenty-five characters'})\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors['headline'][0], \n 'Please limit this field to 25 characters')",
"def _validate_length(self, string, length):\n assert len(string) == length, \\\n 'String %s length\\'s not %s' % (string, length,)",
"def validate_host(self):\n\n # Input Validation - Rock my regex ;-)\n re_hostname = re.compile(\"^[a-zA-Z0-9]+[a-zA-Z0-9-]*((([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6})?$\")\n re_ipaddr = re.compile(\"^((25[0-5]|2[0-4]\\d|[01]\\d\\d|\\d?\\d)\\.){3}(25[0-5]|2[0-4]\\d|[01]\\d\\d|\\d?\\d)$\")\n\n if self.server == None:\n end(UNKNOWN, \"You must supply a server hostname or ip address. \" \\\n + \"See --help for details\")\n\n if not re_hostname.match(self.server) and \\\n not re_ipaddr.match(self.server):\n end(UNKNOWN, \"Server given does not appear to be a valid \" \\\n + \"hostname or ip address\")",
"def is_valid(host_port):\n\n if len(host_port.split(\":\")) != 2:\n return False\n\n return True",
"def length(self):\n return len(self.password) >= 12",
"def is_length(message):\n\n if len(message) <= 25:\n return True\n else:\n return False",
"def _checkStringSize(self, lengthAsString):\n if len(lengthAsString) > self._maxLengthSize():\n raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH,))",
"def validate_ip_addresses(value: str) -> str:\n if len(value) > 10:\n return \"have length less than or equal to 10\"\n return \"\"",
"def test_maxlength(db_conn):\n assert has_max_length('abcd1234', 2)\n assert has_max_length('a', 2) is None",
"def validate_field_values_length(self, error_message, data):\n\n if len(data[\"email\"]) > 30:\n error_message.append(\"Email length should be less than or equal to 30\")\n if len(data[\"password\"]) > 300:\n error_message.append(\"Password length should be less than or equal to 300\")\n if str(data[\"pincode\"]).isdigit() == False or len(str(data[\"pincode\"])) != 6:\n error_message.append(\"Pincode length should be equal to 6 and should be numeric.\")\n if len(str(data[\"phone\"])) != 10 or str(data[\"phone\"]).isdigit() == False:\n error_message.append(\"Phone length should be equal to 10 and should be numeric.\")\n if \"address\" in data.keys():\n if len(data[\"address\"]) > 160:\n error_message.append(\"Address length should be less than or equal to 160\")\n if \"city\" in data.keys():\n if len(data[\"city\"]) > 50:\n error_message.append(\"City length should be less than or equal to 50\")\n if \"state\" in data.keys():\n if len(data[\"state\"]) > 50:\n error_message.append(\"State length should be less than or equal to 50\")\n if \"country\" in data.keys():\n if len(data[\"country\"]) > 50:\n error_message.append(\"Country length should be less than or equal to 50\")\n full_name = data[\"full_name\"].split()\n if len(full_name) >= 2:\n if len(full_name[0]) > 20:\n error_message.append(\"First Name length should be less than or equal to 20\")\n if len(full_name[1]) > 15 or len(full_name[1]) == 0:\n error_message.append(\"Last Name length should be less than or equal to 15 and should not be empty\")\n if len(full_name) == 1:\n error_message.append(\"Last Name length should be less than or equal to 15\")\n \n \n return error_message",
"def tweetswarm_string_validate(s):\n return s.__len__() < 140 and s.__len__() > 0",
"def test_registration_with_field_length_violation(self):\n self.add_control_lists()\n target_cl = db.session.query(ControlLists). \\\n filter(ControlLists.name == \"Ancien Français - École des Chartes\").first()\n\n # prepare form\n self.driver_find_element_by_id(\"new_corpus_link\").click()\n self.driver_find_element_by_id(\"corpusName\").send_keys(\"example\")\n self.driver_find_element_by_id(\"label_checkbox_reuse\").click()\n self.driver_find_element_by_id(\"control_list_select\").click()\n self.driver_find_element_by_id(\"cl_opt_\" + str(target_cl.id)).click()\n invalid = \"btOUZvzXARqNbnmvVIrcqjAbsRGIvZQsrhspGusZypNlUJSubtOztbiMiwipTpQJVTvSDZyIGCaONJ\"\n self.writeMultiline(\n self.driver_find_element_by_id(\"tokens\"),\n f\"form\\tlemma\\tPOS\\tmorph\\n{invalid}\\tseignor\\tNOMcom\\tNOMB.=p|GENRE=m|CAS=n\"\n )\n\n # submit and wait\n self.driver_find_element_by_id(\"submit\").click()\n self.driver.implicitly_wait(15)\n self.assertEqual(\n self.driver_find_elements_by_css_selector(\".alert.alert-danger\")[0].text.strip(),\n f\"ln. 2, column 'form': '{invalid}' is too long (maximum 64 characters)\"\n )",
"def ValidateStringLenth(value, max_length=_MAX_STRING_LENGTH):\n if isinstance(value, basestring):\n if len(value) <= max_length:\n return True\n return False",
"def requires_allowed_hosts_check(self) -> bool:\n return self.ab_internal_ql >= ALLOWED_HOST_THRESHOLD[\"ql\"]",
"def _get_error_bad_length(self):\n return self.__error_bad_length",
"def check_size(msg):\n\n if len(msg) > TWEET_SIZE:\n return False\n return True",
"def LengthCheck( self, sCheckName, sCheckVariable, iCheckLength ):\n\t\tif len( str( sCheckVariable ) ) > int( iCheckLength ):\n\t\t\tself.setError( ING_ERROR_PARAMETER, '%s too long' % sCheckName, IDEAL_PRV_GENERIC_ERROR )\n\t\t\treturn 'NotOk'\n\t\treturn 'ok'",
"def validate_domain_name(value: str) -> str:\n if len(value) > 256:\n return \"have length less than or equal to 256\"\n return \"\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
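A companion sketch for the 4096-character cap asserted above; likewise an assumption about the framework's rule, not its actual implementation.

MAX_HOST_LENGTH = 4096  # cap implied by the expected error message

def validate_proxy_host_length(host: str):
    # Mirror the UI error the test expects when the value exceeds the cap.
    if len(host) > MAX_HOST_LENGTH:
        return 'Max host length is %d' % MAX_HOST_LENGTH
    return None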
This test case checks the list of proxy types present in the drop-down
|
def test_proxy_list_proxy_types(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
self.assert_util(
proxy.type.list_of_values(),
["http", "socks4", "socks5"]
)
|
[
"def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.type.cancel_selected_value()\n self.assert_util(\n proxy.save,\n \"Proxy type can not be empty\",\n left_args={'expect_error': True}\n )",
"def test_tool_types_list(self):\n pass",
"def test_list_cloud_proxies(self):\n pass",
"def test_list_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type([1, 2, 3], source), \"int\")\n self.assertEqual(self.setting.detect_type([1, 2.3, 3], source), \"int\")\n self.assertEqual(self.setting.detect_type([1.0, 2.0, 3.0], source), \"float\")\n self.assertEqual(self.setting.detect_type([u\"u\", u\"v\", u\"w\"], source), \"unicode\")\n self.assertEqual(self.setting.detect_type([True, 2, 3], source), \"bool\")",
"def get_proxy_record_types(self):\n return # osid.type.TypeList",
"def test_proxy_backend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.save()\n self.assert_util(\n proxy.backend_conf_get.get_stanza(decrypt=True),\n {\n 'disabled': False,\n 'proxy_enabled': '1',\n 'proxy_port': '3285', \n 'proxy_rdns': '1',\n 'proxy_type': 'http',\n 'proxy_url': 'host',\n 'proxy_password': 'Password',\n 'proxy_username': 'Username'\n }\n )",
"def test_countries_list(self):\n pass",
"def test_proxy_frontend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n self.assert_util(proxy.save, True)",
"def test_opts_type():\n type_check = re.compile(r'oslo_config\\.cfg\\.(\\w+Opt)')\n for opt in opts:\n match = type_check.search(str(opt))\n assert match, str(\"{} is not recognized as a oslo_config.cfg.*\"\n \" object!\").format(opt)\n assert hasattr(cfg, match.group(1)), \\\n str(\"{} is not a subclass of oslo_config.cfg\").format(opt)",
"def get_proxy_condition_record_types(self):\n return # osid.type.TypeList",
"def test_get_cloud_proxy(self):\n pass",
"def test_list_registry_provider_types(self):\n pass",
"def test_get_types(self):\n\n self.assertEqual(self.filter_under_test.types,\n ['allpass', 'bandpass',\n 'bandstop', 'highpass',\n 'lowpass'])",
"def test_remove_valid_source_valid_type(self):\n name = 'valid_metadata'\n path = 'test/sample_proxysources/valid_metadata'\n os.environ['PROXY_PATH'] = path\n types = ValidMetadataSource.metadata.get('type')\n\n m = ProxySourceManager()\n\n for t in types:\n m.remove_source(source_name=name, proxy_type=t)\n self.assertEqual(len(m.proxies_per_type.get(t)), 0)",
"def test_form_logger_type_select(self):\n selected_type = \"biomimic_type\"\n selected_value = \"DummyBiomimicType\"\n with self.app.test_client() as client:\n with client.session_transaction() as sess:\n sess['query'] = self.record_type\n response = client.get('/_parse_data', \n query_string=dict(\n select_type=selected_type,\n select_value=selected_value))\n self.assertEqual(selected_type, request.args.get('select_type'))\n self.assertEqual(selected_value, request.args.get('select_value')) \n choices = self.db.fetch_distinct_countries_and_zones(self.record_type)\n country_list = choices[0][\"country\"]\n zone_list = choices[0][\"zone\"]\n for country in country_list:\n self.assertIn(self.stringToBytes(country), response.data)\n for zone in zone_list:\n self.assertIn(self.stringToBytes(zone), response.data)",
"def test_proxy_default_configs(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n self.assert_util(\n proxy.proxy_enable.is_checked,\n False\n )\n self.assert_util(\n proxy.dns_enable.is_checked,\n False\n )\n self.assert_util(\n proxy.type.get_value,\n \"http\"\n )\n self.assert_util(\n proxy.host.get_value,\n \"\"\n )\n self.assert_util(\n proxy.port.get_value,\n \"\"\n )\n self.assert_util(\n proxy.username.get_value,\n \"\"\n )\n self.assert_util(\n proxy.password.get_value,\n \"\"\n )",
"def test_getProxyMappings() -> json:\r\n\r\n # Action\r\n status, result = u.getProxyMappings()\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)",
"def test_create_cloud_proxy(self):\n pass",
"def test_remove_valid_source_invalid_type(self):\n name = 'valid_metadata'\n path = 'test/sample_proxysources/' + name\n os.environ['PROXY_PATH'] = path\n incorrect_type = 'invalid_proxy_type'\n correct_types = ValidMetadataSource.metadata.get('type')\n\n m = ProxySourceManager()\n m.remove_source(source_name=name, proxy_type=incorrect_type)\n for t in correct_types:\n self.assertEqual(len(m.proxies_per_type.get(t)), 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies whether proxy type is required and displays an error if left empty
|
def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
proxy.proxy_enable.check()
proxy.type.cancel_selected_value()
proxy.type.select("http")
proxy.dns_enable.check()
proxy.host.set_value("host")
proxy.port.set_value("3285")
proxy.username.set_value("Username")
proxy.password.set_value("Password")
proxy.type.cancel_selected_value()
self.assert_util(
proxy.save,
"Proxy type can not be empty",
left_args={'expect_error': True}
)
|
[
"def test_proxy_required_field_host(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\") \n self.assert_util(\n proxy.save,\n \"Proxy Host can not be empty\",\n left_args={'expect_error': True}\n )\n proxy.dns_enable.check()",
"def skip_ok_unless_can_make_proxy():\n if not can_make_proxy():\n raise osgunittest.OkSkipException('Required packages for creating VOMS proxies not installed')",
"def test_get_proxy_information_sans_proxy(self):\n self.configure_response()\n\n assert dump._get_proxy_information(self.response) is None",
"def test_proxy_frontend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n self.assert_util(proxy.save, True)",
"def test_proxy_backend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.save()\n self.assert_util(\n proxy.backend_conf_get.get_stanza(decrypt=True),\n {\n 'disabled': False,\n 'proxy_enabled': '1',\n 'proxy_port': '3285', \n 'proxy_rdns': '1',\n 'proxy_type': 'http',\n 'proxy_url': 'host',\n 'proxy_password': 'Password',\n 'proxy_username': 'Username'\n }\n )",
"def supports_proxy_record_type(self, proxy_record_type):\n return # boolean",
"def can_make_proxy():\n return core.dependency_is_installed(\"voms-clients-cpp\") or server_is_installed()",
"def test_proxy_list_proxy_types(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n self.assert_util(\n proxy.type.list_of_values(),\n [\"http\", \"socks4\", \"socks5\"]\n )",
"def test_remove_invalid_source_invalid_type(self):\n path = 'test/sample_proxysources/valid_metadata'\n os.environ['PROXY_PATH'] = path\n correct_types = ValidMetadataSource.metadata.get('type')\n\n m = ProxySourceManager()\n m.remove_source(source_name='not_the_correct_name',\n proxy_type='invalid_proxy_type')\n for t in correct_types:\n self.assertEqual(len(m.proxies_per_type.get(t)), 1)",
"def test_remove_valid_source_invalid_type(self):\n name = 'valid_metadata'\n path = 'test/sample_proxysources/' + name\n os.environ['PROXY_PATH'] = path\n incorrect_type = 'invalid_proxy_type'\n correct_types = ValidMetadataSource.metadata.get('type')\n\n m = ProxySourceManager()\n m.remove_source(source_name=name, proxy_type=incorrect_type)\n for t in correct_types:\n self.assertEqual(len(m.proxies_per_type.get(t)), 1)",
"def check_availability(self, proxy):\n if self.db.select_proxy(proxy).fetchone():\n return True\n return False",
"def __checkNoProxy(self, url):\n #FIXME assumes only 1 host\n no_proxy = os.environ.get(\"no_proxy\")\n if no_proxy is not None:\n no_proxies = no_proxy.split(\",\")\n for noproxy in no_proxies:\n noproxy = noproxy.strip()\n index = url.find(noproxy)\n if index > -1:\n # disable proxy settings for urllib\n proxy_support = urllib2.ProxyHandler({})\n opener = urllib2.build_opener(proxy_support)\n urllib2.install_opener(opener)\n if os.environ.has_key(\"http_proxy\"):\n print \"disabling proxy...\"\n del os.environ[\"http_proxy\"]\n break",
"def supports_proxy(self):\n return # boolean",
"def ip_is_proxy(self, ip):\n api_key = self.api_key\n self.update_progressbar('set request to is it proxy', 20)\n if self.parent:\n self.parent.check_point()\n try:\n r = requests.get(\n 'http://api.ip2proxy.com/?ip=' + ip + '&key=' + api_key + '&package=PX4&format=json')\n except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):\n raise NetworkError(' can not access api')\n if r.status_code == 200:\n result = json.loads(r.content.decode())\n self.update_progressbar('parsing result', 60)\n if self.parent:\n self.parent.check_point()\n if 'response' in result:\n temp = list()\n if result['response'] == 'OK':\n if result.items():\n # parsing result\n for k, v in result.items():\n if k in ['countryName', 'regionName', 'cityName', 'isp']:\n temp.append({self.ip_is_proxy_keys[k]: v, \"type\": 11})\n\n elif k in ['proxyType']:\n for each in v.split('/'):\n try:\n temp.append(\n {self.ip_is_proxy_keys[k]: self.usage_and_proxy_type[each],\n \"type\": 0})\n except KeyError:\n temp.append(\n {self.ip_is_proxy_keys[k]: v,\n \"type\": 0})\n\n elif k in ['isProxy']:\n temp.append({self.ip_is_proxy_keys[k]: v, \"type\": 0})\n else:\n pass\n final = {\"properties\": [], \"special_properties\": temp, \"results\": []}\n return final\n else:\n raise ResultNotFoundError('no result found for your request')\n else:\n if result['response'] == 'INSUFFICIENT CREDIT':\n raise InsufficientCredit(result['response'])\n elif result['response'] == 'INVALID ACCOUNT':\n raise WrongApiKeyError(result['response'])\n else:\n raise InternalModuleError(result['response'])\n else:\n raise NetworkError(r.status_code)\n else:\n if r.status_code == 404:\n raise ResultNotFoundError('no result found for your request')\n else:\n raise NetworkError('status_code ' + str(r.status_code))",
"def _check_required_url_properties(self):\n if not self.host:\n raise MissingProperty('host')\n if not self.command_path:\n raise MissingProperty('command_path')",
"def test_invalid_client_missing_person_address():\n party = copy.deepcopy(CLIENT_PARTY)\n del party['address']\n\n is_valid, errors = validate(party, 'clientParty', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid",
"async def test_validate_access_proxy(hass: HomeAssistant, provider) -> None:\n\n await async_setup_component(\n hass,\n \"http\",\n {\n \"http\": {\n CONF_TRUSTED_PROXIES: [\"192.168.128.0/31\", \"fd00::1\"],\n CONF_USE_X_FORWARDED_FOR: True,\n }\n },\n )\n provider.async_validate_access(ip_address(\"192.168.128.2\"))\n provider.async_validate_access(ip_address(\"fd00::2\"))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address(\"192.168.128.0\"))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address(\"192.168.128.1\"))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address(\"fd00::1\"))",
"def supports_proxy_condition_record_type(self, proxy_condition_record_type):\n return # boolean",
"def test_remove_valid_source_valid_type(self):\n name = 'valid_metadata'\n path = 'test/sample_proxysources/valid_metadata'\n os.environ['PROXY_PATH'] = path\n types = ValidMetadataSource.metadata.get('type')\n\n m = ProxySourceManager()\n\n for t in types:\n m.remove_source(source_name=name, proxy_type=t)\n self.assertEqual(len(m.proxies_per_type.get(t)), 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies the proxy is saved properly in frontend after saving it
|
def test_proxy_frontend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
proxy.proxy_enable.check()
proxy.type.cancel_selected_value()
proxy.type.select("http")
proxy.dns_enable.check()
proxy.host.set_value("host")
proxy.port.set_value("3285")
proxy.username.set_value("Username")
proxy.password.set_value("Password")
self.assert_util(proxy.save, True)
|
[
"def test_proxy_backend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.save()\n self.assert_util(\n proxy.backend_conf_get.get_stanza(decrypt=True),\n {\n 'disabled': False,\n 'proxy_enabled': '1',\n 'proxy_port': '3285', \n 'proxy_rdns': '1',\n 'proxy_type': 'http',\n 'proxy_url': 'host',\n 'proxy_password': 'Password',\n 'proxy_username': 'Username'\n }\n )",
"def test_update_cloud_proxy(self):\n pass",
"def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.type.cancel_selected_value()\n self.assert_util(\n proxy.save,\n \"Proxy type can not be empty\",\n left_args={'expect_error': True}\n )",
"def test_proxy_required_field_host(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\") \n self.assert_util(\n proxy.save,\n \"Proxy Host can not be empty\",\n left_args={'expect_error': True}\n )\n proxy.dns_enable.check()",
"def test_save_service(self):\n response = self.client.get(reverse(save_service))\n self.assertEqual(response.status_code, 302)",
"def test_correction_assignment_by_proxy(self):\n obj = ScrapedCandidateProxy.objects.get(name='WINSTON, ALMA MARIE')\n self.assertEqual(obj.get_party().name, 'REPUBLICAN')",
"def check_availability(self, proxy):\n if self.db.select_proxy(proxy).fetchone():\n return True\n return False",
"def post_validate(self):\n pass",
"def mark_good(self, proxy):\n self.good.add(proxy)\n # ProxyStatInc",
"def test_success_plan_duplication(self):\n\n self.plan_destrezas.objetivos.set(\n [self.objetivo_1, self.objetivo_2])\n self.plan_destrezas.objetivos_generales.set(\n [self.general_1, self.general_2])\n self.plan_destrezas.destrezas.set(\n [self.destreza_1, self.destreza_2])\n\n self.client.login(username='tester@tester.com',\n password='P455w0rd_testing',)\n url = reverse('plan_destrezas_duplicate',\n kwargs={'pk': self.plan_destrezas.pk})\n response = self.client.post(url, {}, follow=True)\n\n assert response.status_code == 200, 'Should return a success code'\n\n # Test success message\n messages = list(response.context.get('messages'))\n assert len(messages) == 1, 'Should return one message'\n assert messages[0].message == 'Plan de Destrezas duplicado '\\\n 'exitosamente.', 'Should return a success message'\n assert messages[0].tags == 'alert-success', \\\n 'Should return a success message'\n self.assertRedirects(response, reverse('plan_destrezas_list'))\n\n # Test plan de destreza\n plan_destrezas_new = PlanDestrezas.objects.last()\n\n assert self.plan_destrezas.pk != plan_destrezas_new.pk, \\\n 'Should be another instance'\n assert plan_destrezas_new.name == '{} (copia)'.format(\n self.plan_destrezas.name), 'Should have a new name'\n assert plan_destrezas_new.curso == self.plan_destrezas.curso, \\\n 'Should have the same property values'\n # Debe tener igual todos los campos many to many al original\n assert plan_destrezas_new.objetivos.first() == self.objetivo_1\n assert plan_destrezas_new.objetivos.last() == self.objetivo_2\n assert plan_destrezas_new.objetivos_generales.first() == self.general_1\n assert plan_destrezas_new.objetivos_generales.last() == self.general_2\n assert plan_destrezas_new.destrezas.first() == self.destreza_1\n assert plan_destrezas_new.destrezas.last() == self.destreza_2\n\n assert self.plan_destrezas.updated_at != \\\n plan_destrezas_new.updated_at, \\\n 'The updated_at field should not be copied'\n\n # Test second duplication\n\n request = RequestFactory().post('/', {})\n request.user = self.user\n request = add_middleware_to_request(request)\n\n PlanDestrezasDuplicateView.as_view()(\n request,\n pk=self.plan_destrezas.pk\n )\n\n plan_destrezas_new = PlanDestrezas.objects.last()\n assert plan_destrezas_new.name == '{} (copia 2)'.format(\n self.plan_destrezas.name)\n assert plan_destrezas_new.curso == self.plan_destrezas.curso\n\n # Test third duplication\n\n PlanDestrezasDuplicateView.as_view()(\n request,\n pk=self.plan_destrezas.pk\n )\n\n plan_destrezas_new = PlanDestrezas.objects.last()\n assert plan_destrezas_new.name == '{} (copia 3)'.format(\n self.plan_destrezas.name)\n assert plan_destrezas_new.curso == self.plan_destrezas.curso",
"def test_create_cloud_proxy(self):\n pass",
"def test_docu_vieware_save(self):\n pass",
"def set_proxy(proxy: str) -> bool:\n resp = get_config()\n if not resp:\n return False\n data = resp[\"result\"]\n path = resp[\"path\"]\n data[\"proxy\"] = proxy\n with open(path, \"w\") as file:\n json.dump(data, file, sort_keys=True, indent=\"\")\n return True",
"def test_save_form_url(self):\r\n resp = self.client['maintainer'].post(self.url_edit, {\r\n 'source_file_url': self.SFILE, 'auto_update': 'on',\r\n 'sourcefile': '', 'accept_translations': 'on',\r\n 'slug': self.resource.slug, 'name': self.resource.name, })\r\n self.assertEquals(self.resource.url_info.source_file_url, self.SFILE)\r\n resp = self.client['maintainer'].get(self.url_edit)\r\n self.assertContains(resp, self.SFILE)",
"def test_object_with_validation_errors_shall_not_be_stored(self):\n allowed_operation = AllowedOperation(allowed_area=MultiPolygon(), root_metadata=self.root_metadata)\n try:\n allowed_operation.save()\n print('saved')\n except ValidationError:\n pass\n finally:\n self.assertFalse(AllowedOperation.objects.all().exists(),\n msg='The AllowedOperation table shall be empty after trying to save empty geometry data.')",
"async def _before_save(self) -> None:",
"def test_geocode_on_save(self):\n self._select_geocoder()\n loc = Location(name=\"The Piton Foundation\",\n address=\"370 17th St\",\n address2=\"#5300\",\n city=\"Denver\",\n state=\"CO\",\n postcode=\"80202\")\n loc.save()\n self.assertApxEqual(loc.lat, 39.7438167)\n self.assertApxEqual(loc.lng, -104.9884953)\n self.assertApxEqual(loc.point.x, -104.9884953)\n self.assertApxEqual(loc.point.y, 39.7438167)",
"def save_proxies():\n global output, pl\n d = os.path.dirname(output)\n if not os.path.exists(d):\n print 'Directory \"' + d + '\" not found. Creating.'\n os.makedirs(d)\n\n with open(output, 'w+') as outfile:\n outfile.write('\\n'.join(pl))\n print 'Done! ' + str(len(pl)) + ' confirmed proxies saved to file \"' + output + '\".'",
"def test_save_substitute(self):\n\n # count substitutes number to be able to compare later to new substitutes number\n old_substitutes = Substitute.objects.count()\n product_id = self.substitute.id\n # get response\n self.client.get(reverse('open_food_facts:save_substitute', args=(product_id, )))\n new_substitutes = Substitute.objects.count()\n # if there's one more substitute in the database after calling the save_substitute view,\n # it means the substitute was saved\n self.assertEqual(new_substitutes, old_substitutes + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies the proxy is saved properly in backend after saving it
|
def test_proxy_backend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):
proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)
proxy.proxy_enable.check()
proxy.type.cancel_selected_value()
proxy.type.select("http")
proxy.dns_enable.check()
proxy.host.set_value("host")
proxy.port.set_value("3285")
proxy.username.set_value("Username")
proxy.password.set_value("Password")
proxy.save()
self.assert_util(
proxy.backend_conf_get.get_stanza(decrypt=True),
{
'disabled': False,
'proxy_enabled': '1',
'proxy_port': '3285',
'proxy_rdns': '1',
'proxy_type': 'http',
'proxy_url': 'host',
'proxy_password': 'Password',
'proxy_username': 'Username'
}
)
|
[
"def test_proxy_frontend_validation(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n self.assert_util(proxy.save, True)",
"def test_update_cloud_proxy(self):\n pass",
"def test_proxy_required_field_proxy_type(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.host.set_value(\"host\")\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\")\n proxy.type.cancel_selected_value()\n self.assert_util(\n proxy.save,\n \"Proxy type can not be empty\",\n left_args={'expect_error': True}\n )",
"def test_proxy_required_field_host(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n proxy = Proxy(TA_NAME, TA_PROXY_URL, TA_CONF, ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n proxy.proxy_enable.check()\n proxy.type.cancel_selected_value()\n proxy.type.select(\"http\")\n proxy.dns_enable.check()\n proxy.port.set_value(\"3285\")\n proxy.username.set_value(\"Username\")\n proxy.password.set_value(\"Password\") \n self.assert_util(\n proxy.save,\n \"Proxy Host can not be empty\",\n left_args={'expect_error': True}\n )\n proxy.dns_enable.check()",
"def test_save_service(self):\n response = self.client.get(reverse(save_service))\n self.assertEqual(response.status_code, 302)",
"def test_correction_assignment_by_proxy(self):\n obj = ScrapedCandidateProxy.objects.get(name='WINSTON, ALMA MARIE')\n self.assertEqual(obj.get_party().name, 'REPUBLICAN')",
"def check_availability(self, proxy):\n if self.db.select_proxy(proxy).fetchone():\n return True\n return False",
"def post_validate(self):\n pass",
"def mark_good(self, proxy):\n self.good.add(proxy)\n # ProxyStatInc",
"def test_success_plan_duplication(self):\n\n self.plan_destrezas.objetivos.set(\n [self.objetivo_1, self.objetivo_2])\n self.plan_destrezas.objetivos_generales.set(\n [self.general_1, self.general_2])\n self.plan_destrezas.destrezas.set(\n [self.destreza_1, self.destreza_2])\n\n self.client.login(username='tester@tester.com',\n password='P455w0rd_testing',)\n url = reverse('plan_destrezas_duplicate',\n kwargs={'pk': self.plan_destrezas.pk})\n response = self.client.post(url, {}, follow=True)\n\n assert response.status_code == 200, 'Should return a success code'\n\n # Test success message\n messages = list(response.context.get('messages'))\n assert len(messages) == 1, 'Should return one message'\n assert messages[0].message == 'Plan de Destrezas duplicado '\\\n 'exitosamente.', 'Should return a success message'\n assert messages[0].tags == 'alert-success', \\\n 'Should return a success message'\n self.assertRedirects(response, reverse('plan_destrezas_list'))\n\n # Test plan de destreza\n plan_destrezas_new = PlanDestrezas.objects.last()\n\n assert self.plan_destrezas.pk != plan_destrezas_new.pk, \\\n 'Should be another instance'\n assert plan_destrezas_new.name == '{} (copia)'.format(\n self.plan_destrezas.name), 'Should have a new name'\n assert plan_destrezas_new.curso == self.plan_destrezas.curso, \\\n 'Should have the same property values'\n # Debe tener igual todos los campos many to many al original\n assert plan_destrezas_new.objetivos.first() == self.objetivo_1\n assert plan_destrezas_new.objetivos.last() == self.objetivo_2\n assert plan_destrezas_new.objetivos_generales.first() == self.general_1\n assert plan_destrezas_new.objetivos_generales.last() == self.general_2\n assert plan_destrezas_new.destrezas.first() == self.destreza_1\n assert plan_destrezas_new.destrezas.last() == self.destreza_2\n\n assert self.plan_destrezas.updated_at != \\\n plan_destrezas_new.updated_at, \\\n 'The updated_at field should not be copied'\n\n # Test second duplication\n\n request = RequestFactory().post('/', {})\n request.user = self.user\n request = add_middleware_to_request(request)\n\n PlanDestrezasDuplicateView.as_view()(\n request,\n pk=self.plan_destrezas.pk\n )\n\n plan_destrezas_new = PlanDestrezas.objects.last()\n assert plan_destrezas_new.name == '{} (copia 2)'.format(\n self.plan_destrezas.name)\n assert plan_destrezas_new.curso == self.plan_destrezas.curso\n\n # Test third duplication\n\n PlanDestrezasDuplicateView.as_view()(\n request,\n pk=self.plan_destrezas.pk\n )\n\n plan_destrezas_new = PlanDestrezas.objects.last()\n assert plan_destrezas_new.name == '{} (copia 3)'.format(\n self.plan_destrezas.name)\n assert plan_destrezas_new.curso == self.plan_destrezas.curso",
"def test_create_cloud_proxy(self):\n pass",
"def test_docu_vieware_save(self):\n pass",
"def set_proxy(proxy: str) -> bool:\n resp = get_config()\n if not resp:\n return False\n data = resp[\"result\"]\n path = resp[\"path\"]\n data[\"proxy\"] = proxy\n with open(path, \"w\") as file:\n json.dump(data, file, sort_keys=True, indent=\"\")\n return True",
"def test_save_form_url(self):\r\n resp = self.client['maintainer'].post(self.url_edit, {\r\n 'source_file_url': self.SFILE, 'auto_update': 'on',\r\n 'sourcefile': '', 'accept_translations': 'on',\r\n 'slug': self.resource.slug, 'name': self.resource.name, })\r\n self.assertEquals(self.resource.url_info.source_file_url, self.SFILE)\r\n resp = self.client['maintainer'].get(self.url_edit)\r\n self.assertContains(resp, self.SFILE)",
"def test_object_with_validation_errors_shall_not_be_stored(self):\n allowed_operation = AllowedOperation(allowed_area=MultiPolygon(), root_metadata=self.root_metadata)\n try:\n allowed_operation.save()\n print('saved')\n except ValidationError:\n pass\n finally:\n self.assertFalse(AllowedOperation.objects.all().exists(),\n msg='The AllowedOperation table shall be empty after trying to save empty geometry data.')",
"async def _before_save(self) -> None:",
"def test_geocode_on_save(self):\n self._select_geocoder()\n loc = Location(name=\"The Piton Foundation\",\n address=\"370 17th St\",\n address2=\"#5300\",\n city=\"Denver\",\n state=\"CO\",\n postcode=\"80202\")\n loc.save()\n self.assertApxEqual(loc.lat, 39.7438167)\n self.assertApxEqual(loc.lng, -104.9884953)\n self.assertApxEqual(loc.point.x, -104.9884953)\n self.assertApxEqual(loc.point.y, 39.7438167)",
"def save_proxies():\n global output, pl\n d = os.path.dirname(output)\n if not os.path.exists(d):\n print 'Directory \"' + d + '\" not found. Creating.'\n os.makedirs(d)\n\n with open(output, 'w+') as outfile:\n outfile.write('\\n'.join(pl))\n print 'Done! ' + str(len(pl)) + ' confirmed proxies saved to file \"' + output + '\".'",
"def test_save_substitute(self):\n\n # count substitutes number to be able to compare later to new substitutes number\n old_substitutes = Substitute.objects.count()\n product_id = self.substitute.id\n # get response\n self.client.get(reverse('open_food_facts:save_substitute', args=(product_id, )))\n new_substitutes = Substitute.objects.count()\n # if there's one more substitute in the database after calling the save_substitute view,\n # it means the substitute was saved\n self.assertEqual(new_substitutes, old_substitutes + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Configure Matplotlib to draw figures for my thesis. The `size` argument can be a tuple (width, height) or a string indicating whether the figure takes the `full` text width or a `fraction` of it (in which case the `fraction` can be specified). In either case, the height is determined automatically using the golden ratio.
|
def _set_rcparams(size, fraction=0.85, subplots=(1, 1)):
if type(size) is tuple:
figsize = size
else:
# Set height and width.
width = TEXT_WIDTH
        # Scale height according to the golden ratio; per-subplot scaling is
        # currently disabled.
        height = width / GOLDEN_RATIO # * (subplots[0] / subplots[1])
if size == 'full' or size is None:
figsize = (width, height)
elif size == 'fraction':
figsize = (width * fraction, height * fraction)
else:
raise ValueError(f'Size "{size}" invalid')
return {
"figure.autolayout": True, # Makes sure the figure is neat & tight.
"figure.figsize": figsize,
"figure.dpi": 150, # Displays figures nicely in notebooks.
"axes.linewidth": 0.5, # Matplotlib's current default is 0.8.
"lines.linewidth": 1.0,
"lines.markersize": 4,
"xtick.major.width": 0.5,
"xtick.minor.width": 0.5,
"xtick.labelsize": 9,
"ytick.major.width": 0.5,
"ytick.minor.width": 0.5,
"ytick.labelsize": 9,
"text.usetex": True, # Use LaTeX to write all text
"font.family": "serif", # Use serif rather than sans-serif
"font.serif": "lmodern",
"font.size": 11,
"axes.titlesize": 11, # LaTeX default is 10pt font.
"axes.labelsize": 9, # LaTeX default is 10pt font.
"legend.fontsize": 9, # Make the legend/label fonts a little smaller
"legend.frameon": True, # Remove the black frame around the legend
"pgf.texsystem": "xelatex", # Use Xelatex which is TTF font aware
"pgf.rcfonts": False, # Use pgf.preamble, ignore Matplotlib RC
# The thesis's font is Latin Modern, but I cannot make it work properly
# with Matplotlib (e.g., with `\textsc`). Using the default Computer
# Modern is good enough, as the visual difference is imperceptible.
"pgf.preamble": ''.join(
[
# r'\usepackage{fontspec}',
# r'\usepackage{unicode-math}',
# r'\usepackage{lmodern}',
# r'\setmainfont{Latin Modern Math}',
# r'\setmathfont{Latin Modern Math}',
]
),
}
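# A minimal usage sketch, not part of the original helper: TEXT_WIDTH and
# GOLDEN_RATIO are module-level constants assumed by _set_rcparams, and the
# values below are illustrative only. The returned dict is applied globally
# before any figure is created.
import matplotlib.pyplot as plt

TEXT_WIDTH = 5.9  # assumed thesis text width in inches
GOLDEN_RATIO = (1 + 5 ** 0.5) / 2

plt.rcParams.update(_set_rcparams('fraction', fraction=0.85))
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
fig.savefig('example.pgf')  # requires a working LaTeX install (text.usetex=True)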
|
[
"def set_figsize(self, sizes):\n self.__figsize = sizes\n return self",
"def figsize(scale):\n fig_width_pt = 469.755 # Get this from LaTeX using \\the\\textwidth\n inches_per_pt = 1.0 / 72.27 # Convert pt to inch\n golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)\n fig_width = fig_width_pt * inches_per_pt * scale # width in inches\n fig_height = fig_width * golden_mean # height in inches\n fig_size = [fig_width, fig_height]\n return fig_size\n\n\n # figure format settings",
"def set_figure_style(s, fontsize=16):\n if s.lower() in ['paper', 'p', 'publication']:\n mpl.rcParams['font.size'] = 9.0\n elif s.lower() in ['presentation', 'talk', 't']:\n mpl.rcParams['font.size'] = fontsize",
"def _setup_figure(self):\n if self.fig_size:\n size = self.fig_size\n else:\n size = 6, 1.0 + 0.42 * len(self.designs) * self.rows\n\n self.fig = plt.figure(figsize=size)",
"def figsize(size=None, size_y=None): # Was imsize(), but I think this is better\n \n # If two arguments are passed, merge them into a tuple\n \n if (size_y):\n size = (size, size_y)\n \n # If one (or two) argument is passed, do the action\n \n if size:\n size_current = matplotlib.rcParams['figure.figsize'] # Grab old value, if set\n \n if hasattr(hbt.figsize, 'saved'):\n hbt.figsize.saved.append(size_current) # Add to list of saved values\n else:\n hbt.figsize.saved = [size_current] # If there is no list of saved values, init it.\n \n matplotlib.rcParams['figure.figsize'] = size # Set the new value \n \n # if zero arguments are passed, then restore old value:\n \n else:\n hbt.figsize_restore()",
"def set_with_subplot_size(self, size_inch=3.5, size_mm=None, size_ratio=None):\n if size_mm:\n size_inch = size_mm * 0.0393700787\n self.subplot_size_inch = size_inch\n self.subplot_size_ratio = size_ratio",
"def set_style():\n\n fig_width_1col = 3.4 # figure width in inch\n fig_width_2col = 7.0 # figure width in inch\n fig_aspect_ratio = 0.66 # width to height aspect ratio\n font_size = 8 # font size in pt\n font_size_small = 6 # font size in pt\n font_scheme = 'type2' # options: \n # None - default matplotlib fonts\n # 'type1' - text: Helvetica, math: Computer modern\n # 'type2' - text: Helvetica, math: Helvetica \n\n mpl.rcParams['figure.figsize'] = fig_width_1col, fig_aspect_ratio*fig_width_1col\n mpl.rcParams['axes.labelsize'] = font_size\n mpl.rcParams['font.size'] = font_size\n mpl.rcParams['legend.fontsize'] = font_size_small\n mpl.rcParams['xtick.labelsize'] = font_size\n mpl.rcParams['ytick.labelsize'] = font_size\n mpl.rcParams['xtick.direction'] = 'out'\n mpl.rcParams['ytick.direction'] = 'out'\n mpl.rcParams['lines.linewidth'] = 1.0\n mpl.rcParams['axes.linewidth'] = 0.5\n\n if font_scheme == 'type1':\n mpl.rcParams['text.usetex'] = True\n mpl.rcParams['font.family'] = 'sans-serif'\n mpl.rcParams['font.sans-serif'] = 'Helvetica'\n mpl.rcParams['mathtext.fontset'] = 'cm'\n\n if font_scheme == 'type2':\n mpl.rcParams['text.usetex'] = True\n mpl.rcParams['text.latex.preamble'] = [\n r'\\usepackage{siunitx}',\n r'\\sisetup{detect-all}',\n r'\\usepackage{helvet}',\n r'\\usepackage{sansmath}',\n r'\\sansmath'\n ]",
"def figure_kwargs(ftype=None, subplot=(None,None), ss=None,\n legendscale=None):\n if subplot == (None,None): subplot=1,1 #Nhigh,Nwide\n if ftype is None:\n figsize=None #default is 8,6\n titlesize=None #default is 'large' (=14.5 pt)\n legendsize=None #default is 'large' (=14.5 pt)\n labelsize=None #default is 'medium' (=12 pt)\n ticksize=None #default is 'medium' (=12 pt)\n subplots_adjust={\n #defaults\n 'left':0.12, 'bottom':0.1, 'right':0.9,\n 'top':0.9, 'wspace':0.2, 'hspace':0.2}\n# 'left':None, 'bottom':None, 'right':None,\n# 'top':None, 'wspace':None, 'hspace':None}\n else:\n #For 8\" wide figures scaled to 3.25\" (maximum PoP width, e.g.),\n #fontsize of 20 pt gives a scaled fontsize >= 8 pt,\n #but fontsize of 29 pt is the least such that sub/superscripts\n #also give scaled fontsize >= 8 pt.\n #This code does not allow for sub/superscripts,\n #and simply uses titlesize\n #scaled to 24 to keep about the same proportions\n #to labelsize and ticksize as in the default case,\n #and uses legendsize\n #scaled relatively down compared to the default case\n #to keep the legend small compared to the plotted data.\n #Note mathtext (used with values like r'$latex$')\n #does not seem to look as big as the fontsize value,\n #so avoid using mathtext.\n \n titlesize=24\n if legendscale: legendsize=24\n else: legendsize=20\n labelsize=20\n ticksize=20\n if ftype == 'landscape': figsize=8,6\n# #good values for landscape\n# subplots_adjust={\n# 'left': 0.12, 'bottom': 0.12, 'right':0.94,\n# 'top':0.92, 'wspace':0.45, 'hspace':0.65}\n elif ftype == 'square': figsize=8,8\n elif ftype == 'portrait': figsize=8,10.6666\n else: figsize=8,8\n Width=figsize[0] #actual sizes in inches\n Height=figsize[1]\n #Original values\n# Dwidth=1.205 #actual width in inches needed for fontsizes\n# Dheight=1.177 #actual height in inches needed for fontsizes\n #Convenient values\n D=1.125\n Dwidth=D #actual width in inches needed for fontsizes\n Dheight=D #actual height in inches needed for fontsizes\n #These next two fractions of whole page\n #don't change for width always = 8\".\n left=0.12\n right=0.96\n #These next two fractions of whole page\n #must get bigger (i.e. further from 0 or 1, respectively)\n #as the height of page decreases, and vice versa;\n #i.e. they vary inversely.\n bottom=0.12*6./Height\n top=1.-0.08*6./Height\n #These next two fractions of a single subplot size\n #vary in a complicated way with the other parameters\n #to keep a constant actual spacing available for text.\n Nhigh=subplot[0]\n Nwide=subplot[1]\n wspace=Dwidth*Nwide/(Width*(right-left)-Dwidth*(Nwide-1))\n hspace=Dheight*Nhigh/(Height*(top-bottom)-Dheight*(Nhigh-1))\n subplots_adjust={\n 'left':left, 'bottom':bottom, 'right':right,\n 'top':top, 'wspace':wspace, 'hspace':hspace}\n return {\n 'figsize':figsize, 'titlesize':titlesize,\n 'legendsize':legendsize, 'labelsize':labelsize,\n 'ticksize':ticksize, 'subplots_adjust':subplots_adjust}",
"def newfig(scale=1.0, ratio=0):\n\n #width in x*\\textwidth scale (0,1]\n fig = plt.figure(figsize=figsize(scale, ratio))\n ax = fig.add_subplot(111)\n return fig, ax",
"def fontsize(size=None):\n \n font = {'family' : 'sans-serif',\n 'weight' : 'normal',\n 'size' : size}\n\n size_current = matplotlib.rcParams['font.size']\n \n if size:\n if hasattr(hbt.fontsize, 'saved'):\n hbt.fontsize.saved.append(size_current) # Add to list of saved values\n else:\n hbt.fontsize.saved = [size_current] # If there is no list of saved values, init it.\n \n # Now set the actual size\n \n matplotlib.rc('font', **font)\n \n else:\n hbt.fontsize_restore()",
"def _setup_synthesis_fig(self, fig, axes_idx, figsize,\n plot_synthesized_image=True, plot_loss=True,\n plot_representation_error=True,\n plot_image_hist=False, plot_rep_comparison=False,\n plot_signal_comparison=False,\n synthesized_image_width=1, loss_width=1,\n representation_error_width=1, image_hist_width=1,\n rep_comparison_width=1, signal_comparison_width=1):\n n_subplots = 0\n axes_idx = axes_idx.copy()\n width_ratios = []\n if plot_synthesized_image:\n n_subplots += 1\n width_ratios.append(synthesized_image_width)\n if 'image' not in axes_idx.keys():\n axes_idx['image'] = _find_min_int(axes_idx.values())\n if plot_loss:\n n_subplots += 1\n width_ratios.append(loss_width)\n if 'loss' not in axes_idx.keys():\n axes_idx['loss'] = _find_min_int(axes_idx.values())\n if plot_representation_error:\n n_subplots += 1\n width_ratios.append(representation_error_width)\n if 'rep_error' not in axes_idx.keys():\n axes_idx['rep_error'] = _find_min_int(axes_idx.values())\n if plot_image_hist:\n n_subplots += 1\n width_ratios.append(image_hist_width)\n if 'hist' not in axes_idx.keys():\n axes_idx['hist'] = _find_min_int(axes_idx.values())\n if plot_rep_comparison:\n n_subplots += 1\n width_ratios.append(rep_comparison_width)\n if 'rep_comp' not in axes_idx.keys():\n axes_idx['rep_comp'] = _find_min_int(axes_idx.values())\n if plot_signal_comparison:\n n_subplots += 1\n width_ratios.append(signal_comparison_width)\n if 'signal_comp' not in axes_idx.keys():\n axes_idx['signal_comp'] = _find_min_int(axes_idx.values())\n if fig is None:\n width_ratios = np.array(width_ratios)\n if figsize is None:\n # we want (5, 5) for each subplot, with a bit of room between\n # each subplot\n figsize = ((width_ratios*5).sum() + width_ratios.sum()-1, 5)\n width_ratios = width_ratios / width_ratios.sum()\n fig, axes = plt.subplots(1, n_subplots, figsize=figsize,\n gridspec_kw={'width_ratios': width_ratios})\n if n_subplots == 1:\n axes = [axes]\n else:\n axes = fig.axes\n return fig, axes, axes_idx",
"def set_size(self, size):\n self.height = int(size)\n self.width = int(size) * 2",
"def auto_figure_size(shape, disp_cbar=False, ratio=1.0):\n length, width = shape\n plot_shape = [width*1.25, length]\n if not disp_cbar:\n plot_shape = [width, length]\n fig_scale = min(min_figsize_single/min(plot_shape),\n max_figsize_single/max(plot_shape),\n max_figsize_height/plot_shape[1])\n fig_size = [i*fig_scale*ratio for i in plot_shape]\n return fig_size",
"def get_fig_size(ratio=None, scale=None):\n ratio = 4 / 3.0 if ratio is None else ratio\n scale = 1.0 if scale is None else scale\n height = 5\n width = height * ratio\n return (width * scale, height * scale)",
"def test_plot_text_fontsize():\n fig = plt.figure(figsize=(3, 3))\n ax = plt.subplot(1, 1, 1)\n\n # testing data\n x = np.array([1])\n y = np.array([2])\n\n # Make the plot\n sp = StationPlot(ax, x, y, fontsize=36)\n sp.plot_text('NW', ['72'], fontsize=24)\n sp.plot_text('SW', ['60'], fontsize=4)\n\n sp.ax.set_xlim(0, 3)\n sp.ax.set_ylim(0, 3)\n\n return fig",
"def scale_plot_size(factor=1.5):\n import matplotlib as mpl\n\n default_dpi = mpl.rcParamsDefault[\"figure.dpi\"]\n mpl.rcParams[\"figure.dpi\"] = default_dpi * factor",
"def update_default_figsize(self, fig_width_pt):\n self.fig_width_pt = fig_width_pt\n mpl.rcParams.update({\"figure.figsize\": self.figsize()})",
"def test_plot_symbol_fontsize():\n fig = plt.figure(figsize=(3, 3))\n ax = plt.subplot(1, 1, 1)\n\n sp = StationPlot(ax, [0], [0], fontsize=8, spacing=32)\n sp.plot_symbol('E', [92], current_weather)\n sp.plot_symbol('W', [96], current_weather, fontsize=100)\n\n return fig",
"def resize_fig(self):\n class Event(): # pylint: disable=too-few-public-methods\n \"\"\" Event class that needs to be passed to plotcanvas.resize \"\"\"\n pass\n Event.width = self.winfo_width()\n Event.height = self.winfo_height()\n self.plotcanvas.resize(Event) # pylint: disable=no-value-for-parameter"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns number of wires in the circuit.
|
def num_wires(self) -> int:
return self._num_wires
|
[
"def getNeuronCount(self):\r\n return self.network.neuronCount",
"def wiimote_TotalConnected():\n return _wiimote.wiimote_TotalConnected()",
"def number_of_nodes(self):\n\t\treturn number_of_nodes(self.network)",
"def getNumConnections(self) -> \"int\":\n return _coin.SoField_getNumConnections(self)",
"def pumpkinCount(self):\n pCount = 0\n for space in self.board[9]:\n if space == 6:\n pCount += 1\n return pCount",
"def GetNumberOfChannels(self):\n\t\treturn len(self.__analog_routes)",
"def _get_count(self) -> \"size_t\" :\n return _core.Workspaces__get_count(self)",
"def get_device_count(self) -> int:\n return native.CorsairGetDeviceCount()",
"def n_slots(self) -> int:\n return self._w",
"def numeroElements(self):\n count=0\n for c in self._components:\n count+=1\n return count",
"def getNumConnections(self) -> \"int\":\n return _coin.SoEngineOutput_getNumConnections(self)",
"def get_num_analogs(self):\n return self.shape[1]",
"def number_of_drivers(self):\n return self.number_of_nodes()",
"def get_count(self):\n return self.hand.compute_bj_count()",
"def get_num_condos(self):\n return self._num_condos",
"def count() -> int:\n return len(self._devices)",
"def working_nr_resources(self):\n return len(self.available_resources) + len(self.busy_resources) + len(self.reserved_resources)",
"def num_bits(self):\n raise NotImplementedError",
"def TotalConnected():\n return _wiimote.wiimote_TotalConnected()",
"def numPixels(self):\n return ws.ws2811_channel_t_count_get(self._channel)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests for various values passed to convert_integer_to_roman_numerals.
|
def test_convert_integer_to_roman_numerals(self):
def assert_convert_integer_to_roman_numerals(integer, roman):
self.assertEqual(
views.convert_integer_to_roman_numerals(integer), roman
)
assert_convert_integer_to_roman_numerals(6, 'VI')
assert_convert_integer_to_roman_numerals(9, 'IX')
assert_convert_integer_to_roman_numerals(18, 'XVIII')
assert_convert_integer_to_roman_numerals(19, 'XIX')
assert_convert_integer_to_roman_numerals(38, 'XXXVIII')
assert_convert_integer_to_roman_numerals(39, 'XXXIX')
assert_convert_integer_to_roman_numerals(40, 'XL')
assert_convert_integer_to_roman_numerals(98, 'XCVIII')
assert_convert_integer_to_roman_numerals(388, 'CCCLXXXVIII')
assert_convert_integer_to_roman_numerals(499, 'CDXCIX')
assert_convert_integer_to_roman_numerals(867, 'DCCCLXVII')
assert_convert_integer_to_roman_numerals(1998, 'MCMXCVIII')
assert_convert_integer_to_roman_numerals(3999, 'MMMCMXCIX')
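# A minimal sketch (an assumption, not the actual implementation under test) of
# how views.convert_integer_to_roman_numerals could work: greedy subtraction
# over descending value/numeral pairs, which reproduces every expected value
# asserted above (valid for 1..3999).
def convert_integer_to_roman_numerals(integer):
    pairs = [
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    ]
    roman = ''
    for value, numeral in pairs:
        while integer >= value:  # append the numeral while its value still fits
            roman += numeral
            integer -= value
    return roman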
|
[
"def test_conversion_of_one(self):\n r = RomanNumeral()\n\n self.assertEqual(r.convert(1), 'I')\n self.assertEqual(r.convert(2), 'II')\n self.assertEqual(r.convert(3), 'III')\n self.assertEqual(r.convert(4), 'IV')\n self.assertEqual(r.convert(5), 'V')\n self.assertEqual(r.convert(9), 'IX')\n self.assertEqual(r.convert(10), 'X')",
"def test_valid_roman_numerals_set(self):\n data_provider = ['V', 'V', 'X', 'L', 'C', 'D', 'M', 'XL']\n for test_number in range(data_provider.__len__()):\n with self.subTest(i=test_number):\n roman_numerals_validator = RomanNumeralsValidator()\n self.assertTrue(roman_numerals_validator.validate(data_provider.pop()), True)",
"def testOct2Int(self):\n\n self.assertEqual(oct2Int(0o12), 10)",
"def roman_to_int(n):\n if isinstance(n, str):\n raise TypeError(\"expected string, got %s\" % type(n))\n n = n.upper()\n i = result = 0\n for integer, numeral in numeral_map:\n while n[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n if int_to_roman(result) == n:\n return result\n else:\n raise ValueError('input is not a valid roman numeral: %s' % input)",
"def testInt(self):\n idx = self.d.GetHeaderNames().index('Int')\n \n query = 'Int == 1'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('1', result[0][idx])\n \n query = 'Int < 1'\n result, ind = self.d.RunQuery(query)\n self.assertEqual('-5', result[0][idx])\n \n query = 'Int > 1'\n result, ind = self.d.RunQuery(query)\n ints = []\n for i in range(len(result)):\n ints.append(result[i][idx])\n self.assertEqual(['2','2','4','6','6'], ints)",
"def test_octal_converter(self):\n self._test_converter(converter.baseOct)",
"def test_numbers(self):\n for numbers, text, postfix in (\n ([1, 2, None, 34], \"NUM 1 2 34\", \"\"),\n ([10, 20, None, 340], \"NUM 1 2 34\", \"0\"),\n ):\n for i, number in enumerate(gfs._numbers(text, postfix=postfix)):\n num = numbers[i]\n if num is None:\n self.assertIsNone(number)\n else:\n repr = str(num)\n if postfix:\n repr = repr[: -len(postfix)]\n self.assert_number(number, repr, num)",
"def testBlank(self):\n self.assertRaises(roman_noRe.InvalidRomanNumeralError,roman_noRe.fromRoman,\"\")",
"def testInt2Oct(self):\n\n self.assertEqual(int2Oct(10), '0o12')",
"def test_integers(self):\n self.assertTrue(type(sumOfNumbers(1,2)) is int)",
"def test_int() -> None:\n assert IntLiteral(0).mask == 0\n assert IntLiteral(1).mask == 1\n assert IntLiteral(123).mask == 123\n assert IntLiteral(-1).mask == -1\n assert IntLiteral(-9).mask == -9",
"def test_string_to_integer(self):\n \n # Test something which is meant to throw an error\n result = app.string_to_integer(\"5.0\")\n assert result == \"Error.\"\n \n # Test something which is meant to work successfully\n result = app.string_to_integer(\"900\")\n assert result == 900",
"def test_invalid_numeral(self):\n numeral = \"o\"\n response = validate(numeral)\n self.assertFalse(response['statusCode'] == 200, numeral + \" should not be valid numeral\")\n\n numeral = \"abc\"\n response = validate(numeral)\n self.assertFalse(response['statusCode'] == 200, numeral + \" should not be valid numeral\")",
"def int_to_roman(input):\n\n if not isinstance(input, int):\n raise TypeError(\"expected integer, got {}\".format(type(input)))\n if not 0 < input < 4000:\n raise ValueError(\"Argument must be between 1 and 3999\")\n ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)\n nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV',\n 'I')\n result = \"\"\n for i in range(len(ints)):\n count = int(input // ints[i])\n result += nums[i] * count\n input -= ints[i] * count\n\n return result",
"def testDataIsInteger(self):\n registry_value = fake.FakeWinRegistryValue(\n u'MRUListEx', data_type=definitions.REG_BINARY)\n\n self.assertFalse(registry_value.DataIsInteger())\n\n registry_value = fake.FakeWinRegistryValue(\n u'Count', data_type=definitions.REG_DWORD)\n\n self.assertTrue(registry_value.DataIsInteger())",
"def test_numeros_negativos_aleatorios(self):\n numero_por_extenso = NumeroPorExtenso()\n assert numero_por_extenso.get(-1000) == 'menos mil'\n assert numero_por_extenso.get(-1001) == 'menos mil e um'\n assert numero_por_extenso.get(-1012) == 'menos mil e doze'\n assert numero_por_extenso.get(-1020) == 'menos mil e vinte'\n assert numero_por_extenso.get(-1022) == 'menos mil e vinte e dois'\n\n assert numero_por_extenso.get(-1100) == 'menos mil e cem'\n assert numero_por_extenso.get(-1101) == 'menos mil cento e um'\n assert numero_por_extenso.get(-1199) == 'menos mil cento e noventa e nove'\n\n assert numero_por_extenso.get(-11000) == 'menos onze mil'\n assert numero_por_extenso.get(-25123) == 'menos vinte e cinco mil cento e vinte e três'\n\n assert numero_por_extenso.get(-99999) == 'menos noventa e nove mil novecentos e noventa e nove'",
"def roman2int(n):\n n = str(n).upper()\n result = 0\n i = 0\n for integer, numeral in _numeral_map:\n while n[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n\n return result",
"def roman_to_int(self, roman_string):\n return super(GalaxyDenominations, self).roman_to_int(roman_string)",
"def to_roman( a_num ):\n result = \"\"\n a_num = int( a_num )\n for num in roman_dict:# crowling trough roman numeral dictionary\n count = int( a_num / roman_dict[num] )# figuring out how many roman numerals in arabic number are\n if count == 1:\n result += num # if it is only one instance of roman numeral, then adding it to resoult\n elif count > 1: \n for i in range( count ): # otherwise adding few numerals \n result += num\n a_num -= roman_dict[num] * count # calculating remaining arabic number\n return str(result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Infer the role of the agent from an incoming/outgoing first message
|
def role_from_first_message(message: Message) -> Dialogue.Role:
return DefaultDialogue.Role.AGENT
|
[
"def role_from_first_message(message: Message) -> Dialogue.Role:\n return BaseOefSearchDialogue.Role.AGENT",
"def import_role(self, msg):\n self.role = msg.data",
"def get_other_agent(self, agent):\n if agent is None:\n return None\n if agent == self.home_agent:\n return self.away_agent\n return self.home_agent",
"def find_first_agent_of_type(game, role):\n agent_list = None\n if role == 'health-center':\n agent_list = game.simulation.health_centers\n elif role == 'distributor':\n if game.study_name == 'beerGame':\n agent_list = [k for k in game.simulation.distributors if k.agent_name == 'DS']\n else:\n agent_list = game.simulation.distributors\n elif role == 'wholesaler':\n if game.study_name == 'beerGame':\n agent_list = [k for k in game.simulation.distributors if k.agent_name == 'WS']\n else:\n agent_list = game.simulation.distributors\n elif role == 'manufacturer':\n agent_list = game.simulation.manufacturers\n\n for agent in agent_list:\n for human_player_agent in game.user_id_to_agent_map:\n if not game.user_id_to_agent_map[human_player_agent] is agent:\n continue\n return agent\n\n return None",
"def role(self) -> str:\n if self._connection.ice_controlling:\n return \"controlling\"\n else:\n return \"controlled\"",
"def massage_roles(self):\n if not self.opts.role:\n self.guess_role()\n if self.opts.role:\n self.opts.role = [xx.lower() for xx in self.opts.role]\n for role in [nrole for nrole in VALID_ROLES\n if nrole[:4] == 'node']:\n if role in self.opts.role and not 'node' in self.opts.role:\n self.opts.role.append('node')\n if 'broker' in self.opts.role and not 'client' in self.opts.role:\n self.opts.role.append('client')\n self.logger.info('Please note: --role=broker implicitly '\n 'enables --role=client to ensure /usr/bin/rhc '\n 'is available for testing and '\n 'troubleshooting.')",
"def my_role_sub(self):\n namespace = \"/aimbot_\" + self.team_side + \"/team/roles/\"\n rospy.Subscriber(namespace + 'ally' + str(self.num), Int16, lambda msg: self.import_role(msg))",
"def _get_actor(self):\n return self.contract.actor",
"def Recv_LoginAck(data):\n global g_actorId\n global g_actorDict\n (g_actorId,) = struct.unpack(\"<B\", data[0])\n print g_programName+'Login ok, local actor id is %s' % (g_actorId,)\n print g_inputPrompt\n g_actorDict[g_actorId] = actor.Actor()\n return data[1:]",
"def _determine_node_roles(self, context=None):\n\n # Clear old roles\n self.nodes_to_roles.update({k: set() for k in self.nodes_to_roles})\n\n # Assign required_node_roles\n for node_role_pair in self.required_node_roles:\n self._add_node_role(node_role_pair[0], node_role_pair[1])\n\n # Get ORIGIN and TERMINAL Nodes using self.scheduler.consideration_queue\n if self.scheduler.consideration_queue:\n self._determine_origin_and_terminal_nodes_from_consideration_queue()\n\n # INPUT\n for node in self.get_nodes_by_role(NodeRole.ORIGIN):\n self._add_node_role(node, NodeRole.INPUT)\n\n # CYCLE\n for node in self.graph_processing.cycle_vertices:\n self._add_node_role(node, NodeRole.CYCLE)\n\n # FEEDBACK_SENDER and FEEDBACK_RECEIVER\n for receiver in self.graph_processing.vertices:\n for sender, typ in receiver.source_types.items():\n if typ is EdgeType.FEEDBACK:\n self._add_node_role(\n sender.component,\n NodeRole.FEEDBACK_SENDER\n )\n self._add_node_role(\n receiver.component,\n NodeRole.FEEDBACK_RECEIVER\n )\n\n # region\n # # MODIFIED 4/25/20 OLD NOTES:\n # # If no OUTPUT nodes were explicitly specified as required_roles by *user* , assign them:\n # # - if there are LearningMechanisms, OUTPUT node is the last non-learning-related node.\n # # - if there are no TERMINAL nodes either, then the last node added to the Composition becomes the OUTPUT node.\n # # - ignore OUTPUT nodes in learning pathways as those are assigned automatically in add_linear_learning_pathway\n # # and don't want that to suppress normal assignment of TERMINAL nodes in non-learning pathways as OUTPUT nodes\n # # (if user has not specified any as required roles)\n # # # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are:\n # # # - not used for Learning;\n # # # - not ControlMechanisms or ObjectiveMechanisms that project to them;\n # # # - do not project to any other nodes.\n # #\n # # # First, find last `consideration_set <consideration_set>` in scheduler that does not contain only\n # # # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s);\n # # # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler\n # # # Next, remove any learning-related nodes, ControlMechanism(s) or control-related\n # # # ObjectiveMechanism(s) that may have \"snuck in\" (i.e., happen to be in the set)\n # # # Then, add any nodes that are not learning-related or a ControlMechanism,\n # # # and that have *no* efferent Projections\n # # # IMPLEMENTATION NOTE:\n # # # Do this here, as the list considers entire sets in the consideration queue,\n # # # and a node with no efferents may be in the same set as one with efferents\n # # # if they have the same dependencies.\n # # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are:\n # # - not used for Learning;\n # # - not ControlMechanisms or ObjectiveMechanisms that project to them;\n # # - do not project to any other nodes.\n # # FIX 4/25/20 [JDC]: MISSES ObjectiveMechanism BURIED IN LAST CONSIDERATION SET\n # # First, find last `consideration_set <consideration_set>` in scheduler that does not contain only\n # # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s);\n # # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler\n # # MODIFIED 4/25/20 END\n # endregion\n\n # MODIFIED 4/25/20 NEW:\n # FIX 4/25/20 [JDC]: NEED TO AVOID AUTOMATICALLY (RE-)ASSIGNING ONES REMOVED BY 
exclude_node_roles\n # - Simply execlude any LEARNING_OBJECTIVE and CONTROL_OBJECTIVE that project only to ModulatoryMechanism\n # - NOTE IN PROGRAM ERROR FAILURE TO ASSIGN CONTROL_OBJECTIVE\n\n # OUTPUT\n\n for node in self.nodes:\n\n # Assign OUTPUT if node is TERMINAL...\n if NodeRole.TERMINAL in self.get_roles_by_node(node):\n # unless it is a ModulatoryMechanism\n if (isinstance(node, ModulatoryMechanism_Base)\n # # FIX: WHY WOULD SUCH AN ObjectiveMechanism BE TERMINAL IF IT PROJECTS TO A MODULATORY_MECHANISM\n # # (IS THIS BECAUSE MODULATORY MECH GETS DISCOUNTED FROM BEING TERMINAL IN graph_processing?)\n # # or an ObjectiveMechanism associated with ControlMechanism or LearningMechanism\n # or any(role in self.get_roles_by_node(node) for role in {NodeRole.CONTROL_OBJECTIVE,\n # NodeRole.CONTROLLER_OBJECTIVE,\n # NodeRole.LEARNING_OBJECTIVE})\n ):\n continue\n else:\n self._add_node_role(node, NodeRole.OUTPUT)\n\n # Assign OUTPUT to any non-TERMINAL Nodes\n else:\n\n # IMPLEMENTATION NOTE:\n # This version allows LEARNING_OBJECTIVE to be assigned as OUTPUT\n # The alternate version below restricts OUTPUT only to RecurrentTransferMechasnism\n # # Assign OUTPUT if node projects only to itself and/or a LearningMechanism\n # # (i.e., it is either a RecurrentTransferMechanism configured for learning\n # # or the LEARNING_OBJECTIVE of a `learning pathway <Composition_Learning_Pathway>`\n # if all(p.receiver.owner is node or isinstance(p.receiver.owner, LearningMechanism)\n # for p in node.efferents):\n # self._add_node_role(node, NodeRole.OUTPUT)\n # continue\n\n # Assign OUTPUT if it is an `RecurrentTransferMechanism` configured for learning\n # and doesn't project to any Nodes other than its `AutoassociativeLearningMechanism`\n # (this is not picked up as a `TERMINAL` since it projects to the `AutoassociativeLearningMechanism`)\n # but can (or already does) project to an output_CIM\n if all((p.receiver.owner is node\n or isinstance(p.receiver.owner, AutoAssociativeLearningMechanism)\n or p.receiver.owner is self.output_CIM)\n for p in node.efferents):\n self._add_node_role(node, NodeRole.OUTPUT)\n continue\n\n # Assign OUTPUT only if the node is not:\n # - the TARGET_MECHANISM of a `learning Pathway <Composition_Learning_Pathway>`\n # - a ModulatoryMechanism\n # and the node projects only to:\n # - an ObjectiveMechanism designated as CONTROL_OBJECTIVE, CONTROLLER_OBJECTIVE or LEARNING_OBJECTIVE\n # - and/or directly to a ControlMechanism but is not an ObjectiveMechanism\n # - and/or (already projects) to output_CIM\n if NodeRole.TARGET in self.get_roles_by_node(node):\n continue\n if isinstance(node, ModulatoryMechanism_Base):\n continue\n if all((any(p.receiver.owner in self.get_nodes_by_role(role)\n for role in {NodeRole.CONTROL_OBJECTIVE,\n NodeRole.CONTROLLER_OBJECTIVE,\n NodeRole.LEARNING_OBJECTIVE})\n or p.receiver.owner is self.output_CIM\n or (isinstance(p.receiver.owner, ControlMechanism) and not isinstance(node, ObjectiveMechanism)))\n for p in node.efferents):\n self._add_node_role(node, NodeRole.OUTPUT)\n # MODIFIED 4/25/20 END\n\n # Assign SINGLETON and INTERNAL nodes\n for node in self.nodes:\n if all(n in self.nodes_to_roles[node] for n in {NodeRole.ORIGIN, NodeRole.TERMINAL}):\n self._add_node_role(node, NodeRole.SINGLETON)\n if not any(n in self.nodes_to_roles[node] for n in {NodeRole.ORIGIN, NodeRole.TERMINAL}):\n self._add_node_role(node, NodeRole.INTERNAL)\n\n # Finally, remove any NodeRole assignments specified in excluded_node_roles\n for node in self.nodes:\n for 
node, role in self.excluded_node_roles:\n if role in self.get_roles_by_node(node):\n self._remove_node_role(node, role)\n\n # Manual override to avoid INPUT/OUTPUT setting, which would cause\n # CIMs to be created, which is not correct for controllers\n if self.controller is not None:\n self.nodes_to_roles[self.controller] = {NodeRole.CONTROLLER}",
"def get_agent(self, *, agent_name: str) -> NodeAgentDefinition:",
"async def get_new_role(self, timeout: int) -> typing.Union[discord.Role, str, None]:\n\n try:\n role = await self.bot.wait_for('message', check=self.bot.checks.wait_for_message_check(self.ctx),\n timeout=timeout)\n except asyncio.TimeoutError:\n await self.ctx.send(\":zzz: You took too long to reply.\")\n return None\n\n if not role.content:\n await self.ctx.send(\":x: You didn't reply with text.\")\n return None\n\n try:\n role_object = await FlowCaseInsensitiveRole().convert(self.ctx, role.content)\n except commands.BadArgument:\n role_object = role.content\n\n if not role_object:\n return role.content\n\n return role_object",
"def receiveMessage(self, msg, sender):\n if not isinstance(msg, ActorMessage):\n return\n if msg.action == EventsActorAction.EVENTS_ADD:\n db_actor = self.createActor(actorClass=DbActor,\n globalName=ActorGlobalName.DB_ACTOR)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_GET:\n db_actor = self.createActor(actorClass=DbActor)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_LIST:\n db_actor = self.createActor(actorClass=DbActor)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_PURCHASE:\n db_actor = self.createActor(actorClass=DbActor,\n globalName=ActorGlobalName.DB_ACTOR)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_SALES:\n db_actor = self.createActor(actorClass=DbActor)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)",
"def role(self):\n if self.case_status == 'adoption':\n return ''\n return self.user_role",
"def __receive_agent_id(self):\n self.id = self.__receive_int()",
"def _roles_via_relationship(actor, relationship, actor_attr, roles, offer_map):\n relobj = None # Role-granting object found via the relationship\n\n # There is no actor_attr. Check if the relationship is a RoleMixin and call\n # roles_for to get offered roles, then remap using the offer map.\n if actor_attr is None:\n if isinstance(relationship, RoleMixin):\n offered_roles = relationship.roles_for(actor)\n if offer_map:\n offered_roles = set(\n chain.from_iterable(\n offer_map[role] for role in offered_roles if role in offer_map\n )\n )\n return offered_roles\n raise TypeError(\n f\"{relationship!r} is not a RoleMixin and no actor attribute was specified\"\n )\n\n # We have a relationship. If it's a collection, find the item in it that relates\n # to the actor.\n if isinstance(relationship, (AppenderMixin, Query)):\n # Query-like relationship. Run a query. It is possible to have multiple matches\n # for the actor, so use .first()\n # TODO: Consider retrieving all and consolidating roles from across them in case\n # the objects are RoleGrantABC. This is not a current requirement and so is not\n # currently supported; using the .first() object is sufficient\n if isinstance(actor_attr, QueryableAttribute):\n relobj = relationship.filter(operator.eq(actor_attr, actor)).first()\n else:\n relobj = relationship.filter_by(**{actor_attr: actor}).first()\n elif isinstance(relationship, abc.Iterable):\n # List-like object. Scan through it looking for item related to actor.\n # Note: strings are also collections. Checking for abc.Iterable is only safe\n # here because of the unlikeliness of a string relationship. If that becomes\n # necessary in future, add `and not isinstance(relationship, str)`\n for relitem in relationship:\n if getattr(relitem, actor_attr) == actor:\n relobj = relitem\n break\n\n # Not any sort of collection. May be a scalar relationship\n elif getattr(relationship, actor_attr) == actor:\n relobj = relationship\n if not relobj:\n # Didn't find a relationship object. Actor gets no roles\n return ()\n\n # We have a related object. Get roles from it\n if isinstance(relobj, RoleGrantABC):\n # If this object grants roles, get them. It may not grant the one we're looking\n # for and that's okay. Grab the others\n offered_roles = relobj.offered_roles\n # But if we have an offer_map, remap the roles and only keep the ones\n # specified in the map\n if offer_map:\n offered_roles = set(\n chain.from_iterable(\n offer_map[role] for role in offered_roles if role in offer_map\n )\n )\n return offered_roles\n # Not a role granting object. Implies that the default roles are granted\n # by its very existence.\n return roles",
"def get_agent(self):\n return # osid.authentication.Agent",
"def weighting_syntactic_role(entity_role: str) -> int:\n if entity_role == u\"S\":\n return 3\n elif entity_role == u\"O\":\n return 2\n elif entity_role == u\"X\":\n return 1\n\n return 0",
"def get_role(row):\n role = row[6]\n\n # Normalize roles Lead Link and Rep Link, as they contain the circle name as well\n if \"Lead Link\" in role:\n role = \"Lead Link\"\n\n if \"Rep Link\" in role:\n role = \"Rep Link\"\n\n return role"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get whether it is a seller search.
|
def is_seller_search(self) -> bool:
assert self._is_seller_search is not None, "is_seller_search not set!"
return self._is_seller_search
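# --- Illustrative usage sketch (not part of the original record) ---
# The getter above assumes a private `_is_seller_search` flag that is set
# elsewhere before the first read; the assert turns a forgotten setup step
# into a loud failure. `SearchContext` below is a hypothetical enclosing
# class, used only to make the pattern self-contained and runnable.
from typing import Optional

class SearchContext:
    def __init__(self) -> None:
        self._is_seller_search: Optional[bool] = None  # unset until configured

    def is_seller_search(self) -> bool:
        assert self._is_seller_search is not None, "is_seller_search not set!"
        return self._is_seller_search

ctx = SearchContext()
ctx._is_seller_search = True  # normally set by the surrounding search setup
assert ctx.is_seller_search() is True  # raises AssertionError if left unset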
|
[
"def is_bestseller(self, book) -> bool:\n\n product = WebDriverWait(self.driver, 20).until(\n EC.presence_of_element_located(\n (By.XPATH, '//div[@data-cel-widget=\"search_result_1\"]')\n )\n )\n ans = WebDriverWait(product, 20).until(\n EC.presence_of_all_elements_located(\n (By.XPATH, \".//a\")\n )\n )[0].text\n # ans = product.find_elements_by_xpath(\".//a\")[0].text\n if ans == \"Best Seller\" or \"Amazon Charts\" in ans:\n is_bestseller = True\n else:\n is_bestseller = False\n\n book_link = None\n for p in product.find_elements_by_xpath(\".//a\"):\n if book in p.text:\n book_link = p\n break\n if book_link: book_link.click()\n\n return is_bestseller",
"def is_seller(self) -> bool:\n keywords = ['budget']\n for word in keywords:\n if word in self.content.lower():\n return False\n return True",
"def search():\n search_queue = deque()\n search_queue += graph['me']\n searched = [] # already searched people\n while search_queue:\n person = search_queue.popleft()\n if person not in searched:\n if person_is_seller(person):\n print(f'{person} sells mango')\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n print('Seller not found')\n return False",
"def is_search_request(self):\n return True",
"def reseller_request(self, env):\n\n valid_groups = self.conf.get('groups',\n 'reseller,.reseller_admin').split(',')\n\n #used by keystone\n ks_identity = env.get('keystone.identity')\n if ks_identity and (set(ks_identity['roles']) & set(valid_groups)):\n return True\n\n #used by tempauth and swauth\n user = env.get('REMOTE_USER')\n if user and (set(user.split(',')) & set(valid_groups)):\n return True\n\n return False",
"def get(self):\n \"\"\"\n @api {get} /seller/:sellerId Get a seller\n @apiVersion 1.0.0\n @apiGroup Seller\n\n @apiUrlParam {Integer} sellerId\n \"\"\"\n return self",
"def supports_grade_system_search(self):\n return # boolean",
"def search(self):\n user = self.get_search_phrase()\n if self.requester.is_staff:\n res = TempCharge.objects.for_reseller(self.reseller).filter(user__first_name__icontains=user)\n if user:\n res = res.filter(user__first_name__icontains=user)\n else:\n res = TempCharge.objects.own(self.req)\n if user:\n res = res.filter(charger__first_name__icontains=user)\n return res",
"def isFound(self) -> \"SbBool\":\n return _coin.SoSearchAction_isFound(self)",
"def get_search_provider(only_active=True):\n from indico.modules.search.controllers import InternalSearch\n providers = values_from_signal(signals.core.get_search_providers.send(), as_list=True)\n\n if not providers:\n return InternalSearch\n elif len(providers) == 1:\n provider = providers[0]\n return provider if not only_active or provider.active else InternalSearch\n else:\n providers_str = ', '.join(f'{x.__module__}.{x.__name__}' for x in providers)\n raise RuntimeError(f'Only one search provider can be defined (found: {providers_str})')",
"def IsOnSale(self):\n return self.__isOnSale",
"def get_sales_by_seller(seller):\n \n return Sale.query.filter(Sale.seller_name==seller.seller_name).all().order_by(\"date\")",
"def test_best_seller_result(self):\n self.add_products_and_sales()\n link = self.admin.best_seller(self.company)\n self.assertIn('<a class=\"model_change_link\" href=', link)\n self.assertIn('change', link)\n self.assertIn(f'?_changelist_filters=company__id__exact%3D{self.company.pk}', link)\n self.assertIn(str(self.products[0]), link)",
"def get_search_query(request):\n return (request.GET.get('q') or request.session.get('query') or\n get_setting('DEFAULT_QUERY'))",
"def supports_grade_system_search_record_type(self, grade_system_search_record_type):\n return # boolean",
"def _search_for_artist(self, artist):\n token = self._generate_token()\n if token:\n sp = Spotify(client_credentials_manager=token)\n search_results = sp.search(q=artist, type='artist')\n try:\n first_result = search_results['artists']['items'][0]\n return first_result\n except IndexError:\n pass",
"def is_sell(self) -> bool:\n return self.side == TradeSide.SELL",
"def find_drink(self, order_name):\r\n for item in self.menu:\r\n if item.code == order_name:\r\n return item\r\n print(\"Sorry that item is not available.\")\r\n return False",
"def check_if_user_is_a_subscriber(request):\n try:\n this_consumer = request.session['consumer']\n this_subscriber = this_consumer['subscriber']\n subscriber_id = this_subscriber['subscriber_id']\n if subscriber_id:\n return True\n except KeyError:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Infer the role of the agent from an incoming/outgoing first message
|
def role_from_first_message(message: Message) -> Dialogue.Role:
return BaseOefSearchDialogue.Role.AGENT
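# --- Illustrative sketch (not part of the original record) ---
# The function above infers a role without inspecting the message: this side
# of an OEF search dialogue always plays the AGENT role. The `Role` enum and
# `Message` placeholder below are stand-ins for the framework types, used
# only so the behaviour can be exercised in isolation.
from enum import Enum

class Role(Enum):
    AGENT = "agent"
    OEF_NODE = "oef_node"

class Message:
    """Placeholder for the framework's Message type."""

def infer_role(message: Message) -> Role:
    # Content and direction of the first message never change the outcome.
    return Role.AGENT

assert infer_role(Message()) is Role.AGENT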
|
[
"def role_from_first_message(message: Message) -> Dialogue.Role:\n return DefaultDialogue.Role.AGENT",
"def import_role(self, msg):\n self.role = msg.data",
"def get_other_agent(self, agent):\n if agent is None:\n return None\n if agent == self.home_agent:\n return self.away_agent\n return self.home_agent",
"def find_first_agent_of_type(game, role):\n agent_list = None\n if role == 'health-center':\n agent_list = game.simulation.health_centers\n elif role == 'distributor':\n if game.study_name == 'beerGame':\n agent_list = [k for k in game.simulation.distributors if k.agent_name == 'DS']\n else:\n agent_list = game.simulation.distributors\n elif role == 'wholesaler':\n if game.study_name == 'beerGame':\n agent_list = [k for k in game.simulation.distributors if k.agent_name == 'WS']\n else:\n agent_list = game.simulation.distributors\n elif role == 'manufacturer':\n agent_list = game.simulation.manufacturers\n\n for agent in agent_list:\n for human_player_agent in game.user_id_to_agent_map:\n if not game.user_id_to_agent_map[human_player_agent] is agent:\n continue\n return agent\n\n return None",
"def role(self) -> str:\n if self._connection.ice_controlling:\n return \"controlling\"\n else:\n return \"controlled\"",
"def massage_roles(self):\n if not self.opts.role:\n self.guess_role()\n if self.opts.role:\n self.opts.role = [xx.lower() for xx in self.opts.role]\n for role in [nrole for nrole in VALID_ROLES\n if nrole[:4] == 'node']:\n if role in self.opts.role and not 'node' in self.opts.role:\n self.opts.role.append('node')\n if 'broker' in self.opts.role and not 'client' in self.opts.role:\n self.opts.role.append('client')\n self.logger.info('Please note: --role=broker implicitly '\n 'enables --role=client to ensure /usr/bin/rhc '\n 'is available for testing and '\n 'troubleshooting.')",
"def my_role_sub(self):\n namespace = \"/aimbot_\" + self.team_side + \"/team/roles/\"\n rospy.Subscriber(namespace + 'ally' + str(self.num), Int16, lambda msg: self.import_role(msg))",
"def _get_actor(self):\n return self.contract.actor",
"def Recv_LoginAck(data):\n global g_actorId\n global g_actorDict\n (g_actorId,) = struct.unpack(\"<B\", data[0])\n print g_programName+'Login ok, local actor id is %s' % (g_actorId,)\n print g_inputPrompt\n g_actorDict[g_actorId] = actor.Actor()\n return data[1:]",
"def _determine_node_roles(self, context=None):\n\n # Clear old roles\n self.nodes_to_roles.update({k: set() for k in self.nodes_to_roles})\n\n # Assign required_node_roles\n for node_role_pair in self.required_node_roles:\n self._add_node_role(node_role_pair[0], node_role_pair[1])\n\n # Get ORIGIN and TERMINAL Nodes using self.scheduler.consideration_queue\n if self.scheduler.consideration_queue:\n self._determine_origin_and_terminal_nodes_from_consideration_queue()\n\n # INPUT\n for node in self.get_nodes_by_role(NodeRole.ORIGIN):\n self._add_node_role(node, NodeRole.INPUT)\n\n # CYCLE\n for node in self.graph_processing.cycle_vertices:\n self._add_node_role(node, NodeRole.CYCLE)\n\n # FEEDBACK_SENDER and FEEDBACK_RECEIVER\n for receiver in self.graph_processing.vertices:\n for sender, typ in receiver.source_types.items():\n if typ is EdgeType.FEEDBACK:\n self._add_node_role(\n sender.component,\n NodeRole.FEEDBACK_SENDER\n )\n self._add_node_role(\n receiver.component,\n NodeRole.FEEDBACK_RECEIVER\n )\n\n # region\n # # MODIFIED 4/25/20 OLD NOTES:\n # # If no OUTPUT nodes were explicitly specified as required_roles by *user* , assign them:\n # # - if there are LearningMechanisms, OUTPUT node is the last non-learning-related node.\n # # - if there are no TERMINAL nodes either, then the last node added to the Composition becomes the OUTPUT node.\n # # - ignore OUTPUT nodes in learning pathways as those are assigned automatically in add_linear_learning_pathway\n # # and don't want that to suppress normal assignment of TERMINAL nodes in non-learning pathways as OUTPUT nodes\n # # (if user has not specified any as required roles)\n # # # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are:\n # # # - not used for Learning;\n # # # - not ControlMechanisms or ObjectiveMechanisms that project to them;\n # # # - do not project to any other nodes.\n # #\n # # # First, find last `consideration_set <consideration_set>` in scheduler that does not contain only\n # # # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s);\n # # # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler\n # # # Next, remove any learning-related nodes, ControlMechanism(s) or control-related\n # # # ObjectiveMechanism(s) that may have \"snuck in\" (i.e., happen to be in the set)\n # # # Then, add any nodes that are not learning-related or a ControlMechanism,\n # # # and that have *no* efferent Projections\n # # # IMPLEMENTATION NOTE:\n # # # Do this here, as the list considers entire sets in the consideration queue,\n # # # and a node with no efferents may be in the same set as one with efferents\n # # # if they have the same dependencies.\n # # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are:\n # # - not used for Learning;\n # # - not ControlMechanisms or ObjectiveMechanisms that project to them;\n # # - do not project to any other nodes.\n # # FIX 4/25/20 [JDC]: MISSES ObjectiveMechanism BURIED IN LAST CONSIDERATION SET\n # # First, find last `consideration_set <consideration_set>` in scheduler that does not contain only\n # # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s);\n # # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler\n # # MODIFIED 4/25/20 END\n # endregion\n\n # MODIFIED 4/25/20 NEW:\n # FIX 4/25/20 [JDC]: NEED TO AVOID AUTOMATICALLY (RE-)ASSIGNING ONES REMOVED BY 
exclude_node_roles\n # - Simply execlude any LEARNING_OBJECTIVE and CONTROL_OBJECTIVE that project only to ModulatoryMechanism\n # - NOTE IN PROGRAM ERROR FAILURE TO ASSIGN CONTROL_OBJECTIVE\n\n # OUTPUT\n\n for node in self.nodes:\n\n # Assign OUTPUT if node is TERMINAL...\n if NodeRole.TERMINAL in self.get_roles_by_node(node):\n # unless it is a ModulatoryMechanism\n if (isinstance(node, ModulatoryMechanism_Base)\n # # FIX: WHY WOULD SUCH AN ObjectiveMechanism BE TERMINAL IF IT PROJECTS TO A MODULATORY_MECHANISM\n # # (IS THIS BECAUSE MODULATORY MECH GETS DISCOUNTED FROM BEING TERMINAL IN graph_processing?)\n # # or an ObjectiveMechanism associated with ControlMechanism or LearningMechanism\n # or any(role in self.get_roles_by_node(node) for role in {NodeRole.CONTROL_OBJECTIVE,\n # NodeRole.CONTROLLER_OBJECTIVE,\n # NodeRole.LEARNING_OBJECTIVE})\n ):\n continue\n else:\n self._add_node_role(node, NodeRole.OUTPUT)\n\n # Assign OUTPUT to any non-TERMINAL Nodes\n else:\n\n # IMPLEMENTATION NOTE:\n # This version allows LEARNING_OBJECTIVE to be assigned as OUTPUT\n # The alternate version below restricts OUTPUT only to RecurrentTransferMechasnism\n # # Assign OUTPUT if node projects only to itself and/or a LearningMechanism\n # # (i.e., it is either a RecurrentTransferMechanism configured for learning\n # # or the LEARNING_OBJECTIVE of a `learning pathway <Composition_Learning_Pathway>`\n # if all(p.receiver.owner is node or isinstance(p.receiver.owner, LearningMechanism)\n # for p in node.efferents):\n # self._add_node_role(node, NodeRole.OUTPUT)\n # continue\n\n # Assign OUTPUT if it is an `RecurrentTransferMechanism` configured for learning\n # and doesn't project to any Nodes other than its `AutoassociativeLearningMechanism`\n # (this is not picked up as a `TERMINAL` since it projects to the `AutoassociativeLearningMechanism`)\n # but can (or already does) project to an output_CIM\n if all((p.receiver.owner is node\n or isinstance(p.receiver.owner, AutoAssociativeLearningMechanism)\n or p.receiver.owner is self.output_CIM)\n for p in node.efferents):\n self._add_node_role(node, NodeRole.OUTPUT)\n continue\n\n # Assign OUTPUT only if the node is not:\n # - the TARGET_MECHANISM of a `learning Pathway <Composition_Learning_Pathway>`\n # - a ModulatoryMechanism\n # and the node projects only to:\n # - an ObjectiveMechanism designated as CONTROL_OBJECTIVE, CONTROLLER_OBJECTIVE or LEARNING_OBJECTIVE\n # - and/or directly to a ControlMechanism but is not an ObjectiveMechanism\n # - and/or (already projects) to output_CIM\n if NodeRole.TARGET in self.get_roles_by_node(node):\n continue\n if isinstance(node, ModulatoryMechanism_Base):\n continue\n if all((any(p.receiver.owner in self.get_nodes_by_role(role)\n for role in {NodeRole.CONTROL_OBJECTIVE,\n NodeRole.CONTROLLER_OBJECTIVE,\n NodeRole.LEARNING_OBJECTIVE})\n or p.receiver.owner is self.output_CIM\n or (isinstance(p.receiver.owner, ControlMechanism) and not isinstance(node, ObjectiveMechanism)))\n for p in node.efferents):\n self._add_node_role(node, NodeRole.OUTPUT)\n # MODIFIED 4/25/20 END\n\n # Assign SINGLETON and INTERNAL nodes\n for node in self.nodes:\n if all(n in self.nodes_to_roles[node] for n in {NodeRole.ORIGIN, NodeRole.TERMINAL}):\n self._add_node_role(node, NodeRole.SINGLETON)\n if not any(n in self.nodes_to_roles[node] for n in {NodeRole.ORIGIN, NodeRole.TERMINAL}):\n self._add_node_role(node, NodeRole.INTERNAL)\n\n # Finally, remove any NodeRole assignments specified in excluded_node_roles\n for node in self.nodes:\n for 
node, role in self.excluded_node_roles:\n if role in self.get_roles_by_node(node):\n self._remove_node_role(node, role)\n\n # Manual override to avoid INPUT/OUTPUT setting, which would cause\n # CIMs to be created, which is not correct for controllers\n if self.controller is not None:\n self.nodes_to_roles[self.controller] = {NodeRole.CONTROLLER}",
"def get_agent(self, *, agent_name: str) -> NodeAgentDefinition:",
"async def get_new_role(self, timeout: int) -> typing.Union[discord.Role, str, None]:\n\n try:\n role = await self.bot.wait_for('message', check=self.bot.checks.wait_for_message_check(self.ctx),\n timeout=timeout)\n except asyncio.TimeoutError:\n await self.ctx.send(\":zzz: You took too long to reply.\")\n return None\n\n if not role.content:\n await self.ctx.send(\":x: You didn't reply with text.\")\n return None\n\n try:\n role_object = await FlowCaseInsensitiveRole().convert(self.ctx, role.content)\n except commands.BadArgument:\n role_object = role.content\n\n if not role_object:\n return role.content\n\n return role_object",
"def receiveMessage(self, msg, sender):\n if not isinstance(msg, ActorMessage):\n return\n if msg.action == EventsActorAction.EVENTS_ADD:\n db_actor = self.createActor(actorClass=DbActor,\n globalName=ActorGlobalName.DB_ACTOR)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_GET:\n db_actor = self.createActor(actorClass=DbActor)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_LIST:\n db_actor = self.createActor(actorClass=DbActor)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_PURCHASE:\n db_actor = self.createActor(actorClass=DbActor,\n globalName=ActorGlobalName.DB_ACTOR)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)\n if msg.action == EventsActorAction.EVENTS_SALES:\n db_actor = self.createActor(actorClass=DbActor)\n message = ActorMessage(action=msg.action, payload=msg.payload,\n customer_id=msg.customer_id, response_to=sender)\n self.send(db_actor, message)",
"def role(self):\n if self.case_status == 'adoption':\n return ''\n return self.user_role",
"def __receive_agent_id(self):\n self.id = self.__receive_int()",
"def _roles_via_relationship(actor, relationship, actor_attr, roles, offer_map):\n relobj = None # Role-granting object found via the relationship\n\n # There is no actor_attr. Check if the relationship is a RoleMixin and call\n # roles_for to get offered roles, then remap using the offer map.\n if actor_attr is None:\n if isinstance(relationship, RoleMixin):\n offered_roles = relationship.roles_for(actor)\n if offer_map:\n offered_roles = set(\n chain.from_iterable(\n offer_map[role] for role in offered_roles if role in offer_map\n )\n )\n return offered_roles\n raise TypeError(\n f\"{relationship!r} is not a RoleMixin and no actor attribute was specified\"\n )\n\n # We have a relationship. If it's a collection, find the item in it that relates\n # to the actor.\n if isinstance(relationship, (AppenderMixin, Query)):\n # Query-like relationship. Run a query. It is possible to have multiple matches\n # for the actor, so use .first()\n # TODO: Consider retrieving all and consolidating roles from across them in case\n # the objects are RoleGrantABC. This is not a current requirement and so is not\n # currently supported; using the .first() object is sufficient\n if isinstance(actor_attr, QueryableAttribute):\n relobj = relationship.filter(operator.eq(actor_attr, actor)).first()\n else:\n relobj = relationship.filter_by(**{actor_attr: actor}).first()\n elif isinstance(relationship, abc.Iterable):\n # List-like object. Scan through it looking for item related to actor.\n # Note: strings are also collections. Checking for abc.Iterable is only safe\n # here because of the unlikeliness of a string relationship. If that becomes\n # necessary in future, add `and not isinstance(relationship, str)`\n for relitem in relationship:\n if getattr(relitem, actor_attr) == actor:\n relobj = relitem\n break\n\n # Not any sort of collection. May be a scalar relationship\n elif getattr(relationship, actor_attr) == actor:\n relobj = relationship\n if not relobj:\n # Didn't find a relationship object. Actor gets no roles\n return ()\n\n # We have a related object. Get roles from it\n if isinstance(relobj, RoleGrantABC):\n # If this object grants roles, get them. It may not grant the one we're looking\n # for and that's okay. Grab the others\n offered_roles = relobj.offered_roles\n # But if we have an offer_map, remap the roles and only keep the ones\n # specified in the map\n if offer_map:\n offered_roles = set(\n chain.from_iterable(\n offer_map[role] for role in offered_roles if role in offer_map\n )\n )\n return offered_roles\n # Not a role granting object. Implies that the default roles are granted\n # by its very existence.\n return roles",
"def get_agent(self):\n return # osid.authentication.Agent",
"def weighting_syntactic_role(entity_role: str) -> int:\n if entity_role == u\"S\":\n return 3\n elif entity_role == u\"O\":\n return 2\n elif entity_role == u\"X\":\n return 1\n\n return 0",
"def get_role(row):\n role = row[6]\n\n # Normalize roles Lead Link and Rep Link, as they contain the circle name as well\n if \"Lead Link\" in role:\n role = \"Lead Link\"\n\n if \"Rep Link\" in role:\n role = \"Rep Link\"\n\n return role"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that accessing the observable return types through qml.operation emits a warning.
|
def test_obersvablereturntypes_import_warnings(return_type):
with pytest.warns(UserWarning, match=r"is deprecated"):
getattr(qml.operation, return_type)
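# --- Illustrative sketch (not part of the original record) ---
# A self-contained version of the pytest.warns pattern used above: touching a
# deprecated name emits a UserWarning whose text matches the regex. The
# `deprecated_attribute` helper is hypothetical; the real test accesses
# attributes of qml.operation instead.
import warnings
import pytest

def deprecated_attribute():
    warnings.warn("Sample is deprecated", UserWarning)

def test_deprecated_attribute_warns():
    with pytest.warns(UserWarning, match=r"is deprecated"):
        deprecated_attribute()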
|
[
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_return_of_non_observable(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), 0.3\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"must return either\"):\n node(0.5)",
"def test_mul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Cannot multiply Observable by\"):\n _ = \"dummy\" * qml.PauliX(0)",
"def test_type_hints_not_raises(self):\n x = 1\n self.assertEqual(type_hint_test(x, 'x', x), x, msg=\"Type hint didn't raise an error.\")",
"def test_propagator_type_consistency(self):",
"def test_wrong_args_observe():\n dt1 = DynamicAtom()\n\n with pytest.raises(TypeError) as excinfo:\n dt1.observe(\"val\")\n assert \"2 or 3 arguments\" in excinfo.exconly()\n\n with pytest.raises(TypeError) as excinfo:\n dt1.observe(\"val\", lambda change: change, ChangeType.ANY, \"bar\")\n assert \"2 or 3 arguments\" in excinfo.exconly()\n\n with pytest.raises(TypeError) as excinfo:\n dt1.observe(1, lambda change: change)\n assert \"iterable\" in excinfo.exconly()\n\n with pytest.raises(TypeError) as excinfo:\n dt1.observe((1, 1), lambda change: change)\n assert \"str\" in excinfo.exconly()\n\n with pytest.raises(TypeError) as excinfo:\n dt1.observe(\"val\", 1)\n assert \"callable\" in excinfo.exconly()\n\n with pytest.raises(TypeError) as excinfo:\n dt1.observe(\"val\", lambda change: change, \"foo\")\n assert \"int\" in excinfo.exconly()",
"def test_warning_get_parameter_shift(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n num_params = 1\n grad_recipe = (\"Dummy recipe\",)\n\n op = DummyOp(0.1, wires=0)\n with pytest.warns(UserWarning, match=\"get_parameter_shift is deprecated\"):\n assert op.get_parameter_shift(0) == \"Dummy recipe\"",
"def test_warning_direction():\n with pytest.warns(UserWarning):\n get_wind_components(4.,554.)",
"def HasExtractionWarnings(self):\n return self._store.HasExtractionWarnings()",
"def test_not_converged_warning():\n ncw = NotConvergedWarning(\"Estimator\", 0.1)\n assert_true(ncw.estimator == \"Estimator\")\n assert_true(ncw.increment == 0.1)\n assert_true(ncw.__str__() == \"[Estimator] only reached increment %.3e\" % 0.1)",
"def test_warning_direction():\n with pytest.warns(UserWarning):\n get_wind_components(3,361)",
"def test_get_observable_ids(petab_problem): # pylint: disable=W0621\n assert set(petab_problem.get_observable_ids()) == {'observable_1'}",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def GetExtractionWarnings(self):\n return self._store.GetExtractionWarnings()",
"def test_good_eval_warnings(self, warn_type):\n warn_msg = 'test warning'\n\n # Raise the desired warning\n with warnings.catch_warnings(record=True) as war:\n warnings.warn(warn_msg, warn_type)\n\n # Evaluate the warning output\n testing.eval_warnings(war, [warn_msg], warn_type)\n return",
"def test_eval_warnings_bad_type(self, warn_type):\n warn_msg = 'test warning'\n bad_type = UserWarning if warn_type != UserWarning else BytesWarning\n\n # Raise the desired warning\n with warnings.catch_warnings(record=True) as war:\n warnings.warn(warn_msg, warn_type)\n\n # Catch and evaluate the expected error\n with pytest.raises(AssertionError) as aerr:\n testing.eval_warnings(war, [warn_msg], bad_type)\n\n assert str(aerr).find('bad warning type for message') >= 0\n return",
"def test_all_not_implemented(self, op):\n lvalue = common.LvalueLHSRHSNotImplemented()\n rvalue = common.LvalueLHSRHSNotImplemented()\n with pytest.raises(TypeError):\n op(lvalue, rvalue)\n assert lvalue.icalled == 1\n # https://bugs.python.org/issue38302\n if sys.version_info[:2] < (3, 10) and op.__name__ == \"ipow\":\n return\n assert lvalue.called == 1\n assert not lvalue.rcalled\n assert not rvalue.icalled\n assert not rvalue.called\n assert not rvalue.rcalled",
"def test_warn_severity(check_plugin):\n error = check_plugin.warning('A123', 'No worries, its just a warning')\n assert error.severity == Nit.WARNING",
"def test_builtins_cast_return_none():\n assert m.return_none_string() is None\n assert m.return_none_char() is None\n assert m.return_none_bool() is None\n assert m.return_none_int() is None\n assert m.return_none_float() is None\n assert m.return_none_pair() is None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an exception is raised if called with the wrong number of wires
|
def test_incorrect_num_wires(self):
class DummyOp(qml.operation.Operator):
r"""Dummy custom operator"""
num_wires = 1
with pytest.raises(ValueError, match="wrong number of wires"):
DummyOp(0.5, wires=[0, 1])
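# --- Illustrative sketch (not part of the original record) ---
# The validation exercised above, mirrored without PennyLane: a base class
# compares len(wires) against the subclass's declared num_wires and raises
# ValueError with the matched message. `MiniOperator` is a hypothetical
# stand-in, not PennyLane's Operator.
import pytest

class MiniOperator:
    num_wires = 1

    def __init__(self, *params, wires):
        if len(wires) != self.num_wires:
            raise ValueError(f"{type(self).__name__}: wrong number of wires")
        self.params, self.wires = params, list(wires)

def test_incorrect_num_wires_sketch():
    with pytest.raises(ValueError, match="wrong number of wires"):
        MiniOperator(0.5, wires=[0, 1])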
|
[
"def test_check_wires_exception(self, wires):\n with pytest.raises(ValueError, match=\"wires must be a positive integer\"):\n check_wires(wires=wires)",
"def test_raises(self):\n with pytest.raises(InstanceCountError):\n self.test_wbn.fit(\n data=SAMPLE_DATASET.data, target=SAMPLE_DATASET.target[:1]\n )",
"def test_check_num_layers_exception(self, inpt, repeat):\n with pytest.raises(ValueError, match=\"The first dimension of all parameters\"):\n check_number_of_layers(inpt)",
"def test_passed_tooManyArgs(self):\n\n def func(a, b):\n pass\n\n self.assertRaises(TypeError, self.checkPassed, func, 1, 2, 3)",
"def test_operation_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 2])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_non_unique_wires(self):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(qml.wires.WireError, match=\"Wires must be unique\"):\n DummyOp(0.5, wires=[1, 1], do_queue=False)",
"def test_trustworthiness_n_neighbors_error():\n regex = \"n_neighbors .+ should be less than .+\"\n rng = np.random.RandomState(42)\n X = rng.rand(7, 4)\n X_embedded = rng.rand(7, 2)\n with pytest.raises(ValueError, match=regex):\n trustworthiness(X, X_embedded, n_neighbors=5)\n\n trust = trustworthiness(X, X_embedded, n_neighbors=3)\n assert 0 <= trust <= 1",
"def test_town_checking_fail(self):\r\n for town in self.bad_towns:\r\n with self.assertRaises(ValueError) as context:\r\n hw.Garage.town_checking(town)\r\n self.assertTrue('Town should be instance of TOWNS!' in context.exception.args)",
"def test_no_addN_on_exception(self):\n g = Graph()\n trips = [(URIRef(\"a\"), URIRef(\"b%d\" % i), URIRef(\"c%d\" % i)) for i in range(12)]\n\n try:\n with BatchAddGraph(g, batch_size=10) as cut:\n for i, trip in enumerate(trips):\n cut.add(trip)\n if i == 11:\n raise Exception(\"myexc\")\n except Exception as e:\n if str(e) != \"myexc\":\n pass\n self.assertEqual(10, len(g))",
"def test_radio_bulk_fail(self):\n radiourls = [\"http://qrrbrbirlbel.yu/\", \"http://zeekyboogydoog.su/\"]\n with mock.patch('bbarchivist.networkutils.availability', mock.MagicMock(return_value=False)):\n with mock.patch('builtins.input', mock.MagicMock(return_value=\"n\")):\n with pytest.raises(SystemExit):\n bs.check_radio_bulk(radiourls, \"10.3.2.2639\")",
"def test_build_ops_error():\n qubit = cirq.LineQubit.range(1)\n with pytest.raises(ValueError):\n cirq_utils.qubit_op_to_gate('W', qubit[0])",
"def test_create_no_xml_too_many_attempts(self):\n hypervisor = mock.Mock()\n hypervisor.listNetworks.return_value = []\n hypervisor.networkCreateXML.side_effect = libvirt.libvirtError('BOOM')\n with self.assertRaises(RuntimeError) as error:\n network.create(hypervisor, 'foo', {}, max_attempts=1)\n self.assertEqual(str(error), \"Too many attempts (1) to get a valid IP address.\")",
"def test_num_unknown_species():\n jung = Jungle()\n nt.assert_raises(ValueError, jung.total_num_animals, species='Zebra')",
"def test_observable_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_length_not_match_error(self, n_status, n_wires):\n with pytest.raises(\n ValueError,\n match=\"Wires length and flipping state length does not match, they must be equal length \",\n ):\n qml.FlipSign(n_status, wires=n_wires)",
"def test_double_excitation_unitary_exceptions(self, weight, wires1, wires2, msg_match):\n dev = qml.device(\"default.qubit\", wires=10)\n\n def circuit(weight=weight):\n DoubleExcitationUnitary(weight=weight, wires1=wires1, wires2=wires2)\n return qml.expval(qml.PauliZ(0))\n\n qnode = qml.QNode(circuit, dev)\n\n with pytest.raises(ValueError, match=msg_match):\n qnode(weight=weight)",
"def test_single_excitation_unitary_exceptions(self, weight, single_wires, msg_match):\n dev = qml.device(\"default.qubit\", wires=5)\n\n def circuit(weight=weight):\n SingleExcitationUnitary(weight=weight, wires=single_wires)\n return qml.expval(qml.PauliZ(0))\n\n qnode = qml.QNode(circuit, dev)\n\n with pytest.raises(ValueError, match=msg_match):\n qnode(weight=weight)",
"def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])",
"def testWelchsTTest_EmptySample_RaisesError(self):\n with self.assertRaises(RuntimeError):\n ttest.WelchsTTest([], [])\n with self.assertRaises(RuntimeError):\n ttest.WelchsTTest([], [1, 2, 3])\n with self.assertRaises(RuntimeError):\n ttest.WelchsTTest([1, 2, 3], [])",
"def test_out_of_range(self):\n self.assert_initialize_driver()\n self.assert_set_exception(Parameter.CYCLE_TIME, 14)\n self.assert_set_exception(Parameter.CYCLE_TIME, 3601)\n\n # verify we can set read/write parameters\n self.assert_set(Parameter.CYCLE_TIME, 30)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an exception is raised if called with identical wires
|
def test_non_unique_wires(self):
class DummyOp(qml.operation.Operator):
r"""Dummy custom operator"""
num_wires = 1
with pytest.raises(qml.wires.WireError, match="Wires must be unique"):
DummyOp(0.5, wires=[1, 1], do_queue=False)
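# --- Illustrative sketch (not part of the original record) ---
# The uniqueness check behind the test above, mirrored without PennyLane.
# `WireError` and `validate_wires` are local stand-ins for qml.wires.WireError
# and the wire validation inside Operator.__init__.
import pytest

class WireError(Exception):
    pass

def validate_wires(wires):
    if len(set(wires)) != len(wires):
        raise WireError("Wires must be unique.")
    return list(wires)

def test_non_unique_wires_sketch():
    with pytest.raises(WireError, match="Wires must be unique"):
        validate_wires([1, 1])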
|
[
"def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])",
"def test_should_check_raise_multiple_register_errors(self):\n instance = error_.register_exception(\n IndividualException\n ).register_parser(idividual_exception_parser)\n\n instance.register_exception(OtherException)\n instance.register_exception(NewException)\n\n with pytest.raises(IndividualException):\n second_name = \"Kovv\"\n last_name = \"Olek\"\n instance.raise_individual_exception(second_name, last_name)\n assert instance.second_name == second_name\n assert instance.last_name == last_name\n\n with pytest.raises(OtherException):\n instance.raise_other_exception()\n\n with pytest.raises(NewException):\n instance.raise_new_exception()",
"def test_double_excitation_unitary_exceptions(self, weight, wires1, wires2, msg_match):\n dev = qml.device(\"default.qubit\", wires=10)\n\n def circuit(weight=weight):\n DoubleExcitationUnitary(weight=weight, wires1=wires1, wires2=wires2)\n return qml.expval(qml.PauliZ(0))\n\n qnode = qml.QNode(circuit, dev)\n\n with pytest.raises(ValueError, match=msg_match):\n qnode(weight=weight)",
"def test_single_excitation_unitary_exceptions(self, weight, single_wires, msg_match):\n dev = qml.device(\"default.qubit\", wires=5)\n\n def circuit(weight=weight):\n SingleExcitationUnitary(weight=weight, wires=single_wires)\n return qml.expval(qml.PauliZ(0))\n\n qnode = qml.QNode(circuit, dev)\n\n with pytest.raises(ValueError, match=msg_match):\n qnode(weight=weight)",
"def test_operation_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 2])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_no_default(self):\n foobar = MultiMethod('foobar')\n self.assertRaises(Exception, foobar, 1.2, 1.3)",
"def test_raise_exception_if_exceptions(self):\n # pylint: disable=W0702\n obj = putil.exh.ExHandle()\n def func3(cond1=False, cond2=False, cond3=False, cond4=False):\n exobj = putil.exh.ExHandle()\n exobj.add_exception(\n 'my_exception1',\n RuntimeError,\n 'This is an exception'\n )\n exobj.add_exception(\n 'my_exception2',\n OSError,\n 'This is an exception with a *[fname]* field'\n )\n exobj.raise_exception_if(\n 'my_exception1',\n cond1,\n edata=None\n )\n exobj.raise_exception_if(\n 'my_exception2',\n cond2,\n edata={'field':'fname', 'value':'my_file.txt'}\n )\n if cond3:\n exobj.raise_exception_if('my_exception3', False)\n if cond4:\n exobj.raise_exception_if(\n 'my_exception2',\n cond4,\n edata={'field':'not_a_field', 'value':'my_file.txt'}\n )\n return exobj\n putil.test.assert_exception(\n obj.raise_exception_if,\n {'exname':5, 'condition':False},\n RuntimeError,\n 'Argument `exname` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {'exname':'my_exception', 'condition':5},\n RuntimeError,\n 'Argument `condition` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {'exname':'my_exception', 'condition':False, 'edata':354},\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {\n 'exname':'my_exception',\n 'condition':False,\n 'edata':{'field':'my_field'}\n },\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {\n 'exname':'my_exception',\n 'condition':False,\n 'edata':{'field':3, 'value':5}\n },\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {'exname':'my_exception', 'condition':False, 'edata':{'value':5}},\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {\n 'exname':'my_exception',\n 'condition':False,\n 'edata':[\n {'field':'my_field1', 'value':5}, {'field':'my_field'}\n ]\n },\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {\n 'exname':'my_exception', 'condition':False,\n 'edata':[\n {'field':'my_field1', 'value':5}, {'field':3, 'value':5}\n ]\n },\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n obj.raise_exception_if,\n {\n 'exname':'my_exception',\n 'condition':False,\n 'edata':[{'field':'my_field1', 'value':5}, {'value':5}]\n },\n RuntimeError,\n 'Argument `edata` is not valid'\n )\n putil.test.assert_exception(\n func3,\n {'cond1':True, 'cond2':False},\n RuntimeError,\n 'This is an exception'\n )\n putil.test.assert_exception(\n func3,\n {'cond2':True},\n OSError,\n 'This is an exception with a my_file.txt field'\n )\n putil.test.assert_exception(\n func3,\n {'cond3':True},\n ValueError,\n 'Exception name my_exception3 not found'\n )\n putil.test.assert_exception(\n func3,\n {'cond4':True},\n RuntimeError,\n 'Field not_a_field not in exception message'\n )\n exobj = func3() # Test that edata=None works\n cdb = exobj._ex_dict\n if not cdb:\n assert False\n for exname, erec in cdb.items():\n mname = 'test_exh.test_raise_exception.func3'\n if exname.endswith('/{0}.my_exception1'.format(mname)):\n assert erec['function'].endswith('{0}'.format(mname))\n assert erec['type'] == RuntimeError\n assert erec['msg'] == 'This is an exception'\n assert erec['raised']\n if exname.endswith('/{0}.my_exception2'.format(mname)):\n assert erec['function'].endswith('{0}'.format(mname))\n 
assert erec['type'] == OSError\n assert (\n erec['msg']\n ==\n 'This is an exception with a *[fname]* field'\n )\n assert erec['raised']\n exobj = putil.exh.ExHandle(full_cname=True)\n def func_base(exobj, cond):\n \"\"\" Test raised field \"\"\"\n exobj.add_exception(\n 'multi_path_exception',\n RuntimeError,\n 'Invalid condition'\n )\n exobj.raise_exception_if(\n exname='multi_path_exception', condition=cond\n )\n def func_mid(exobj, cond):\n \"\"\" Add multi-path to exception object \"\"\"\n func_base(exobj, cond)\n def func_top(exobj, cond):\n \"\"\" Add another multi-path to exception object \"\"\"\n func_mid(exobj, cond)\n # Mangle \"natural\" order to test __str__, which\n # sorts the function names\n func_top(exobj, False)\n func_base(exobj, False)\n func_mid(exobj, False)\n try:\n func_mid(exobj, True)\n except:\n pass\n entry = exobj._ex_dict[list(exobj._ex_dict.keys())[0]]\n root = 'tests.test_exh.TestExHandle.test_raise_exception_if_exceptions'\n assert (\n entry['function']\n ==\n [\n '{0}/{0}.func_top/{0}.func_mid/{0}.func_base'.format(root),\n '{0}/{0}.func_base'.format(root),\n '{0}/{0}.func_mid/{0}.func_base'.format(root)\n ]\n )\n assert entry['raised'] == [False, False, True]\n stxt = str(exobj).split('\\n')[3:]\n assert not stxt[0].endswith(' [raised]')\n assert stxt[1].endswith(' [raised]')\n assert not stxt[2].endswith(' [raised]')\n db = exobj.exceptions_db\n assert db[0]['name'] == (\n '{0}/{0}.func_top/{0}.func_mid/{0}.func_base'.format(root)\n )\n assert db[0]['data'] == 'RuntimeError (Invalid condition)'\n assert db[1]['name'] == (\n '{0}/{0}.func_base'.format(root)\n )\n assert db[1]['data'] == 'RuntimeError (Invalid condition)'\n assert db[2]['name'] == (\n '{0}/{0}.func_mid/{0}.func_base'.format(root)\n )\n assert db[2]['data'] == 'RuntimeError (Invalid condition)*'\n #\n try:\n func_top(exobj, True)\n except:\n pass\n assert entry['raised'] == [True, False, True]\n stxt = str(exobj).split('\\n')[3:]\n assert not stxt[0].endswith(' [raised]')\n assert stxt[1].endswith(' [raised]')\n assert stxt[2].endswith(' [raised]')\n db = exobj.exceptions_db\n assert db[0]['name'] == (\n '{0}/{0}.func_top/{0}.func_mid/{0}.func_base'.format(root)\n )\n assert db[0]['data'] == 'RuntimeError (Invalid condition)*'\n assert db[1]['name'] == (\n '{0}/{0}.func_base'.format(root)\n )\n assert db[1]['data'] == 'RuntimeError (Invalid condition)'\n assert db[2]['name'] == (\n '{0}/{0}.func_mid/{0}.func_base'.format(root)\n )\n assert db[2]['data'] == 'RuntimeError (Invalid condition)*'\n #\n try:\n func_base(exobj, True)\n except:\n pass\n assert entry['raised'] == [True, True, True]\n stxt = str(exobj).split('\\n')[3:]\n assert stxt[0].endswith(' [raised]')\n assert stxt[1].endswith(' [raised]')\n assert stxt[2].endswith(' [raised]')\n db = exobj.exceptions_db\n assert db[0]['name'] == (\n '{0}/{0}.func_top/{0}.func_mid/{0}.func_base'.format(root)\n )\n assert db[0]['data'] == 'RuntimeError (Invalid condition)*'\n assert db[1]['name'] == (\n '{0}/{0}.func_base'.format(root)\n )\n assert db[1]['data'] == 'RuntimeError (Invalid condition)*'\n assert db[2]['name'] == (\n '{0}/{0}.func_mid/{0}.func_base'.format(root)\n )\n assert db[2]['data'] == 'RuntimeError (Invalid condition)*'",
"def test_multiple_paths_to_same_exception(self):\n def exdef(obj):\n obj.add_exception(\n 'my_exception', RuntimeError, 'This is the exception'\n )\n def funca(obj):\n exdef(obj)\n def funcb(obj):\n exdef(obj)\n exobj = putil.exh.ExHandle(full_cname=True)\n funca(exobj)\n funcb(exobj)\n exdb = sorted(exobj.exceptions_db, key=lambda item: item['name'])\n assert len(exdb) == 2\n assert exdb[0]['data'] == 'RuntimeError (This is the exception)'\n assert exdb[1]['data'] == 'RuntimeError (This is the exception)'\n cname = (\n 'tests.test_exh.TestExHandle.test_multiple_paths_to_same_exception'\n )\n assert exdb[0]['name'].endswith(\n '{0}/{0}.funca/{0}.exdef'.format(cname)\n )\n assert exdb[1]['name'].endswith(\n '{0}/{0}.funcb/{0}.exdef'.format(cname)\n )\n str_in = putil.misc.flatten_list([\n item.split('\\n') for item in str(exobj).split('\\n\\n')\n ])\n fstring = cname+'/'+cname+'.func{0}/'+cname+'.exdef'\n assert str_in[0].endswith('/my_exception')\n assert str_in[1] == 'Type : RuntimeError'\n assert str_in[2] == 'Message : This is the exception'\n assert str_in[3].startswith('Function: ')\n assert (str_in[3].endswith(fstring.format('a')) or\n str_in[3].endswith(fstring.format('b')))\n assert str_in[4].startswith(' ')\n assert str_in[4].endswith(fstring.format(\n 'a' if str_in[3].endswith(fstring.format('b')) else 'b'\n ))",
"def test_does_not_exist_exceptions_are_not_shared_between_model(self):\n\n class Model1(Model):\n\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\n\n class Model2(Model):\n\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\n\n try:\n raise Model1.DoesNotExist\n except Model2.DoesNotExist:\n assert False, \"Model1 exception should not be caught by Model2\"\n except Model1.DoesNotExist:\n #expected\n pass",
"def test_check_wires_exception(self, wires):\n with pytest.raises(ValueError, match=\"wires must be a positive integer\"):\n check_wires(wires=wires)",
"def test_observable_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_Reraise(self):\r\n #raise from outside\r\n try:\r\n outer(self.TestClass, *(self.Attributes['args']))\r\n except self.TestClass as errTest:\r\n #proper\r\n try:\r\n raise self.TestClass(*(self.Attributes['args']),\r\n objTraceback = errTest.Traceback)\r\n except self.TestClass as errTestNew:\r\n self.assertListEqual(errTest.CallChain, errTestNew.CallChain,\r\n 'CallChain is not preserved')\r\n self.assertEqual(errTest.Info, errTestNew.Info,\r\n 'Info is not preserved')\r\n del errTestNew\r\n #wrong type of the traceback argument - must be ignored - i.e. as\r\n #+ raised inside\r\n try:\r\n raise self.TestClass(*(self.Attributes['args']),\r\n objTraceback = 1)\r\n except self.TestClass as errTestNew:\r\n self.assertEqual(len(errTestNew.CallChain), 1,\r\n 'CallChain is wrong')\r\n del errTestNew\r\n del errTest",
"def test_raise_error_fewer_than_2_operands(self):\n with pytest.raises(ValueError, match=\"Require at least two operators to combine;\"):\n _ = ValidOp(qml.PauliX(0))",
"def test_key_and_url_set_simultaneously(eld):\n with pytest.raises(ValueError, match=\"can not both be set at the same\"):\n eld.get_data(key=\"foo\", url=\"bar\")",
"def test_add_event_incorrect():\n\n assert_raises(exceptions.InvalidValue, events.add, 3, 2, \"Test Event\")",
"def test_should_check_raise_exception_class(self):\n instance = error_.register_exception(\n CommonException\n ).register_parser(common_exception_parser)\n name = \"Andy\"\n with pytest.raises(CommonException):\n instance.raise_common_exception(name)",
"def test_create_inbound_shipment_exceptions(self):\n # Proper inputs (initial setup)\n shipment_id = \"is_a_string\"\n shipment_name = \"is_a_string\"\n destination = \"is_a_string\"\n items = [{\"sku\": \"something\", \"quantity\": 6}]\n\n # 1: `shipment_id` not a string: raises AssertionError\n shipment_id = {\"not\": \"a string\"}\n with self.assertRaises(AssertionError):\n self.api.create_inbound_shipment(\n shipment_id, shipment_name, destination, items\n )\n shipment_id = \"is_a_string\" # reset\n\n # 2: `shipment_name` not a string: raises AssertionError\n shipment_name = {\"not\": \"a string\"}\n with self.assertRaises(AssertionError):\n self.api.create_inbound_shipment(\n shipment_id, shipment_name, destination, items\n )\n shipment_name = \"is_a_string\" # reset\n\n # 3: `destination` not a string: raises AssertionError\n destination = {\"not\": \"a string\"}\n with self.assertRaises(AssertionError):\n self.api.create_inbound_shipment(\n shipment_id, shipment_name, destination, items\n )\n destination = \"is_a_string\" # reset\n\n # 4: `items` empty: raises MWSError\n items = []\n with self.assertRaises(MWSError):\n self.api.create_inbound_shipment(\n shipment_id, shipment_name, destination, items\n )\n items = [{\"sku\": \"something\", \"quantity\": 6}] # reset\n\n # 5: wipe out the `from_address` for the API class before calling: raises MWSError\n self.api.from_address = None\n with self.assertRaises(MWSError):\n self.api.create_inbound_shipment(\n shipment_id, shipment_name, destination, items\n )",
"def throwException(self):",
"def test_reconstruct_not_raised(self, *shapes):\n self.assert_exception_is_not_raised(matting.reconstruct, shapes)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Test that initialization of an operator with broadcasted parameters works and sets the ``batch_size`` correctly.
|
def test_broadcasted_params(self, params, exp_batch_size):
class DummyOp(qml.operation.Operator):
r"""Dummy custom operator that declares ndim_params as a class property"""
ndim_params = (0, 2)
num_wires = 1
op = DummyOp(*params, wires=0)
assert op.ndim_params == (0, 2)
assert op._batch_size == exp_batch_size
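# --- Illustrative sketch (not part of the original record) ---
# One plausible way a batch size can be inferred from declared ndim_params:
# a parameter carrying exactly one extra leading dimension is treated as
# broadcast, and that dimension's length becomes the batch size. This is a
# simplified stand-in, not PennyLane's actual implementation.
import numpy as np

def infer_batch_size(params, ndim_params):
    batch_size = None
    for param, expected_ndim in zip(params, ndim_params):
        extra_dims = np.ndim(param) - expected_ndim  # leading broadcast axes
        if extra_dims == 1:
            batch_size = np.shape(param)[0]
    return batch_size

# A scalar plus a batch of three 2x2 matrices gives batch_size == 3;
# unbroadcasted inputs give None.
assert infer_batch_size((0.5, np.zeros((3, 2, 2))), (0, 2)) == 3
assert infer_batch_size((0.5, np.zeros((2, 2))), (0, 2)) is None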
|
[
"def test_broadcasted_params(self, params, exp_batch_size):\n import jax\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(jax.numpy.array(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import tensorflow as tf\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(tf.Variable(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(pnp.array(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import torch\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(torch.tensor(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_different_batch_sizes_raises_error(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n with pytest.raises(\n ValueError, match=\"Broadcasting was attempted but the broadcasted dimensions\"\n ):\n _ = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3, 4]), wires=2))",
"def test_batch_size_not_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3]), wires=2))\n assert op.batch_size == 3",
"def test_batch_size_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, base, base)\n assert op.batch_size == 3",
"def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):\n\n perm = [0, 2, 1, 3]\n permuted_matrix = self.base_matrix_2_broadcasted[:, perm][:, :, perm]\n\n expanded_matrix = np.tensordot(\n np.tensordot(\n np.kron(SWAP, I),\n np.kron(I_broadcasted, self.base_matrix_2_broadcasted),\n axes=[[1], [1]],\n ),\n np.kron(SWAP, I),\n axes=[[2], [0]],\n )\n expanded_matrix = np.moveaxis(expanded_matrix, 0, -2)\n\n class DummyOp(qml.operation.Operator):\n num_wires = 2\n\n def compute_matrix(*params, **hyperparams):\n return self.base_matrix_2_broadcasted\n\n op = DummyOp(wires=[0, 2])\n assert np.allclose(op.matrix(), self.base_matrix_2_broadcasted, atol=tol)\n assert np.allclose(op.matrix(wire_order=[2, 0]), permuted_matrix, atol=tol)\n assert np.allclose(op.matrix(wire_order=[0, 1, 2]), expanded_matrix, atol=tol)",
"def test_batch_size_None(self):\n prod_op = ValidOp(qml.PauliX(0), qml.RX(1.0, wires=0))\n assert prod_op.batch_size is None",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def test_no_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]\n )\n assert np.allclose(self.base_matrix_2_broadcasted, res)",
"def testNoneBatch(self, constructor, kwargs):\n vqvae_module = constructor(**kwargs)\n inputs = jnp.zeros([0, 5, 5, kwargs['embedding_dim']])\n vqvae_module(inputs, is_training=False)",
"def test_conditional_broadcasting(session_tf, full_cov, white, conditional_type):\n X_ = tf.placeholder(tf.float64, [None, None])\n q_mu = np.random.randn(Data.M, Data.Dy)\n q_sqrt = np.tril(np.random.randn(Data.Dy, Data.M, Data.M), -1)\n\n if conditional_type == \"Z\":\n feat = Data.Z\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"inducing_points\":\n feat = gpflow.features.InducingPoints(Data.Z)\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"mixing\":\n # variational params have different output dim in this case\n q_mu = np.random.randn(Data.M, Data.L)\n q_sqrt = np.tril(np.random.randn(Data.L, Data.M, Data.M), -1)\n feat = mf.MixedKernelSharedMof(gpflow.features.InducingPoints(Data.Z))\n kern = mk.SeparateMixedMok(\n kernels=[gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5) for _ in range(Data.L)],\n W=Data.W\n )\n\n if conditional_type == \"mixing\" and full_cov:\n pytest.skip(\"combination is not implemented\")\n\n num_samples = 5\n sample_tf, mean_tf, cov_tf = sample_conditional(\n X_,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n\n ss, ms, vs = [], [], []\n for X in Data.SX:\n s, m, v = session_tf.run([sample_tf, mean_tf, cov_tf], {X_: X})\n ms.append(m)\n vs.append(v)\n ss.append(s)\n\n ms = np.array(ms)\n vs = np.array(vs)\n ss = np.array(ss)\n\n ss_S12, ms_S12, vs_S12 = session_tf.run(\n sample_conditional(\n Data.SX,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n ss_S1_S2, ms_S1_S2, vs_S1_S2 = session_tf.run(\n sample_conditional(\n Data.S1_S2_X,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n assert_allclose(ss_S12.shape, ss.shape)\n assert_allclose(ms_S12, ms)\n assert_allclose(vs_S12, vs)\n assert_allclose(ms_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), ms)\n assert_allclose(ss_S1_S2.shape, [Data.S1, Data.S2, num_samples, Data.N, Data.Dy])\n\n if full_cov:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.Dy, Data.N, Data.N), vs)\n else:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), vs)",
"def _setup_prediction_op(self):",
"def test_batchnorm():\n reset()\n\n # create model with a mix of pretrained and new weights\n # NOTE: the pretrained layers will be initialized by Keras on creation, while the new Dense\n # layer will remain uninitialized\n input_shape = (64,64,3)\n inputs = tf.keras.layers.Input(shape=input_shape)\n x = tf.keras.layers.Dense(1)(inputs)\n bn = tf.keras.layers.BatchNormalization()\n x = bn(x)\n logits = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs=inputs, outputs=logits, name=\"model\")\n\n # create session, force tf.Keras to use it\n config = tf.ConfigProto(allow_soft_placement=True)#, log_device_placement=True)\n sess = tf.Session(config=config)\n tf.keras.backend.set_session(sess)\n\n #sess.run(tf.global_variables_initializer())\n initialize_variables(sess)\n\n # moving mean & std dev before training\n mu, std = sess.run([bn.moving_mean, bn.moving_variance])\n\n x = np.random.randn(10, *input_shape)\n\n # training mode (should use batch mean & std dev)\n out1 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n out2 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n assert np.array_equal(out1, out2)\n\n # non-training mode (should use internal moving average and std dev)\n out3 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 0})\n out4 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 0})\n assert np.array_equal(out3, out4)\n assert not np.allclose(out3, out1)\n\n # training mode again\n out5 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n assert np.array_equal(out5, out1)\n\n # update ops (update internal moving average and std dev)\n sess.run(model.updates, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n\n # train again (should not be affected by the updates)\n out6 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n assert np.array_equal(out6, out1)\n\n # non-train again (should use updated moving average and std dev)\n out7 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 0})\n assert not np.array_equal(out7, out6) # not equal to train\n assert not np.array_equal(out7, out3) # not equal to previous test due to update ops",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def test_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_1_broadcasted, wires=[2], wire_order=[0, 2]\n )\n expected = np.array(\n [\n [\n [1, 2, 0, 0],\n [3, 4, 0, 0],\n [0, 0, 1, 2],\n [0, 0, 3, 4],\n ],\n [\n [5, 6, 0, 0],\n [7, 8, 0, 0],\n [0, 0, 5, 6],\n [0, 0, 7, 8],\n ],\n [\n [9, 10, 0, 0],\n [11, 12, 0, 0],\n [0, 0, 9, 10],\n [0, 0, 11, 12],\n ],\n ]\n )\n assert np.allclose(expected, res)\n\n res = qml.operation.expand_matrix(\n self.base_matrix_1_broadcasted, wires=[2], wire_order=[2, 0]\n )\n expected = np.array(\n [\n [\n [1, 0, 2, 0],\n [0, 1, 0, 2],\n [3, 0, 4, 0],\n [0, 3, 0, 4],\n ],\n [\n [5, 0, 6, 0],\n [0, 5, 0, 6],\n [7, 0, 8, 0],\n [0, 7, 0, 8],\n ],\n [\n [9, 0, 10, 0],\n [0, 9, 0, 10],\n [11, 0, 12, 0],\n [0, 11, 0, 12],\n ],\n ]\n )\n assert np.allclose(expected, res)",
"def test_operator_create_operator(self):\n pass",
"def test_batch(x_batch_norm, x_batch_unnorm):\n # NOTE: closes over x_batch_np & x_batch_norm_correct_np\n assert x_batch_norm.dtype == x_batch_norm_correct_np.dtype\n assert x_batch_unnorm.dtype == x_batch_np.dtype\n assert np.allclose(x_batch_norm, x_batch_norm_correct_np)\n assert not np.allclose(x_batch_norm, x_batch_np)\n assert np.all(np.max(x_batch_norm, axis=(0,1,2)) <= 1)\n assert np.all(np.max(x_batch_norm, axis=(0,1,2)) > 0)\n assert np.all(np.min(x_batch_norm, axis=(0,1,2)) >= -1)\n assert np.all(np.min(x_batch_norm, axis=(0,1,2)) < 0)\n assert np.allclose(x_batch_unnorm, x_batch_unnorm_np) #, atol=1e-7)",
"def testTruncatingDispatcher(self):\n # batch = 1\n # length = 3\n # num_experts = 2\n expert_capacity = 2\n requests = tf.constant([\n [[True, False],\n [True, True],\n [True, False]],\n [[False, False],\n [False, True],\n [True, False]]\n ], dtype=tf.float32)\n dispatcher = expert_utils.TruncatingDispatcher(requests, expert_capacity)\n x = tf.constant([\n [[3, 4],\n [5, 6],\n [7, 8]],\n [[2, 3],\n [4, 5],\n [6, 7]]\n ], dtype=tf.float32)\n dispatched = dispatcher.dispatch(x)\n dispatched_expected = [\n [[[3, 4], [5, 6]],\n [[5, 6], [3, 4]]],\n [[[6, 7], [2, 3]],\n [[4, 5], [2, 3]]]\n ]\n y = [\n [[[7, 12], [11, 30]],\n [[-1, 30], [9, 9]]],\n [[[13, 42], [9, 9]],\n [[-1, 20], [9, 9]]]\n ]\n combined = dispatcher.combine(y)\n combined_expected = [\n [[7, 12],\n [10, 60],\n [0, 0]],\n [[0, 0],\n [-1, 20],\n [13, 42]]\n ]\n nonpadding = dispatcher.nonpadding()\n nonpadding_expected = [\n [[1, 1],\n [1, 0]],\n [[1, 0],\n [1, 0]]\n ]\n gates = dispatcher.gates()\n gates_expected = [\n [[1, 0],\n [1, 1],\n [0, 0]],\n [[0, 0],\n [0, 1],\n [1, 0]]\n ]\n\n with self.test_session() as sess:\n self._verify_value(sess, dispatched, dispatched_expected)\n self._verify_value(sess, combined, combined_expected)\n self._verify_value(sess, nonpadding, nonpadding_expected)\n self._verify_value(sess, gates, gates_expected)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Test that initialization of an operator with broadcasted parameters works and sets the ``batch_size`` correctly with Autograd parameters.
|
def test_broadcasted_params(self, params, exp_batch_size):
class DummyOp(qml.operation.Operator):
r"""Dummy custom operator that declares ndim_params as a class property"""
ndim_params = (0, 2)
num_wires = 1
params = tuple(pnp.array(p, requires_grad=True) for p in params)
op = DummyOp(*params, wires=0)
assert op.ndim_params == (0, 2)
assert op._batch_size == exp_batch_size
|
[
"def test_broadcasted_params(self, params, exp_batch_size):\n import jax\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(jax.numpy.array(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import torch\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(torch.tensor(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import tensorflow as tf\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(tf.Variable(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_different_batch_sizes_raises_error(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n with pytest.raises(\n ValueError, match=\"Broadcasting was attempted but the broadcasted dimensions\"\n ):\n _ = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3, 4]), wires=2))",
"def test_batch_size_not_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3]), wires=2))\n assert op.batch_size == 3",
"def test_batch_size_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, base, base)\n assert op.batch_size == 3",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):\n\n perm = [0, 2, 1, 3]\n permuted_matrix = self.base_matrix_2_broadcasted[:, perm][:, :, perm]\n\n expanded_matrix = np.tensordot(\n np.tensordot(\n np.kron(SWAP, I),\n np.kron(I_broadcasted, self.base_matrix_2_broadcasted),\n axes=[[1], [1]],\n ),\n np.kron(SWAP, I),\n axes=[[2], [0]],\n )\n expanded_matrix = np.moveaxis(expanded_matrix, 0, -2)\n\n class DummyOp(qml.operation.Operator):\n num_wires = 2\n\n def compute_matrix(*params, **hyperparams):\n return self.base_matrix_2_broadcasted\n\n op = DummyOp(wires=[0, 2])\n assert np.allclose(op.matrix(), self.base_matrix_2_broadcasted, atol=tol)\n assert np.allclose(op.matrix(wire_order=[2, 0]), permuted_matrix, atol=tol)\n assert np.allclose(op.matrix(wire_order=[0, 1, 2]), expanded_matrix, atol=tol)",
"def test_batch_size_None(self):\n prod_op = ValidOp(qml.PauliX(0), qml.RX(1.0, wires=0))\n assert prod_op.batch_size is None",
"def distributed_shampoo(learning_rate,\n block_size,\n beta1=0.9,\n beta2=0.999,\n diagonal_epsilon=1e-10,\n matrix_epsilon=1e-6,\n weight_decay=0.0,\n start_preconditioning_step=1,\n preconditioning_compute_steps=1,\n statistics_compute_steps=1,\n best_effort_shape_interpretation=True,\n graft_type=GraftingType.SGD,\n nesterov=True,\n exponent_override=0,\n batch_axis_name=None,\n mesh_axis_names=None,\n num_devices_for_pjit=None,\n shard_optimizer_states=False,\n inverse_failure_threshold=0.1,\n moving_average_for_momentum=False,\n skip_preconditioning_dim_size_gt=4096,\n clip_by_scaled_gradient_norm=None,\n precision=lax.Precision.HIGHEST):\n\n def sharded_init_fn(params):\n params_flat, treedef = jax.tree_flatten(params)\n # Find max size to pad to.\n max_size = 0\n for param in params_flat:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n if not _skip_preconditioning(param):\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = [s[0] for s in shapes]\n max_size = max(max(sizes), max_size)\n\n padded_statistics = []\n padded_preconditioners = []\n local_stats_flat = []\n for param in params_flat:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = []\n\n statistics = []\n preconditioners = []\n index_start = len(padded_statistics)\n if not _skip_preconditioning(param):\n sizes = [s[0] for s in shapes]\n shapes = preconditioner.shapes_for_preconditioners()\n statistics = [matrix_epsilon * jnp.eye(max_size) for s in shapes]\n preconditioners = [jnp.eye(max_size) for s in shapes]\n padded_statistics.extend(statistics)\n padded_preconditioners.extend(preconditioners)\n\n adagrad_statistics = []\n if graft_type != GraftingType.SGD:\n adagrad_statistics = jnp.zeros_like(param)\n local_stats_flat.append(\n LocalShardedParameterStats(adagrad_statistics, jnp.zeros_like(param),\n jnp.zeros_like(param), index_start, sizes))\n\n local_stats = jax.tree_unflatten(treedef, local_stats_flat)\n # Pad the statistics and preconditioner matrices to be a multiple of\n # num devices.\n # TODO(rohananil): Relax to only the size of the mesh axis where the dim\n # is split on.\n to_pad = -len(padded_statistics) % num_devices_for_pjit\n padded_statistics.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n padded_preconditioners.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n global_stats = GlobalShardedParameterStats(\n jnp.stack(padded_statistics), jnp.stack(padded_preconditioners))\n return ShampooState(\n count=jnp.zeros([], jnp.int32),\n stats=ShardedShampooStats(global_stats, local_stats))\n\n def sharded_update_fn(grads, state, params):\n \"\"\"Transform the input gradient and update all statistics in sharded mode.\n\n Args:\n grads: the gradient tensors for the parameters.\n state: a named tuple containing the state of the optimizer\n params: the parameters that should be updated.\n\n Returns:\n A tuple containing the new parameters and the new optimizer state.\n \"\"\"\n params_flat, treedef = jax.tree_flatten(params)\n grads_flat = treedef.flatten_up_to(grads)\n\n global_stats = state.stats.global_stats\n local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)\n stats_flat = [\n _convert_to_parameter_stats(global_stats, local_stat)\n for local_stat in local_stats_flat\n ]\n new_stats_flat = jax.tree_multimap(\n lambda g, s, p: _compute_stats(g, s, p, 
state.count), grads_flat,\n stats_flat, params_flat)\n\n exponents = []\n for stat, param in zip(new_stats_flat, params_flat):\n num_statistics = len(stat.statistics)\n if num_statistics > 0:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n exponent = (\n preconditioner.exponent_for_preconditioner()\n if exponent_override == 0 else exponent_override)\n exponents.extend([exponent] * num_statistics)\n\n outputs = jax.tree_multimap(\n lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat,\n new_stats_flat, params_flat)\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n # Create new local_stats\n new_local_stats_flat = [\n _convert_from_parameter_stats(new_stat, local_stat)\n for new_stat, local_stat in zip(new_stats_flat, local_stats_flat)\n ]\n new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat)\n\n max_size = global_stats.statistics.shape[1]\n new_padded_statistics = []\n for stat in new_stats_flat:\n new_padded_statistics.extend(\n [pad_matrix(stat, max_size) for stat in stat.statistics])\n\n # Create global stats\n # TODO(rohananil): Preconditioner is not updated every step, so cost of\n # stack/pad can be obviated away.\n # Pad the statistics and preconditioner matrices to be a multiple of\n # num devices.\n # TODO(rohananil): Relax to only the size of the mesh axis where the dim\n # is split on.\n to_pad = -len(new_padded_statistics) % num_devices_for_pjit\n new_padded_statistics.extend([\n jnp.eye(max_size, dtype=new_padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n\n def _matrix_inverse_pth_root_vmap(xs, ps):\n mi_pth_root = functools.partial(\n matrix_inverse_pth_root,\n ridge_epsilon=matrix_epsilon,\n precision=precision)\n preconditioners, errors = jax.vmap(mi_pth_root)(xs, ps)\n return preconditioners, errors\n\n def _internal_inverse_pth_root_all():\n preconditioners, errors = _matrix_inverse_pth_root_vmap(\n global_stats.statistics, jnp.stack(exponents))\n return preconditioners, errors\n\n if preconditioning_compute_steps == 1:\n new_preconditioners, errors = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. 
Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = global_stats.statistics\n errors_init = np.stack([inverse_failure_threshold] * len(exponents))\n init_state = [preconditioners_init, errors_init]\n perform_step = state.count % preconditioning_compute_steps == 0\n new_preconditioners, errors = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n\n errors = errors.reshape((-1, 1, 1))\n predicate = jnp.logical_or(\n jnp.isnan(errors),\n errors >= inverse_failure_threshold).astype(new_preconditioners.dtype)\n # TODO(rohananil): Check for numerical instabilities.\n new_conditional_preconditioners = (\n predicate * global_stats.preconditioners +\n (1.0 - predicate) * new_preconditioners)\n new_global_stats = GlobalShardedParameterStats(\n jnp.stack(new_padded_statistics), new_conditional_preconditioners)\n new_shampoo_state = ShampooState(\n count=state.count + 1,\n stats=ShardedShampooStats(new_global_stats, new_local_stats))\n return updates, new_shampoo_state\n\n def init_fn(params):\n \"\"\"Initialise the optimiser's state.\"\"\"\n\n def _init(param):\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n statistics = []\n preconditioners = []\n if not _skip_preconditioning(param):\n shapes = preconditioner.shapes_for_preconditioners()\n statistics = [matrix_epsilon * jnp.eye(s[0]) for s in shapes]\n preconditioners = [jnp.eye(s[0]) for s in shapes]\n\n adagrad_statistics = []\n if graft_type != GraftingType.SGD:\n adagrad_statistics = jnp.zeros_like(param)\n return ParameterStats(adagrad_statistics, statistics, preconditioners,\n jnp.zeros_like(param), jnp.zeros_like(param))\n\n return ShampooState(\n count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params))\n\n def _skip_preconditioning(param):\n return len(param.shape) < 1 or any(\n [s > skip_preconditioning_dim_size_gt for s in param.shape])\n\n def _compute_stats(grad, state, param, step):\n \"\"\"Compute per-parameter statistics.\"\"\"\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n new_statistics = [[]] * len(state.statistics)\n w1 = beta2\n w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)\n if not _skip_preconditioning(param):\n\n def compute_updated_statistics():\n new_stats = preconditioner.statistics_from_grad(grad)\n new_stats_accumulators = []\n for stat, stat_accumulator in zip(new_stats, state.statistics):\n new_stats_accumulators.append(w1 * stat_accumulator + w2 * stat)\n return new_stats_accumulators\n\n if statistics_compute_steps > 1:\n perform_step = step % statistics_compute_steps == 0\n init_state = state.statistics\n new_statistics = list(\n efficient_cond(perform_step, compute_updated_statistics,\n init_state))\n else:\n new_statistics = compute_updated_statistics()\n return ParameterStats(state.diagonal_statistics, new_statistics,\n state.preconditioners, state.diagonal_momentum,\n state.momentum)\n\n def _compute_preconditioners(states, params, step):\n \"\"\"Compute preconditioners for statistics.\"\"\"\n statistics = []\n num_statistics_per_state = []\n original_shapes = []\n exponents = []\n max_size = 0\n prev_preconditioners = []\n for state, param in zip(states, params):\n num_statistics = len(state.statistics)\n num_statistics_per_state.append(num_statistics)\n original_shapes_for_state = []\n if num_statistics > 0:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n for statistic in 
state.statistics:\n exponents.append(preconditioner.exponent_for_preconditioner(\n ) if exponent_override == 0 else exponent_override)\n original_shapes_for_state.append(statistic.shape)\n max_size = max(max_size, statistic.shape[0])\n statistics.extend(state.statistics)\n prev_preconditioners.extend(state.preconditioners)\n original_shapes.extend(original_shapes_for_state)\n num_statistics = len(statistics)\n\n if batch_axis_name:\n num_devices = lax.psum(1, batch_axis_name)\n\n # Pad statistics and exponents to next multiple of num_devices.\n packed_statistics = [pad_matrix(stat, max_size) for stat in statistics]\n to_pad = -num_statistics % num_devices\n packed_statistics.extend([\n jnp.eye(max_size, dtype=packed_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n\n if not packed_statistics:\n return states\n # Batch statistics and exponents so that so that leading axis is\n # num_devices.\n def _batch(statistics, exponents, num_devices):\n assert len(statistics) == len(exponents)\n n = len(statistics)\n b = int(n / num_devices)\n batched_statistics = [\n jnp.stack(statistics[idx:idx + b]) for idx in range(0, n, b)\n ]\n batched_exponents = [\n jnp.stack(exponents[idx:idx + b]) for idx in range(0, n, b)\n ]\n return jnp.stack(batched_statistics), jnp.stack(batched_exponents)\n\n # Unbatch values across leading axis and return a list of elements.\n def _unbatch(batched_values):\n b1, b2 = batched_values.shape[0], batched_values.shape[1]\n results = []\n for v_array in jnp.split(\n batched_values, indices_or_sections=b1, axis=0):\n v_array = jnp.squeeze(v_array)\n # b2 = batches (number of preconditioner computation) per core.\n if b2 > 1:\n for v in jnp.split(v_array, indices_or_sections=b2, axis=0):\n results.append(jnp.squeeze(v))\n else:\n results.append(v_array)\n return results\n\n all_statistics, all_exponents = _batch(packed_statistics, exponents,\n num_devices)\n else:\n to_pad = -num_statistics % num_devices_for_pjit\n padded_statistics = [pad_matrix(stat, max_size) for stat in statistics]\n padded_statistics.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n all_statistics = jnp.stack(padded_statistics)\n all_exponents = jnp.stack(exponents)\n\n def _matrix_inverse_pth_root_vmap(xs, ps):\n mi_pth_root = functools.partial(\n matrix_inverse_pth_root,\n ridge_epsilon=matrix_epsilon,\n precision=precision)\n preconditioners, errors = jax.vmap(mi_pth_root)(xs, ps)\n return preconditioners, errors\n\n def _matrix_inverse_pth_root_pjit(xs, ps):\n mesh_axis_names_tuple = tuple(mesh_axis_names)\n # Partition the concatenated statistics matrix across all cores.\n partitioned_xs, partitioned_ps = pjit.pjit(\n lambda x, y: (x, y),\n in_axis_resources=None,\n out_axis_resources=pjit.PartitionSpec(mesh_axis_names_tuple,))(xs, ps)\n # Run matrix inverse pth root on each shard.\n partitioned_preconditioners, partitioned_errors = _matrix_inverse_pth_root_vmap(\n partitioned_xs, partitioned_ps)\n # Recombine the outputs at each core.\n preconditioners, errors = pjit.pjit(\n lambda x, y: (x, y),\n in_axis_resources=(pjit.PartitionSpec(mesh_axis_names_tuple,),\n pjit.PartitionSpec(mesh_axis_names_tuple,)),\n out_axis_resources=(None, None))(partitioned_preconditioners,\n partitioned_errors)\n return preconditioners, errors\n\n if not batch_axis_name:\n def _internal_inverse_pth_root_all():\n preconditioners, errors = _matrix_inverse_pth_root_pjit(\n 
all_statistics, all_exponents)\n b1 = preconditioners.shape[0]\n def split(batched_values):\n return [\n jnp.squeeze(v) for v in jnp.split(\n batched_values, indices_or_sections=b1, axis=0)\n ]\n\n return split(preconditioners), split(errors)\n\n if preconditioning_compute_steps == 1:\n preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = padded_statistics\n errors_init = [inverse_failure_threshold] * len(padded_statistics)\n init_state = [preconditioners_init, errors_init]\n perform_step = step % preconditioning_compute_steps == 0\n preconditioners_flat, errors_flat = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n else:\n\n def _internal_inverse_pth_root_all():\n preconditioners = jnp.array(all_statistics)\n current_replica = lax.axis_index(batch_axis_name)\n preconditioners, errors = _matrix_inverse_pth_root_vmap(\n all_statistics[current_replica], all_exponents[current_replica])\n preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)\n errors = jax.lax.all_gather(errors, batch_axis_name)\n preconditioners_flat = _unbatch(preconditioners)\n errors_flat = _unbatch(errors)\n return preconditioners_flat, errors_flat\n\n if preconditioning_compute_steps == 1:\n preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = packed_statistics\n errors_init = ([inverse_failure_threshold] * len(packed_statistics))\n init_state = [preconditioners_init, errors_init]\n perform_step = step % preconditioning_compute_steps == 0\n preconditioners_flat, errors_flat = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n\n def _skip(error):\n condition = jnp.logical_or(\n jnp.isnan(error), error >= inverse_failure_threshold)\n return condition.astype(error.dtype)\n\n def _select_preconditioner(error, new_p, old_p):\n return lax.cond(\n _skip(error), lambda _: old_p, lambda _: new_p, operand=None)\n\n new_preconditioners_flat = []\n for p, shape, prev_p, error in zip(preconditioners_flat, original_shapes,\n prev_preconditioners, errors_flat):\n new_preconditioners_flat.append(\n _select_preconditioner(error, p[:shape[0], :shape[1]], prev_p))\n\n assert len(states) == len(num_statistics_per_state)\n assert len(new_preconditioners_flat) == num_statistics\n\n # Add back empty preconditioners so we that we can set the optimizer state.\n preconditioners_for_states = []\n idx = 0\n for num_statistics, state in zip(num_statistics_per_state, states):\n if num_statistics == 0:\n preconditioners_for_states.append([])\n else:\n preconditioners_for_state = new_preconditioners_flat[idx:idx +\n num_statistics]\n assert len(state.statistics) == len(preconditioners_for_state)\n preconditioners_for_states.append(preconditioners_for_state)\n idx += num_statistics\n new_states = []\n for state, new_preconditioners in zip(states, preconditioners_for_states):\n new_states.append(\n ParameterStats(state.diagonal_statistics, state.statistics,\n new_preconditioners, state.diagonal_momentum,\n state.momentum))\n\n return new_states\n\n def _transform_grad(grad, state, param, step):\n \"\"\"Transform per-parameter 
gradients.\"\"\"\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n sgd_update = grad\n new_diagonal_statistics = state.diagonal_statistics\n if graft_type == GraftingType.ADAGRAD:\n new_diagonal_statistics = state.diagonal_statistics + jnp.square(grad)\n adagrad_update = grad / (\n jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)\n grafting_update = adagrad_update\n elif (graft_type == GraftingType.RMSPROP or\n graft_type == GraftingType.RMSPROP_NORMALIZED):\n\n scaled_grad = grad\n if graft_type == GraftingType.RMSPROP_NORMALIZED:\n scaled_grad = grad / jnp.linalg.norm(grad)\n\n w1 = beta2\n w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)\n\n new_diagonal_statistics = (\n w1 * state.diagonal_statistics + w2 * jnp.square(scaled_grad))\n rmsprop_update = scaled_grad / (\n jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)\n\n if clip_by_scaled_gradient_norm:\n scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (\n jnp.sqrt(float(rmsprop_update.size)))\n clipping_denom = jnp.maximum(\n 1., scaled_grad_norm / clip_by_scaled_gradient_norm)\n rmsprop_update /= clipping_denom\n\n grafting_update = rmsprop_update\n else:\n grafting_update = sgd_update\n\n precond_grad = grad\n if not _skip_preconditioning(param):\n precond_grad = preconditioner.preconditioned_grad(precond_grad,\n state.preconditioners)\n else:\n precond_grad = grafting_update\n\n grafting_update_norm = jnp.linalg.norm(grafting_update)\n precond_grad_norm = jnp.linalg.norm(precond_grad)\n\n multiplier = (grafting_update_norm / (precond_grad_norm + 1e-16))\n shampoo_update = precond_grad * multiplier\n\n shampoo_update_with_wd = shampoo_update\n grafting_update_with_wd = grafting_update\n if weight_decay != 0:\n shampoo_update_with_wd = shampoo_update + weight_decay * param\n grafting_update_with_wd = grafting_update + weight_decay * param\n\n w = (1.0 - beta1) if moving_average_for_momentum else 1.0\n shampoo_update_with_wd_momentum = (\n state.momentum * beta1 + w * shampoo_update_with_wd)\n grafting_update_with_wd_momentum = (\n state.diagonal_momentum * beta1 + w * grafting_update_with_wd)\n\n run_shampoo = (step >= start_preconditioning_step).astype(\n grafting_update_with_wd_momentum.dtype)\n\n momentum_update = (\n run_shampoo * shampoo_update_with_wd_momentum +\n (1.0 - run_shampoo) * grafting_update_with_wd_momentum)\n\n wd_update = (\n run_shampoo * shampoo_update_with_wd +\n (1.0 - run_shampoo) * grafting_update_with_wd)\n\n if nesterov:\n momentum_update = w * wd_update + beta1 * momentum_update\n\n lr = learning_rate\n if callable(learning_rate):\n lr = learning_rate(step)\n transformed_update = -1.0 * lr * momentum_update\n\n param_stats = ParameterStats(new_diagonal_statistics, state.statistics,\n state.preconditioners,\n grafting_update_with_wd_momentum,\n shampoo_update_with_wd_momentum)\n return transformed_update, param_stats\n\n def update_fn(grads, state, params):\n \"\"\"Transform the input gradient and update all statistics.\n\n Args:\n grads: the gradient tensors for the parameters.\n state: a named tuple containing the state of the optimizer\n params: the parameters that should be updated.\n\n Returns:\n A tuple containing the new parameters and the new optimizer state.\n \"\"\"\n params_flat, treedef = jax.tree_flatten(params)\n stats_flat = treedef.flatten_up_to(state.stats)\n grads_flat = treedef.flatten_up_to(grads)\n\n new_stats_flat = jax.tree_multimap(\n lambda g, s, p: _compute_stats(g, s, p, state.count), grads_flat,\n stats_flat, params_flat)\n 
new_stats_flat = _compute_preconditioners(new_stats_flat, params_flat,\n state.count)\n\n outputs = jax.tree_multimap(\n lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat,\n new_stats_flat, params_flat)\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n new_stats = jax.tree_unflatten(treedef, new_stats_flat)\n\n new_state = ShampooState(\n count=state.count+1, stats=new_stats)\n return updates, new_state\n\n if shard_optimizer_states:\n return optax.GradientTransformation(sharded_init_fn, sharded_update_fn)\n else:\n return optax.GradientTransformation(init_fn, update_fn)",
"def test_qc_custom_gradient_training_loop_param_learning(self):\n\n tf.compat.v1.reset_default_graph()\n tf.compat.v1.set_random_seed(0)\n np.random.seed(0)\n with tf.device('/cpu:0'):\n inputs = tf.keras.Input(shape=(32, 32, 1,))\n conv_op = tf.keras.layers.Conv2D(1, (2, 2),\n kernel_initializer=tf.random_uniform_initializer(-1, 2),\n bias_initializer='random_uniform',\n padding='SAME')(inputs)\n relu_op = tf.nn.relu(conv_op)\n reshape = tf.keras.layers.Flatten()(relu_op)\n _ = tf.keras.layers.Dense(10)(reshape)\n\n sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())\n initialize_uninitialized_vars(sess)\n\n # create quantsim model without config file\n sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False,\n quant_scheme=QuantScheme.training_range_learning_with_tf_init)\n\n for quant_op_name in sim._param_quantizers.keys():\n print(sim._param_quantizers[quant_op_name])\n\n for quant_op_name in sim._activation_quantizers.keys():\n print(sim._activation_quantizers[quant_op_name])\n\n def dummy_forward_pass(sess, args):\n model_output = sess.graph.get_tensor_by_name('dense/MatMul:0')\n model_input = sess.graph.get_tensor_by_name('input_1:0')\n shape = model_input.shape\n dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])\n sess.run(model_output, feed_dict={model_input: dummy_input})\n\n conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')\n relu_output_quant_op = sim.session.graph.get_operation_by_name('Relu_quantized')\n\n # enable input\n sim.compute_encodings(dummy_forward_pass, None)\n\n inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')\n np.random.seed(0)\n w_shape = inp_tensor.shape\n batches = 32\n inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])\n logits = sim.session.graph.get_tensor_by_name('dense/MatMul:0')\n\n labels = np.random.randint(10, size=batches)\n one_hot_labels = np.eye(10)[labels]\n\n with sim.session.graph.as_default():\n var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')\n loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)\n\n update_ops = []\n global_step = tf.compat.v1.train.create_global_step()\n initialize_uninitialized_vars(sim.session)\n\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=1e-3)\n gradients = optimizer.compute_gradients(loss, var_list)\n\n sim.compute_encodings(dummy_forward_pass, None)\n grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)\n update_ops.append(grad_updates)\n update_op = tf.group(*update_ops)\n\n conv_inp_tensor = conv2d_weight_quant_op.inputs[0]\n grads = tf.gradients(loss, [conv_inp_tensor,\n conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min],\n conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max]])\n dqbydx, dqbydmin, dqbydmax = grads\n input_gradient = sim.session.run([dqbydx], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n min_gradient = sim.session.run([dqbydmin], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n max_gradient = sim.session.run([dqbydmax], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n\n weights_before_train = sim.session.run(conv2d_weight_quant_op.inputs[0])\n encoding_min_before_train = 
sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])\n encoding_max_before_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])\n relu_output_encoding_min_before_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_min])\n relu_output_encoding_max_before_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_max])\n with tf.control_dependencies([update_op]):\n train_op = tf.identity(loss, name='train_op')\n\n for quant_op_name in sim._param_quantizers.keys():\n print(quant_op_name + '_min_before_train = ' + str(sim.session.run(\n sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_min])))\n print(quant_op_name + '_max_before_train = ' + str(sim.session.run(\n sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_max])))\n\n # start training\n _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})\n\n for quant_op_name in sim._param_quantizers.keys():\n print(quant_op_name + '_min = ' + str(sim.session.run(sim.session.graph.get_operation_by_name\n (quant_op_name).inputs[\n QuantizeOpIndices.encoding_min])))\n print(quant_op_name + '_max = ' + str(sim.session.run(sim.session.graph.get_operation_by_name\n (quant_op_name).inputs[\n QuantizeOpIndices.encoding_max])))\n\n weights_after_train = sim.session.run(conv2d_weight_quant_op.inputs[0])\n relu_output_encoding_min_after_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_min])\n relu_output_encoding_max_after_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_max])\n encoding_min_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])\n encoding_max_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])\n\n assert not np.allclose(weights_before_train, weights_after_train, atol=1e-6)\n assert encoding_min_before_train != encoding_min_after_train\n assert encoding_max_before_train != encoding_max_after_train\n assert relu_output_encoding_min_before_train != relu_output_encoding_min_after_train\n assert relu_output_encoding_max_before_train != relu_output_encoding_max_after_train\n\n\n baseline = sim.session.run(logits, feed_dict={inp_tensor: inp_data})\n sim.export('/tmp', 'quant_sim_model')\n after_sim_export = sim.session.run(logits, feed_dict={inp_tensor: inp_data})\n assert np.allclose(baseline, after_sim_export)\n\n\n sess.close()\n sim.session.close()",
"def test_no_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]\n )\n assert np.allclose(self.base_matrix_2_broadcasted, res)",
"def testNoneBatch(self, constructor, kwargs):\n vqvae_module = constructor(**kwargs)\n inputs = jnp.zeros([0, 5, 5, kwargs['embedding_dim']])\n vqvae_module(inputs, is_training=False)",
"def test_combine_parallel_dense_flat_biasadd():\n\n def before(x, w1, w2, b1, b2):\n args = [x, w1, w2, b1, b2]\n y1 = relay.nn.dense(x, w1)\n y2 = relay.nn.dense(x, w2)\n y1 = relay.add(y1, b1)\n y2 = relay.add(y2, b2)\n y = relay.Tuple((y1, y2))\n return relay.Function(args, y)\n\n def expected(x, w1, w2, b1, b2, j, bias_shape1, bias_shape2):\n args = [x, w1, w2, b1, b2]\n w_stacked = relay.concatenate((w1, w2), axis=0)\n y = relay.nn.dense(x, w_stacked, units=3 * j)\n n_out_dims = max(len(bias_shape1), 2)\n if len(bias_shape1) == 0:\n b1 = relay.repeat(relay.expand_dims(b1, -1), j, 0)\n elif bias_shape1[-1] == 1:\n b1 = relay.repeat(b1, j, len(bias_shape1) - 1)\n if len(bias_shape2) == 0:\n b2 = relay.repeat(relay.expand_dims(b2, -1), 2 * j, 0)\n elif bias_shape2[-1] == 1:\n b2 = relay.repeat(b2, 2 * j, len(bias_shape2) - 1)\n b = relay.concatenate((b1, b2), axis=max(0, len(bias_shape1) - 1))\n y = relay.add(y, b)\n begin = [0 for _ in range(n_out_dims - 1)]\n end = [-1 for _ in range(n_out_dims - 1)]\n strides = [1 for _ in range(n_out_dims)]\n y1 = relay.strided_slice(\n y, begin=begin + [0], end=end + [j], strides=strides, slice_mode=\"size\"\n )\n y2 = relay.strided_slice(\n y, begin=begin + [j], end=end + [2 * j], strides=strides, slice_mode=\"size\"\n )\n return relay.Function(args, relay.Tuple((y1, y2)))\n\n def check(i, j, k, bias_shape1, bias_shape2):\n x = relay.var(\"x\", shape=(i, k))\n w1 = relay.var(\"w1\", shape=(j, k))\n w2 = relay.var(\"w2\", shape=(2 * j, k))\n b1 = relay.var(\"b1\", shape=bias_shape1)\n b2 = relay.var(\"b2\", shape=bias_shape2)\n\n y_before = before(x, w1, w2, b1, b2)\n combine_pass = transform.CombineParallelDense(min_num_branches=2, to_batch=False)\n y = run_opt_pass(y_before, combine_pass)\n y_expected = expected(x, w1, w2, b1, b2, j, bias_shape1, bias_shape2)\n y_expected = run_opt_pass(y_expected, transform.InferType())\n tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)\n\n check(3, 5, 4, (), ())\n check(3, 5, 4, (1,), (1,))\n check(3, 5, 4, (5,), (1,))\n check(3, 5, 4, (1,), (10,))\n check(3, 5, 4, (3, 1), (3, 1))\n check(3, 5, 4, (3, 5), (3, 10))\n check(3, 5, 4, (3, 1), (3, 10))\n check(3, 5, 4, (3, 5), (3, 1))\n check(3, 5, 4, (9, 3, 5), (9, 3, 10))\n check(3, 5, 4, (9, 3, 5), (9, 3, 1))\n check(3, 5, 4, (9, 3, 1), (9, 3, 10))",
"def test_weights_built(self):\n # Create the network\n with nengo.Network():\n a = nengo.Ensemble(200, 2)\n b = nengo.Ensemble(400, 2)\n a_b = nengo.Connection(\n a, b, solver=nengo.solvers.Lstsq(weights=True)\n )\n\n # Create the model and built the pre-synaptic Ensemble\n model = builder.Model()\n model.rng = np.random\n model.seeds[a] = 1\n model.seeds[b] = 2\n model.seeds[a_b] = 3\n ensemble.build_ensemble(model, a)\n ensemble.build_ensemble(model, b)\n\n # Now build the connection and check that the params seem sensible\n params = ensemble.build_from_ensemble_connection(model, a_b)\n assert params.decoders.shape == (200, 400)",
"def test_amplify_init():\n # Essentially Grover's to select 011 or 111\n desired = cz_gate + A_inv + diffusion_operator(\n qubits) + A + cz_gate + A_inv + diffusion_operator(qubits) + A\n created = amplify(A, A_inv, cz_gate, qubits, iters, init=False)\n\n compare_progs(desired, created)",
"def test_conditional_broadcasting(session_tf, full_cov, white, conditional_type):\n X_ = tf.placeholder(tf.float64, [None, None])\n q_mu = np.random.randn(Data.M, Data.Dy)\n q_sqrt = np.tril(np.random.randn(Data.Dy, Data.M, Data.M), -1)\n\n if conditional_type == \"Z\":\n feat = Data.Z\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"inducing_points\":\n feat = gpflow.features.InducingPoints(Data.Z)\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"mixing\":\n # variational params have different output dim in this case\n q_mu = np.random.randn(Data.M, Data.L)\n q_sqrt = np.tril(np.random.randn(Data.L, Data.M, Data.M), -1)\n feat = mf.MixedKernelSharedMof(gpflow.features.InducingPoints(Data.Z))\n kern = mk.SeparateMixedMok(\n kernels=[gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5) for _ in range(Data.L)],\n W=Data.W\n )\n\n if conditional_type == \"mixing\" and full_cov:\n pytest.skip(\"combination is not implemented\")\n\n num_samples = 5\n sample_tf, mean_tf, cov_tf = sample_conditional(\n X_,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n\n ss, ms, vs = [], [], []\n for X in Data.SX:\n s, m, v = session_tf.run([sample_tf, mean_tf, cov_tf], {X_: X})\n ms.append(m)\n vs.append(v)\n ss.append(s)\n\n ms = np.array(ms)\n vs = np.array(vs)\n ss = np.array(ss)\n\n ss_S12, ms_S12, vs_S12 = session_tf.run(\n sample_conditional(\n Data.SX,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n ss_S1_S2, ms_S1_S2, vs_S1_S2 = session_tf.run(\n sample_conditional(\n Data.S1_S2_X,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n assert_allclose(ss_S12.shape, ss.shape)\n assert_allclose(ms_S12, ms)\n assert_allclose(vs_S12, vs)\n assert_allclose(ms_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), ms)\n assert_allclose(ss_S1_S2.shape, [Data.S1, Data.S2, num_samples, Data.N, Data.Dy])\n\n if full_cov:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.Dy, Data.N, Data.N), vs)\n else:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), vs)",
"def __init__(self, filter_shape, image_shape, poolsize=(2, 2), \n activation_fn=sigmoid):\n self.filter_shape = filter_shape\n self.image_shape = image_shape\n self.poolsize = poolsize\n self.activation_fn=activation_fn\n # initialize weights and biases\n n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))\n self.w = theano.shared(\n np.asarray(\n np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),\n dtype=theano.config.floatX),\n borrow=True)\n self.b = theano.shared(\n np.asarray(\n np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),\n dtype=theano.config.floatX),\n borrow=True)\n self.params = [self.w, self.b]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Test that initialization of an operator with broadcasted parameters works and sets the ``batch_size`` correctly with JAX parameters.
|
def test_broadcasted_params(self, params, exp_batch_size):
import jax
class DummyOp(qml.operation.Operator):
r"""Dummy custom operator that declares ndim_params as a class property"""
ndim_params = (0, 2)
num_wires = 1
params = tuple(jax.numpy.array(p) for p in params)
op = DummyOp(*params, wires=0)
assert op.ndim_params == (0, 2)
assert op._batch_size == exp_batch_size
|
[
"def test_broadcasted_params(self, params, exp_batch_size):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(pnp.array(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import torch\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(torch.tensor(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import tensorflow as tf\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(tf.Variable(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_different_batch_sizes_raises_error(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n with pytest.raises(\n ValueError, match=\"Broadcasting was attempted but the broadcasted dimensions\"\n ):\n _ = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3, 4]), wires=2))",
"def test_batch_size_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, base, base)\n assert op.batch_size == 3",
"def test_batch_size_not_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3]), wires=2))\n assert op.batch_size == 3",
"def test_batch_size_None(self):\n prod_op = ValidOp(qml.PauliX(0), qml.RX(1.0, wires=0))\n assert prod_op.batch_size is None",
"def testNoneBatch(self, constructor, kwargs):\n vqvae_module = constructor(**kwargs)\n inputs = jnp.zeros([0, 5, 5, kwargs['embedding_dim']])\n vqvae_module(inputs, is_training=False)",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def distributed_shampoo(learning_rate,\n block_size,\n beta1=0.9,\n beta2=0.999,\n diagonal_epsilon=1e-10,\n matrix_epsilon=1e-6,\n weight_decay=0.0,\n start_preconditioning_step=1,\n preconditioning_compute_steps=1,\n statistics_compute_steps=1,\n best_effort_shape_interpretation=True,\n graft_type=GraftingType.SGD,\n nesterov=True,\n exponent_override=0,\n batch_axis_name=None,\n mesh_axis_names=None,\n num_devices_for_pjit=None,\n shard_optimizer_states=False,\n inverse_failure_threshold=0.1,\n moving_average_for_momentum=False,\n skip_preconditioning_dim_size_gt=4096,\n clip_by_scaled_gradient_norm=None,\n precision=lax.Precision.HIGHEST):\n\n def sharded_init_fn(params):\n params_flat, treedef = jax.tree_flatten(params)\n # Find max size to pad to.\n max_size = 0\n for param in params_flat:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n if not _skip_preconditioning(param):\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = [s[0] for s in shapes]\n max_size = max(max(sizes), max_size)\n\n padded_statistics = []\n padded_preconditioners = []\n local_stats_flat = []\n for param in params_flat:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = []\n\n statistics = []\n preconditioners = []\n index_start = len(padded_statistics)\n if not _skip_preconditioning(param):\n sizes = [s[0] for s in shapes]\n shapes = preconditioner.shapes_for_preconditioners()\n statistics = [matrix_epsilon * jnp.eye(max_size) for s in shapes]\n preconditioners = [jnp.eye(max_size) for s in shapes]\n padded_statistics.extend(statistics)\n padded_preconditioners.extend(preconditioners)\n\n adagrad_statistics = []\n if graft_type != GraftingType.SGD:\n adagrad_statistics = jnp.zeros_like(param)\n local_stats_flat.append(\n LocalShardedParameterStats(adagrad_statistics, jnp.zeros_like(param),\n jnp.zeros_like(param), index_start, sizes))\n\n local_stats = jax.tree_unflatten(treedef, local_stats_flat)\n # Pad the statistics and preconditioner matrices to be a multiple of\n # num devices.\n # TODO(rohananil): Relax to only the size of the mesh axis where the dim\n # is split on.\n to_pad = -len(padded_statistics) % num_devices_for_pjit\n padded_statistics.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n padded_preconditioners.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n global_stats = GlobalShardedParameterStats(\n jnp.stack(padded_statistics), jnp.stack(padded_preconditioners))\n return ShampooState(\n count=jnp.zeros([], jnp.int32),\n stats=ShardedShampooStats(global_stats, local_stats))\n\n def sharded_update_fn(grads, state, params):\n \"\"\"Transform the input gradient and update all statistics in sharded mode.\n\n Args:\n grads: the gradient tensors for the parameters.\n state: a named tuple containing the state of the optimizer\n params: the parameters that should be updated.\n\n Returns:\n A tuple containing the new parameters and the new optimizer state.\n \"\"\"\n params_flat, treedef = jax.tree_flatten(params)\n grads_flat = treedef.flatten_up_to(grads)\n\n global_stats = state.stats.global_stats\n local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)\n stats_flat = [\n _convert_to_parameter_stats(global_stats, local_stat)\n for local_stat in local_stats_flat\n ]\n new_stats_flat = jax.tree_multimap(\n lambda g, s, p: _compute_stats(g, s, p, 
state.count), grads_flat,\n stats_flat, params_flat)\n\n exponents = []\n for stat, param in zip(new_stats_flat, params_flat):\n num_statistics = len(stat.statistics)\n if num_statistics > 0:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n exponent = (\n preconditioner.exponent_for_preconditioner()\n if exponent_override == 0 else exponent_override)\n exponents.extend([exponent] * num_statistics)\n\n outputs = jax.tree_multimap(\n lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat,\n new_stats_flat, params_flat)\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n # Create new local_stats\n new_local_stats_flat = [\n _convert_from_parameter_stats(new_stat, local_stat)\n for new_stat, local_stat in zip(new_stats_flat, local_stats_flat)\n ]\n new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat)\n\n max_size = global_stats.statistics.shape[1]\n new_padded_statistics = []\n for stat in new_stats_flat:\n new_padded_statistics.extend(\n [pad_matrix(stat, max_size) for stat in stat.statistics])\n\n # Create global stats\n # TODO(rohananil): Preconditioner is not updated every step, so cost of\n # stack/pad can be obviated away.\n # Pad the statistics and preconditioner matrices to be a multiple of\n # num devices.\n # TODO(rohananil): Relax to only the size of the mesh axis where the dim\n # is split on.\n to_pad = -len(new_padded_statistics) % num_devices_for_pjit\n new_padded_statistics.extend([\n jnp.eye(max_size, dtype=new_padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n\n def _matrix_inverse_pth_root_vmap(xs, ps):\n mi_pth_root = functools.partial(\n matrix_inverse_pth_root,\n ridge_epsilon=matrix_epsilon,\n precision=precision)\n preconditioners, errors = jax.vmap(mi_pth_root)(xs, ps)\n return preconditioners, errors\n\n def _internal_inverse_pth_root_all():\n preconditioners, errors = _matrix_inverse_pth_root_vmap(\n global_stats.statistics, jnp.stack(exponents))\n return preconditioners, errors\n\n if preconditioning_compute_steps == 1:\n new_preconditioners, errors = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. 
Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = global_stats.statistics\n errors_init = np.stack([inverse_failure_threshold] * len(exponents))\n init_state = [preconditioners_init, errors_init]\n perform_step = state.count % preconditioning_compute_steps == 0\n new_preconditioners, errors = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n\n errors = errors.reshape((-1, 1, 1))\n predicate = jnp.logical_or(\n jnp.isnan(errors),\n errors >= inverse_failure_threshold).astype(new_preconditioners.dtype)\n # TODO(rohananil): Check for numerical instabilities.\n new_conditional_preconditioners = (\n predicate * global_stats.preconditioners +\n (1.0 - predicate) * new_preconditioners)\n new_global_stats = GlobalShardedParameterStats(\n jnp.stack(new_padded_statistics), new_conditional_preconditioners)\n new_shampoo_state = ShampooState(\n count=state.count + 1,\n stats=ShardedShampooStats(new_global_stats, new_local_stats))\n return updates, new_shampoo_state\n\n def init_fn(params):\n \"\"\"Initialise the optimiser's state.\"\"\"\n\n def _init(param):\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n statistics = []\n preconditioners = []\n if not _skip_preconditioning(param):\n shapes = preconditioner.shapes_for_preconditioners()\n statistics = [matrix_epsilon * jnp.eye(s[0]) for s in shapes]\n preconditioners = [jnp.eye(s[0]) for s in shapes]\n\n adagrad_statistics = []\n if graft_type != GraftingType.SGD:\n adagrad_statistics = jnp.zeros_like(param)\n return ParameterStats(adagrad_statistics, statistics, preconditioners,\n jnp.zeros_like(param), jnp.zeros_like(param))\n\n return ShampooState(\n count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params))\n\n def _skip_preconditioning(param):\n return len(param.shape) < 1 or any(\n [s > skip_preconditioning_dim_size_gt for s in param.shape])\n\n def _compute_stats(grad, state, param, step):\n \"\"\"Compute per-parameter statistics.\"\"\"\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n new_statistics = [[]] * len(state.statistics)\n w1 = beta2\n w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)\n if not _skip_preconditioning(param):\n\n def compute_updated_statistics():\n new_stats = preconditioner.statistics_from_grad(grad)\n new_stats_accumulators = []\n for stat, stat_accumulator in zip(new_stats, state.statistics):\n new_stats_accumulators.append(w1 * stat_accumulator + w2 * stat)\n return new_stats_accumulators\n\n if statistics_compute_steps > 1:\n perform_step = step % statistics_compute_steps == 0\n init_state = state.statistics\n new_statistics = list(\n efficient_cond(perform_step, compute_updated_statistics,\n init_state))\n else:\n new_statistics = compute_updated_statistics()\n return ParameterStats(state.diagonal_statistics, new_statistics,\n state.preconditioners, state.diagonal_momentum,\n state.momentum)\n\n def _compute_preconditioners(states, params, step):\n \"\"\"Compute preconditioners for statistics.\"\"\"\n statistics = []\n num_statistics_per_state = []\n original_shapes = []\n exponents = []\n max_size = 0\n prev_preconditioners = []\n for state, param in zip(states, params):\n num_statistics = len(state.statistics)\n num_statistics_per_state.append(num_statistics)\n original_shapes_for_state = []\n if num_statistics > 0:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n for statistic in 
state.statistics:\n exponents.append(preconditioner.exponent_for_preconditioner(\n ) if exponent_override == 0 else exponent_override)\n original_shapes_for_state.append(statistic.shape)\n max_size = max(max_size, statistic.shape[0])\n statistics.extend(state.statistics)\n prev_preconditioners.extend(state.preconditioners)\n original_shapes.extend(original_shapes_for_state)\n num_statistics = len(statistics)\n\n if batch_axis_name:\n num_devices = lax.psum(1, batch_axis_name)\n\n # Pad statistics and exponents to next multiple of num_devices.\n packed_statistics = [pad_matrix(stat, max_size) for stat in statistics]\n to_pad = -num_statistics % num_devices\n packed_statistics.extend([\n jnp.eye(max_size, dtype=packed_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n\n if not packed_statistics:\n return states\n # Batch statistics and exponents so that so that leading axis is\n # num_devices.\n def _batch(statistics, exponents, num_devices):\n assert len(statistics) == len(exponents)\n n = len(statistics)\n b = int(n / num_devices)\n batched_statistics = [\n jnp.stack(statistics[idx:idx + b]) for idx in range(0, n, b)\n ]\n batched_exponents = [\n jnp.stack(exponents[idx:idx + b]) for idx in range(0, n, b)\n ]\n return jnp.stack(batched_statistics), jnp.stack(batched_exponents)\n\n # Unbatch values across leading axis and return a list of elements.\n def _unbatch(batched_values):\n b1, b2 = batched_values.shape[0], batched_values.shape[1]\n results = []\n for v_array in jnp.split(\n batched_values, indices_or_sections=b1, axis=0):\n v_array = jnp.squeeze(v_array)\n # b2 = batches (number of preconditioner computation) per core.\n if b2 > 1:\n for v in jnp.split(v_array, indices_or_sections=b2, axis=0):\n results.append(jnp.squeeze(v))\n else:\n results.append(v_array)\n return results\n\n all_statistics, all_exponents = _batch(packed_statistics, exponents,\n num_devices)\n else:\n to_pad = -num_statistics % num_devices_for_pjit\n padded_statistics = [pad_matrix(stat, max_size) for stat in statistics]\n padded_statistics.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n all_statistics = jnp.stack(padded_statistics)\n all_exponents = jnp.stack(exponents)\n\n def _matrix_inverse_pth_root_vmap(xs, ps):\n mi_pth_root = functools.partial(\n matrix_inverse_pth_root,\n ridge_epsilon=matrix_epsilon,\n precision=precision)\n preconditioners, errors = jax.vmap(mi_pth_root)(xs, ps)\n return preconditioners, errors\n\n def _matrix_inverse_pth_root_pjit(xs, ps):\n mesh_axis_names_tuple = tuple(mesh_axis_names)\n # Partition the concatenated statistics matrix across all cores.\n partitioned_xs, partitioned_ps = pjit.pjit(\n lambda x, y: (x, y),\n in_axis_resources=None,\n out_axis_resources=pjit.PartitionSpec(mesh_axis_names_tuple,))(xs, ps)\n # Run matrix inverse pth root on each shard.\n partitioned_preconditioners, partitioned_errors = _matrix_inverse_pth_root_vmap(\n partitioned_xs, partitioned_ps)\n # Recombine the outputs at each core.\n preconditioners, errors = pjit.pjit(\n lambda x, y: (x, y),\n in_axis_resources=(pjit.PartitionSpec(mesh_axis_names_tuple,),\n pjit.PartitionSpec(mesh_axis_names_tuple,)),\n out_axis_resources=(None, None))(partitioned_preconditioners,\n partitioned_errors)\n return preconditioners, errors\n\n if not batch_axis_name:\n def _internal_inverse_pth_root_all():\n preconditioners, errors = _matrix_inverse_pth_root_pjit(\n 
all_statistics, all_exponents)\n b1 = preconditioners.shape[0]\n def split(batched_values):\n return [\n jnp.squeeze(v) for v in jnp.split(\n batched_values, indices_or_sections=b1, axis=0)\n ]\n\n return split(preconditioners), split(errors)\n\n if preconditioning_compute_steps == 1:\n preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = padded_statistics\n errors_init = [inverse_failure_threshold] * len(padded_statistics)\n init_state = [preconditioners_init, errors_init]\n perform_step = step % preconditioning_compute_steps == 0\n preconditioners_flat, errors_flat = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n else:\n\n def _internal_inverse_pth_root_all():\n preconditioners = jnp.array(all_statistics)\n current_replica = lax.axis_index(batch_axis_name)\n preconditioners, errors = _matrix_inverse_pth_root_vmap(\n all_statistics[current_replica], all_exponents[current_replica])\n preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)\n errors = jax.lax.all_gather(errors, batch_axis_name)\n preconditioners_flat = _unbatch(preconditioners)\n errors_flat = _unbatch(errors)\n return preconditioners_flat, errors_flat\n\n if preconditioning_compute_steps == 1:\n preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = packed_statistics\n errors_init = ([inverse_failure_threshold] * len(packed_statistics))\n init_state = [preconditioners_init, errors_init]\n perform_step = step % preconditioning_compute_steps == 0\n preconditioners_flat, errors_flat = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n\n def _skip(error):\n condition = jnp.logical_or(\n jnp.isnan(error), error >= inverse_failure_threshold)\n return condition.astype(error.dtype)\n\n def _select_preconditioner(error, new_p, old_p):\n return lax.cond(\n _skip(error), lambda _: old_p, lambda _: new_p, operand=None)\n\n new_preconditioners_flat = []\n for p, shape, prev_p, error in zip(preconditioners_flat, original_shapes,\n prev_preconditioners, errors_flat):\n new_preconditioners_flat.append(\n _select_preconditioner(error, p[:shape[0], :shape[1]], prev_p))\n\n assert len(states) == len(num_statistics_per_state)\n assert len(new_preconditioners_flat) == num_statistics\n\n # Add back empty preconditioners so we that we can set the optimizer state.\n preconditioners_for_states = []\n idx = 0\n for num_statistics, state in zip(num_statistics_per_state, states):\n if num_statistics == 0:\n preconditioners_for_states.append([])\n else:\n preconditioners_for_state = new_preconditioners_flat[idx:idx +\n num_statistics]\n assert len(state.statistics) == len(preconditioners_for_state)\n preconditioners_for_states.append(preconditioners_for_state)\n idx += num_statistics\n new_states = []\n for state, new_preconditioners in zip(states, preconditioners_for_states):\n new_states.append(\n ParameterStats(state.diagonal_statistics, state.statistics,\n new_preconditioners, state.diagonal_momentum,\n state.momentum))\n\n return new_states\n\n def _transform_grad(grad, state, param, step):\n \"\"\"Transform per-parameter 
gradients.\"\"\"\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n sgd_update = grad\n new_diagonal_statistics = state.diagonal_statistics\n if graft_type == GraftingType.ADAGRAD:\n new_diagonal_statistics = state.diagonal_statistics + jnp.square(grad)\n adagrad_update = grad / (\n jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)\n grafting_update = adagrad_update\n elif (graft_type == GraftingType.RMSPROP or\n graft_type == GraftingType.RMSPROP_NORMALIZED):\n\n scaled_grad = grad\n if graft_type == GraftingType.RMSPROP_NORMALIZED:\n scaled_grad = grad / jnp.linalg.norm(grad)\n\n w1 = beta2\n w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)\n\n new_diagonal_statistics = (\n w1 * state.diagonal_statistics + w2 * jnp.square(scaled_grad))\n rmsprop_update = scaled_grad / (\n jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)\n\n if clip_by_scaled_gradient_norm:\n scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (\n jnp.sqrt(float(rmsprop_update.size)))\n clipping_denom = jnp.maximum(\n 1., scaled_grad_norm / clip_by_scaled_gradient_norm)\n rmsprop_update /= clipping_denom\n\n grafting_update = rmsprop_update\n else:\n grafting_update = sgd_update\n\n precond_grad = grad\n if not _skip_preconditioning(param):\n precond_grad = preconditioner.preconditioned_grad(precond_grad,\n state.preconditioners)\n else:\n precond_grad = grafting_update\n\n grafting_update_norm = jnp.linalg.norm(grafting_update)\n precond_grad_norm = jnp.linalg.norm(precond_grad)\n\n multiplier = (grafting_update_norm / (precond_grad_norm + 1e-16))\n shampoo_update = precond_grad * multiplier\n\n shampoo_update_with_wd = shampoo_update\n grafting_update_with_wd = grafting_update\n if weight_decay != 0:\n shampoo_update_with_wd = shampoo_update + weight_decay * param\n grafting_update_with_wd = grafting_update + weight_decay * param\n\n w = (1.0 - beta1) if moving_average_for_momentum else 1.0\n shampoo_update_with_wd_momentum = (\n state.momentum * beta1 + w * shampoo_update_with_wd)\n grafting_update_with_wd_momentum = (\n state.diagonal_momentum * beta1 + w * grafting_update_with_wd)\n\n run_shampoo = (step >= start_preconditioning_step).astype(\n grafting_update_with_wd_momentum.dtype)\n\n momentum_update = (\n run_shampoo * shampoo_update_with_wd_momentum +\n (1.0 - run_shampoo) * grafting_update_with_wd_momentum)\n\n wd_update = (\n run_shampoo * shampoo_update_with_wd +\n (1.0 - run_shampoo) * grafting_update_with_wd)\n\n if nesterov:\n momentum_update = w * wd_update + beta1 * momentum_update\n\n lr = learning_rate\n if callable(learning_rate):\n lr = learning_rate(step)\n transformed_update = -1.0 * lr * momentum_update\n\n param_stats = ParameterStats(new_diagonal_statistics, state.statistics,\n state.preconditioners,\n grafting_update_with_wd_momentum,\n shampoo_update_with_wd_momentum)\n return transformed_update, param_stats\n\n def update_fn(grads, state, params):\n \"\"\"Transform the input gradient and update all statistics.\n\n Args:\n grads: the gradient tensors for the parameters.\n state: a named tuple containing the state of the optimizer\n params: the parameters that should be updated.\n\n Returns:\n A tuple containing the new parameters and the new optimizer state.\n \"\"\"\n params_flat, treedef = jax.tree_flatten(params)\n stats_flat = treedef.flatten_up_to(state.stats)\n grads_flat = treedef.flatten_up_to(grads)\n\n new_stats_flat = jax.tree_multimap(\n lambda g, s, p: _compute_stats(g, s, p, state.count), grads_flat,\n stats_flat, params_flat)\n 
new_stats_flat = _compute_preconditioners(new_stats_flat, params_flat,\n state.count)\n\n outputs = jax.tree_multimap(\n lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat,\n new_stats_flat, params_flat)\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n new_stats = jax.tree_unflatten(treedef, new_stats_flat)\n\n new_state = ShampooState(\n count=state.count+1, stats=new_stats)\n return updates, new_state\n\n if shard_optimizer_states:\n return optax.GradientTransformation(sharded_init_fn, sharded_update_fn)\n else:\n return optax.GradientTransformation(init_fn, update_fn)",
"def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):\n\n perm = [0, 2, 1, 3]\n permuted_matrix = self.base_matrix_2_broadcasted[:, perm][:, :, perm]\n\n expanded_matrix = np.tensordot(\n np.tensordot(\n np.kron(SWAP, I),\n np.kron(I_broadcasted, self.base_matrix_2_broadcasted),\n axes=[[1], [1]],\n ),\n np.kron(SWAP, I),\n axes=[[2], [0]],\n )\n expanded_matrix = np.moveaxis(expanded_matrix, 0, -2)\n\n class DummyOp(qml.operation.Operator):\n num_wires = 2\n\n def compute_matrix(*params, **hyperparams):\n return self.base_matrix_2_broadcasted\n\n op = DummyOp(wires=[0, 2])\n assert np.allclose(op.matrix(), self.base_matrix_2_broadcasted, atol=tol)\n assert np.allclose(op.matrix(wire_order=[2, 0]), permuted_matrix, atol=tol)\n assert np.allclose(op.matrix(wire_order=[0, 1, 2]), expanded_matrix, atol=tol)",
"def test_operator_create_operator(self):\n pass",
"def test_get_prop_samples_broadcasts_weights_correctly():\n desired_data = [{\n \"solver\": {\n \"sublattice_site_ratios\": [1],\n \"sublattice_occupancies\": [[[0, 0]], [[1, 1]]],\n \"sublattice_configurations\": [[[\"CU\", \"MG\"]], [[\"CU\", \"MG\"]]],\n \"mode\": \"manual\"\n },\n \"conditions\": {\n \"P\": [0, 1], \"T\": [0, 1, 2, 3]},\n \"values\": [[[0, 1], [2, 3], [4, 5], [6, 7]], [[8, 9], [10, 11], [12, 13], [14, 15]]],\n }]\n\n # No weight\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n assert np.all(np.isclose(np.asarray(calculate_dict[\"weights\"]), 1.0))\n\n # Scalar weight\n desired_data[0][\"weight\"] = 5.0\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n assert np.all(np.isclose(np.asarray(calculate_dict[\"weights\"]), 5.0))\n\n # 1D weights aligned in...\n # ... P\n desired_data[0][\"weight\"] = [[[1]], [[2]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"P\",calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # ... T\n desired_data[0][\"weight\"] = [[[1], [2], [3], [4]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"T\",calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # ... configs\n desired_data[0][\"weight\"] = [[[3, 4]]]\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(\"CONFIGS\", calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])\n\n # 3D weights aligned\n num_P = 2\n num_T = 4\n prescribed = [[(np.array([1, 2])*i*j).tolist() for j in range(1, num_T+1)] for i in range(1, num_P+1)]\n desired_data[0][\"weight\"] = prescribed\n calculate_dict = get_prop_samples(desired_data, [['CU', 'MG']])\n print(calculate_dict)\n assert calculate_dict[\"values\"].shape == (16,)\n assert calculate_dict[\"values\"].size == len(calculate_dict[\"weights\"])",
"def test_no_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]\n )\n assert np.allclose(self.base_matrix_2_broadcasted, res)",
"def test_weights_built(self):\n # Create the network\n with nengo.Network():\n a = nengo.Ensemble(200, 2)\n b = nengo.Ensemble(400, 2)\n a_b = nengo.Connection(\n a, b, solver=nengo.solvers.Lstsq(weights=True)\n )\n\n # Create the model and built the pre-synaptic Ensemble\n model = builder.Model()\n model.rng = np.random\n model.seeds[a] = 1\n model.seeds[b] = 2\n model.seeds[a_b] = 3\n ensemble.build_ensemble(model, a)\n ensemble.build_ensemble(model, b)\n\n # Now build the connection and check that the params seem sensible\n params = ensemble.build_from_ensemble_connection(model, a_b)\n assert params.decoders.shape == (200, 400)",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def test_amplify_init():\n # Essentially Grover's to select 011 or 111\n desired = cz_gate + A_inv + diffusion_operator(\n qubits) + A + cz_gate + A_inv + diffusion_operator(qubits) + A\n created = amplify(A, A_inv, cz_gate, qubits, iters, init=False)\n\n compare_progs(desired, created)",
"def _setup_prediction_op(self):",
"def test_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_1_broadcasted, wires=[2], wire_order=[0, 2]\n )\n expected = np.array(\n [\n [\n [1, 2, 0, 0],\n [3, 4, 0, 0],\n [0, 0, 1, 2],\n [0, 0, 3, 4],\n ],\n [\n [5, 6, 0, 0],\n [7, 8, 0, 0],\n [0, 0, 5, 6],\n [0, 0, 7, 8],\n ],\n [\n [9, 10, 0, 0],\n [11, 12, 0, 0],\n [0, 0, 9, 10],\n [0, 0, 11, 12],\n ],\n ]\n )\n assert np.allclose(expected, res)\n\n res = qml.operation.expand_matrix(\n self.base_matrix_1_broadcasted, wires=[2], wire_order=[2, 0]\n )\n expected = np.array(\n [\n [\n [1, 0, 2, 0],\n [0, 1, 0, 2],\n [3, 0, 4, 0],\n [0, 3, 0, 4],\n ],\n [\n [5, 0, 6, 0],\n [0, 5, 0, 6],\n [7, 0, 8, 0],\n [0, 7, 0, 8],\n ],\n [\n [9, 0, 10, 0],\n [0, 9, 0, 10],\n [11, 0, 12, 0],\n [0, 11, 0, 12],\n ],\n ]\n )\n assert np.allclose(expected, res)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Test that initialization of an operator with broadcasted parameters works and sets the ``batch_size`` correctly with TensorFlow parameters.
|
def test_broadcasted_params(self, params, exp_batch_size):
    import tensorflow as tf

    class DummyOp(qml.operation.Operator):
        r"""Dummy custom operator that declares ndim_params as a class property"""
        ndim_params = (0, 2)
        num_wires = 1

    params = tuple(tf.Variable(p) for p in params)
    op = DummyOp(*params, wires=0)
    assert op.ndim_params == (0, 2)
    assert op._batch_size == exp_batch_size
|
[
"def test_broadcasted_params(self, params, exp_batch_size):\n import jax\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(jax.numpy.array(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import torch\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(torch.tensor(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(pnp.array(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_different_batch_sizes_raises_error(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n with pytest.raises(\n ValueError, match=\"Broadcasting was attempted but the broadcasted dimensions\"\n ):\n _ = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3, 4]), wires=2))",
"def test_batch_size_not_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3]), wires=2))\n assert op.batch_size == 3",
"def test_batch_size_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, base, base)\n assert op.batch_size == 3",
"def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):\n\n perm = [0, 2, 1, 3]\n permuted_matrix = self.base_matrix_2_broadcasted[:, perm][:, :, perm]\n\n expanded_matrix = np.tensordot(\n np.tensordot(\n np.kron(SWAP, I),\n np.kron(I_broadcasted, self.base_matrix_2_broadcasted),\n axes=[[1], [1]],\n ),\n np.kron(SWAP, I),\n axes=[[2], [0]],\n )\n expanded_matrix = np.moveaxis(expanded_matrix, 0, -2)\n\n class DummyOp(qml.operation.Operator):\n num_wires = 2\n\n def compute_matrix(*params, **hyperparams):\n return self.base_matrix_2_broadcasted\n\n op = DummyOp(wires=[0, 2])\n assert np.allclose(op.matrix(), self.base_matrix_2_broadcasted, atol=tol)\n assert np.allclose(op.matrix(wire_order=[2, 0]), permuted_matrix, atol=tol)\n assert np.allclose(op.matrix(wire_order=[0, 1, 2]), expanded_matrix, atol=tol)",
"def test_conditional_broadcasting(session_tf, full_cov, white, conditional_type):\n X_ = tf.placeholder(tf.float64, [None, None])\n q_mu = np.random.randn(Data.M, Data.Dy)\n q_sqrt = np.tril(np.random.randn(Data.Dy, Data.M, Data.M), -1)\n\n if conditional_type == \"Z\":\n feat = Data.Z\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"inducing_points\":\n feat = gpflow.features.InducingPoints(Data.Z)\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"mixing\":\n # variational params have different output dim in this case\n q_mu = np.random.randn(Data.M, Data.L)\n q_sqrt = np.tril(np.random.randn(Data.L, Data.M, Data.M), -1)\n feat = mf.MixedKernelSharedMof(gpflow.features.InducingPoints(Data.Z))\n kern = mk.SeparateMixedMok(\n kernels=[gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5) for _ in range(Data.L)],\n W=Data.W\n )\n\n if conditional_type == \"mixing\" and full_cov:\n pytest.skip(\"combination is not implemented\")\n\n num_samples = 5\n sample_tf, mean_tf, cov_tf = sample_conditional(\n X_,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n\n ss, ms, vs = [], [], []\n for X in Data.SX:\n s, m, v = session_tf.run([sample_tf, mean_tf, cov_tf], {X_: X})\n ms.append(m)\n vs.append(v)\n ss.append(s)\n\n ms = np.array(ms)\n vs = np.array(vs)\n ss = np.array(ss)\n\n ss_S12, ms_S12, vs_S12 = session_tf.run(\n sample_conditional(\n Data.SX,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n ss_S1_S2, ms_S1_S2, vs_S1_S2 = session_tf.run(\n sample_conditional(\n Data.S1_S2_X,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n assert_allclose(ss_S12.shape, ss.shape)\n assert_allclose(ms_S12, ms)\n assert_allclose(vs_S12, vs)\n assert_allclose(ms_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), ms)\n assert_allclose(ss_S1_S2.shape, [Data.S1, Data.S2, num_samples, Data.N, Data.Dy])\n\n if full_cov:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.Dy, Data.N, Data.N), vs)\n else:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), vs)",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def test_batch_size_None(self):\n prod_op = ValidOp(qml.PauliX(0), qml.RX(1.0, wires=0))\n assert prod_op.batch_size is None",
"def test_batchnorm():\n reset()\n\n # create model with a mix of pretrained and new weights\n # NOTE: the pretrained layers will be initialized by Keras on creation, while the new Dense\n # layer will remain uninitialized\n input_shape = (64,64,3)\n inputs = tf.keras.layers.Input(shape=input_shape)\n x = tf.keras.layers.Dense(1)(inputs)\n bn = tf.keras.layers.BatchNormalization()\n x = bn(x)\n logits = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs=inputs, outputs=logits, name=\"model\")\n\n # create session, force tf.Keras to use it\n config = tf.ConfigProto(allow_soft_placement=True)#, log_device_placement=True)\n sess = tf.Session(config=config)\n tf.keras.backend.set_session(sess)\n\n #sess.run(tf.global_variables_initializer())\n initialize_variables(sess)\n\n # moving mean & std dev before training\n mu, std = sess.run([bn.moving_mean, bn.moving_variance])\n\n x = np.random.randn(10, *input_shape)\n\n # training mode (should use batch mean & std dev)\n out1 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n out2 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n assert np.array_equal(out1, out2)\n\n # non-training mode (should use internal moving average and std dev)\n out3 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 0})\n out4 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 0})\n assert np.array_equal(out3, out4)\n assert not np.allclose(out3, out1)\n\n # training mode again\n out5 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n assert np.array_equal(out5, out1)\n\n # update ops (update internal moving average and std dev)\n sess.run(model.updates, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n\n # train again (should not be affected by the updates)\n out6 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 1})\n assert np.array_equal(out6, out1)\n\n # non-train again (should use updated moving average and std dev)\n out7 = sess.run(model.output, feed_dict={model.input: x, tf.keras.backend.learning_phase(): 0})\n assert not np.array_equal(out7, out6) # not equal to train\n assert not np.array_equal(out7, out3) # not equal to previous test due to update ops",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def keras_init(tf_min_log_level='3', gpu_mem_frac=0.3):\n # Remove TensorFlow (Keras' default backend) debugging prints.\n import os\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = tf_min_log_level\n\n # Configure the TensorFlow backend.\n config = tf.ConfigProto()\n # Limit the GPU memory allocated to TensorFlow.\n config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac\n set_session(tf.Session(config=config))",
"def testTruncatingDispatcher(self):\n # batch = 1\n # length = 3\n # num_experts = 2\n expert_capacity = 2\n requests = tf.constant([\n [[True, False],\n [True, True],\n [True, False]],\n [[False, False],\n [False, True],\n [True, False]]\n ], dtype=tf.float32)\n dispatcher = expert_utils.TruncatingDispatcher(requests, expert_capacity)\n x = tf.constant([\n [[3, 4],\n [5, 6],\n [7, 8]],\n [[2, 3],\n [4, 5],\n [6, 7]]\n ], dtype=tf.float32)\n dispatched = dispatcher.dispatch(x)\n dispatched_expected = [\n [[[3, 4], [5, 6]],\n [[5, 6], [3, 4]]],\n [[[6, 7], [2, 3]],\n [[4, 5], [2, 3]]]\n ]\n y = [\n [[[7, 12], [11, 30]],\n [[-1, 30], [9, 9]]],\n [[[13, 42], [9, 9]],\n [[-1, 20], [9, 9]]]\n ]\n combined = dispatcher.combine(y)\n combined_expected = [\n [[7, 12],\n [10, 60],\n [0, 0]],\n [[0, 0],\n [-1, 20],\n [13, 42]]\n ]\n nonpadding = dispatcher.nonpadding()\n nonpadding_expected = [\n [[1, 1],\n [1, 0]],\n [[1, 0],\n [1, 0]]\n ]\n gates = dispatcher.gates()\n gates_expected = [\n [[1, 0],\n [1, 1],\n [0, 0]],\n [[0, 0],\n [0, 1],\n [1, 0]]\n ]\n\n with self.test_session() as sess:\n self._verify_value(sess, dispatched, dispatched_expected)\n self._verify_value(sess, combined, combined_expected)\n self._verify_value(sess, nonpadding, nonpadding_expected)\n self._verify_value(sess, gates, gates_expected)",
"def test_bluefog_broadcast(self):\n rank = bf.rank()\n size = bf.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n return\n\n dtypes = [tf.int32, tf.int64, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = tf.ones([17] * dim) * rank\n root_tensor = tf.ones([17] * dim) * root_rank\n if dtype == tf.bool:\n tensor = tensor % 2\n root_tensor = root_tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n root_tensor = tf.cast(root_tensor, dtype=dtype)\n broadcasted_tensor = bf.broadcast(tensor, root_rank)\n self.assertTrue(\n self.evaluate(tf.reduce_all(tf.equal(\n tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),\n \"bf.broadcast produces incorrect broadcasted tensor\")",
"def test_qc_custom_gradient_training_loop_param_learning(self):\n\n tf.compat.v1.reset_default_graph()\n tf.compat.v1.set_random_seed(0)\n np.random.seed(0)\n with tf.device('/cpu:0'):\n inputs = tf.keras.Input(shape=(32, 32, 1,))\n conv_op = tf.keras.layers.Conv2D(1, (2, 2),\n kernel_initializer=tf.random_uniform_initializer(-1, 2),\n bias_initializer='random_uniform',\n padding='SAME')(inputs)\n relu_op = tf.nn.relu(conv_op)\n reshape = tf.keras.layers.Flatten()(relu_op)\n _ = tf.keras.layers.Dense(10)(reshape)\n\n sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())\n initialize_uninitialized_vars(sess)\n\n # create quantsim model without config file\n sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False,\n quant_scheme=QuantScheme.training_range_learning_with_tf_init)\n\n for quant_op_name in sim._param_quantizers.keys():\n print(sim._param_quantizers[quant_op_name])\n\n for quant_op_name in sim._activation_quantizers.keys():\n print(sim._activation_quantizers[quant_op_name])\n\n def dummy_forward_pass(sess, args):\n model_output = sess.graph.get_tensor_by_name('dense/MatMul:0')\n model_input = sess.graph.get_tensor_by_name('input_1:0')\n shape = model_input.shape\n dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])\n sess.run(model_output, feed_dict={model_input: dummy_input})\n\n conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')\n relu_output_quant_op = sim.session.graph.get_operation_by_name('Relu_quantized')\n\n # enable input\n sim.compute_encodings(dummy_forward_pass, None)\n\n inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')\n np.random.seed(0)\n w_shape = inp_tensor.shape\n batches = 32\n inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])\n logits = sim.session.graph.get_tensor_by_name('dense/MatMul:0')\n\n labels = np.random.randint(10, size=batches)\n one_hot_labels = np.eye(10)[labels]\n\n with sim.session.graph.as_default():\n var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')\n loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)\n\n update_ops = []\n global_step = tf.compat.v1.train.create_global_step()\n initialize_uninitialized_vars(sim.session)\n\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=1e-3)\n gradients = optimizer.compute_gradients(loss, var_list)\n\n sim.compute_encodings(dummy_forward_pass, None)\n grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)\n update_ops.append(grad_updates)\n update_op = tf.group(*update_ops)\n\n conv_inp_tensor = conv2d_weight_quant_op.inputs[0]\n grads = tf.gradients(loss, [conv_inp_tensor,\n conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min],\n conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max]])\n dqbydx, dqbydmin, dqbydmax = grads\n input_gradient = sim.session.run([dqbydx], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n min_gradient = sim.session.run([dqbydmin], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n max_gradient = sim.session.run([dqbydmax], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n\n weights_before_train = sim.session.run(conv2d_weight_quant_op.inputs[0])\n encoding_min_before_train = 
sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])\n encoding_max_before_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])\n relu_output_encoding_min_before_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_min])\n relu_output_encoding_max_before_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_max])\n with tf.control_dependencies([update_op]):\n train_op = tf.identity(loss, name='train_op')\n\n for quant_op_name in sim._param_quantizers.keys():\n print(quant_op_name + '_min_before_train = ' + str(sim.session.run(\n sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_min])))\n print(quant_op_name + '_max_before_train = ' + str(sim.session.run(\n sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_max])))\n\n # start training\n _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})\n\n for quant_op_name in sim._param_quantizers.keys():\n print(quant_op_name + '_min = ' + str(sim.session.run(sim.session.graph.get_operation_by_name\n (quant_op_name).inputs[\n QuantizeOpIndices.encoding_min])))\n print(quant_op_name + '_max = ' + str(sim.session.run(sim.session.graph.get_operation_by_name\n (quant_op_name).inputs[\n QuantizeOpIndices.encoding_max])))\n\n weights_after_train = sim.session.run(conv2d_weight_quant_op.inputs[0])\n relu_output_encoding_min_after_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_min])\n relu_output_encoding_max_after_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_max])\n encoding_min_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])\n encoding_max_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])\n\n assert not np.allclose(weights_before_train, weights_after_train, atol=1e-6)\n assert encoding_min_before_train != encoding_min_after_train\n assert encoding_max_before_train != encoding_max_after_train\n assert relu_output_encoding_min_before_train != relu_output_encoding_min_after_train\n assert relu_output_encoding_max_before_train != relu_output_encoding_max_after_train\n\n\n baseline = sim.session.run(logits, feed_dict={inp_tensor: inp_data})\n sim.export('/tmp', 'quant_sim_model')\n after_sim_export = sim.session.run(logits, feed_dict={inp_tensor: inp_data})\n assert np.allclose(baseline, after_sim_export)\n\n\n sess.close()\n sim.session.close()",
"def test_no_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]\n )\n assert np.allclose(self.base_matrix_2_broadcasted, res)",
"def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)",
"def _setup_prediction_op(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Test that initialization of an operator with broadcasted parameters works and sets the ``batch_size`` correctly with Torch parameters.
|
def test_broadcasted_params(self, params, exp_batch_size):
    import torch

    class DummyOp(qml.operation.Operator):
        r"""Dummy custom operator that declares ndim_params as a class property"""
        ndim_params = (0, 2)
        num_wires = 1

    params = tuple(torch.tensor(p, requires_grad=True) for p in params)
    op = DummyOp(*params, wires=0)
    assert op.ndim_params == (0, 2)
    assert op._batch_size == exp_batch_size
|
[
"def test_broadcasted_params(self, params, exp_batch_size):\n import jax\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(jax.numpy.array(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(pnp.array(p, requires_grad=True) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_broadcasted_params(self, params, exp_batch_size):\n import tensorflow as tf\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator that declares ndim_params as a class property\"\"\"\n ndim_params = (0, 2)\n num_wires = 1\n\n params = tuple(tf.Variable(p) for p in params)\n op = DummyOp(*params, wires=0)\n assert op.ndim_params == (0, 2)\n assert op._batch_size == exp_batch_size",
"def test_different_batch_sizes_raises_error(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n with pytest.raises(\n ValueError, match=\"Broadcasting was attempted but the broadcasted dimensions\"\n ):\n _ = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3, 4]), wires=2))",
"def test_batch_size_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, base, base)\n assert op.batch_size == 3",
"def test_batch_size_not_all_batched(self):\n base = qml.RX(np.array([1.2, 2.3, 3.4]), 0)\n op = ValidOp(base, qml.RY(1, 0), qml.RZ(np.array([1, 2, 3]), wires=2))\n assert op.batch_size == 3",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def _setup_prediction_op(self):",
"def testNoneBatch(self, constructor, kwargs):\n vqvae_module = constructor(**kwargs)\n inputs = jnp.zeros([0, 5, 5, kwargs['embedding_dim']])\n vqvae_module(inputs, is_training=False)",
"def test_batch_size_None(self):\n prod_op = ValidOp(qml.PauliX(0), qml.RX(1.0, wires=0))\n assert prod_op.batch_size is None",
"def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):\n\n perm = [0, 2, 1, 3]\n permuted_matrix = self.base_matrix_2_broadcasted[:, perm][:, :, perm]\n\n expanded_matrix = np.tensordot(\n np.tensordot(\n np.kron(SWAP, I),\n np.kron(I_broadcasted, self.base_matrix_2_broadcasted),\n axes=[[1], [1]],\n ),\n np.kron(SWAP, I),\n axes=[[2], [0]],\n )\n expanded_matrix = np.moveaxis(expanded_matrix, 0, -2)\n\n class DummyOp(qml.operation.Operator):\n num_wires = 2\n\n def compute_matrix(*params, **hyperparams):\n return self.base_matrix_2_broadcasted\n\n op = DummyOp(wires=[0, 2])\n assert np.allclose(op.matrix(), self.base_matrix_2_broadcasted, atol=tol)\n assert np.allclose(op.matrix(wire_order=[2, 0]), permuted_matrix, atol=tol)\n assert np.allclose(op.matrix(wire_order=[0, 1, 2]), expanded_matrix, atol=tol)",
"def test_create_resnet_with_callable(self):\n for (norm, activation) in itertools.product(\n (nn.BatchNorm3d, None), (nn.ReLU, nn.Sigmoid, None)\n ):\n input_channel = 3\n input_clip_length = 4\n input_crop_size = 56\n model_depth = 50\n stage_spatial_stride = (2, 1, 1, 1)\n stage_temporal_stride = (2, 1, 1, 1)\n model_gt, num_class = self._build_resnet(\n input_channel,\n input_clip_length,\n input_crop_size,\n model_depth,\n norm,\n activation,\n )\n\n total_spatial_stride = 4 * np.prod(stage_spatial_stride)\n total_temporal_stride = np.prod(stage_temporal_stride)\n head_pool_kernel_size = (\n input_clip_length // total_temporal_stride,\n input_crop_size // total_spatial_stride,\n input_crop_size // total_spatial_stride,\n )\n\n model = create_resnet(\n input_channel=input_channel,\n model_depth=50,\n model_num_class=num_class,\n dropout_rate=0,\n norm=norm,\n activation=activation,\n stem_dim_out=8,\n stem_conv_kernel_size=(3, 7, 7),\n stem_conv_stride=(1, 2, 2),\n stem_pool=nn.MaxPool3d,\n stem_pool_kernel_size=(1, 3, 3),\n stem_pool_stride=(1, 2, 2),\n stage_conv_a_kernel_size=((3, 1, 1),) * 4,\n stage_conv_b_kernel_size=((1, 3, 3),) * 4,\n stage_spatial_h_stride=stage_spatial_stride,\n stage_spatial_w_stride=stage_spatial_stride,\n stage_temporal_stride=stage_temporal_stride,\n bottleneck=create_bottleneck_block,\n head_pool=nn.AvgPool3d,\n head_pool_kernel_size=head_pool_kernel_size,\n head_output_size=(1, 1, 1),\n head_activation=nn.Softmax,\n )\n\n model.load_state_dict(\n model_gt.state_dict(), strict=True\n ) # explicitly use strict mode.\n\n # Test forwarding.\n for tensor in TestResNet._get_inputs(\n input_channel, input_clip_length, input_crop_size\n ):\n with torch.no_grad():\n if tensor.shape[1] != input_channel:\n with self.assertRaises(RuntimeError):\n out = model(tensor)\n continue\n\n out = model(tensor)\n out_gt = model_gt(tensor)\n\n self.assertEqual(\n out.shape,\n out_gt.shape,\n \"Output shape {} is different from expected shape {}\".format(\n out.shape, out_gt.shape\n ),\n )\n self.assertTrue(\n np.allclose(out.numpy(), out_gt.numpy(), rtol=1e-1, atol=1e-1)\n )",
"def test_set_get_quantizer_params_using_properties(self):\n tf.compat.v1.reset_default_graph()\n with tf.device('/cpu:0'):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))\n model.summary()\n\n sess = tf.compat.v1.Session()\n initialize_uninitialized_vars(sess)\n sim = QuantizationSimModel(sess, [model.input.op.name], [model.output.op.name], use_cuda=False)\n\n p_quantizer = sim.quantizer_config('conv2d/Conv2D/ReadVariableOp_quantized')\n o_quantizer = sim.quantizer_config('conv2d/Relu_quantized')\n bias_quantizer = sim.quantizer_config('conv2d/BiasAdd/ReadVariableOp_quantized')\n\n # check if __str__ can print the object info\n print(p_quantizer)\n bitwidth = p_quantizer.bitwidth\n self.assertEqual(8, bitwidth)\n p_quantizer.bitwidth = 6\n bitwidth = p_quantizer.bitwidth\n self.assertEqual(6, bitwidth)\n\n bitwidth = o_quantizer.bitwidth\n self.assertEqual(8, bitwidth)\n o_quantizer.bitwidth = 6\n bitwidth = o_quantizer.bitwidth\n self.assertEqual(6, bitwidth)\n\n sym_encoding = bias_quantizer.use_symmetric_encoding\n self.assertTrue(sym_encoding)\n bias_quantizer.use_symmetric_encoding = False\n sym_encoding = bias_quantizer.use_symmetric_encoding\n self.assertFalse(sym_encoding)\n\n rounding_mode = o_quantizer.rounding_mode\n self.assertEqual(libpymo.RoundingMode.ROUND_NEAREST, rounding_mode)\n o_quantizer.rounding_mode = libpymo.RoundingMode.ROUND_STOCHASTIC\n rounding_mode = o_quantizer.rounding_mode\n self.assertEqual(libpymo.RoundingMode.ROUND_STOCHASTIC, rounding_mode)\n\n quant_scheme = o_quantizer.quant_scheme\n self.assertEqual(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, quant_scheme)\n o_quantizer.quant_scheme = QuantScheme.post_training_tf\n quant_scheme = o_quantizer.quant_scheme\n self.assertEqual(libpymo.QuantizationMode.QUANTIZATION_TF, quant_scheme)\n self.assertFalse(o_quantizer.tensor_quantizer.isEncodingValid)\n\n is_enabled = p_quantizer.enabled\n self.assertTrue(is_enabled)\n p_quantizer.enabled = False\n is_enabled = p_quantizer.enabled\n self.assertFalse(is_enabled)\n\n # use strict symmetric and unsigned symmetric\n use_strict_symmetric = p_quantizer.use_strict_symmetric\n self.assertFalse(use_strict_symmetric)\n p_quantizer.use_strict_symmetric = True\n use_strict_symmetric = p_quantizer.use_strict_symmetric\n self.assertTrue(use_strict_symmetric)\n\n use_unsigned_symmetric = p_quantizer.use_unsigned_symmetric\n self.assertFalse(use_unsigned_symmetric)\n p_quantizer.use_unsigned_symmetric = True\n use_unsigned_symmetric = p_quantizer.use_unsigned_symmetric\n self.assertTrue(use_unsigned_symmetric)\n\n sim.session.close()\n del sim",
"def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)",
"def test_construction_cpu_model(self):\n tf.compat.v1.reset_default_graph()\n with tf.device('/cpu:0'):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))\n model.summary()\n\n sess = tf.compat.v1.Session()\n initialize_uninitialized_vars(sess)\n sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=False)\n\n # One run through the model to check if the ops got added correctly\n model_output = sess.graph.get_tensor_by_name('conv2d_1/BiasAdd_quantized:0')\n model_input = sess.graph.get_tensor_by_name('conv2d_input:0')\n dummy_input = np.random.randn(20, 28, 28, 3)\n sess.run(model_output, feed_dict={model_input: dummy_input})\n\n # Check that quantized ops got added for all params\n quant_ops = [op for op in sess.graph.get_operations() if op.type == 'QcQuantize']\n for op in quant_ops:\n print(op.name)\n self.assertEqual(10, len(quant_ops))\n\n # Check that the quant ops are correctly connected in the graph\n self.assertEqual('Conv2D', quant_ops[0].outputs[0].consumers()[0].type)\n self.assertEqual('BiasAdd', quant_ops[1].outputs[0].consumers()[0].type)\n self.assertEqual(int(libpymo.TensorQuantizerOpMode.passThrough), sess.run(quant_ops[1].inputs[1]))\n\n # Check that op-mode is set correctly\n self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),\n sess.run(quant_ops[0].inputs[1]))\n\n sess.close()\n sim.session.close()\n del sim",
"def distributed_shampoo(learning_rate,\n block_size,\n beta1=0.9,\n beta2=0.999,\n diagonal_epsilon=1e-10,\n matrix_epsilon=1e-6,\n weight_decay=0.0,\n start_preconditioning_step=1,\n preconditioning_compute_steps=1,\n statistics_compute_steps=1,\n best_effort_shape_interpretation=True,\n graft_type=GraftingType.SGD,\n nesterov=True,\n exponent_override=0,\n batch_axis_name=None,\n mesh_axis_names=None,\n num_devices_for_pjit=None,\n shard_optimizer_states=False,\n inverse_failure_threshold=0.1,\n moving_average_for_momentum=False,\n skip_preconditioning_dim_size_gt=4096,\n clip_by_scaled_gradient_norm=None,\n precision=lax.Precision.HIGHEST):\n\n def sharded_init_fn(params):\n params_flat, treedef = jax.tree_flatten(params)\n # Find max size to pad to.\n max_size = 0\n for param in params_flat:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n if not _skip_preconditioning(param):\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = [s[0] for s in shapes]\n max_size = max(max(sizes), max_size)\n\n padded_statistics = []\n padded_preconditioners = []\n local_stats_flat = []\n for param in params_flat:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n shapes = preconditioner.shapes_for_preconditioners()\n sizes = []\n\n statistics = []\n preconditioners = []\n index_start = len(padded_statistics)\n if not _skip_preconditioning(param):\n sizes = [s[0] for s in shapes]\n shapes = preconditioner.shapes_for_preconditioners()\n statistics = [matrix_epsilon * jnp.eye(max_size) for s in shapes]\n preconditioners = [jnp.eye(max_size) for s in shapes]\n padded_statistics.extend(statistics)\n padded_preconditioners.extend(preconditioners)\n\n adagrad_statistics = []\n if graft_type != GraftingType.SGD:\n adagrad_statistics = jnp.zeros_like(param)\n local_stats_flat.append(\n LocalShardedParameterStats(adagrad_statistics, jnp.zeros_like(param),\n jnp.zeros_like(param), index_start, sizes))\n\n local_stats = jax.tree_unflatten(treedef, local_stats_flat)\n # Pad the statistics and preconditioner matrices to be a multiple of\n # num devices.\n # TODO(rohananil): Relax to only the size of the mesh axis where the dim\n # is split on.\n to_pad = -len(padded_statistics) % num_devices_for_pjit\n padded_statistics.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n padded_preconditioners.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n global_stats = GlobalShardedParameterStats(\n jnp.stack(padded_statistics), jnp.stack(padded_preconditioners))\n return ShampooState(\n count=jnp.zeros([], jnp.int32),\n stats=ShardedShampooStats(global_stats, local_stats))\n\n def sharded_update_fn(grads, state, params):\n \"\"\"Transform the input gradient and update all statistics in sharded mode.\n\n Args:\n grads: the gradient tensors for the parameters.\n state: a named tuple containing the state of the optimizer\n params: the parameters that should be updated.\n\n Returns:\n A tuple containing the new parameters and the new optimizer state.\n \"\"\"\n params_flat, treedef = jax.tree_flatten(params)\n grads_flat = treedef.flatten_up_to(grads)\n\n global_stats = state.stats.global_stats\n local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)\n stats_flat = [\n _convert_to_parameter_stats(global_stats, local_stat)\n for local_stat in local_stats_flat\n ]\n new_stats_flat = jax.tree_multimap(\n lambda g, s, p: _compute_stats(g, s, p, 
state.count), grads_flat,\n stats_flat, params_flat)\n\n exponents = []\n for stat, param in zip(new_stats_flat, params_flat):\n num_statistics = len(stat.statistics)\n if num_statistics > 0:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n exponent = (\n preconditioner.exponent_for_preconditioner()\n if exponent_override == 0 else exponent_override)\n exponents.extend([exponent] * num_statistics)\n\n outputs = jax.tree_multimap(\n lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat,\n new_stats_flat, params_flat)\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n # Create new local_stats\n new_local_stats_flat = [\n _convert_from_parameter_stats(new_stat, local_stat)\n for new_stat, local_stat in zip(new_stats_flat, local_stats_flat)\n ]\n new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat)\n\n max_size = global_stats.statistics.shape[1]\n new_padded_statistics = []\n for stat in new_stats_flat:\n new_padded_statistics.extend(\n [pad_matrix(stat, max_size) for stat in stat.statistics])\n\n # Create global stats\n # TODO(rohananil): Preconditioner is not updated every step, so cost of\n # stack/pad can be obviated away.\n # Pad the statistics and preconditioner matrices to be a multiple of\n # num devices.\n # TODO(rohananil): Relax to only the size of the mesh axis where the dim\n # is split on.\n to_pad = -len(new_padded_statistics) % num_devices_for_pjit\n new_padded_statistics.extend([\n jnp.eye(max_size, dtype=new_padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n\n def _matrix_inverse_pth_root_vmap(xs, ps):\n mi_pth_root = functools.partial(\n matrix_inverse_pth_root,\n ridge_epsilon=matrix_epsilon,\n precision=precision)\n preconditioners, errors = jax.vmap(mi_pth_root)(xs, ps)\n return preconditioners, errors\n\n def _internal_inverse_pth_root_all():\n preconditioners, errors = _matrix_inverse_pth_root_vmap(\n global_stats.statistics, jnp.stack(exponents))\n return preconditioners, errors\n\n if preconditioning_compute_steps == 1:\n new_preconditioners, errors = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. 
Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = global_stats.statistics\n errors_init = np.stack([inverse_failure_threshold] * len(exponents))\n init_state = [preconditioners_init, errors_init]\n perform_step = state.count % preconditioning_compute_steps == 0\n new_preconditioners, errors = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n\n errors = errors.reshape((-1, 1, 1))\n predicate = jnp.logical_or(\n jnp.isnan(errors),\n errors >= inverse_failure_threshold).astype(new_preconditioners.dtype)\n # TODO(rohananil): Check for numerical instabilities.\n new_conditional_preconditioners = (\n predicate * global_stats.preconditioners +\n (1.0 - predicate) * new_preconditioners)\n new_global_stats = GlobalShardedParameterStats(\n jnp.stack(new_padded_statistics), new_conditional_preconditioners)\n new_shampoo_state = ShampooState(\n count=state.count + 1,\n stats=ShardedShampooStats(new_global_stats, new_local_stats))\n return updates, new_shampoo_state\n\n def init_fn(params):\n \"\"\"Initialise the optimiser's state.\"\"\"\n\n def _init(param):\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n statistics = []\n preconditioners = []\n if not _skip_preconditioning(param):\n shapes = preconditioner.shapes_for_preconditioners()\n statistics = [matrix_epsilon * jnp.eye(s[0]) for s in shapes]\n preconditioners = [jnp.eye(s[0]) for s in shapes]\n\n adagrad_statistics = []\n if graft_type != GraftingType.SGD:\n adagrad_statistics = jnp.zeros_like(param)\n return ParameterStats(adagrad_statistics, statistics, preconditioners,\n jnp.zeros_like(param), jnp.zeros_like(param))\n\n return ShampooState(\n count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params))\n\n def _skip_preconditioning(param):\n return len(param.shape) < 1 or any(\n [s > skip_preconditioning_dim_size_gt for s in param.shape])\n\n def _compute_stats(grad, state, param, step):\n \"\"\"Compute per-parameter statistics.\"\"\"\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n new_statistics = [[]] * len(state.statistics)\n w1 = beta2\n w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)\n if not _skip_preconditioning(param):\n\n def compute_updated_statistics():\n new_stats = preconditioner.statistics_from_grad(grad)\n new_stats_accumulators = []\n for stat, stat_accumulator in zip(new_stats, state.statistics):\n new_stats_accumulators.append(w1 * stat_accumulator + w2 * stat)\n return new_stats_accumulators\n\n if statistics_compute_steps > 1:\n perform_step = step % statistics_compute_steps == 0\n init_state = state.statistics\n new_statistics = list(\n efficient_cond(perform_step, compute_updated_statistics,\n init_state))\n else:\n new_statistics = compute_updated_statistics()\n return ParameterStats(state.diagonal_statistics, new_statistics,\n state.preconditioners, state.diagonal_momentum,\n state.momentum)\n\n def _compute_preconditioners(states, params, step):\n \"\"\"Compute preconditioners for statistics.\"\"\"\n statistics = []\n num_statistics_per_state = []\n original_shapes = []\n exponents = []\n max_size = 0\n prev_preconditioners = []\n for state, param in zip(states, params):\n num_statistics = len(state.statistics)\n num_statistics_per_state.append(num_statistics)\n original_shapes_for_state = []\n if num_statistics > 0:\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n for statistic in 
state.statistics:\n exponents.append(preconditioner.exponent_for_preconditioner(\n ) if exponent_override == 0 else exponent_override)\n original_shapes_for_state.append(statistic.shape)\n max_size = max(max_size, statistic.shape[0])\n statistics.extend(state.statistics)\n prev_preconditioners.extend(state.preconditioners)\n original_shapes.extend(original_shapes_for_state)\n num_statistics = len(statistics)\n\n if batch_axis_name:\n num_devices = lax.psum(1, batch_axis_name)\n\n # Pad statistics and exponents to next multiple of num_devices.\n packed_statistics = [pad_matrix(stat, max_size) for stat in statistics]\n to_pad = -num_statistics % num_devices\n packed_statistics.extend([\n jnp.eye(max_size, dtype=packed_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n\n if not packed_statistics:\n return states\n # Batch statistics and exponents so that so that leading axis is\n # num_devices.\n def _batch(statistics, exponents, num_devices):\n assert len(statistics) == len(exponents)\n n = len(statistics)\n b = int(n / num_devices)\n batched_statistics = [\n jnp.stack(statistics[idx:idx + b]) for idx in range(0, n, b)\n ]\n batched_exponents = [\n jnp.stack(exponents[idx:idx + b]) for idx in range(0, n, b)\n ]\n return jnp.stack(batched_statistics), jnp.stack(batched_exponents)\n\n # Unbatch values across leading axis and return a list of elements.\n def _unbatch(batched_values):\n b1, b2 = batched_values.shape[0], batched_values.shape[1]\n results = []\n for v_array in jnp.split(\n batched_values, indices_or_sections=b1, axis=0):\n v_array = jnp.squeeze(v_array)\n # b2 = batches (number of preconditioner computation) per core.\n if b2 > 1:\n for v in jnp.split(v_array, indices_or_sections=b2, axis=0):\n results.append(jnp.squeeze(v))\n else:\n results.append(v_array)\n return results\n\n all_statistics, all_exponents = _batch(packed_statistics, exponents,\n num_devices)\n else:\n to_pad = -num_statistics % num_devices_for_pjit\n padded_statistics = [pad_matrix(stat, max_size) for stat in statistics]\n padded_statistics.extend([\n jnp.eye(max_size, dtype=padded_statistics[0].dtype)\n for _ in range(to_pad)\n ])\n exponents.extend([1 for _ in range(to_pad)])\n all_statistics = jnp.stack(padded_statistics)\n all_exponents = jnp.stack(exponents)\n\n def _matrix_inverse_pth_root_vmap(xs, ps):\n mi_pth_root = functools.partial(\n matrix_inverse_pth_root,\n ridge_epsilon=matrix_epsilon,\n precision=precision)\n preconditioners, errors = jax.vmap(mi_pth_root)(xs, ps)\n return preconditioners, errors\n\n def _matrix_inverse_pth_root_pjit(xs, ps):\n mesh_axis_names_tuple = tuple(mesh_axis_names)\n # Partition the concatenated statistics matrix across all cores.\n partitioned_xs, partitioned_ps = pjit.pjit(\n lambda x, y: (x, y),\n in_axis_resources=None,\n out_axis_resources=pjit.PartitionSpec(mesh_axis_names_tuple,))(xs, ps)\n # Run matrix inverse pth root on each shard.\n partitioned_preconditioners, partitioned_errors = _matrix_inverse_pth_root_vmap(\n partitioned_xs, partitioned_ps)\n # Recombine the outputs at each core.\n preconditioners, errors = pjit.pjit(\n lambda x, y: (x, y),\n in_axis_resources=(pjit.PartitionSpec(mesh_axis_names_tuple,),\n pjit.PartitionSpec(mesh_axis_names_tuple,)),\n out_axis_resources=(None, None))(partitioned_preconditioners,\n partitioned_errors)\n return preconditioners, errors\n\n if not batch_axis_name:\n def _internal_inverse_pth_root_all():\n preconditioners, errors = _matrix_inverse_pth_root_pjit(\n 
all_statistics, all_exponents)\n b1 = preconditioners.shape[0]\n def split(batched_values):\n return [\n jnp.squeeze(v) for v in jnp.split(\n batched_values, indices_or_sections=b1, axis=0)\n ]\n\n return split(preconditioners), split(errors)\n\n if preconditioning_compute_steps == 1:\n preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = padded_statistics\n errors_init = [inverse_failure_threshold] * len(padded_statistics)\n init_state = [preconditioners_init, errors_init]\n perform_step = step % preconditioning_compute_steps == 0\n preconditioners_flat, errors_flat = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n else:\n\n def _internal_inverse_pth_root_all():\n preconditioners = jnp.array(all_statistics)\n current_replica = lax.axis_index(batch_axis_name)\n preconditioners, errors = _matrix_inverse_pth_root_vmap(\n all_statistics[current_replica], all_exponents[current_replica])\n preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)\n errors = jax.lax.all_gather(errors, batch_axis_name)\n preconditioners_flat = _unbatch(preconditioners)\n errors_flat = _unbatch(errors)\n return preconditioners_flat, errors_flat\n\n if preconditioning_compute_steps == 1:\n preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()\n else:\n # Passing statistics instead of preconditioners as they are similarly\n # shaped tensors. Note statistics will be ignored as we are passing in\n # a large init value for error.\n preconditioners_init = packed_statistics\n errors_init = ([inverse_failure_threshold] * len(packed_statistics))\n init_state = [preconditioners_init, errors_init]\n perform_step = step % preconditioning_compute_steps == 0\n preconditioners_flat, errors_flat = efficient_cond(\n perform_step, _internal_inverse_pth_root_all, init_state)\n\n def _skip(error):\n condition = jnp.logical_or(\n jnp.isnan(error), error >= inverse_failure_threshold)\n return condition.astype(error.dtype)\n\n def _select_preconditioner(error, new_p, old_p):\n return lax.cond(\n _skip(error), lambda _: old_p, lambda _: new_p, operand=None)\n\n new_preconditioners_flat = []\n for p, shape, prev_p, error in zip(preconditioners_flat, original_shapes,\n prev_preconditioners, errors_flat):\n new_preconditioners_flat.append(\n _select_preconditioner(error, p[:shape[0], :shape[1]], prev_p))\n\n assert len(states) == len(num_statistics_per_state)\n assert len(new_preconditioners_flat) == num_statistics\n\n # Add back empty preconditioners so we that we can set the optimizer state.\n preconditioners_for_states = []\n idx = 0\n for num_statistics, state in zip(num_statistics_per_state, states):\n if num_statistics == 0:\n preconditioners_for_states.append([])\n else:\n preconditioners_for_state = new_preconditioners_flat[idx:idx +\n num_statistics]\n assert len(state.statistics) == len(preconditioners_for_state)\n preconditioners_for_states.append(preconditioners_for_state)\n idx += num_statistics\n new_states = []\n for state, new_preconditioners in zip(states, preconditioners_for_states):\n new_states.append(\n ParameterStats(state.diagonal_statistics, state.statistics,\n new_preconditioners, state.diagonal_momentum,\n state.momentum))\n\n return new_states\n\n def _transform_grad(grad, state, param, step):\n \"\"\"Transform per-parameter 
gradients.\"\"\"\n preconditioner = Preconditioner(param, block_size,\n best_effort_shape_interpretation)\n sgd_update = grad\n new_diagonal_statistics = state.diagonal_statistics\n if graft_type == GraftingType.ADAGRAD:\n new_diagonal_statistics = state.diagonal_statistics + jnp.square(grad)\n adagrad_update = grad / (\n jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)\n grafting_update = adagrad_update\n elif (graft_type == GraftingType.RMSPROP or\n graft_type == GraftingType.RMSPROP_NORMALIZED):\n\n scaled_grad = grad\n if graft_type == GraftingType.RMSPROP_NORMALIZED:\n scaled_grad = grad / jnp.linalg.norm(grad)\n\n w1 = beta2\n w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)\n\n new_diagonal_statistics = (\n w1 * state.diagonal_statistics + w2 * jnp.square(scaled_grad))\n rmsprop_update = scaled_grad / (\n jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)\n\n if clip_by_scaled_gradient_norm:\n scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (\n jnp.sqrt(float(rmsprop_update.size)))\n clipping_denom = jnp.maximum(\n 1., scaled_grad_norm / clip_by_scaled_gradient_norm)\n rmsprop_update /= clipping_denom\n\n grafting_update = rmsprop_update\n else:\n grafting_update = sgd_update\n\n precond_grad = grad\n if not _skip_preconditioning(param):\n precond_grad = preconditioner.preconditioned_grad(precond_grad,\n state.preconditioners)\n else:\n precond_grad = grafting_update\n\n grafting_update_norm = jnp.linalg.norm(grafting_update)\n precond_grad_norm = jnp.linalg.norm(precond_grad)\n\n multiplier = (grafting_update_norm / (precond_grad_norm + 1e-16))\n shampoo_update = precond_grad * multiplier\n\n shampoo_update_with_wd = shampoo_update\n grafting_update_with_wd = grafting_update\n if weight_decay != 0:\n shampoo_update_with_wd = shampoo_update + weight_decay * param\n grafting_update_with_wd = grafting_update + weight_decay * param\n\n w = (1.0 - beta1) if moving_average_for_momentum else 1.0\n shampoo_update_with_wd_momentum = (\n state.momentum * beta1 + w * shampoo_update_with_wd)\n grafting_update_with_wd_momentum = (\n state.diagonal_momentum * beta1 + w * grafting_update_with_wd)\n\n run_shampoo = (step >= start_preconditioning_step).astype(\n grafting_update_with_wd_momentum.dtype)\n\n momentum_update = (\n run_shampoo * shampoo_update_with_wd_momentum +\n (1.0 - run_shampoo) * grafting_update_with_wd_momentum)\n\n wd_update = (\n run_shampoo * shampoo_update_with_wd +\n (1.0 - run_shampoo) * grafting_update_with_wd)\n\n if nesterov:\n momentum_update = w * wd_update + beta1 * momentum_update\n\n lr = learning_rate\n if callable(learning_rate):\n lr = learning_rate(step)\n transformed_update = -1.0 * lr * momentum_update\n\n param_stats = ParameterStats(new_diagonal_statistics, state.statistics,\n state.preconditioners,\n grafting_update_with_wd_momentum,\n shampoo_update_with_wd_momentum)\n return transformed_update, param_stats\n\n def update_fn(grads, state, params):\n \"\"\"Transform the input gradient and update all statistics.\n\n Args:\n grads: the gradient tensors for the parameters.\n state: a named tuple containing the state of the optimizer\n params: the parameters that should be updated.\n\n Returns:\n A tuple containing the new parameters and the new optimizer state.\n \"\"\"\n params_flat, treedef = jax.tree_flatten(params)\n stats_flat = treedef.flatten_up_to(state.stats)\n grads_flat = treedef.flatten_up_to(grads)\n\n new_stats_flat = jax.tree_multimap(\n lambda g, s, p: _compute_stats(g, s, p, state.count), grads_flat,\n stats_flat, params_flat)\n 
new_stats_flat = _compute_preconditioners(new_stats_flat, params_flat,\n state.count)\n\n outputs = jax.tree_multimap(\n lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat,\n new_stats_flat, params_flat)\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n new_stats = jax.tree_unflatten(treedef, new_stats_flat)\n\n new_state = ShampooState(\n count=state.count+1, stats=new_stats)\n return updates, new_state\n\n if shard_optimizer_states:\n return optax.GradientTransformation(sharded_init_fn, sharded_update_fn)\n else:\n return optax.GradientTransformation(init_fn, update_fn)",
"def __init__(self, module, equalize=True, bias_init=True, lrmult=1.0):\n super(EqualizedLayer, self).__init__()\n\n self.module = module\n self.equalize = equalize\n self.init_bias = bias_init\n\n if self.equalize:\n self.module.weight.data.normal_(0, 1) # Normal distribution mean of 0, SD of 1\n self.module.weight.data /= lrmult # Scale weights by a layer specific learning rate multiplier\n # Divides by multiplier as the He Value is the reciprocal of multiple of the output weights\n self.he_val = he_initializer(self.module)\n if self.init_bias:\n self.module.bias.data.fill_(0)",
"def test_conditional_broadcasting(session_tf, full_cov, white, conditional_type):\n X_ = tf.placeholder(tf.float64, [None, None])\n q_mu = np.random.randn(Data.M, Data.Dy)\n q_sqrt = np.tril(np.random.randn(Data.Dy, Data.M, Data.M), -1)\n\n if conditional_type == \"Z\":\n feat = Data.Z\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"inducing_points\":\n feat = gpflow.features.InducingPoints(Data.Z)\n kern = gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5)\n elif conditional_type == \"mixing\":\n # variational params have different output dim in this case\n q_mu = np.random.randn(Data.M, Data.L)\n q_sqrt = np.tril(np.random.randn(Data.L, Data.M, Data.M), -1)\n feat = mf.MixedKernelSharedMof(gpflow.features.InducingPoints(Data.Z))\n kern = mk.SeparateMixedMok(\n kernels=[gpflow.kernels.Matern52(Data.Dx, lengthscales=0.5) for _ in range(Data.L)],\n W=Data.W\n )\n\n if conditional_type == \"mixing\" and full_cov:\n pytest.skip(\"combination is not implemented\")\n\n num_samples = 5\n sample_tf, mean_tf, cov_tf = sample_conditional(\n X_,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n\n ss, ms, vs = [], [], []\n for X in Data.SX:\n s, m, v = session_tf.run([sample_tf, mean_tf, cov_tf], {X_: X})\n ms.append(m)\n vs.append(v)\n ss.append(s)\n\n ms = np.array(ms)\n vs = np.array(vs)\n ss = np.array(ss)\n\n ss_S12, ms_S12, vs_S12 = session_tf.run(\n sample_conditional(\n Data.SX,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n ss_S1_S2, ms_S1_S2, vs_S1_S2 = session_tf.run(\n sample_conditional(\n Data.S1_S2_X,\n feat,\n kern,\n tf.convert_to_tensor(q_mu),\n q_sqrt=tf.convert_to_tensor(q_sqrt),\n white=white,\n full_cov=full_cov,\n num_samples=num_samples\n )\n )\n\n assert_allclose(ss_S12.shape, ss.shape)\n assert_allclose(ms_S12, ms)\n assert_allclose(vs_S12, vs)\n assert_allclose(ms_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), ms)\n assert_allclose(ss_S1_S2.shape, [Data.S1, Data.S2, num_samples, Data.N, Data.Dy])\n\n if full_cov:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.Dy, Data.N, Data.N), vs)\n else:\n assert_allclose(vs_S1_S2.reshape(Data.S1 * Data.S2, Data.N, Data.Dy), vs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an error is raised if no wires are passed.
|
def test_no_wires(self):
    class DummyOp(qml.operation.Operator):
        num_wires = 1
        num_params = 1

    with pytest.raises(ValueError, match="Must specify the wires"):
        DummyOp(1.234)
|
[
"def test_check_wires_exception(self, wires):\n with pytest.raises(ValueError, match=\"wires must be a positive integer\"):\n check_wires(wires=wires)",
"def test_operation_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 2])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_observable_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def testWelchsTTest_EmptySample_RaisesError(self):\n with self.assertRaises(RuntimeError):\n ttest.WelchsTTest([], [])\n with self.assertRaises(RuntimeError):\n ttest.WelchsTTest([], [1, 2, 3])\n with self.assertRaises(RuntimeError):\n ttest.WelchsTTest([1, 2, 3], [])",
"def test_non_unique_wires(self):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(qml.wires.WireError, match=\"Wires must be unique\"):\n DummyOp(0.5, wires=[1, 1], do_queue=False)",
"def _check_not_reproduced_expected_errors(self):\n if self.__not_reproduced_expected_errors:\n pytest.fail(msg=self._get_not_reproduced_expected_errors_msg() +\n self._get_reproduced_expected_errors_msg())",
"def _check_reproduced_expected_errors(self):\n if self.__reproduced_expected_errors:\n pytest.xfail(reason=self._get_reproduced_expected_errors_msg())",
"def test_error_calls_given_func_if_func_not_None(self):\n error('foo', func=Fake(callable=True, expect_call=True))",
"def test_all_or_none(self):\n obj = {\"seek\": \"0x1800000\",\n \"start\": \"0x1800000\",\n \"type\": \"flash\"}\n\n with self.assertRaises(KeyError):\n fpgaotsu.all_or_none(obj, 'filename', 'start')\n\n try:\n fpgaotsu.all_or_none(obj, 'type', 'start')\n except ExceptionType:\n self.fail(\"all_or_none raised ExceptionType unexpectedly\")",
"def test_radio_single_fail(self):\n with mock.patch('bbarchivist.networkutils.availability', mock.MagicMock(return_value=False)):\n with mock.patch('builtins.input', mock.MagicMock(return_value=\"n\")):\n with pytest.raises(SystemExit):\n bs.check_radio_single(\"http://qrrbrbirlbel.yu/\", \"10.3.2.2639\")",
"def error_check(self):\n error = self.root.find('error')\n if error is not None:\n if error.attrib == 'Invalid station type':\n raise InvalidStationTypeErrorException('Invalid station type')\n else:\n raise UnknownApiErrorException(error.attrib)",
"def test_not_running():\n comm_kwargs = dict(comm='RMQComm', direction='send', reverse_names=True)\n nt.assert_raises(RuntimeError, new_comm, 'test', **comm_kwargs)",
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_not_all_communication_used():\n with raises(AssertionError, match=\"Unprocessed protocol definitions remain\"):\n with expected_protocol(\n BasicTestInstrument,\n [('VOLT?', 3.14),\n ('VOLT 4.5 V', None),\n ]\n ) as instr:\n assert instr.simple == 3.14",
"def test_return_of_non_observable(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), 0.3\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"must return either\"):\n node(0.5)",
"def test_none_input_data(self):\n invalid_struct = self.struct \n invalid_struct.diffusion_data = np.array([])\n invalid_struct.structural_data = np.array([])\n self.assertRaises(ValueError, module_01.run_module, invalid_struct)",
"def test_build_ops_error():\n qubit = cirq.LineQubit.range(1)\n with pytest.raises(ValueError):\n cirq_utils.qubit_op_to_gate('W', qubit[0])",
"def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])",
"def _checkForErrors(self, simulation, state):\n if self._needEnergy:\n energy = (state.getKineticEnergy()+state.getPotentialEnergy()).value_in_unit(unit.kilojoules_per_mole)\n if math.isnan(energy):\n raise ValueError('Energy is NaN')\n if math.isinf(energy):\n raise ValueError('Energy is infinite')",
"def test_town_checking_fail(self):\r\n for town in self.bad_towns:\r\n with self.assertRaises(ValueError) as context:\r\n hw.Garage.town_checking(town)\r\n self.assertTrue('Town should be instance of TOWNS!' in context.exception.args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that we can set the name of an operator.
|
def test_name_setter(self):
    class DummyOp(qml.operation.Operator):
        r"""Dummy custom operator"""
        num_wires = 1

    op = DummyOp(wires=0)
    op.name = "MyOp"
    assert op.name == "MyOp"
|
[
"def test_operator_get_operator(self):\n pass",
"def test_operator_create_operator(self):\n pass",
"def test_explicitly_specified_control_op(self, op, target_name):\n assert _get_target_name(op) == target_name",
"def testSetOperator(self):\n parser = expression_parser.EventFilterExpressionParser()\n parser._Reset()\n\n self.assertIsNotNone(parser._current_expression)\n self.assertIsNone(parser._current_expression.operator)\n\n next_state = parser._SetOperator(string='&&')\n self.assertIsNone(next_state)\n self.assertEqual(parser._current_expression.operator, '&&')",
"def test_operator_update_operator(self):\n pass",
"def get_operator(operator_name):\n if operator_name == \"gt\":\n return Gt\n elif operator_name == \"gte\":\n return Gte\n elif operator_name == \"eq\":\n return Eq\n elif operator_name == \"in\":\n return In\n elif operator_name == \"range\":\n return Range\n elif operator_name == \"contains\":\n return Contains\n elif operator_name == \"startswith\":\n return StartsWith\n elif operator_name == \"endswith\":\n return EndsWith\n\n raise Exception(\"Invalid operator name {0}\".format(operator_name))",
"def set_operation(self, op_name):\n self.report[self.key_optype] = op_name",
"def testUnknownOperatorName(self):\r\n\r\n class MockCopsDevice(MockDevice):\r\n def process(self, cmd):\r\n\r\n # return a valid +COPS response for AT+COPS?, but error\r\n # for other commands (except built-in MockDevice stuff)\r\n if cmd == \"AT+COPS?\":\r\n return self._respond('+COPS: 0')\r\n\r\n return False\r\n\r\n device = MockCopsDevice()\r\n gsm = pygsm.GsmModem(device=device)\r\n self.assertEqual(gsm.network, \"(Automatic)\")",
"def op_name_equals(node, name):\n result = False\n if hasattr(node, 'op_name'):\n result = (node.op_name == name)\n\n return result",
"def test_operator_get_operator_groups_for_operator(self):\n pass",
"def test_operator_get_all_operators(self):\n pass",
"def test_operator_update_operator_with_patch(self):\n pass",
"def isOp(s):\n return getOp(s) != None",
"def test_operator_literals():\n TestScanner._run(**{\n 'name': 'Operator Alpha Literals',\n 'expressions': {\n 'concat': ['.'],\n 'alt': ['|'],\n 'star': ['*'],\n 'question': ['?'],\n 'plus': ['+'],\n 'slash': ['\\\\'],\n 'lparen': ['('],\n 'rparen': [')'],\n 'lbracket': ['['],\n 'rbracket': [']']\n },\n 'DFA': {\n 'Q': set(['S', 'F', 'Err']),\n 'V': set('.|*?+\\\\()[]'),\n # pylint: disable=bad-whitespace\n 'T': [\n [' ', 'S', 'F', 'Err'],\n ['.', 'F', 'Err', 'Err'],\n ['|', 'F', 'Err', 'Err'],\n ['*', 'F', 'Err', 'Err'],\n ['?', 'F', 'Err', 'Err'],\n ['+', 'F', 'Err', 'Err'],\n ['\\\\', 'F', 'Err', 'Err'],\n ['(', 'F', 'Err', 'Err'],\n [')', 'F', 'Err', 'Err'],\n ['[', 'F', 'Err', 'Err'],\n [']', 'F', 'Err', 'Err']\n ],\n # pylint: enable=bad-whitespace\n 'S': 'S',\n 'F': set(['F']),\n 'G': {\n 'concat': set(['F']),\n 'alt': set(['F']),\n 'star': set(['F']),\n 'question': set(['F']),\n 'plus': set(['F']),\n 'slash': set(['F']),\n 'lparen': set(['F']),\n 'rparen': set(['F']),\n 'lbracket': set(['F']),\n 'rbracket': set(['F']),\n '_sink': set(['Err'])\n }\n }\n })",
"def operator_names():\n return ExtensionManager('cosmic_ray.operators').names()",
"def get_operator(cls, exp: Expression):\n if callable(exp.operator):\n return exp.operator\n\n if isinstance(exp.operator, str):\n try:\n return cls.OPERATORS[exp.operator.lower()]\n except KeyError:\n raise InvalidOperator(f\"No such operator {exp.operator!r}!\")\n\n raise InvalidOperator(\n f\"Please provide a valid operator or callable, not {exp.operator!r}!\"\n )",
"def _isOperator(self, token):\n token = token.strip()\n \n if(token == \"+\"):\n return True\n\n if(token == \"*\"):\n return True\n \n return False",
"def test_operator_post_authorization_for_operator(self):\n pass",
"def operator_player_name(self, operator_player_name):\n\n self._operator_player_name = operator_player_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the has_matrix property detects overriding of the `compute_matrix` method.
|
def test_has_matrix_true(self):
    class MyOp(qml.operation.Operator):
        num_wires = 1

        @staticmethod
        def compute_matrix():
            return np.eye(2)

    assert MyOp.has_matrix
    assert MyOp(wires=0).has_matrix
|
[
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def _compute_matrix_profile(self):\n raise NotImplementedError",
"def has_ub_matrix(self):\n return False",
"def is_matrix(self):\r\n return self.size[0] > 1 and self.size[1] > 1",
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def test_incompatible_dimensions(self, matrices):\n # Instantiate 5x10, 10x5, and 10x10 matrices as Matrix class\n square_mat = chap5.Matrix(matrices.square)\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n # Verify Matrix class raises AssertionError for incompatible dimensions\n with pytest.raises(AssertionError):\n half_row_mat + half_col_mat # (5x10) + (10x5)\n with pytest.raises(AssertionError):\n half_col_mat + half_row_mat # (10x5) + (5x10)\n with pytest.raises(AssertionError):\n half_col_mat @ square_mat # (10x5) @ (10x10)\n with pytest.raises(AssertionError):\n square_mat @ half_row_mat # (10x10) @ (5x10)",
"def test_metric_matrix_is_block_diagonal(self, metric_args, base_point):\n metric = self.Metric(*metric_args)\n result = metric.metric_matrix(base_point)\n individual_metric_matrices = [metric.matrix for metric in metric_args[0]]\n expected = reduce(gs.kron, individual_metric_matrices)\n self.assertAllClose(result, expected)",
"def test_has_matrix_false_concrete_template(self):\n\n rng = qml.numpy.random.default_rng(seed=42)\n shape = qml.StronglyEntanglingLayers.shape(n_layers=2, n_wires=2)\n params = rng.random(shape)\n op = qml.StronglyEntanglingLayers(params, wires=range(2))\n assert not op.has_matrix",
"def test_get_submatrix(self):\n\n # First up a 3x3 example\n M = matrices.Matrix(3, 3)\n M.set_row(0, [1, 5, 0])\n M.set_row(1, [-3, 2, 7])\n M.set_row(2, [0, 6, -3])\n\n result = M.submatrix(0, 2)\n\n expected = matrices.Matrix(2, 2)\n expected.set_row(0, [-3, 2])\n expected.set_row(1, [0, 6])\n\n self.assertEqual(result, expected)\n\n # Then a 4x4 example\n M = matrices.Matrix(4, 4)\n M.set_row(0, [-6, 1, 1, 6])\n M.set_row(1, [-8, 5, 8, 6])\n M.set_row(2, [-1, 0, 8, 2])\n M.set_row(3, [-7, 1, -1, 1])\n\n result = M.submatrix(2, 1)\n\n expected = matrices.Matrix(3, 3)\n expected.set_row(0, [-6, 1, 6])\n expected.set_row(1, [-8, 8, 6])\n expected.set_row(2, [-7, -7, 1])",
"def __add__(self, matrix):",
"def test_create_input_matrix(self):\n input_matrix = create_input_matrix(self.log_return_dataframe, 'angular')\n self.check_angular_distance(input_matrix)\n # An incorrect sub type raises Value Error\n self.assertRaises(ValueError, create_input_matrix, self.log_return_dataframe, 'invalid matrix subtype')",
"def test_compute_pdf_matrix(self):\n pdf_matrix = self.cluster_obj_2.compute_pdf_matrix()\n self.assertEqual(round(pdf_matrix[0,0], 3), 0.044)\n self.assertEqual(round(pdf_matrix[0,1], 3), 0.038)",
"def test_matmul(self, matrices):\n # Instantiate the 10x10 matrix and test matrix multiplication\n square_mat = chap5.Matrix(matrices.square)\n square_np = np.array(matrices.square)\n square_matmul = (square_mat @ square_mat)._matrix\n square_np_result = square_np @ square_np\n # Compare to the Numpy result of multiplying the matrix times itself\n assert (np.array(square_matmul) == square_np_result).all()\n # Instantiate a 5x10 and 10x5 matrix as Matrix class and Numpy array\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n half_row_np = np.array(matrices.half_row)\n half_col_np = np.array(matrices.half_col)\n # Matrix multiplication amongst the 10x10, 5x10, and 10x5 matrices\n result1 = half_row_mat @ half_col_mat # (5x10) @ (10x5)\n exp_result1 = half_row_np @ half_col_np # (5x10) @ (10x5)\n result2 = half_col_mat @ half_row_mat # (10x5) @ (5x10)\n exp_result2 = half_col_np @ half_row_np # (10x5) @ (5x10)\n result3 = half_row_mat @ square_mat # (5x10) @ (10x10)\n exp_result3 = half_row_np @ square_np # (5x10) @ (10x10)\n result4 = square_mat @ half_col_mat # (10x10) @ (10x5)\n exp_result4 = square_np @ half_col_np # (10x10) @ (10x5)\n assert (np.array(result1._matrix) == exp_result1).all()\n assert (np.array(result2._matrix) == exp_result2).all()\n assert (np.array(result3._matrix) == exp_result3).all()\n assert (np.array(result4._matrix) == exp_result4).all()",
"def test_good_execute_ingest(\n self, mock_load, mock_transform, mock_has_unique_cells\n ):\n expression_matrix = DenseIngestor(\n \"../tests/data/dense_matrix_19_genes_1000_cells.txt\",\n \"5d276a50421aa9117c982845\",\n \"5dd5ae25421aa910a723a337\",\n )\n expression_matrix.execute_ingest()\n self.assertTrue(mock_transform.called)\n # Commenting out this \"good_execute_ingest\" assertion due to change in\n # mock with Python >= 3.8 (accessing attributes on the mock object\n # before patching mock object, causing error not seen with Python 3.7)\n # TODO (SCP-5032): Resolve this test\n #\n # self.assertTrue(mock_load.called)",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def __mul__(self, matrix):",
"def test_no_wire_order_returns_base_matrix(self):\n res = qml.operation.expand_matrix(self.base_matrix_2, wires=[0, 2])\n assert np.allclose(self.base_matrix_2, res)",
"def is_tt_matrix(self):\n return len(self.get_raw_shape()) == 2",
"def test_patch_compute_board(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the has_matrix property defaults to False if `compute_matrix` is not overridden.
|
def test_has_matrix_false(self):
    class MyOp(qml.operation.Operator):
        num_wires = 1

    assert not MyOp.has_matrix
    assert not MyOp(wires=0).has_matrix
|
[
"def test_has_matrix_true(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n @staticmethod\n def compute_matrix():\n return np.eye(2)\n\n assert MyOp.has_matrix\n assert MyOp(wires=0).has_matrix",
"def is_matrix(self):\r\n return self.size[0] > 1 and self.size[1] > 1",
"def has_ub_matrix(self):\n return False",
"def HasMAT(self):\n return self.__has('MAT')",
"def is_tt_matrix(self):\n return len(self.get_raw_shape()) == 2",
"def is_matrix_zero(expr):\n return is_matrix_x(expr, 0)",
"def __bool__(self):\n return _core.StringMatrixMap___nonzero__(self)",
"def test_has_matrix_false_concrete_template(self):\n\n rng = qml.numpy.random.default_rng(seed=42)\n shape = qml.StronglyEntanglingLayers.shape(n_layers=2, n_wires=2)\n params = rng.random(shape)\n op = qml.StronglyEntanglingLayers(params, wires=range(2))\n assert not op.has_matrix",
"def simple_material(mat):\n return (mat is not None) and (not mat.use_nodes)",
"def __bool__(self):\n return _core.MatrixXdVec___bool__(self)",
"def __bool__(self):\n return _core.MatrixXdVec___nonzero__(self)",
"def __bool__(self):\n return _core.StringMatrixMap___bool__(self)",
"def _compute_matrix_profile(self):\n raise NotImplementedError",
"def _is_lefttotal(self):\r\n\r\n return np.all(np.sum(self.matrix,axis=0))",
"def is_matrix_minus_one(expr):\n return is_matrix_x(expr, -1)",
"def checkIfSquare(matrix):\n if len(matrix.shape) == 2:\n return True if matrix.shape[0] == matrix.shape[1] else False\n else:\n return False",
"def can_estimate(self):\n return not ((self.parameters.map is None)\n or (self.parameters.path is None)\n or (self.parameters.position is None)\n or (self.parameters.position_error is None))",
"def is_injective(self):\n # Some matrix representation is picked at random:\n matrix_rep = self._matrices.values()[0]\n return matrix_rep.right_kernel().rank() == 0",
"def is_matrix_one(expr):\n return is_matrix_x(expr, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test has_matrix with a concrete operation (StronglyEntanglingLayers) that does not have a matrix defined.
|
def test_has_matrix_false_concrete_template(self):
    rng = qml.numpy.random.default_rng(seed=42)
    shape = qml.StronglyEntanglingLayers.shape(n_layers=2, n_wires=2)
    params = rng.random(shape)
    op = qml.StronglyEntanglingLayers(params, wires=range(2))
    assert not op.has_matrix
|
[
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def test_has_matrix_true(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n @staticmethod\n def compute_matrix():\n return np.eye(2)\n\n assert MyOp.has_matrix\n assert MyOp(wires=0).has_matrix",
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def has_ub_matrix(self):\n return False",
"def is_matrix(self):\r\n return self.size[0] > 1 and self.size[1] > 1",
"def simple_material(mat):\n return (mat is not None) and (not mat.use_nodes)",
"def is_matrix_zero(expr):\n return is_matrix_x(expr, 0)",
"def test_shape_matrixOp(self):\n\n\t\tresultShape = np.zeros([self.numQuery,self.numNhbrs,self.numNhbrs])\n\t\twith self.test_session() as sess:\n\t\t\tx = self.testKnn.matrixOp(self.feeder5, self.numNhbrs, graphToMatrixEmptyTestFunction)\n\t\t\tself.assertShapeEqual(resultShape, x)",
"def is_injective(self):\n # Some matrix representation is picked at random:\n matrix_rep = self._matrices.values()[0]\n return matrix_rep.right_kernel().rank() == 0",
"def is_matrix_minus_one(expr):\n return is_matrix_x(expr, -1)",
"def is_tt_matrix(self):\n return len(self.get_raw_shape()) == 2",
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def test_incompatible_dimensions(self, matrices):\n # Instantiate 5x10, 10x5, and 10x10 matrices as Matrix class\n square_mat = chap5.Matrix(matrices.square)\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n # Verify Matrix class raises AssertionError for incompatible dimensions\n with pytest.raises(AssertionError):\n half_row_mat + half_col_mat # (5x10) + (10x5)\n with pytest.raises(AssertionError):\n half_col_mat + half_row_mat # (10x5) + (5x10)\n with pytest.raises(AssertionError):\n half_col_mat @ square_mat # (10x5) @ (10x10)\n with pytest.raises(AssertionError):\n square_mat @ half_row_mat # (10x10) @ (5x10)",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def test_inv_3d_blockmatrix_odd(self):\n matrix = np.random.rand(3, 5, 5)\n with self.assertRaises(ValueError):\n inversematrix = geometry.inv_3d_blockmatrix(matrix)",
"def test_matrices_dont_commute(self):\n x0 = qml.PauliX(0)\n z0 = qml.PauliZ(0)\n\n assert not _check_mat_commutation(x0, z0)\n assert not _check_mat_commutation(z0, x0)",
"def test_difference_matrix_size_neg():\n with pytest.raises(ValueError):\n _banded_utils.difference_matrix(-1)",
"def test_is_not_positive_definite():\n mat = np.array([[-1, -1], [-1, -1]])\n np.testing.assert_equal(is_positive_definite(mat), False)",
"def is_matrix_one(expr):\n return is_matrix_x(expr, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that using tf.function with an operation works with and without just-in-time (JIT) compilation.
|
def test_with_tf_function(self, jit_compile):
    import tensorflow as tf

    class MyRX(qml.RX):
        @property
        def ndim_params(self):
            return self._ndim_params

    def fun(x):
        op0 = qml.RX(x, 0)
        op1 = MyRX(x, 0)

    # No kwargs
    fun0 = tf.function(fun)
    fun0(tf.Variable(0.2))
    fun0(tf.Variable([0.2, 0.5]))

    # With kwargs
    signature = (tf.TensorSpec(shape=None, dtype=tf.float32),)
    fun1 = tf.function(fun, jit_compile=jit_compile, input_signature=signature)
    fun1(tf.Variable(0.2))
    fun1(tf.Variable([0.2, 0.5]))
|
[
"def _generic_test(self,\n f_raw,\n examples,\n input_signature=None,\n skip_modes=None):\n f_tf = None\n if not skip_modes:\n skip_modes = []\n\n if tf_inspect.isfunction(f_raw):\n self.recordProperty('f', tf_inspect.getsource(f_raw))\n else:\n self.recordProperty('f', tf_inspect.getdoc(f_raw))\n\n for arg, out, failure, bugs in examples:\n del out\n self.recordProperty('Input \"{}\"'.format(arg), {\n 'not-working': failure,\n 'bugs': bugs\n })\n\n # Run the function without tf.function\n if RunMode.RAW not in skip_modes:\n self._run_and_check(f_raw, RunMode.RAW, examples)\n\n # TF Function\n if RunMode.FUNCTION not in skip_modes:\n f_tf = tf.function(f_raw, input_signature=input_signature)\n self._run_and_check(f_tf, RunMode.FUNCTION, examples)\n\n # XLA Function\n if RunMode.XLA not in skip_modes:\n f_xla = tf.function(\n f_raw, input_signature=input_signature, experimental_compile=True)\n self._run_and_check(f_xla, RunMode.XLA, examples)\n\n # Write a saved model and try to run it\n if RunMode.SAVED not in skip_modes:\n module = tf.Module()\n if f_tf:\n module.f = f_tf\n else:\n module.f = tf.function(f_raw, input_signature=input_signature)\n\n saved_model_dir = tempfile.gettempdir()\n tf.saved_model.save(module, saved_model_dir)\n module_loaded = tf.saved_model.load(saved_model_dir)\n self._run_and_check(module_loaded.f, RunMode.SAVED, examples)",
"def testVariableModelWithFunctionAndFunctionInliningDisabled(self):\n\n class BasicModel:\n\n def __init__(self):\n self.v1 = None\n self.v2 = variables.Variable(2.)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1], dtype=dtypes.float32)\n ])\n def add_all(self, x):\n if self.v1 is None:\n self.v1 = variables.Variable(3.)\n return x + self.v1 + self.v2\n\n def run(self, x):\n y = self.add_all(x)\n return y\n\n save_dir = os.path.join(self.get_temp_dir(), \"frozen_saved_model\")\n with ops.Graph().as_default():\n model = BasicModel()\n a = array_ops.placeholder(dtypes.float32, shape=[1])\n b = model.run(a)\n with session_lib.Session() as sess:\n sess.run(variables.global_variables_initializer())\n simple_save.simple_save(sess, save_dir, {\"myinput\": a}, {\"myoutput\": b})\n\n # Add _noinline to the SavedModel.\n self._addNoinlineAttributeToFunction(\n saved_model_dir=save_dir, func_name=\"add_all\")\n\n saved_model = load(save_dir)\n func = saved_model.signatures[\"serving_default\"]\n frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)\n constant_graph_def = frozen_func.graph.as_graph_def()\n self._ensure_no_variables_in_graph(constant_graph_def)",
"def testMultiFunctionModel(self):\n\n class BasicModel(autotrackable.AutoTrackable):\n\n def __init__(self):\n self.y = None\n self.z = None\n\n @def_function.function\n def add(self, x):\n if self.y is None:\n self.y = variables.Variable(2.)\n return x + self.y\n\n @def_function.function\n def sub(self, x):\n if self.z is None:\n self.z = variables.Variable(3.)\n return x - self.z\n\n with ops.Graph().as_default():\n with session_lib.Session() as sess:\n input_data = {\"x\": constant_op.constant(1., shape=[1])}\n root = BasicModel()\n input_func = root.add.get_concrete_function(input_data[\"x\"])\n\n variable_graph_def = input_func.graph.as_graph_def()\n self.assertEqual(1, get_num_variables(variable_graph_def))\n\n output_func = convert_to_constants.convert_var_to_const_function_in_v1(\n input_func)\n self._testConvertedFunction(sess, root, root.add, output_func,\n input_data)",
"def testIf(self):\n with ops.Graph().as_default():\n with session_lib.Session() as sess:\n input_data = {\n \"x\": constant_op.constant([1., 2.], shape=[1, 2]),\n \"b\": constant_op.constant(True)\n }\n\n weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]],\n dtype=dtypes.float32)\n\n def true_fn(x):\n return math_ops.matmul(x, weights)\n\n def false_fn(x):\n return math_ops.add(x, weights)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(x, b):\n return cond.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(sess, root, root.f, output_func, input_data)",
"def copy_if_tf_function(x):\n return copy.copy(x) if not tf.executing_eagerly() else x",
"def testStatelessIf(self):\n with ops.Graph().as_default():\n with session_lib.Session() as sess:\n input_data = {\"b\": constant_op.constant(True)}\n\n x = constant_op.constant([1., 2.], shape=[1, 2], name=\"x\")\n\n def true_fn():\n return x\n\n def false_fn():\n return x + 2\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(b):\n return cond_v2.cond_v2(b, true_fn, false_fn)\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(sess, root, root.f, output_func, input_data)",
"def testJitOfEvalOnShapes(self):\n\n def transformer(f, **kwargs):\n def f_prime(*args):\n res = extensions.eval_on_shapes(f, **kwargs)(*args)\n return tf.nest.map_structure(\n lambda x: tf_np.zeros(x.shape, x.dtype), res)\n return extensions.jit(f_prime, kwargs.get(\"static_argnums\", ()))\n\n self._testEvalOnShapes(transformer, False)",
"def testIf(self):\n input_data = {\n \"x\": constant_op.constant([1., 2.], shape=[1, 2]),\n \"b\": constant_op.constant(True)\n }\n\n weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)\n\n def true_fn(x):\n return math_ops.matmul(x, weights)\n\n def false_fn(x):\n return math_ops.add(x, weights)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(x, b):\n return cond.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(root, root.f, output_func, input_data)",
"def testMultiFunctionModel(self):\n\n class BasicModel(autotrackable.AutoTrackable):\n\n def __init__(self):\n self.y = None\n self.z = None\n\n @def_function.function\n def add(self, x):\n if self.y is None:\n self.y = variables.Variable(2.)\n return x + self.y\n\n @def_function.function\n def sub(self, x):\n if self.z is None:\n self.z = variables.Variable(3.)\n return x - self.z\n\n input_data = {\"x\": constant_op.constant(1., shape=[1])}\n root = BasicModel()\n input_func = root.add.get_concrete_function(input_data[\"x\"])\n\n variable_graph_def = input_func.graph.as_graph_def()\n self.assertEqual(1, get_num_variables(variable_graph_def))\n\n output_func = convert_to_constants.convert_variables_to_constants_v2(\n input_func)\n self._testConvertedFunction(root, root.add, output_func, input_data)",
"def build_graph_with_function(self):\n @tf.function\n def multiplier_function(v):\n return tf.constant(10.0, name=\"function_multiplier\") * v\n\n tf_g = tf.Graph()\n with tf_g.as_default():\n x = tf.placeholder(name=\"x\", dtype=tf.float32, shape=[])\n y = tf.placeholder(name=\"y\", dtype=tf.float32, shape=[])\n result_op = tf.add(x, y, name=\"add\")\n func_call_op = multiplier_function(result_op)\n _ = tf.identity(func_call_op, name=\"output\")\n return gde.Graph(g=tf_g)",
"def test_add_op_jit():\n paddle.set_device(\"cpu\")\n paddle.seed(33)\n custom_ops = load(name=\"add_op_jit\", sources=[current_path + \"/add_op_const.cc\"])\n\n x = paddle.to_tensor(np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n x1 = paddle.to_tensor(np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n x.stop_gradient = False\n x1.stop_gradient = False\n print(x)\n out = custom_ops.add_test(x, x1)\n assert np.allclose(out.numpy(), np.array([[2, 2], [2, 2]]).astype(\"float32\"))\n out.retain_grads()\n out.backward()\n assert np.allclose(out.grad, np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n assert np.allclose(x.grad, np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n assert np.allclose(x1.grad, np.array([[1, 1], [1, 1]]).astype(\"float32\"))",
"def test_host_func():\n # te schedule copied from test_tir_transform_split_host_device.py\n\n func = tvm.te.create_prim_func(\n te_workload.matmul(729, 729, 729, in_dtype=\"float32\", out_dtype=\"float32\")\n )\n mod = tvm.ir.IRModule({\"main\": func})\n target = tvm.target.Target(\"cuda\")\n mod = tvm.tir.transform.Apply(\n lambda f: f.with_attr(\n {\n \"global_symbol\": \"test\",\n \"tir.is_host_func\": 1,\n }\n )\n )(mod)\n mod = tvm.tir.transform.BindTarget(target)(mod)\n tvm.ir.assert_structural_equal(mod, Module)\n assert (\n \"tir.is_host_func\" not in mod[\"main\"].attrs\n ), \"\"\"Target and is_host_func attributes should be mutually exclusive\"\"\"",
"def function_factory(model, loss, train_x, train_y):\r\n\r\n # obtain the shapes of all trainable parameters in the model\r\n shapes = tf.shape_n(model.trainable_variables)\r\n n_tensors = len(shapes)\r\n\r\n # we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to\r\n # prepare required information first\r\n count = 0\r\n idx = [] # stitch indices\r\n part = [] # partition indices\r\n\r\n for i, shape in enumerate(shapes):\r\n n = reduce(mul, shape)\r\n idx.append(tf.reshape(tf.range(count, count + n, dtype=tf.int32),\r\n shape))\r\n part.extend([i] * n)\r\n count += n\r\n\r\n part = tf.constant(part)\r\n\r\n @tf.function\r\n def assign_new_model_parameters(params_1d):\r\n \"\"\"A function updating the model's parameters with a 1D tf.Tensor.\r\n\r\n Args:\r\n params_1d [in]: a 1D tf.Tensor representing the model's\r\n trainable parameters.\r\n \"\"\"\r\n\r\n params = tf.dynamic_partition(params_1d, part, n_tensors)\r\n for i, (shape, param) in enumerate(zip(shapes, params)):\r\n model.trainable_variables[i].assign(tf.reshape(param, shape))\r\n\r\n # now create a function that will be returned by this factory\r\n @tf.function\r\n def exposed_func(params_1d):\r\n \"\"\"A function that can be used by tfp.optimizer.rotosolve_minimize.\r\n\r\n This function is created by function_factory.\r\n\r\n Args:\r\n params_1d [in]: a 1D tf.Tensor.\r\n\r\n Returns:\r\n A scalar loss and the gradients w.r.t. the `params_1d`.\r\n \"\"\"\r\n\r\n # update the parameters in the model\r\n assign_new_model_parameters(params_1d)\r\n # calculate the loss\r\n loss_value = loss(model(train_x, training=True), train_y)\r\n exposed_func.iter.assign_add(1)\r\n\r\n return loss_value\r\n\r\n # store these information as members so we can use them outside the scope\r\n exposed_func.iter = tf.Variable(0)\r\n exposed_func.idx = idx\r\n exposed_func.part = part\r\n exposed_func.shapes = shapes\r\n exposed_func.assign_new_model_parameters = assign_new_model_parameters\r\n\r\n return exposed_func",
"def test_simple_call_params_to_function(self):\n cmp_name = Name(\"/func/f1\")\n cmp_name += \"_(/test/data)\"\n cmp_name += \"NFN\"\n workflow = \"/func/f1(/test/data)\"\n fib = self.optimizer.fib\n fib.add_fib_entry(Name(\"/func\"), [1], False)\n self.optimizer.fib = fib\n ast = self.parser.parse(workflow)\n self.assertTrue(self.optimizer.compute_fwd(None, ast, Interest(cmp_name)))\n self.assertFalse(self.optimizer.compute_local(None, ast, Interest(cmp_name)))\n rules = self.optimizer.rewrite(cmp_name, ast)\n self.assertEqual(rules, ['%/func/f1%(/test/data)', 'local'])\n name = self.parser.nfn_str_to_network_name(rules[0])\n self.assertEqual(name, cmp_name)\n name_str, prepended = self.parser.network_name_to_nfn_str(name)\n self.assertEqual(name_str, workflow)\n self.assertEqual(prepended, Name(\"/func/f1\"))",
"def randomly_apply_operation(operation, img, gt_boxes, *args):\n return tf.cond(\n get_random_bool(),\n lambda: operation(img, gt_boxes, *args),\n lambda: (img, gt_boxes, *args)\n )",
"def execute_gamma_simplify_tests_for_function(tfunc, D):\n\n mu, nu, rho, sigma = tensor_indices(\"mu, nu, rho, sigma\", LorentzIndex)\n a1, a2, a3, a4, a5, a6 = tensor_indices(\"a1:7\", LorentzIndex)\n mu11, mu12, mu21, mu31, mu32, mu41, mu51, mu52 = tensor_indices(\n \"mu11, mu12, mu21, mu31, mu32, mu41, mu51, mu52\", LorentzIndex\n )\n mu61, mu71, mu72 = tensor_indices(\"mu61, mu71, mu72\", LorentzIndex)\n m0, m1, m2, m3, m4, m5, m6 = tensor_indices(\"m0:7\", LorentzIndex)\n\n def g(xx, yy):\n return (G(xx) * G(yy) + G(yy) * G(xx)) / 2\n\n # Some examples taken from Kahane's paper, 4 dim only:\n if D == 4:\n t = G(a1) * G(mu11) * G(a2) * G(mu21) * G(-a1) * G(mu31) * G(-a2)\n assert _is_tensor_eq(\n tfunc(t), -4 * G(mu11) * G(mu31) * G(mu21) - 4 * G(mu31) * G(mu11) * G(mu21)\n )\n\n t = (\n G(a1)\n * G(mu11)\n * G(mu12)\n * G(a2)\n * G(mu21)\n * G(a3)\n * G(mu31)\n * G(mu32)\n * G(a4)\n * G(mu41)\n * G(-a2)\n * G(mu51)\n * G(mu52)\n * G(-a1)\n * G(mu61)\n * G(-a3)\n * G(mu71)\n * G(mu72)\n * G(-a4)\n )\n assert _is_tensor_eq(\n tfunc(t),\n 16\n * G(mu31)\n * G(mu32)\n * G(mu72)\n * G(mu71)\n * G(mu11)\n * G(mu52)\n * G(mu51)\n * G(mu12)\n * G(mu61)\n * G(mu21)\n * G(mu41)\n + 16\n * G(mu31)\n * G(mu32)\n * G(mu72)\n * G(mu71)\n * G(mu12)\n * G(mu51)\n * G(mu52)\n * G(mu11)\n * G(mu61)\n * G(mu21)\n * G(mu41)\n + 16\n * G(mu71)\n * G(mu72)\n * G(mu32)\n * G(mu31)\n * G(mu11)\n * G(mu52)\n * G(mu51)\n * G(mu12)\n * G(mu61)\n * G(mu21)\n * G(mu41)\n + 16\n * G(mu71)\n * G(mu72)\n * G(mu32)\n * G(mu31)\n * G(mu12)\n * G(mu51)\n * G(mu52)\n * G(mu11)\n * G(mu61)\n * G(mu21)\n * G(mu41),\n )\n\n # Fully Lorentz-contracted expressions, these return scalars:\n\n def add_delta(ne):\n return ne * eye(\n 4\n ) # DiracSpinorIndex.delta(DiracSpinorIndex.auto_left, -DiracSpinorIndex.auto_right)\n\n t = G(mu) * G(-mu)\n ts = add_delta(D)\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(mu) * G(nu) * G(-mu) * G(-nu)\n ts = add_delta(2 * D - D ** 2) # -8\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(mu) * G(nu) * G(-nu) * G(-mu)\n ts = add_delta(D ** 2) # 16\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(mu) * G(nu) * G(-rho) * G(-nu) * G(-mu) * G(rho)\n ts = add_delta(4 * D - 4 * D ** 2 + D ** 3) # 16\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(mu) * G(nu) * G(rho) * G(-rho) * G(-nu) * G(-mu)\n ts = add_delta(D ** 3) # 64\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(a1) * G(a2) * G(a3) * G(a4) * G(-a3) * G(-a1) * G(-a2) * G(-a4)\n ts = add_delta(-8 * D + 16 * D ** 2 - 8 * D ** 3 + D ** 4) # -32\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(-mu) * G(-nu) * G(-rho) * G(-sigma) * G(nu) * G(mu) * G(sigma) * G(rho)\n ts = add_delta(-16 * D + 24 * D ** 2 - 8 * D ** 3 + D ** 4) # 64\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = G(-mu) * G(nu) * G(-rho) * G(sigma) * G(rho) * G(-nu) * G(mu) * G(-sigma)\n ts = add_delta(8 * D - 12 * D ** 2 + 6 * D ** 3 - D ** 4) # -32\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = (\n G(a1)\n * G(a2)\n * G(a3)\n * G(a4)\n * G(a5)\n * G(-a3)\n * G(-a2)\n * G(-a1)\n * G(-a5)\n * G(-a4)\n )\n ts = add_delta(64 * D - 112 * D ** 2 + 60 * D ** 3 - 12 * D ** 4 + D ** 5) # 256\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = (\n G(a1)\n * G(a2)\n * G(a3)\n * G(a4)\n * G(a5)\n * G(-a3)\n * G(-a1)\n * G(-a2)\n * G(-a4)\n * G(-a5)\n )\n ts = add_delta(64 * D - 120 * D ** 2 + 72 * D ** 3 - 16 * D ** 4 + D ** 5) # -128\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = (\n G(a1)\n * G(a2)\n * G(a3)\n * G(a4)\n * G(a5)\n * G(a6)\n * G(-a3)\n * G(-a2)\n * G(-a1)\n * G(-a6)\n * G(-a5)\n * G(-a4)\n )\n 
ts = add_delta(\n 416 * D - 816 * D ** 2 + 528 * D ** 3 - 144 * D ** 4 + 18 * D ** 5 - D ** 6\n ) # -128\n assert _is_tensor_eq(tfunc(t), ts)\n\n t = (\n G(a1)\n * G(a2)\n * G(a3)\n * G(a4)\n * G(a5)\n * G(a6)\n * G(-a2)\n * G(-a3)\n * G(-a1)\n * G(-a6)\n * G(-a4)\n * G(-a5)\n )\n ts = add_delta(\n 416 * D - 848 * D ** 2 + 584 * D ** 3 - 172 * D ** 4 + 22 * D ** 5 - D ** 6\n ) # -128\n assert _is_tensor_eq(tfunc(t), ts)\n\n # Expressions with free indices:\n\n t = G(mu) * G(nu) * G(rho) * G(sigma) * G(-mu)\n assert _is_tensor_eq(\n tfunc(t), (-2 * G(sigma) * G(rho) * G(nu) + (4 - D) * G(nu) * G(rho) * G(sigma))\n )\n\n t = G(mu) * G(nu) * G(-mu)\n assert _is_tensor_eq(tfunc(t), (2 - D) * G(nu))\n\n t = G(mu) * G(nu) * G(rho) * G(-mu)\n assert _is_tensor_eq(\n tfunc(t), 2 * G(nu) * G(rho) + 2 * G(rho) * G(nu) - (4 - D) * G(nu) * G(rho)\n )\n\n t = 2 * G(m2) * G(m0) * G(m1) * G(-m0) * G(-m1)\n st = tfunc(t)\n assert _is_tensor_eq(st, (D * (-2 * D + 4)) * G(m2))\n\n t = G(m2) * G(m0) * G(m1) * G(-m0) * G(-m2)\n st = tfunc(t)\n assert _is_tensor_eq(st, ((-D + 2) ** 2) * G(m1))\n\n t = G(m0) * G(m1) * G(m2) * G(m3) * G(-m1)\n st = tfunc(t)\n assert _is_tensor_eq(st, (D - 4) * G(m0) * G(m2) * G(m3) + 4 * G(m0) * g(m2, m3))\n\n t = G(m0) * G(m1) * G(m2) * G(m3) * G(-m1) * G(-m0)\n st = tfunc(t)\n assert _is_tensor_eq(st, ((D - 4) ** 2) * G(m2) * G(m3) + (8 * D - 16) * g(m2, m3))\n\n t = G(m2) * G(m0) * G(m1) * G(-m2) * G(-m0)\n st = tfunc(t)\n assert _is_tensor_eq(st, ((-D + 2) * (D - 4) + 4) * G(m1))\n\n t = G(m3) * G(m1) * G(m0) * G(m2) * G(-m3) * G(-m0) * G(-m2)\n st = tfunc(t)\n assert _is_tensor_eq(st, (-4 * D + (-D + 2) ** 2 * (D - 4) + 8) * G(m1))\n\n t = 2 * G(m0) * G(m1) * G(m2) * G(m3) * G(-m0)\n st = tfunc(t)\n assert _is_tensor_eq(\n st, ((-2 * D + 8) * G(m1) * G(m2) * G(m3) - 4 * G(m3) * G(m2) * G(m1))\n )\n\n t = G(m5) * G(m0) * G(m1) * G(m4) * G(m2) * G(-m4) * G(m3) * G(-m0)\n st = tfunc(t)\n assert _is_tensor_eq(\n st,\n (\n ((-D + 2) * (-D + 4)) * G(m5) * G(m1) * G(m2) * G(m3)\n + (2 * D - 4) * G(m5) * G(m3) * G(m2) * G(m1)\n ),\n )\n\n t = -G(m0) * G(m1) * G(m2) * G(m3) * G(-m0) * G(m4)\n st = tfunc(t)\n assert _is_tensor_eq(\n st,\n ((D - 4) * G(m1) * G(m2) * G(m3) * G(m4) + 2 * G(m3) * G(m2) * G(m1) * G(m4)),\n )\n\n t = G(-m5) * G(m0) * G(m1) * G(m2) * G(m3) * G(m4) * G(-m0) * G(m5)\n st = tfunc(t)\n\n result1 = (\n ((-D + 4) ** 2 + 4) * G(m1) * G(m2) * G(m3) * G(m4)\n + (4 * D - 16) * G(m3) * G(m2) * G(m1) * G(m4)\n + (4 * D - 16) * G(m4) * G(m1) * G(m2) * G(m3)\n + 4 * G(m2) * G(m1) * G(m4) * G(m3)\n + 4 * G(m3) * G(m4) * G(m1) * G(m2)\n + 4 * G(m4) * G(m3) * G(m2) * G(m1)\n )\n\n # Kahane's algorithm yields this result, which is equivalent to `result1`\n # in four dimensions, but is not automatically recognized as equal:\n result2 = 8 * G(m1) * G(m2) * G(m3) * G(m4) + 8 * G(m4) * G(m3) * G(m2) * G(m1)\n\n if D == 4:\n assert _is_tensor_eq(st, (result1)) or _is_tensor_eq(st, (result2))\n else:\n assert _is_tensor_eq(st, (result1))\n\n # and a few very simple cases, with no contracted indices:\n\n t = G(m0)\n st = tfunc(t)\n assert _is_tensor_eq(st, t)\n\n t = -7 * G(m0)\n st = tfunc(t)\n assert _is_tensor_eq(st, t)\n\n t = 224 * G(m0) * G(m1) * G(-m2) * G(m3)\n st = tfunc(t)\n assert _is_tensor_eq(st, t)",
"def minimize(sess, loss_fn, batches, operations={}, learning_rate=0.001, print_every=None, var_list=None, **kw):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_fn, var_list=var_list)\n op_keys = sorted(list(operations.keys()))\n ops = [train_op] + [operations[k] for k in op_keys]\n t = time.time()\n sess.run(reinitialize_variables(sess))\n for i, epoch, batch in batches:\n results = sess.run(ops, feed_dict=batch)\n if print_every and i % print_every == 0:\n s = 'Batch {}, epoch {}, time {:.1f}s'.format(i, epoch, time.time() - t)\n for j,k in enumerate(op_keys, 1):\n s += ', {} {:.4f}'.format(k, results[j])\n print(s)",
"def test_tf_interface(self, circuit, interface, tol):\n import tensorflow as tf\n\n a = tf.Variable(self.a, dtype=tf.float64)\n b = tf.Variable(self.b, dtype=tf.float64)\n\n with tf.GradientTape() as tape:\n tape.watch([a, b])\n res = circuit(a, b)\n\n assert np.allclose(res.numpy(), self.expected_cost, atol=tol, rtol=0)\n\n res = tape.gradient(res, [a, b])\n assert np.allclose(res, self.expected_grad, atol=tol, rtol=0)",
"def test_add_basic(self):\n\n def test_f(a, b):\n c = a.add(b)\n return c.add(c)\n\n x = torch.randn(4)\n y = torch.randn(4)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::add\"})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an operation with a gradient recipe that depends on its instantiated parameter values works correctly.
|
def test_grad_recipe_parameter_dependent(self):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_wires = 1
        grad_method = "A"

        @property
        def grad_recipe(self):
            x = self.data[0]
            return ([[1.0, 1.0, x], [1.0, 0.0, -x]],)

    x = 0.654
    op = DummyOp(x, wires=0)
    assert op.grad_recipe == ([[1.0, 1.0, x], [1.0, 0.0, -x]],)
|
[
"def test_qc_custom_gradient_training_loop_param_learning(self):\n\n tf.compat.v1.reset_default_graph()\n tf.compat.v1.set_random_seed(0)\n np.random.seed(0)\n with tf.device('/cpu:0'):\n inputs = tf.keras.Input(shape=(32, 32, 1,))\n conv_op = tf.keras.layers.Conv2D(1, (2, 2),\n kernel_initializer=tf.random_uniform_initializer(-1, 2),\n bias_initializer='random_uniform',\n padding='SAME')(inputs)\n relu_op = tf.nn.relu(conv_op)\n reshape = tf.keras.layers.Flatten()(relu_op)\n _ = tf.keras.layers.Dense(10)(reshape)\n\n sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())\n initialize_uninitialized_vars(sess)\n\n # create quantsim model without config file\n sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False,\n quant_scheme=QuantScheme.training_range_learning_with_tf_init)\n\n for quant_op_name in sim._param_quantizers.keys():\n print(sim._param_quantizers[quant_op_name])\n\n for quant_op_name in sim._activation_quantizers.keys():\n print(sim._activation_quantizers[quant_op_name])\n\n def dummy_forward_pass(sess, args):\n model_output = sess.graph.get_tensor_by_name('dense/MatMul:0')\n model_input = sess.graph.get_tensor_by_name('input_1:0')\n shape = model_input.shape\n dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])\n sess.run(model_output, feed_dict={model_input: dummy_input})\n\n conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')\n relu_output_quant_op = sim.session.graph.get_operation_by_name('Relu_quantized')\n\n # enable input\n sim.compute_encodings(dummy_forward_pass, None)\n\n inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')\n np.random.seed(0)\n w_shape = inp_tensor.shape\n batches = 32\n inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])\n logits = sim.session.graph.get_tensor_by_name('dense/MatMul:0')\n\n labels = np.random.randint(10, size=batches)\n one_hot_labels = np.eye(10)[labels]\n\n with sim.session.graph.as_default():\n var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')\n loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)\n\n update_ops = []\n global_step = tf.compat.v1.train.create_global_step()\n initialize_uninitialized_vars(sim.session)\n\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=1e-3)\n gradients = optimizer.compute_gradients(loss, var_list)\n\n sim.compute_encodings(dummy_forward_pass, None)\n grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)\n update_ops.append(grad_updates)\n update_op = tf.group(*update_ops)\n\n conv_inp_tensor = conv2d_weight_quant_op.inputs[0]\n grads = tf.gradients(loss, [conv_inp_tensor,\n conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min],\n conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max]])\n dqbydx, dqbydmin, dqbydmax = grads\n input_gradient = sim.session.run([dqbydx], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n min_gradient = sim.session.run([dqbydmin], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n max_gradient = sim.session.run([dqbydmax], feed_dict={inp_tensor: inp_data,\n labels_placeholder: one_hot_labels})[0]\n\n weights_before_train = sim.session.run(conv2d_weight_quant_op.inputs[0])\n encoding_min_before_train = 
sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])\n encoding_max_before_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])\n relu_output_encoding_min_before_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_min])\n relu_output_encoding_max_before_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_max])\n with tf.control_dependencies([update_op]):\n train_op = tf.identity(loss, name='train_op')\n\n for quant_op_name in sim._param_quantizers.keys():\n print(quant_op_name + '_min_before_train = ' + str(sim.session.run(\n sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_min])))\n print(quant_op_name + '_max_before_train = ' + str(sim.session.run(\n sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_max])))\n\n # start training\n _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})\n\n for quant_op_name in sim._param_quantizers.keys():\n print(quant_op_name + '_min = ' + str(sim.session.run(sim.session.graph.get_operation_by_name\n (quant_op_name).inputs[\n QuantizeOpIndices.encoding_min])))\n print(quant_op_name + '_max = ' + str(sim.session.run(sim.session.graph.get_operation_by_name\n (quant_op_name).inputs[\n QuantizeOpIndices.encoding_max])))\n\n weights_after_train = sim.session.run(conv2d_weight_quant_op.inputs[0])\n relu_output_encoding_min_after_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_min])\n relu_output_encoding_max_after_train = sim.session.run(relu_output_quant_op.inputs[\n QuantizeOpIndices.encoding_max])\n encoding_min_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])\n encoding_max_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])\n\n assert not np.allclose(weights_before_train, weights_after_train, atol=1e-6)\n assert encoding_min_before_train != encoding_min_after_train\n assert encoding_max_before_train != encoding_max_after_train\n assert relu_output_encoding_min_before_train != relu_output_encoding_min_after_train\n assert relu_output_encoding_max_before_train != relu_output_encoding_max_after_train\n\n\n baseline = sim.session.run(logits, feed_dict={inp_tensor: inp_data})\n sim.export('/tmp', 'quant_sim_model')\n after_sim_export = sim.session.run(logits, feed_dict={inp_tensor: inp_data})\n assert np.allclose(baseline, after_sim_export)\n\n\n sess.close()\n sim.session.close()",
"def gradient(x):\n\t\tpass",
"def gradient(self, var, bayesianOptimizer):\n pass",
"def get_apply_gradients_op(self):\n raise NotImplementedError()",
"def gradCheck(l=GRULayer(1, 10)):\n\n def loss(h):\n \"\"\"A dummy loss function; the square error compared to a linspace.\"\"\"\n dh = h - np.linspace(-1, 1, h.shape[0])[:, None, None]\n return 0.5 * np.sum(dh * dh), dh\n\n num_checks = 5\n delta = 1e-5\n n = 20\n x = np.arange(n * 2.0).reshape((n, 1, 2)) # dummy input; batch of size 2, 20 samples per sequence\n h = l.forward(x)\n dh = loss(h)[1]\n dx = l.backward(dh) # analytical gradient\n\n for param, name in zip([x, l.W, l.Wr, l.Wz],\n ['x', 'W', 'Wr', 'Wz']):\n\n print(name)\n a = param if (name == 'x') else param.a # only x is not a Param object\n\n for i in range(num_checks):\n ri = int(np.random.randint(a.size))\n # compute the derivative from definition - evaluate loss at [x+delta] and [x-delta]\n old_val = a.flat[ri]\n a.flat[ri] = old_val + delta\n cg0 = loss(l.forward(x))[0]\n a.flat[ri] = old_val - delta\n cg1 = loss(l.forward(x))[0]\n a.flat[ri] = old_val # reset old value for this parameter\n # fetch both numerical and analytic gradient\n grad_analytic = (dx if (name == 'x') else param.d).flat[ri] # again, treat x differently\n grad_numerical = (cg0 - cg1) / (2 * delta)\n\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n print('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))\n # rel_error should be on order of 1e-7 or less",
"def testCustomGradStructure(self, decorator, x_struct, y_struct):\n\n def zeros(x):\n return tf.nest.map_structure(lambda _: tf_np.zeros([], np.float32), x)\n\n def get_struct(x):\n return tf.nest.map_structure(lambda _: None, x)\n\n @extensions.custom_grad\n def f(*x):\n del x\n\n def vjp(dy):\n self.assertEqual(y_struct, get_struct(dy))\n return zeros(x_struct)\n\n return zeros(y_struct), vjp\n\n x, dy = zeros([x_struct, y_struct])\n\n @decorator\n def run(x, dy):\n y, vjp = extensions.vjp(f, *x)\n dx = vjp(dy)\n return dx, y\n\n dx, y = run(x, dy)\n self.assertEqual(x_struct, get_struct(dx))\n self.assertEqual(y_struct, get_struct(y))",
"def test_cost_gradient(self):\n\n # Use seed for deterministic testing\n np.random.seed(42)\n\n def check(D, N, mu=None, Lambda=None, rho=None, A=None):\n if mu is None:\n mu = np.zeros(D)\n if Lambda is None:\n Lambda = np.identity(D)\n if rho is None:\n rho = np.ones(D)\n if A is None:\n A = GaussianARD(3, 5,\n shape=(D,),\n plates=(D,))\n \n V = np.identity(D) + np.ones((D,D))\n\n # Construct model\n X = GaussianMarkovChain(mu,\n Lambda,\n A,\n rho,\n n=N+1,\n initialize=False)\n Y = Gaussian(X,\n V,\n initialize=False)\n\n # Posterior estimation\n Y.observe(np.random.randn(*(Y.get_shape(0))))\n X.update()\n try:\n A.update()\n except:\n pass\n try:\n mu.update()\n except:\n pass\n try:\n Lambda.update()\n except:\n pass\n try:\n rho.update()\n except:\n pass\n\n # Construct rotator\n rotA = RotateGaussianARD(A, axis=-1)\n rotX = RotateGaussianMarkovChain(X, rotA)\n rotX.setup()\n\n # Check gradient with respect to R\n R = np.random.randn(D, D)\n def cost(r):\n (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))\n return (b, np.ravel(dr))\n\n err = optimize.check_gradient(cost, \n np.ravel(R), \n verbose=False)[1]\n self.assertAllClose(err, 0, \n atol=1e-5,\n msg=\"Gradient incorrect\")\n \n return\n\n self._run_checks(check)\n\n pass",
"def test_controlled_RY_gradient(self, tol):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.PauliX(wires=0)\n qml.CRY(x, wires=[0, 1])\n return qml.expval(qml.PauliZ(0))\n\n a = 0.542 # any value of a should give zero gradient\n\n # get the analytic gradient\n gradA = circuit.jacobian([a], method=\"A\")\n # get the finite difference gradient\n gradF = circuit.jacobian([a], method=\"F\")\n\n # the expected gradient\n expected = 0\n\n assert np.allclose(gradF, expected, atol=tol, rtol=0)\n assert np.allclose(gradA, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit1(x):\n qml.RX(x, wires=0)\n qml.CRY(x, wires=[0, 1])\n return qml.expval(qml.PauliZ(0))\n\n b = 0.123 # gradient is -sin(x)\n\n # get the analytic gradient\n gradA = circuit1.jacobian([b], method=\"A\")\n # get the finite difference gradient\n gradF = circuit1.jacobian([b], method=\"F\")\n\n # the expected gradient\n expected = -np.sin(b)\n\n assert np.allclose(gradF, expected, atol=tol, rtol=0)\n assert np.allclose(gradA, expected, atol=tol, rtol=0)",
"def test_grad():\n for ex in EXAMPLES:\n input, b_grad, w_grad = ex[\"in\"], ex[\"bias_grad\"], ex[\"weight_grad\"]\n\n loss = loss_function(g_lin(input))\n with backpack(new_ext.BatchGrad()):\n loss.backward()\n\n assert allclose(g_lin.bias.grad, b_grad)\n assert allclose(g_lin.weight.grad, w_grad)\n\n del g_lin.bias.grad\n del g_lin.weight.grad",
"def test_sum_with_grad(self):\n tensor1 = Tensor([1, 2, 3], requires_grad=True)\n tensor2 = tensor1.sum()\n\n tensor2.backward(Tensor(3.))\n\n assert tensor1.grad.data.tolist() == [3, 3, 3]",
"def update_function(parameters, learningRate, adaDecayCoeff, momDecayCoeff, reg_one, reg_two):\n N = len(parameters)\n if reg_one: assert len(reg_one.get_value()) == N\n if reg_two: assert len(reg_two.get_value()) == N\n \n gradient = [T.TensorVariable(p.type, name=p.name+'Grad') for p in parameters] #[:3]]\n #gradient.append(S.csr_matrix(parameters[3].name+'Grad', 'float64'))\n zero = [T.zeros_like(p) for p in parameters]\n squareSum = [shared(zeros_like(p.get_value()), name=p.name+'SqSum') for p in parameters]\n stepSize = [shared(zeros_like(p.get_value()), name=p.name+'Step') for p in parameters]\n \n rate = shared(learningRate, name='rate')\n adaDecay = shared(adaDecayCoeff, name='adaDecay')\n momDecay = shared(momDecayCoeff, name='momDecay') \n \n update_sum = function(gradient, updates=\n list((squareSum[i],\n adaDecay*squareSum[i] + gradient[i]**2)\n for i in range(N)), #-1))\n # + [(squareSum[3],\n # adaDecay*squareSum[3] + S.sqr(gradient[3]))],\n allow_input_downcast=True)\n \n update_step= function(gradient, updates=\n list((stepSize[i],\n momDecay*stepSize[i] + T.switch(T.eq(squareSum[i],0),\n zero[i],\n rate/T.sqrt(squareSum[i])*gradient[i]))\n for i in range(N)), #-1))\n # + [(stepSize[3],\n # momDecay*stepSize[3] + S.mul(gradient[3], rate/T.sqrt(squareSum[3])))],\n allow_input_downcast=True)\n \n update_wei = function([], updates=\n list((parameters[i],\n parameters[i] - stepSize[i])\n for i in range(N)),\n allow_input_downcast=True)\n \n if reg_one:\n regular_l1 = function([], updates=\n list((parameters[i],\n T.switch(T.lt(abs(parameters[i]),reg_one[i]),\n zero[i],\n parameters[i] - reg_one[i]*T.sgn(parameters[i])))\n for i in range(N)),\n allow_input_downcast=True)\n \n if reg_two:\n reg_two.set_value(array([1-x for x in reg_two.get_value()])) # Convert to decay version\n regular_l2 = function([], updates=\n list((parameters[i],\n reg_two[i]*parameters[i])\n for i in range(N)),\n allow_input_downcast=True)\n \n def update(*grads):\n update_sum(*grads)\n update_step(*grads)\n update_wei()\n if reg_one: regular_l1()\n if reg_two: regular_l2()\n \n # If regularisation is part of the gradient, we still need to set weights to 0 appropriately for L1, i.e.:\n # don't allow weights to change sign in one step\n # if the weight is zero, the step size must be more than the adagrad-reduced (but momentum-increased?) L1 regularisation\n \n return update, squareSum, stepSize",
"def test_grad_func(self):\n pars = np.array(self.spec.central, dtype='float64')\n data = np.copy(self.spec(pars))\n data *= 1.1 # move away from centre to ensure non-zero gradients\n self.spec.set_data(data)\n self.move_pars(pars) # move parameters to check proper partials\n\n ll = ROOT.Double(0)\n grads1 = pars*0\n grads2 = pars*0\n\n self.spec._obj.FdF(pars, ll, grads1)\n self.spec._obj.Gradient(pars, grads2)\n\n np.testing.assert_almost_equal(grads1, grads2)",
"def getGradient(function):\n def grad(x):\n return evaluateGradient(function,x) \n return grad",
"def _apply_gradient(self, loss, var_list, grad_dict):\n raise NotImplementedError(\"Please use subclass with specific algorithms, like boomdiff.optimize.GD\")",
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def test_checkpointed_gradient_test(shape, kernel, space_order):\n spacing = tuple([15.0 for _ in shape])\n tn = 500.\n example = CheckpointingExample(shape, spacing, tn, kernel, space_order)\n m0, dm = example.initial_estimate()\n gradient, rec_data = example.gradient(m0)\n example.verify(m0, gradient, rec_data, dm)",
"def test_qnode_gradient_repeated_gate_parameters(self, tol):\n par = [0.8, 1.3]\n\n def qf(x, y):\n qml.RX(np.pi / 4, wires=[0])\n qml.Rot(y, x, 2 * x, wires=[0])\n return qml.expval(qml.PauliX(0))\n\n dev = qml.device(\"default.qubit\", wires=1)\n q = qml.QNode(qf, dev)\n grad_A = q.jacobian(par, method=\"A\")\n grad_F = q.jacobian(par, method=\"F\")\n\n # the different methods agree\n assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)",
"def test_cost_gradient(self):\n\n # Use seed for deterministic testing\n np.random.seed(42)\n\n def test(shape, plates, \n axis=-1, \n alpha_plates=None, \n plate_axis=None,\n mu=3):\n \n if plate_axis is not None:\n precomputes = [False, True]\n else:\n precomputes = [False]\n \n for precompute in precomputes:\n # Construct the model\n D = shape[axis]\n if alpha_plates is not None:\n alpha = Gamma(3, 5,\n plates=alpha_plates)\n alpha.initialize_from_random()\n else:\n alpha = 2\n X = GaussianARD(mu, alpha,\n shape=shape,\n plates=plates)\n\n # Some initial learning and rotator constructing\n X.initialize_from_random()\n Y = GaussianARD(X, 1)\n Y.observe(np.random.randn(*(Y.get_shape(0))))\n X.update()\n if alpha_plates is not None:\n alpha.update()\n rotX = RotateGaussianARD(X, alpha, \n axis=axis,\n precompute=precompute)\n else:\n rotX = RotateGaussianARD(X, \n axis=axis,\n precompute=precompute)\n try:\n mu.update()\n except:\n pass\n\n # Rotation matrices\n R = np.random.randn(D, D)\n if plate_axis is not None:\n C = plates[plate_axis]\n Q = np.random.randn(C, C)\n else:\n Q = None\n\n # Compute bound terms\n rotX.setup(plate_axis=plate_axis)\n\n if plate_axis is None:\n def f_r(r):\n (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))\n return (b, np.ravel(dr))\n else:\n def f_r(r):\n (b, dr, dq) = rotX.bound(np.reshape(r, np.shape(R)),\n Q=Q)\n return (b, np.ravel(dr))\n\n def f_q(q):\n (b, dr, dq) = rotX.bound(R,\n Q=np.reshape(q, np.shape(Q)))\n return (b, np.ravel(dq))\n\n # Check gradient with respect to R\n err = optimize.check_gradient(f_r, \n np.ravel(R), \n verbose=False)[1]\n self.assertAllClose(err, 0, \n atol=1e-4,\n msg=\"Gradient incorrect for R\")\n\n # Check gradient with respect to Q\n if plate_axis is not None:\n err = optimize.check_gradient(f_q, \n np.ravel(Q), \n verbose=False)[1]\n self.assertAllClose(err, 0,\n atol=1e-4,\n msg=\"Gradient incorrect for Q\")\n\n return\n\n #\n # Basic rotation\n #\n test((3,), (), axis=-1)\n test((2,3,4), (), axis=-1)\n test((2,3,4), (), axis=-2)\n test((2,3,4), (), axis=-3)\n test((2,3,4), (5,6), axis=-2)\n\n #\n # Rotation with mu\n #\n\n # Simple\n test((1,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over rotated dim\n test((3,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(),\n plates=()))\n # Broadcast mu over dim when multiple dims\n test((2,3), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(1,3),\n plates=()))\n test((2,3), (), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over rotated dim when multiple dims\n test((2,3), (), axis=-2,\n mu=GaussianARD(2, 4,\n shape=(1,3),\n plates=()))\n test((2,3), (), axis=-2,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over plates\n test((3,), (4,5), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n test((3,), (4,5), axis=-1,\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n\n #\n # Rotation with alpha\n #\n\n # Simple\n test((1,), (), axis=-1,\n alpha_plates=())\n test((3,), (), axis=-1,\n alpha_plates=(3,))\n # Broadcast alpha over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=())\n test((3,), (), axis=-1,\n alpha_plates=(1,))\n # Broadcast alpha over dim when multiple dims\n test((2,3), (), axis=-1,\n alpha_plates=(1,3))\n test((2,3), (), axis=-1,\n alpha_plates=(3,))\n # Broadcast alpha over rotated dim when multiple 
dims\n test((2,3), (), axis=-2,\n alpha_plates=(1,3))\n test((2,3), (), axis=-2,\n alpha_plates=(3,))\n # Broadcast alpha over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3))\n\n #\n # Rotation with alpha and mu\n #\n\n # Simple\n test((1,), (), axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(3,),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast mu over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=(3,),\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(3,),\n mu=GaussianARD(2, 4,\n shape=(),\n plates=()))\n # Broadcast alpha over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=()))\n # Broadcast both mu and alpha over rotated dim\n test((3,), (), axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 4,\n shape=(1,),\n plates=()))\n test((3,), (), axis=-1,\n alpha_plates=(),\n mu=GaussianARD(2, 4,\n shape=(),\n plates=()))\n # Broadcast mu over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n # Broadcast alpha over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,5)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,5)))\n # Broadcast both mu and alpha over plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n # Broadcast both mu and alpha over plates but different plates\n test((3,), (4,5), axis=-1,\n alpha_plates=(4,1,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(5,)))\n test((3,), (4,5), axis=-1,\n alpha_plates=(5,3),\n mu=GaussianARD(2, 4,\n shape=(3,),\n plates=(4,1)))\n\n #\n # Rotation with missing values\n #\n\n # TODO\n\n #\n # Plate rotation\n #\n\n # Simple\n test((2,), (3,), axis=-1, plate_axis=-1)\n test((2,), (3,4,5), axis=-1, plate_axis=-1)\n test((2,), (3,4,5), axis=-1, plate_axis=-2)\n test((2,), (3,4,5), axis=-1, plate_axis=-3)\n test((2,3), (4,5), axis=-2, plate_axis=-2)\n\n # With mu\n test((2,), (3,), axis=-1, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=(3,)))\n # With mu broadcasted\n test((2,), (3,), axis=-1, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=(1,)))\n test((2,), (3,), axis=-1, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=()))\n # With mu multiple plates\n test((2,), (3,4,5), axis=-1, plate_axis=-2,\n mu=GaussianARD(3, 4,\n shape=(2,),\n plates=(3,4,5)))\n # With mu multiple dims\n test((2,3,4), (5,), axis=-2, plate_axis=-1,\n mu=GaussianARD(3, 4,\n shape=(2,3,4),\n plates=(5,)))\n\n #\n # With alpha\n #\n print(\"Test: Plate rotation with alpha. Scalars.\")\n test((1,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=0)\n print(\"Test: Plate rotation with alpha. Plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(3,1),\n mu=0)\n print(\"Test: Plate rotation with alpha. 
Dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,3),\n mu=0)\n print(\"Test: Plate rotation with alpha. Broadcast alpha over rotated plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=0)\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,),\n mu=0)\n print(\"Test: Plate rotation with alpha. Broadcast alpha over dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=0)\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(),\n mu=0)\n print(\"Test: Plate rotation with alpha. Multiple dims.\")\n test((2,3,4,5), (6,), axis=-2, plate_axis=-1,\n alpha_plates=(6,2,3,4,5),\n mu=0)\n print(\"Test: Plate rotation with alpha. Multiple plates.\")\n test((2,), (3,4,5), axis=-1, plate_axis=-1,\n alpha_plates=(3,4,5,2),\n mu=0)\n test((2,), (3,4,5), axis=-1, plate_axis=-2,\n alpha_plates=(3,4,5,2),\n mu=0)\n test((2,), (3,4,5), axis=-1, plate_axis=-3,\n alpha_plates=(3,4,5,2),\n mu=0)\n\n #\n # With alpha and mu\n #\n print(\"Test: Plate rotation with alpha and mu. Scalars.\")\n test((1,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(1,)))\n print(\"Test: Plate rotation with alpha and mu. Plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(3,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(3,)))\n print(\"Test: Plate rotation with alpha and mu. Dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,3),\n mu=GaussianARD(2, 3,\n shape=(3,),\n plates=(1,)))\n print(\"Test: Plate rotation with alpha and mu. Broadcast over rotated \"\n \"plates.\")\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(1,)))\n test((1,), (3,), axis=-1, plate_axis=-1,\n alpha_plates=(1,),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=()))\n print(\"Test: Plate rotation with alpha and mu. Broadcast over dims.\")\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(1,1),\n mu=GaussianARD(2, 3,\n shape=(1,),\n plates=(1,)))\n test((3,), (1,), axis=-1, plate_axis=-1,\n alpha_plates=(),\n mu=GaussianARD(2, 3,\n shape=(),\n plates=(1,)))\n print(\"Test: Plate rotation with alpha and mu. Multiple dims.\")\n test((2,3,4,5), (6,), axis=-2, plate_axis=-1,\n alpha_plates=(6,2,3,4,5),\n mu=GaussianARD(2, 3,\n shape=(2,3,4,5),\n plates=(6,)))\n print(\"Test: Plate rotation with alpha and mu. Multiple plates.\")\n test((2,), (3,4,5), axis=-1, plate_axis=-1,\n alpha_plates=(3,4,5,2),\n mu=GaussianARD(2, 3,\n shape=(2,),\n plates=(3,4,5,)))\n test((2,), (3,4,5), axis=-1, plate_axis=-2,\n alpha_plates=(3,4,5,2),\n mu=GaussianARD(2, 3,\n shape=(2,),\n plates=(3,4,5,)))\n test((2,), (3,4,5), axis=-1, plate_axis=-3,\n alpha_plates=(3,4,5,2),\n mu=GaussianARD(2, 3,\n shape=(2,),\n plates=(3,4,5,)))\n\n # TODO: With missing values\n \n pass",
"def _calculate_trainable_parameters_gradient(self, input, backwards_input):\n pass",
"def test_fast_gradient_method():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)\n label = np.asarray([2], np.int32)\n label = np.eye(3)[label].astype(np.float32)\n\n attack = FastGradientMethod(Net())\n ms_adv_x = attack.generate(input_np, label)\n\n assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \\\n ' must not be equal to original value.'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
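A note on the record above: by PennyLane's documented convention, each term ``[c, a, s]`` of a ``grad_recipe`` contributes ``c * f(a * x + s)`` to the derivative estimate, which is why a recipe may legitimately depend on the instantiated parameter ``x``. A minimal sketch of a consumer of such a recipe, assuming a plain callable ``f``; ``apply_recipe`` is a hypothetical helper for illustration, not PennyLane's internal implementation:

import math

def apply_recipe(f, x, recipe):
    # Sum the shifted evaluations: d/dx f(x) ~ sum_i c_i * f(a_i * x + s_i).
    return sum(c * f(a * x + s) for c, a, s in recipe)

x = 0.654
# The parameter-dependent recipe asserted in the record above.
recipe = [[1.0, 1.0, x], [1.0, 0.0, -x]]
print(apply_recipe(math.sin, x, recipe))  # evaluates sin(2*x) + sin(-x), purely illustrative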
Test that ``get_parameter_shift`` issues a deprecation warning.
|
def test_warning_get_parameter_shift(self):
class DummyOp(qml.operation.Operation):
r"""Dummy custom operation"""
num_wires = 1
num_params = 1
grad_recipe = ("Dummy recipe",)
op = DummyOp(0.1, wires=0)
with pytest.warns(UserWarning, match="get_parameter_shift is deprecated"):
assert op.get_parameter_shift(0) == "Dummy recipe"
|
[
"def test_legacy_deprecated(recwarn):\n warnings.simplefilter('always')\n from luma.led_matrix import legacy\n\n assert len(recwarn) == 1\n w = recwarn.pop(DeprecationWarning)\n\n assert str(w.message) == legacy.deprecation_msg",
"def test_deprecated(self):\n @misc.deprecated\n def function():\n return 1, None\n \n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # test whether the function still works\n self.assertEqual(function(), (1, None))\n # test whether a warning was emitted\n self.assertEqual(len(w), 1)\n self.assertTrue(issubclass(w[-1].category, DeprecationWarning))\n self.assertIn(\"deprecated\", str(w[-1].message))",
"def functionWithDeprecatedParameter(a, b, c=1, foo=2, bar=3):",
"def test_old_argument_deprecation(self):\n post_data = {\n 'nick': 'johndoe',\n }\n cnt = Person.objects.count()\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n response = self.client.post(reverse('admin:admin_custom_urls_person_add'), post_data)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Person.objects.count(), cnt + 1)\n # We should get a DeprecationWarning\n self.assertEqual(len(w), 1)\n self.assertTrue(isinstance(w[0].message, DeprecationWarning))",
"def deprecated(instructions):\r\n\r\n @decorator\r\n def wrapper(func, *args, **kwargs):\r\n message = \"{0} is deprecated and may break unexpectedly.\\n{1}\".format(\r\n func.__name__,\r\n instructions)\r\n\r\n warnings.warn(message,\r\n GmusicapiWarning,\r\n stacklevel=2)\r\n\r\n return func(*args, **kwargs)\r\n\r\n return wrapper",
"def test_methodIsDeprecated(self):\n sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n method=SSL.SSLv23_METHOD,\n )\n\n message = (\"Passing method to twisted.internet.ssl.CertificateOptions \"\n \"was deprecated in Twisted 17.1.0. Please use a \"\n \"combination of insecurelyLowerMinimumTo, raiseMinimumTo, \"\n \"and lowerMaximumSecurityTo instead, as Twisted will \"\n \"correctly configure the method.\")\n\n warnings = self.flushWarnings([self.test_methodIsDeprecated])\n self.assertEqual(1, len(warnings))\n self.assertEqual(DeprecationWarning, warnings[0]['category'])\n self.assertEqual(message, warnings[0]['message'])",
"def test_obersvablereturntypes_import_warnings(return_type):\n\n with pytest.warns(UserWarning, match=r\"is deprecated\"):\n getattr(qml.operation, return_type)",
"def deprecated():\n\n def wrapper(func):\n @wraps(func)\n def wrapped(self, *args, **kwargs):\n (data, code, headers) = unpack(func(self, *args, **kwargs))\n headers[\"Deprecation\"] = \"true\"\n\n return (data, code, headers)\n\n return wrapped\n\n return wrapper",
"def test_error_get_parameter_shift_no_recipe(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n num_params = 1\n grad_recipe = (None,)\n\n op = DummyOp(0.1, wires=0)\n with pytest.raises(\n qml.operation.OperatorPropertyUndefined,\n match=\"The operation DummyOp does not have a parameter-shift recipe\",\n ):\n op.get_parameter_shift(0)",
"def test_deprecatedModule(self):\n self.checkOneWarning(self.simpleModuleEntry())",
"def deprecated_renamed_argument(old_name, new_name, since, **kwargs):\n kwargs[\"warning_type\"] = CTAPipeDeprecationWarning\n return astropy.utils.deprecated_renamed_argument(\n old_name, new_name, since, **kwargs\n )",
"def test_getDeprecationWarningString(self):\n version = Version(\"Twisted\", 8, 0, 0)\n self.assertEqual(\n getDeprecationWarningString(self.test_getDeprecationWarningString, version),\n \"%s.DeprecationWarningsTests.test_getDeprecationWarningString \"\n \"was deprecated in Twisted 8.0.0\" % (__name__,),\n )",
"def test_optim_input_warning(self):\n\n def should_check_method(method_name: str):\n # Check every method since they all accept `optim_input`\n return method_name not in (\n \"sharded_optim_state_dict\",\n \"flatten_sharded_optim_state_dict\",\n )\n\n def get_warning_context():\n warning_regex = \"`optim_input` argument is deprecated\"\n return self.assertWarnsRegex(\n expected_warning=UserWarning, expected_regex=warning_regex\n )\n\n self._run_on_all_optim_state_apis(\n should_check_method, get_warning_context, fsdp_kwargs=None\n )",
"def check_deprecated_kwargs(kwargs, renames):\n for field, renamed_to in six.iteritems(renames):\n if field in kwargs:\n if renamed_to is not None:\n msg = (\n \"The parameter `{old}` has been renamed to `{new}`.\"\n \"`{old}` will be removed in future versions, \"\n \"please use `{new}` instead.\".format(old=field, new=renamed_to)\n )\n else:\n msg = (\n \"The parameter `{}` has been deprecated \"\n \"and will be removed in future versions.\".format(field)\n )\n warnings.warn(msg, DeprecationWarning)",
"def test_compare_model_and_inst_deprecation(self):\n\n with warnings.catch_warnings(record=True) as war:\n try:\n mu.compare_model_and_inst(pairs=None)\n except ValueError:\n # Setting pairs to None should produce a ValueError after\n # warning is generated\n pass\n\n assert len(war) >= 1\n assert war[0].category == DeprecationWarning",
"def test_getDeprecationWarningStringReplacementWithCallable(self):\n version = Version(\"Twisted\", 8, 0, 0)\n warningString = getDeprecationWarningString(\n self.test_getDeprecationWarningString,\n version,\n replacement=dummyReplacementMethod,\n )\n self.assertEqual(\n warningString,\n \"%s was deprecated in Twisted 8.0.0; please use \"\n \"%s.dummyReplacementMethod instead\"\n % (fullyQualifiedName(self.test_getDeprecationWarningString), __name__),\n )",
"def manual_warn(message, stacklevel=3):\r\n deprecated._any_called()\r\n warnings.warn(KvDeprecationWarning(message), stacklevel=stacklevel)",
"def test_collect_inst_model_pairs_deprecation(self):\n\n with warnings.catch_warnings(record=True) as war:\n try:\n mu.collect_inst_model_pairs(inst=None)\n except ValueError:\n # Setting inst to None should produce a ValueError after\n # warning is generated\n pass\n\n assert len(war) >= 1\n assert war[0].category == DeprecationWarning",
"def test_getDeprecationWarningStringReplacement(self):\n version = Version(\"Twisted\", 8, 0, 0)\n warningString = getDeprecationWarningString(\n self.test_getDeprecationWarningString,\n version,\n replacement=\"something.foobar\",\n )\n self.assertEqual(\n warningString,\n \"%s was deprecated in Twisted 8.0.0; please use something.foobar \"\n \"instead\" % (fullyQualifiedName(self.test_getDeprecationWarningString),),\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
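The deprecation pattern exercised in the record above can be reproduced with the standard warnings/pytest machinery. The stub below is a hypothetical stand-in that merely mirrors the asserted behaviour (warn, then return the stored recipe entry); it is not PennyLane's actual method:

import warnings
import pytest

def get_parameter_shift_stub(grad_recipe, idx):
    # Hypothetical stand-in: emit the deprecation warning, then return
    # the recipe entry, as asserted in the record above.
    warnings.warn("get_parameter_shift is deprecated", UserWarning)
    return grad_recipe[idx]

def test_stub_warns():
    with pytest.warns(UserWarning, match="get_parameter_shift is deprecated"):
        assert get_parameter_shift_stub(("Dummy recipe",), 0) == "Dummy recipe"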
Test that ``get_parameter_shift`` raises an error if no ``grad_recipe`` is available, as we no longer assume the two-term rule by default.
|
def test_error_get_parameter_shift_no_recipe(self):
class DummyOp(qml.operation.Operation):
r"""Dummy custom operation"""
num_wires = 1
num_params = 1
grad_recipe = (None,)
op = DummyOp(0.1, wires=0)
with pytest.raises(
qml.operation.OperatorPropertyUndefined,
match="The operation DummyOp does not have a parameter-shift recipe",
):
op.get_parameter_shift(0)
|
[
"def test_warning_get_parameter_shift(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n num_params = 1\n grad_recipe = (\"Dummy recipe\",)\n\n op = DummyOp(0.1, wires=0)\n with pytest.warns(UserWarning, match=\"get_parameter_shift is deprecated\"):\n assert op.get_parameter_shift(0) == \"Dummy recipe\"",
"def test_all_paramshift_state(self, interface, return_type, shots, wire_specs):\n msg = (\n \"Computing the gradient of circuits that return the state with the \"\n \"parameter-shift rule gradient transform is not supported.\"\n )\n complex = return_type == \"StateVector\"\n\n with pytest.raises(ValueError, match=msg):\n circuit = get_qnode(interface, \"parameter-shift\", return_type, shots, wire_specs)\n x = get_variable(interface, wire_specs, complex=complex)\n if shots is not None:\n with pytest.warns(UserWarning, match=\"the returned result is analytic\"):\n compute_gradient(x, interface, circuit, return_type, complex=complex)\n else:\n compute_gradient(x, interface, circuit, return_type, complex=complex)",
"def test_all_paramshift_nonstate(self, interface, return_type, shots, wire_specs):\n\n # correctness is already tested in other test files\n circuit = get_qnode(interface, \"parameter-shift\", return_type, shots, wire_specs)\n x = get_variable(interface, wire_specs)\n compute_gradient(x, interface, circuit, return_type)",
"def test_get_configurable_parameter_fails_with_none_arguments(self):\n with pytest.raises(ValueError):\n get_configurable_parameters()",
"def test_get_tool_constraints(self):\n\n # When no parameters are given an error is raised\n self.assertRaises(MissingParameterError, portal.get_tool)",
"def test_resolvelinkedparams_noinputstep(self):\n self.mar._linkedidx = None\n self.mar._inputappstep = []\n assertDiracSucceeds(self.mar._resolveLinkedStepParameters(None), self)",
"def test_staking_parameters_get(self):\n pass",
"def check_redshift():\n try:\n print 'myData.redshift:', myData.redshift\n redshift=myData.redshift\n except:\n print 'Could not find REDSHIFT! --> Taken from SCALE_FACTOR_MAPPING instead!\\nz:',\n print self.SAM_scale_factor_map\n redshift=self.SAM_scale_factor_map[self.myconfig_array['catname'+str(self.a)]+'_redshift'+str(self.i)]\n myData.redshift=redshift\n print redshift\n return redshift",
"def test_get_parameter_value_exception(value):\n args = {'A': True}\n with pytest.raises(err.MissingArgumentError):\n assert tp.get_value(value=value, arguments=args)",
"def prepare_retry_step():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_star', dtype='int32', direction=function.IN\n , description=\"The index of the star to get the value of\")\n function.addParameter('dt_next', dtype='float64', direction=function.IN\n , description=\"New timestep to try\")\n function.addParameter('result', dtype='int32', direction=function.OUT\n , description=\"What the star should do next (keep going, redo, retry, terminate)\")\n function.result_type = 'int32'\n return function",
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def test_shift_typeerror(self):\n self.assertRaises(TypeError, when.shift, 'a')\n self.assertRaises(TypeError, when.shift, when.today())",
"def test_error_missing_wind_gust_attribute(wind_gust_percentile_cube, interpreter):\n wind_gust_percentile_cube.attributes.pop(\"wind_gust_diagnostic\")\n with pytest.raises(ValueError, match=\"missing .* required values\"):\n interpreter.run(wind_gust_percentile_cube)",
"def test_varied_psf_missing_x_fails():\n ref = simple_psf(lambda x, y: x + y)\n with pytest.raises(PSFParameterValidationError):\n varied_psf(ref)(lambda c, y: {'sigma': 0.1})",
"def test_require_arg(self):\n self.layer.require_arg('bobofet')\n self.assertRaises(outline.layer.LayerException, self.layer.check_required_args)\n self.layer.set_arg('bobofet', 1)\n self.layer.check_required_args()",
"def test_get_params_decorator(self, name):\n test_get_params_fn = self._test_get_params_fn()\n stage = self._get_mock_stage()\n encode_params, decode_params = self.evaluate(\n test_get_params_fn(stage, name))\n\n # The graph should contain a single node.\n graph = tf.compat.v1.get_default_graph()\n self.assertLen(graph.as_graph_def().node, 1)\n if name is not None:\n self._assert_all_graph_nodes_in_name_scope(graph, name)\n else:\n self._assert_all_graph_nodes_in_name_scope(\n graph, self._DEFAULT_NAME + '_get_params')\n # The functionality is not modified.\n self.assertEqual(1.0, encode_params['param'])\n self.assertEqual(1.0, decode_params['param'])",
"def test_get_variable_for_feature__invalid_feature_key(self):\n\n opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features))\n project_config = opt_obj.config_manager.get_config()\n\n self.assertIsNone(project_config.get_variable_for_feature('invalid_feature', 'is_working'))",
"def test_simple_psf_missing_xy_fails():\n with pytest.raises(PSFParameterValidationError):\n simple_psf(lambda: 1)",
"def getShift(self) -> retval:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
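Conversely, when a recipe entry is missing, the method in the record above is expected to raise rather than silently fall back to the two-term shift rule. A minimal sketch of that behaviour; the exception class mirrors the name of ``qml.operation.OperatorPropertyUndefined`` but is defined here only for illustration:

class OperatorPropertyUndefined(Exception):
    """Illustrative stand-in for qml.operation.OperatorPropertyUndefined."""

def get_parameter_shift_stub(op_name, grad_recipe, idx):
    # No recipe stored for this parameter: raise instead of assuming the
    # two-term shift rule, matching the error asserted in the record above.
    if grad_recipe[idx] is None:
        raise OperatorPropertyUndefined(
            f"The operation {op_name} does not have a parameter-shift recipe"
        )
    return grad_recipe[idx]

# Usage: get_parameter_shift_stub("DummyOp", (None,), 0) raises OperatorPropertyUndefined.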
Test that the correct ``grad_method`` is returned by default if ``parameter_frequencies`` are present.
|
def test_default_grad_method_with_frequencies(self):
class DummyOp(qml.operation.Operation):
r"""Dummy custom operation"""
num_wires = 1
@property
def parameter_frequencies(self):
return [(0.4, 1.2)]
x = 0.654
op = DummyOp(x, wires=0)
assert op.grad_method == "A"
|
[
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def requires_grad(self):\n return self.param_info.requires_grad",
"def check_grad_param(grad_dic):\n grad_dtype = grad_dic.get(\"dtype\").lower()\n grad_shape = grad_dic.get(\"shape\")\n op_utils.check_shape(grad_shape)\n op_utils.check_dtype(grad_dtype, [\"float32\"])",
"def has_grad(self) -> bool:\n return self.check_sensi_orders((1,), MODE_FUN)",
"def test_unknown_grad_method_error(self):\n tape = JacobianTape()\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n tape.jacobian(None, method=\"unknown method\")",
"def has_learned_parameters(self) -> bool:\n return any(param.requires_grad for param in self.parameters(recurse=True))",
"def test_bogus_gradient_method_set(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n # in non-cached mode, the grad method would be\n # recomputed and overwritten from the\n # bogus value 'J'. Caching stops this from happening.\n node = qml.QNode(circuit, operable_mock_device_2_wires, cache=True)\n\n node.evaluate([0.0])\n keys = node.grad_method_for_par.keys()\n if keys:\n k0 = [k for k in keys][0]\n\n node.grad_method_for_par[k0] = \"J\"\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5)",
"def _reset_is_grad_none(self) -> None:\n if not self._use_orig_params:\n return\n _p_assert(\n self._training_state == HandleTrainingState.BACKWARD_POST,\n \"Expects to only be called in the post-backward after gradient computation\",\n )\n flat_param = self.flat_param\n assert flat_param._params is not None # mypy\n for i, param in enumerate(flat_param._params): # type: ignore[arg-type]\n # As long as the parameter requires gradient, it should receive a\n # meaningful gradient (even if the gradient happens to be zeros)\n if param.requires_grad:\n assert flat_param._is_grad_none_mask is not None # mypy\n flat_param._is_grad_none_mask[i] = False",
"def _check_for_gradient_implementation(self, parameters):\n # Implemented for the requested parameters?\n if not isinstance(parameters, list):\n parameters = [parameters]\n implemented = {'t_0', 't_E', 'u_0', 't_eff', 'pi_E_N', 'pi_E_E'}\n if len(set(parameters) - implemented) > 0:\n raise NotImplementedError((\n \"chi^2 gradient is implemented only for {:}\\nCannot work \" +\n \"with {:}\").format(implemented, parameters))\n\n # Implemented for the number of sources in the model?\n if self.model.n_lenses != 1:\n raise NotImplementedError(\n 'chi2_gradient() only implemented for single lens models')\n\n # Implemented for finite source effects?\n if self.model.parameters.is_finite_source():\n raise NotImplementedError('Event.chi2_gradient() is not working '\n 'for finite source models yet')",
"def _grad_requires_forward(self):\n return False",
"def test_frequencies_default_multi_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_params = 3\n num_wires = 1\n grad_method = \"A\"\n\n x = [0.654, 2.31, 0.1]\n op = DummyOp(*x, wires=0)\n with pytest.raises(\n qml.operation.OperatorPropertyUndefined, match=\"DummyOp does not have parameter\"\n ):\n op.parameter_frequencies",
"def test_frequencies_default_single_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n def generator(self):\n return -0.2 * qml.PauliX(wires=self.wires)\n\n x = 0.654\n op = DummyOp(x, wires=0)\n assert op.parameter_frequencies == (0.4,)",
"def check_gradients(self, X, Y, method='finite_diff'):\n grad_w_num = np.zeros((self.k, self.d))\n Y_pred, h_act = self.evaluate(X)\n grad_b1, grad_b2, grad_w1, grad_w2 = self.compute_gradients(X, Y, Y_pred, h_act)\n if method == 'finite_diff':\n grad_b1_num, grad_b2_num, grad_w1_num, grad_w2_num = self.compute_gradient_num_fast(X, Y)\n elif method == 'centered_diff':\n grad_b1_num, grad_b2_num, grad_w1_num, grad_w2_num = self.compute_gradient_num_slow(X, Y)\n else:\n print(method, \" IS NOT A VALID NUMERICAL GRADIENT CHECKING.\")\n\n grad_w1_vec = grad_w1.flatten()\n grad_w1_num_vec = grad_w1_num.flatten()\n x_w1 = np.arange(1, grad_w1_vec.shape[0] + 1)\n plt.bar(x_w1, grad_w1_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_w1+0.35, grad_w1_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of w1, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_w2_vec = grad_w2.flatten()\n grad_w2_num_vec = grad_w2_num.flatten()\n x_w2 = np.arange(1, grad_w2_vec.shape[0] + 1)\n plt.bar(x_w2, grad_w2_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_w2 + 0.35, grad_w2_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of w2, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_b1_vec = grad_b1.flatten()\n grad_b1_num_vec = grad_b1_num.flatten()\n x_b1 = np.arange(1, grad_b1.shape[0] + 1)\n plt.bar(x_b1, grad_b1_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_b1 + 0.35, grad_b1_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of b1, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_b2_vec = grad_b2.flatten()\n grad_b2_num_vec = grad_b2_num.flatten()\n x_b2 = np.arange(1, grad_b2.shape[0] + 1)\n plt.bar(x_b2, grad_b2_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_b2 + 0.35, grad_b2_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of b2, batch size = \" + str(X.shape[1])))\n plt.show()",
"def test_grad_func(self):\n pars = np.array(self.spec.central, dtype='float64')\n data = np.copy(self.spec(pars))\n data *= 1.1 # move away from centre to ensure non-zero gradients\n self.spec.set_data(data)\n self.move_pars(pars) # move parameters to check proper partials\n\n ll = ROOT.Double(0)\n grads1 = pars*0\n grads2 = pars*0\n\n self.spec._obj.FdF(pars, ll, grads1)\n self.spec._obj.Gradient(pars, grads2)\n\n np.testing.assert_almost_equal(grads1, grads2)",
"def _use_sharded_grad_views(self) -> None:\n flat_param = self.flat_param\n self._check_sharded(flat_param)\n grad = self.sharded_grad\n if grad is None:\n for param in chain(flat_param._params, flat_param._shared_params):\n param.grad = None\n return\n self._check_sharded(grad)\n for param, shard_param_info, is_grad_none in zip(\n flat_param._params,\n flat_param._shard_param_infos,\n flat_param._is_grad_none_mask,\n ):\n if not shard_param_info.in_shard:\n param.grad = None\n else:\n numel_in_shard = shard_param_info.numel_in_shard\n if param.requires_grad and not is_grad_none:\n offset = shard_param_info.offset_in_shard\n if self._keep_low_precision_grads or param.dtype != grad.dtype:\n # NOTE: This is a hack using `.data` to side step the\n # check that parameter/gradient dtypes match. Here,\n # `param` has full precision; `grad` has low precision.\n if param.grad is None:\n # `.grad` must have the same shape as `param`\n param.grad = torch.empty_like(param)\n param.grad.data = grad[\n offset : offset + numel_in_shard\n ].reshape(param.shape)\n else:\n param.grad = grad[offset : offset + numel_in_shard].reshape(\n param.shape\n )\n else:\n param.grad = None\n assert flat_param._shared_params is not None\n for i, (param, (_, _, _, prim_param_name, prim_module, _)) in enumerate(\n zip(flat_param._shared_params, flat_param._shared_param_infos)\n ):\n in_sharded_flat_param = hasattr(prim_module, prim_param_name)\n if in_sharded_flat_param and param.requires_grad:\n prim_param = getattr(prim_module, prim_param_name)\n param.grad = prim_param.grad # share the same reference\n else:\n param.grad = None",
"def requires_grad_(self, flag=True):\n for t_in in self.dummy_input:\n if isinstance(t_in, torch.Tensor) and t_in.dtype in torch_float_dtype:\n # only float type can require the gradient\n # enable the auto gradient\n t_in.requires_grad_(flag)\n for para_name in self.weights:\n if self.weights[para_name].dtype in torch_float_dtype:\n self.weights[para_name].requires_grad_(flag)",
"def prepare_gradient_for_optim(self):\n\n def cast_grad_to_param_dtype_if_needed(flat_param):\n # TODO (rohan-varma): test for full precision with keep_low_precision_grads\n if not self._force_full_precision and self._keep_low_precision_grads:\n _p_assert(flat_param.grad is not None, \"Unexpected None grad!\")\n if flat_param.grad.dtype != self._fwd_bwd_param_dtype:\n flat_param.grad.data = flat_param.grad.to(self._fwd_bwd_param_dtype)\n if self._use_orig_params:\n self._use_sharded_grad_views()\n\n flat_param = self.flat_param\n # TODO (awgu): We should replace these conditional checks to encode\n # the logical intention more directly.\n if hasattr(flat_param, \"_cpu_grad\"):\n # NOTE: This branch includes `NO_SHARD`.\n self._check_sharded(flat_param)\n self._check_on_cpu(flat_param)\n flat_param.grad = flat_param._cpu_grad # type: ignore[attr-defined]\n cast_grad_to_param_dtype_if_needed(flat_param)\n elif hasattr(flat_param, \"_saved_grad_shard\"):\n self._check_sharded(flat_param)\n self._check_on_compute_device(flat_param)\n if flat_param._saved_grad_shard is not None:\n self._check_on_compute_device(flat_param._saved_grad_shard) # type: ignore[attr-defined]\n # If no sharded gradient was computed this iteration, then there is\n # no need to forward `_saved_grad_shard` to `grad`\n if flat_param._post_backward_called: # type: ignore[attr-defined]\n flat_param.grad = flat_param._saved_grad_shard # type: ignore[attr-defined]\n if flat_param.grad is not None:\n cast_grad_to_param_dtype_if_needed(flat_param)\n else:\n _p_assert(\n not self.uses_sharded_strategy\n or not flat_param._post_backward_called, # type: ignore[attr-defined]\n \"All sharded parameters that received a gradient in the \"\n \"post-backward should use `_saved_grad_shard`\",\n )\n # Delete `_saved_grad_shard` since its existence indicates a previous\n # gradient to accumulate with in the post-backward hook\n if hasattr(flat_param, \"_saved_grad_shard\"):\n delattr(flat_param, \"_saved_grad_shard\")",
"def do_grad_check(self, which, index, eps=1e-5):\n vocab_size = 5\n vector_dim = 2\n model = SkipGramWV(vocab_size, vector_dim)\n\n input_index = 2\n context_indices = [1, 3]\n noise_indices = [0, 4]\n ig, cg, ng = model._gradient_tensors(input_index, context_indices, noise_indices)\n l1 = model.neg_loss(input_index, context_indices, noise_indices)\n\n if which == \"context\":\n model._output_vectors[context_indices[index]][0] += eps\n agrad = cg[index,0]\n elif which == \"noise\":\n model._output_vectors[noise_indices[index]][0] += eps\n agrad = ng[index, 0]\n elif which == \"input\":\n model._input_vectors[input_index][0] += eps\n agrad = ig[0]\n l2 = model.neg_loss(input_index, context_indices, noise_indices)\n ngrad = (l2 - l1) / eps\n return agrad, ngrad",
"def disable_param_gradients(self):\n Util.set_param_requires_grad(self.model, False)",
"def _check_trainable_parameters_gradient_dim(self):\n\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
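The default ``grad_method`` logic exercised by the record above (and by the next one, where the frequencies are derived from a generator) can be summarised as: use the analytic parameter-shift method ("A") whenever parameter frequencies are available, otherwise fall back to finite differences ("F"). A hedged sketch of that decision rule, inferred from these tests rather than taken from PennyLane's source:

def default_grad_method(op):
    # Assumed decision rule: prefer the analytic parameter-shift method ("A")
    # when parameter frequencies can be obtained (stated directly or derived
    # from a generator); otherwise fall back to numeric finite differences ("F").
    try:
        op.parameter_frequencies
        return "A"
    except Exception:
        return "F"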
Test that the correct ``grad_method`` is returned by default if a generator is present from which ``parameter_frequencies`` can be determined.
|
def test_default_grad_method_with_generator(self):
class DummyOp(qml.operation.Operation):
r"""Dummy custom operation"""
num_wires = 1
def generator(self):
return -0.2 * qml.PauliX(wires=self.wires)
x = 0.654
op = DummyOp(x, wires=0)
assert op.grad_method == "A"
|
[
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def requires_grad(self):\n return self.param_info.requires_grad",
"def has_grad(self) -> bool:\n return self.check_sensi_orders((1,), MODE_FUN)",
"def check_grad_param(grad_dic):\n grad_dtype = grad_dic.get(\"dtype\").lower()\n grad_shape = grad_dic.get(\"shape\")\n op_utils.check_shape(grad_shape)\n op_utils.check_dtype(grad_dtype, [\"float32\"])",
"def test_unknown_grad_method_error(self):\n tape = JacobianTape()\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n tape.jacobian(None, method=\"unknown method\")",
"def test_bogus_gradient_method_set(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n # in non-cached mode, the grad method would be\n # recomputed and overwritten from the\n # bogus value 'J'. Caching stops this from happening.\n node = qml.QNode(circuit, operable_mock_device_2_wires, cache=True)\n\n node.evaluate([0.0])\n keys = node.grad_method_for_par.keys()\n if keys:\n k0 = [k for k in keys][0]\n\n node.grad_method_for_par[k0] = \"J\"\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5)",
"def do_grad_check(self, which, index, eps=1e-5):\n vocab_size = 5\n vector_dim = 2\n model = SkipGramWV(vocab_size, vector_dim)\n\n input_index = 2\n context_indices = [1, 3]\n noise_indices = [0, 4]\n ig, cg, ng = model._gradient_tensors(input_index, context_indices, noise_indices)\n l1 = model.neg_loss(input_index, context_indices, noise_indices)\n\n if which == \"context\":\n model._output_vectors[context_indices[index]][0] += eps\n agrad = cg[index,0]\n elif which == \"noise\":\n model._output_vectors[noise_indices[index]][0] += eps\n agrad = ng[index, 0]\n elif which == \"input\":\n model._input_vectors[input_index][0] += eps\n agrad = ig[0]\n l2 = model.neg_loss(input_index, context_indices, noise_indices)\n ngrad = (l2 - l1) / eps\n return agrad, ngrad",
"def has_learned_parameters(self) -> bool:\n return any(param.requires_grad for param in self.parameters(recurse=True))",
"def requires_grad_(self, flag=True):\n for t_in in self.dummy_input:\n if isinstance(t_in, torch.Tensor) and t_in.dtype in torch_float_dtype:\n # only float type can require the gradient\n # enable the auto gradient\n t_in.requires_grad_(flag)\n for para_name in self.weights:\n if self.weights[para_name].dtype in torch_float_dtype:\n self.weights[para_name].requires_grad_(flag)",
"def _reset_is_grad_none(self) -> None:\n if not self._use_orig_params:\n return\n _p_assert(\n self._training_state == HandleTrainingState.BACKWARD_POST,\n \"Expects to only be called in the post-backward after gradient computation\",\n )\n flat_param = self.flat_param\n assert flat_param._params is not None # mypy\n for i, param in enumerate(flat_param._params): # type: ignore[arg-type]\n # As long as the parameter requires gradient, it should receive a\n # meaningful gradient (even if the gradient happens to be zeros)\n if param.requires_grad:\n assert flat_param._is_grad_none_mask is not None # mypy\n flat_param._is_grad_none_mask[i] = False",
"def _use_sharded_grad_views(self) -> None:\n flat_param = self.flat_param\n self._check_sharded(flat_param)\n grad = self.sharded_grad\n if grad is None:\n for param in chain(flat_param._params, flat_param._shared_params):\n param.grad = None\n return\n self._check_sharded(grad)\n for param, shard_param_info, is_grad_none in zip(\n flat_param._params,\n flat_param._shard_param_infos,\n flat_param._is_grad_none_mask,\n ):\n if not shard_param_info.in_shard:\n param.grad = None\n else:\n numel_in_shard = shard_param_info.numel_in_shard\n if param.requires_grad and not is_grad_none:\n offset = shard_param_info.offset_in_shard\n if self._keep_low_precision_grads or param.dtype != grad.dtype:\n # NOTE: This is a hack using `.data` to side step the\n # check that parameter/gradient dtypes match. Here,\n # `param` has full precision; `grad` has low precision.\n if param.grad is None:\n # `.grad` must have the same shape as `param`\n param.grad = torch.empty_like(param)\n param.grad.data = grad[\n offset : offset + numel_in_shard\n ].reshape(param.shape)\n else:\n param.grad = grad[offset : offset + numel_in_shard].reshape(\n param.shape\n )\n else:\n param.grad = None\n assert flat_param._shared_params is not None\n for i, (param, (_, _, _, prim_param_name, prim_module, _)) in enumerate(\n zip(flat_param._shared_params, flat_param._shared_param_infos)\n ):\n in_sharded_flat_param = hasattr(prim_module, prim_param_name)\n if in_sharded_flat_param and param.requires_grad:\n prim_param = getattr(prim_module, prim_param_name)\n param.grad = prim_param.grad # share the same reference\n else:\n param.grad = None",
"def _grad_requires_forward(self):\n return False",
"def gradient(self, node, output_grad):\n\t\traise NotImplementedError",
"def test_random_fast_gradient_method():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)\n label = np.asarray([2], np.int32)\n label = np.eye(3)[label].astype(np.float32)\n\n attack = RandomFastGradientMethod(Net())\n ms_adv_x = attack.generate(input_np, label)\n\n assert np.any(ms_adv_x != input_np), 'Random fast gradient method: ' \\\n 'generate value must not be equal to' \\\n ' original value.'",
"def prepare_gradient_for_optim(self):\n\n def cast_grad_to_param_dtype_if_needed(flat_param):\n # TODO (rohan-varma): test for full precision with keep_low_precision_grads\n if not self._force_full_precision and self._keep_low_precision_grads:\n _p_assert(flat_param.grad is not None, \"Unexpected None grad!\")\n if flat_param.grad.dtype != self._fwd_bwd_param_dtype:\n flat_param.grad.data = flat_param.grad.to(self._fwd_bwd_param_dtype)\n if self._use_orig_params:\n self._use_sharded_grad_views()\n\n flat_param = self.flat_param\n # TODO (awgu): We should replace these conditional checks to encode\n # the logical intention more directly.\n if hasattr(flat_param, \"_cpu_grad\"):\n # NOTE: This branch includes `NO_SHARD`.\n self._check_sharded(flat_param)\n self._check_on_cpu(flat_param)\n flat_param.grad = flat_param._cpu_grad # type: ignore[attr-defined]\n cast_grad_to_param_dtype_if_needed(flat_param)\n elif hasattr(flat_param, \"_saved_grad_shard\"):\n self._check_sharded(flat_param)\n self._check_on_compute_device(flat_param)\n if flat_param._saved_grad_shard is not None:\n self._check_on_compute_device(flat_param._saved_grad_shard) # type: ignore[attr-defined]\n # If no sharded gradient was computed this iteration, then there is\n # no need to forward `_saved_grad_shard` to `grad`\n if flat_param._post_backward_called: # type: ignore[attr-defined]\n flat_param.grad = flat_param._saved_grad_shard # type: ignore[attr-defined]\n if flat_param.grad is not None:\n cast_grad_to_param_dtype_if_needed(flat_param)\n else:\n _p_assert(\n not self.uses_sharded_strategy\n or not flat_param._post_backward_called, # type: ignore[attr-defined]\n \"All sharded parameters that received a gradient in the \"\n \"post-backward should use `_saved_grad_shard`\",\n )\n # Delete `_saved_grad_shard` since its existence indicates a previous\n # gradient to accumulate with in the post-backward hook\n if hasattr(flat_param, \"_saved_grad_shard\"):\n delattr(flat_param, \"_saved_grad_shard\")",
"def test_frequencies_default_single_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n def generator(self):\n return -0.2 * qml.PauliX(wires=self.wires)\n\n x = 0.654\n op = DummyOp(x, wires=0)\n assert op.parameter_frequencies == (0.4,)",
"def test_grad(self):\n X = [np.array([[0, 0.], [1, 0]]), np.array([[1, 0.], [0, 1]])]\n X, _, _ = LongitudinalFeaturesLagger(n_lags=self.n_lags) \\\n .fit_transform(X)\n model = ModelSCCS(n_intervals=2, n_lags=self.n_lags) \\\n .fit(X, self.y)\n grad = model.grad(coeffs=np.array([0.0, 0.0, 1.0, 0.0]))\n expected_grad = -np.array([\n -1 / 2 - 1 / (1 + np.e), 1 - np.e / (1 + np.e), 1 - np.e /\n (1 + np.e), 0\n ]) / 2\n np.testing.assert_almost_equal(grad, expected_grad, decimal=15)",
"def test_fast_gradient_method():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)\n label = np.asarray([2], np.int32)\n label = np.eye(3)[label].astype(np.float32)\n\n attack = FastGradientMethod(Net())\n ms_adv_x = attack.generate(input_np, label)\n\n assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \\\n ' must not be equal to original value.'",
"def _check_for_gradient_implementation(self, parameters):\n # Implemented for the requested parameters?\n if not isinstance(parameters, list):\n parameters = [parameters]\n implemented = {'t_0', 't_E', 'u_0', 't_eff', 'pi_E_N', 'pi_E_E'}\n if len(set(parameters) - implemented) > 0:\n raise NotImplementedError((\n \"chi^2 gradient is implemented only for {:}\\nCannot work \" +\n \"with {:}\").format(implemented, parameters))\n\n # Implemented for the number of sources in the model?\n if self.model.n_lenses != 1:\n raise NotImplementedError(\n 'chi2_gradient() only implemented for single lens models')\n\n # Implemented for finite source effects?\n if self.model.parameters.is_finite_source():\n raise NotImplementedError('Event.chi2_gradient() is not working '\n 'for finite source models yet')",
"def test_qnode_gradient_repeated_gate_parameters(self, tol):\n par = [0.8, 1.3]\n\n def qf(x, y):\n qml.RX(np.pi / 4, wires=[0])\n qml.Rot(y, x, 2 * x, wires=[0])\n return qml.expval(qml.PauliX(0))\n\n dev = qml.device(\"default.qubit\", wires=1)\n q = qml.QNode(qf, dev)\n grad_A = q.jacobian(par, method=\"A\")\n grad_F = q.jacobian(par, method=\"F\")\n\n # the different methods agree\n assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the correct ``grad_method`` is returned by default if no information is present to deduce an analytic gradient method.
|
def test_default_grad_method_numeric(self):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_wires = 1

    x = 0.654
    op = DummyOp(x, wires=0)
    assert op.grad_method == "F"
|
[
"def test_unknown_grad_method_error(self):\n tape = JacobianTape()\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n tape.jacobian(None, method=\"unknown method\")",
"def has_grad(self) -> bool:\n return self.check_sensi_orders((1,), MODE_FUN)",
"def test_bogus_gradient_method_set(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n # in non-cached mode, the grad method would be\n # recomputed and overwritten from the\n # bogus value 'J'. Caching stops this from happening.\n node = qml.QNode(circuit, operable_mock_device_2_wires, cache=True)\n\n node.evaluate([0.0])\n keys = node.grad_method_for_par.keys()\n if keys:\n k0 = [k for k in keys][0]\n\n node.grad_method_for_par[k0] = \"J\"\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5)",
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def check_gradients(self, X, Y, method='finite_diff'):\n grad_w_num = np.zeros((self.k, self.d))\n Y_pred, h_act = self.evaluate(X)\n grad_b1, grad_b2, grad_w1, grad_w2 = self.compute_gradients(X, Y, Y_pred, h_act)\n if method == 'finite_diff':\n grad_b1_num, grad_b2_num, grad_w1_num, grad_w2_num = self.compute_gradient_num_fast(X, Y)\n elif method == 'centered_diff':\n grad_b1_num, grad_b2_num, grad_w1_num, grad_w2_num = self.compute_gradient_num_slow(X, Y)\n else:\n print(method, \" IS NOT A VALID NUMERICAL GRADIENT CHECKING.\")\n\n grad_w1_vec = grad_w1.flatten()\n grad_w1_num_vec = grad_w1_num.flatten()\n x_w1 = np.arange(1, grad_w1_vec.shape[0] + 1)\n plt.bar(x_w1, grad_w1_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_w1+0.35, grad_w1_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of w1, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_w2_vec = grad_w2.flatten()\n grad_w2_num_vec = grad_w2_num.flatten()\n x_w2 = np.arange(1, grad_w2_vec.shape[0] + 1)\n plt.bar(x_w2, grad_w2_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_w2 + 0.35, grad_w2_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of w2, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_b1_vec = grad_b1.flatten()\n grad_b1_num_vec = grad_b1_num.flatten()\n x_b1 = np.arange(1, grad_b1.shape[0] + 1)\n plt.bar(x_b1, grad_b1_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_b1 + 0.35, grad_b1_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of b1, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_b2_vec = grad_b2.flatten()\n grad_b2_num_vec = grad_b2_num.flatten()\n x_b2 = np.arange(1, grad_b2.shape[0] + 1)\n plt.bar(x_b2, grad_b2_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_b2 + 0.35, grad_b2_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of b2, batch size = \" + str(X.shape[1])))\n plt.show()",
"def requires_grad(self):\n return self.param_info.requires_grad",
"def test_grad(self):\n X = [np.array([[0, 0.], [1, 0]]), np.array([[1, 0.], [0, 1]])]\n X, _, _ = LongitudinalFeaturesLagger(n_lags=self.n_lags) \\\n .fit_transform(X)\n model = ModelSCCS(n_intervals=2, n_lags=self.n_lags) \\\n .fit(X, self.y)\n grad = model.grad(coeffs=np.array([0.0, 0.0, 1.0, 0.0]))\n expected_grad = -np.array([\n -1 / 2 - 1 / (1 + np.e), 1 - np.e / (1 + np.e), 1 - np.e /\n (1 + np.e), 0\n ]) / 2\n np.testing.assert_almost_equal(grad, expected_grad, decimal=15)",
"def test_grad_func(self):\n pars = np.array(self.spec.central, dtype='float64')\n data = np.copy(self.spec(pars))\n data *= 1.1 # move away from centre to ensure non-zero gradients\n self.spec.set_data(data)\n self.move_pars(pars) # move parameters to check proper partials\n\n ll = ROOT.Double(0)\n grads1 = pars*0\n grads2 = pars*0\n\n self.spec._obj.FdF(pars, ll, grads1)\n self.spec._obj.Gradient(pars, grads2)\n\n np.testing.assert_almost_equal(grads1, grads2)",
"def test_operation_not_supporting_analytic_gradient(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.Hermitian(np.diag([x, 0]), 0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"analytic gradient method cannot be used with\"):\n node.jacobian(0.5, method=\"A\")",
"def do_grad_check(self, which, index, eps=1e-5):\n vocab_size = 5\n vector_dim = 2\n model = SkipGramWV(vocab_size, vector_dim)\n\n input_index = 2\n context_indices = [1, 3]\n noise_indices = [0, 4]\n ig, cg, ng = model._gradient_tensors(input_index, context_indices, noise_indices)\n l1 = model.neg_loss(input_index, context_indices, noise_indices)\n\n if which == \"context\":\n model._output_vectors[context_indices[index]][0] += eps\n agrad = cg[index,0]\n elif which == \"noise\":\n model._output_vectors[noise_indices[index]][0] += eps\n agrad = ng[index, 0]\n elif which == \"input\":\n model._input_vectors[input_index][0] += eps\n agrad = ig[0]\n l2 = model.neg_loss(input_index, context_indices, noise_indices)\n ngrad = (l2 - l1) / eps\n return agrad, ngrad",
"def gradient(self, node, output_grad):\n\t\traise NotImplementedError",
"def _test_gradient_numerical(self, clf, x, extra_classes=None,\n th=1e-3, epsilon=eps, **grad_kwargs):\n if 'y' in grad_kwargs:\n raise ValueError(\"`y` cannot be passed to this unittest.\")\n\n if extra_classes is not None:\n classes = clf.classes.append(extra_classes)\n else:\n classes = clf.classes\n\n grads = []\n for c in classes:\n grad_kwargs['y'] = c # Appending class to test_f_x\n\n # Analytical gradient\n gradient = clf.grad_f_x(x, **grad_kwargs)\n grads.append(gradient)\n\n self.assertTrue(gradient.is_vector_like)\n self.assertEqual(x.size, gradient.size)\n self.assertEqual(x.issparse, gradient.issparse)\n\n # Numerical gradient\n num_gradient = CFunction(\n clf.decision_function).approx_fprime(x.todense(), epsilon, y=c)\n\n # Compute the norm of the difference\n error = (gradient - num_gradient).norm()\n\n self.logger.info(\n \"Analytic grad wrt. class {:}:\\n{:}\".format(c, gradient))\n self.logger.info(\n \"Numeric gradient wrt. class {:}:\\n{:}\".format(\n c, num_gradient))\n\n self.logger.info(\"norm(grad - num_grad): {:}\".format(error))\n self.assertLess(error, th)\n\n self.assertIsSubDtype(gradient.dtype, float)\n\n return grads",
"def test_getgrad():\n\n from pydft.schrodinger import _getgrad, _getE\n\n s = [3,3,3]\n R = np.array([[6,0,0],[0,6,0],[0,0,6]])\n Ns = 4\n np.random.seed(2004)\n\n W = np.random.normal(0,5,(27,4)) + np.random.normal(0,5,(27,4))*1j\n\n E0 = _getE(s,R,W)\n g0 = _getgrad(s,R,W)\n\n dW = np.random.normal(0,5,(27,4)) + np.random.normal(0,5,(27,4))*1j\n\n temp = list(range(-10,2))\n temp.reverse()\n for delta in temp:\n eps = 10**delta\n dE = 2*np.real(np.trace(np.dot(np.conj(g0.T),eps*dW)))\n\n diff = (_getE(s,R,W + eps*dW) -E0)/dE\n estimate = np.sqrt(len(W))*eps/abs(dE)\n print(\"eps\",eps,\"diff\",diff,\"error\",estimate)\n\n assert np.allclose(diff,1,atol=1e-3)",
"def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features) #Initialize the gradient we approximate\n #TODO",
"def test_unknown_method(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.Rot(0.3, x, -0.2, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5, method=\"unknown\")",
"def test_2():\n d = 3\n x = np.array([1, 1.5, 2])\n\n grad_val = mt_obj.griewank_grad(x, d)\n assert(np.all(np.round(grad_val, 6) == np.array([0.166577,\n 0.135511,\n 0.140324])))",
"def _grad_requires_forward(self):\n return False",
"def _test_grad(self, model, coeffs,\n delta_check_grad=1e-5,\n delta_model_grad=1e-4):\n self.assertAlmostEqual(check_grad(model.loss,\n model.grad,\n coeffs),\n 0.,\n delta=delta_check_grad)\n # Check that minimum iss achievable with a small gradient\n coeffs_min = fmin_bfgs(model.loss, coeffs,\n fprime=model.grad, disp=False)\n self.assertAlmostEqual(norm(model.grad(coeffs_min)),\n .0, delta=delta_model_grad)",
"def is_gradient(self):\n return self.container['is_gradient']",
"def _test_grad(self, model, coeffs, delta_check_grad=1e-5,\n delta_model_grad=1e-4):\n self.assertAlmostEqual(\n check_grad(model.loss, model.grad, coeffs), 0.,\n delta=delta_check_grad)\n # Check that minimum iss achievable with a small gradient\n coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,\n disp=False)\n self.assertAlmostEqual(\n norm(model.grad(coeffs_min)), .0, delta=delta_model_grad)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the correct ``grad_method`` is returned by default if a grad_recipe is present.
|
def test_default_grad_method_with_grad_recipe(self):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_wires = 1
        grad_recipe = ["not a recipe"]

    x = 0.654
    op = DummyOp(x, wires=0)
    assert op.grad_method == "A"
|
[
"def requires_grad(self):\n return self.param_info.requires_grad",
"def has_grad(self) -> bool:\n return self.check_sensi_orders((1,), MODE_FUN)",
"def test_unknown_grad_method_error(self):\n tape = JacobianTape()\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n tape.jacobian(None, method=\"unknown method\")",
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def test_bogus_gradient_method_set(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n # in non-cached mode, the grad method would be\n # recomputed and overwritten from the\n # bogus value 'J'. Caching stops this from happening.\n node = qml.QNode(circuit, operable_mock_device_2_wires, cache=True)\n\n node.evaluate([0.0])\n keys = node.grad_method_for_par.keys()\n if keys:\n k0 = [k for k in keys][0]\n\n node.grad_method_for_par[k0] = \"J\"\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5)",
"def requires_grad(module: nn.Module, recurse: bool = False) -> bool:\n requires_grad = any(p.requires_grad for p in module.parameters(recurse))\n return requires_grad",
"def do_grad_check(self, which, index, eps=1e-5):\n vocab_size = 5\n vector_dim = 2\n model = SkipGramWV(vocab_size, vector_dim)\n\n input_index = 2\n context_indices = [1, 3]\n noise_indices = [0, 4]\n ig, cg, ng = model._gradient_tensors(input_index, context_indices, noise_indices)\n l1 = model.neg_loss(input_index, context_indices, noise_indices)\n\n if which == \"context\":\n model._output_vectors[context_indices[index]][0] += eps\n agrad = cg[index,0]\n elif which == \"noise\":\n model._output_vectors[noise_indices[index]][0] += eps\n agrad = ng[index, 0]\n elif which == \"input\":\n model._input_vectors[input_index][0] += eps\n agrad = ig[0]\n l2 = model.neg_loss(input_index, context_indices, noise_indices)\n ngrad = (l2 - l1) / eps\n return agrad, ngrad",
"def _test_grad(self, model, coeffs,\n delta_check_grad=1e-5,\n delta_model_grad=1e-4):\n self.assertAlmostEqual(check_grad(model.loss,\n model.grad,\n coeffs),\n 0.,\n delta=delta_check_grad)\n # Check that minimum iss achievable with a small gradient\n coeffs_min = fmin_bfgs(model.loss, coeffs,\n fprime=model.grad, disp=False)\n self.assertAlmostEqual(norm(model.grad(coeffs_min)),\n .0, delta=delta_model_grad)",
"def _test_grad(self, model, coeffs, delta_check_grad=1e-5,\n delta_model_grad=1e-4):\n self.assertAlmostEqual(\n check_grad(model.loss, model.grad, coeffs), 0.,\n delta=delta_check_grad)\n # Check that minimum iss achievable with a small gradient\n coeffs_min = fmin_bfgs(model.loss, coeffs, fprime=model.grad,\n disp=False)\n self.assertAlmostEqual(\n norm(model.grad(coeffs_min)), .0, delta=delta_model_grad)",
"def test_grad_func(self):\n pars = np.array(self.spec.central, dtype='float64')\n data = np.copy(self.spec(pars))\n data *= 1.1 # move away from centre to ensure non-zero gradients\n self.spec.set_data(data)\n self.move_pars(pars) # move parameters to check proper partials\n\n ll = ROOT.Double(0)\n grads1 = pars*0\n grads2 = pars*0\n\n self.spec._obj.FdF(pars, ll, grads1)\n self.spec._obj.Gradient(pars, grads2)\n\n np.testing.assert_almost_equal(grads1, grads2)",
"def _grad_requires_forward(self):\n return False",
"def check_grad_rel(func, grad, x0, *args):\n step = 1.49e-08\n target = approx_fprime(x0, func, step, *args)\n actual = grad(x0, *args)\n delta = target - actual\n # make sure target is not 0\n delta[target > 0] /= target[target > 0]\n return delta",
"def test_2():\n d = 3\n x = np.array([1, 1.5, 2])\n\n grad_val = mt_obj.griewank_grad(x, d)\n assert(np.all(np.round(grad_val, 6) == np.array([0.166577,\n 0.135511,\n 0.140324])))",
"def requires_grad_(self, flag=True):\n for t_in in self.dummy_input:\n if isinstance(t_in, torch.Tensor) and t_in.dtype in torch_float_dtype:\n # only float type can require the gradient\n # enable the auto gradient\n t_in.requires_grad_(flag)\n for para_name in self.weights:\n if self.weights[para_name].dtype in torch_float_dtype:\n self.weights[para_name].requires_grad_(flag)",
"def test_grad(self):\n X = [np.array([[0, 0.], [1, 0]]), np.array([[1, 0.], [0, 1]])]\n X, _, _ = LongitudinalFeaturesLagger(n_lags=self.n_lags) \\\n .fit_transform(X)\n model = ModelSCCS(n_intervals=2, n_lags=self.n_lags) \\\n .fit(X, self.y)\n grad = model.grad(coeffs=np.array([0.0, 0.0, 1.0, 0.0]))\n expected_grad = -np.array([\n -1 / 2 - 1 / (1 + np.e), 1 - np.e / (1 + np.e), 1 - np.e /\n (1 + np.e), 0\n ]) / 2\n np.testing.assert_almost_equal(grad, expected_grad, decimal=15)",
"def is_gradient(self):\n return self.container['is_gradient']",
"def gradient(self, node, output_grad):\n\t\traise NotImplementedError",
"def _reset_is_grad_none(self) -> None:\n if not self._use_orig_params:\n return\n _p_assert(\n self._training_state == HandleTrainingState.BACKWARD_POST,\n \"Expects to only be called in the post-backward after gradient computation\",\n )\n flat_param = self.flat_param\n assert flat_param._params is not None # mypy\n for i, param in enumerate(flat_param._params): # type: ignore[arg-type]\n # As long as the parameter requires gradient, it should receive a\n # meaningful gradient (even if the gradient happens to be zeros)\n if param.requires_grad:\n assert flat_param._is_grad_none_mask is not None # mypy\n flat_param._is_grad_none_mask[i] = False",
"def _use_sharded_grad_views(self) -> None:\n flat_param = self.flat_param\n self._check_sharded(flat_param)\n grad = self.sharded_grad\n if grad is None:\n for param in chain(flat_param._params, flat_param._shared_params):\n param.grad = None\n return\n self._check_sharded(grad)\n for param, shard_param_info, is_grad_none in zip(\n flat_param._params,\n flat_param._shard_param_infos,\n flat_param._is_grad_none_mask,\n ):\n if not shard_param_info.in_shard:\n param.grad = None\n else:\n numel_in_shard = shard_param_info.numel_in_shard\n if param.requires_grad and not is_grad_none:\n offset = shard_param_info.offset_in_shard\n if self._keep_low_precision_grads or param.dtype != grad.dtype:\n # NOTE: This is a hack using `.data` to side step the\n # check that parameter/gradient dtypes match. Here,\n # `param` has full precision; `grad` has low precision.\n if param.grad is None:\n # `.grad` must have the same shape as `param`\n param.grad = torch.empty_like(param)\n param.grad.data = grad[\n offset : offset + numel_in_shard\n ].reshape(param.shape)\n else:\n param.grad = grad[offset : offset + numel_in_shard].reshape(\n param.shape\n )\n else:\n param.grad = None\n assert flat_param._shared_params is not None\n for i, (param, (_, _, _, prim_param_name, prim_module, _)) in enumerate(\n zip(flat_param._shared_params, flat_param._shared_param_infos)\n ):\n in_sharded_flat_param = hasattr(prim_module, prim_param_name)\n if in_sharded_flat_param and param.requires_grad:\n prim_param = getattr(prim_module, prim_param_name)\n param.grad = prim_param.grad # share the same reference\n else:\n param.grad = None",
"def check_grad_param(grad_dic):\n grad_dtype = grad_dic.get(\"dtype\").lower()\n grad_shape = grad_dic.get(\"shape\")\n op_utils.check_shape(grad_shape)\n op_utils.check_dtype(grad_dtype, [\"float32\"])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the correct ``grad_method`` is returned by default if an operation does not have a parameter.
|
def test_default_grad_no_param(self):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_wires = 1

    op = DummyOp(wires=0)
    assert op.grad_method is None
|
[
"def test_unknown_grad_method_error(self):\n tape = JacobianTape()\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n tape.jacobian(None, method=\"unknown method\")",
"def test_bogus_gradient_method_set(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n # in non-cached mode, the grad method would be\n # recomputed and overwritten from the\n # bogus value 'J'. Caching stops this from happening.\n node = qml.QNode(circuit, operable_mock_device_2_wires, cache=True)\n\n node.evaluate([0.0])\n keys = node.grad_method_for_par.keys()\n if keys:\n k0 = [k for k in keys][0]\n\n node.grad_method_for_par[k0] = \"J\"\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5)",
"def test_get_grad_parameters(self):\n self.assertLess(\n 0, len(list(self.instance.get_grad_params())), msg=\"There is not at least one trainable parameter\"\n )\n\n # Check that all the parameters actually require a gradient\n for parameter in self.instance.get_grad_params():\n assert parameter.requires_grad\n\n # Try to initialize an optimizer\n optimizer = SGD(params=self.instance.get_grad_params(), lr=1.0)\n assert optimizer is not None",
"def requires_grad(self):\n return self.param_info.requires_grad",
"def get_apply_gradients_op(self):\n raise NotImplementedError()",
"def has_grad(self) -> bool:\n return self.check_sensi_orders((1,), MODE_FUN)",
"def _reset_is_grad_none(self) -> None:\n if not self._use_orig_params:\n return\n _p_assert(\n self._training_state == HandleTrainingState.BACKWARD_POST,\n \"Expects to only be called in the post-backward after gradient computation\",\n )\n flat_param = self.flat_param\n assert flat_param._params is not None # mypy\n for i, param in enumerate(flat_param._params): # type: ignore[arg-type]\n # As long as the parameter requires gradient, it should receive a\n # meaningful gradient (even if the gradient happens to be zeros)\n if param.requires_grad:\n assert flat_param._is_grad_none_mask is not None # mypy\n flat_param._is_grad_none_mask[i] = False",
"def _const_round_grad(unused_op, grad):\n return grad",
"def gradient(self, node, output_grad):\n\t\traise NotImplementedError",
"def _grad_requires_forward(self):\n return False",
"def test_unknown_method(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.Rot(0.3, x, -0.2, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5, method=\"unknown\")",
"def test_operation_not_supporting_analytic_gradient(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.Hermitian(np.diag([x, 0]), 0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"analytic gradient method cannot be used with\"):\n node.jacobian(0.5, method=\"A\")",
"def _IfGrad(op, *grads): # pylint: disable=invalid-name\n # Get the if operator (this logic handles the case where op is a MockOp)\n if_op = op.outputs[0].op\n true_graph, false_graph = get_func_graphs(if_op)\n # Note: op.graph != ops.get_default_graph() when we are computing the gradient\n # of a nested cond.\n assert true_graph.outer_graph == if_op.graph\n assert false_graph.outer_graph == if_op.graph\n\n # Create grad functions that compute the gradient of the true/false forward\n # graphs. These functions will capture tensors from the forward pass\n # functions.\n true_grad_graph = _create_grad_func(\n true_graph, grads, util.unique_grad_fn_name(true_graph.name))\n false_grad_graph = _create_grad_func(\n false_graph, grads, util.unique_grad_fn_name(false_graph.name))\n\n # Replaces output None grads with zeros if at least one branch has non-None\n # grad at that index.\n _create_zeros_for_none_grads([true_graph, false_graph],\n [true_grad_graph, false_grad_graph])\n\n if (true_grad_graph.op_needs_rewrite or false_grad_graph.op_needs_rewrite):\n # Modify 'op' to output the intermediates needed by the grad functions. Note\n # that all needed intermediates are wrapped in optionals. Each optional\n # intermediate output will have a value iff its corresponding branch is\n # taken.\n # NOTE(skyewm): if there are any active sessions, this modification to `op`\n # may make them unrunnable!\n\n if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):\n # XLA does not yet support optionals, so output intermediates directly and\n # make them match via FakeParams, which can be converted to zeros in XLA.\n # TODO(skyewm,jpienaar): can XLA support optionals?\n true_intermediates = true_grad_graph.xla_intermediates\n false_intermediates = false_grad_graph.xla_intermediates\n extra_true_outputs, extra_false_outputs = _make_intermediates_match_xla(\n [true_graph, false_graph], [true_intermediates, false_intermediates])\n else:\n true_intermediates = true_grad_graph.wrapped_intermediates\n false_intermediates = false_grad_graph.wrapped_intermediates\n # Make outputs match by adding none optionals.\n extra_true_outputs, extra_false_outputs = _make_intermediates_match(\n [true_graph, false_graph], [true_intermediates, false_intermediates])\n\n true_graph.outputs.extend(extra_true_outputs)\n false_graph.outputs.extend(extra_false_outputs)\n # TODO(skyewm): indicate it's an internal bug if this fails.\n _check_same_outputs(_COND, [true_graph, false_graph])\n\n true_graph.name += \"_rewritten\"\n false_graph.name += \"_rewritten\"\n\n if_op._set_func_attr(\"then_branch\", util.create_new_tf_function(true_graph))\n if_op._set_func_attr(\"else_branch\",\n util.create_new_tf_function(false_graph))\n if_op._set_type_list_attr(\"Tout\", true_graph.output_types)\n if_op._set_shape_list_attr(\"output_shapes\", true_graph.output_shapes)\n if_op._add_outputs(\n [t.dtype for t in extra_true_outputs],\n [t.shape for t in extra_true_outputs])\n\n # Resolve references to forward graph tensors in grad graphs and ensure\n # they are in-scope, i.e., belong to one of outer graphs of the grad graph.\n true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)\n false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)\n\n # This modifies true_grad_graph and false_grad_graph.\n _make_output_composite_tensors_match(_COND,\n [true_grad_graph, false_grad_graph])\n\n outputs = _build_cond(\n if_op.inputs[0],\n true_grad_graph,\n false_grad_graph,\n true_grad_inputs,\n false_grad_inputs,\n 
building_gradient=True,\n )\n\n # The predicate has no gradient.\n return [None] + outputs",
"def test_grad_func(self):\n pars = np.array(self.spec.central, dtype='float64')\n data = np.copy(self.spec(pars))\n data *= 1.1 # move away from centre to ensure non-zero gradients\n self.spec.set_data(data)\n self.move_pars(pars) # move parameters to check proper partials\n\n ll = ROOT.Double(0)\n grads1 = pars*0\n grads2 = pars*0\n\n self.spec._obj.FdF(pars, ll, grads1)\n self.spec._obj.Gradient(pars, grads2)\n\n np.testing.assert_almost_equal(grads1, grads2)",
"def check_grad_param(grad_dic):\n grad_dtype = grad_dic.get(\"dtype\").lower()\n grad_shape = grad_dic.get(\"shape\")\n op_utils.check_shape(grad_shape)\n op_utils.check_dtype(grad_dtype, [\"float32\"])",
"def _use_sharded_grad_views(self) -> None:\n flat_param = self.flat_param\n self._check_sharded(flat_param)\n grad = self.sharded_grad\n if grad is None:\n for param in chain(flat_param._params, flat_param._shared_params):\n param.grad = None\n return\n self._check_sharded(grad)\n for param, shard_param_info, is_grad_none in zip(\n flat_param._params,\n flat_param._shard_param_infos,\n flat_param._is_grad_none_mask,\n ):\n if not shard_param_info.in_shard:\n param.grad = None\n else:\n numel_in_shard = shard_param_info.numel_in_shard\n if param.requires_grad and not is_grad_none:\n offset = shard_param_info.offset_in_shard\n if self._keep_low_precision_grads or param.dtype != grad.dtype:\n # NOTE: This is a hack using `.data` to side step the\n # check that parameter/gradient dtypes match. Here,\n # `param` has full precision; `grad` has low precision.\n if param.grad is None:\n # `.grad` must have the same shape as `param`\n param.grad = torch.empty_like(param)\n param.grad.data = grad[\n offset : offset + numel_in_shard\n ].reshape(param.shape)\n else:\n param.grad = grad[offset : offset + numel_in_shard].reshape(\n param.shape\n )\n else:\n param.grad = None\n assert flat_param._shared_params is not None\n for i, (param, (_, _, _, prim_param_name, prim_module, _)) in enumerate(\n zip(flat_param._shared_params, flat_param._shared_param_infos)\n ):\n in_sharded_flat_param = hasattr(prim_module, prim_param_name)\n if in_sharded_flat_param and param.requires_grad:\n prim_param = getattr(prim_module, prim_param_name)\n param.grad = prim_param.grad # share the same reference\n else:\n param.grad = None",
"def test_gradient_optionality(simulated_problem, scipy_method):\n simulation, _, problem, _ = simulated_problem\n\n # define a custom optimization method that doesn't use gradients\n def custom_method(initial, bounds, objective_function, _):\n wrapper = lambda x: objective_function(x)[0]\n results = scipy.optimize.minimize(wrapper, initial, method=scipy_method, bounds=bounds)\n return results.x, results.success\n\n # solve the problem when not using gradients and when not computing them\n optimization1 = Optimization(custom_method)\n optimization2 = Optimization(scipy_method, compute_gradient=False)\n results1 = problem.solve(simulation.sigma, simulation.pi, steps=1, optimization=optimization1)\n results2 = problem.solve(simulation.sigma, simulation.pi, steps=1, optimization=optimization2)\n\n # test that all arrays are essentially identical\n for key, result1 in results1.__dict__.items():\n if isinstance(result1, np.ndarray) and result1.dtype != np.object:\n result2 = getattr(results2, key)\n np.testing.assert_allclose(result1, result2, atol=1e-14, rtol=0, err_msg=key)",
"def requires_grad_(self, flag=True):\n for t_in in self.dummy_input:\n if isinstance(t_in, torch.Tensor) and t_in.dtype in torch_float_dtype:\n # only float type can require the gradient\n # enable the auto gradient\n t_in.requires_grad_(flag)\n for para_name in self.weights:\n if self.weights[para_name].dtype in torch_float_dtype:\n self.weights[para_name].requires_grad_(flag)",
"def is_gradient(self):\n return self.container['is_gradient']",
"def prepare_gradient_for_optim(self):\n\n def cast_grad_to_param_dtype_if_needed(flat_param):\n # TODO (rohan-varma): test for full precision with keep_low_precision_grads\n if not self._force_full_precision and self._keep_low_precision_grads:\n _p_assert(flat_param.grad is not None, \"Unexpected None grad!\")\n if flat_param.grad.dtype != self._fwd_bwd_param_dtype:\n flat_param.grad.data = flat_param.grad.to(self._fwd_bwd_param_dtype)\n if self._use_orig_params:\n self._use_sharded_grad_views()\n\n flat_param = self.flat_param\n # TODO (awgu): We should replace these conditional checks to encode\n # the logical intention more directly.\n if hasattr(flat_param, \"_cpu_grad\"):\n # NOTE: This branch includes `NO_SHARD`.\n self._check_sharded(flat_param)\n self._check_on_cpu(flat_param)\n flat_param.grad = flat_param._cpu_grad # type: ignore[attr-defined]\n cast_grad_to_param_dtype_if_needed(flat_param)\n elif hasattr(flat_param, \"_saved_grad_shard\"):\n self._check_sharded(flat_param)\n self._check_on_compute_device(flat_param)\n if flat_param._saved_grad_shard is not None:\n self._check_on_compute_device(flat_param._saved_grad_shard) # type: ignore[attr-defined]\n # If no sharded gradient was computed this iteration, then there is\n # no need to forward `_saved_grad_shard` to `grad`\n if flat_param._post_backward_called: # type: ignore[attr-defined]\n flat_param.grad = flat_param._saved_grad_shard # type: ignore[attr-defined]\n if flat_param.grad is not None:\n cast_grad_to_param_dtype_if_needed(flat_param)\n else:\n _p_assert(\n not self.uses_sharded_strategy\n or not flat_param._post_backward_called, # type: ignore[attr-defined]\n \"All sharded parameters that received a gradient in the \"\n \"post-backward should use `_saved_grad_shard`\",\n )\n # Delete `_saved_grad_shard` since its existence indicates a previous\n # gradient to accumulate with in the post-backward hook\n if hasattr(flat_param, \"_saved_grad_shard\"):\n delattr(flat_param, \"_saved_grad_shard\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an operation with default parameter frequencies and a single parameter works correctly.
|
def test_frequencies_default_single_param(self):

    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_wires = 1
        grad_method = "A"

        def generator(self):
            return -0.2 * qml.PauliX(wires=self.wires)

    x = 0.654
    op = DummyOp(x, wires=0)
    assert op.parameter_frequencies == (0.4,)
|
[
"def test_frequencies_default_multi_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_params = 3\n num_wires = 1\n grad_method = \"A\"\n\n x = [0.654, 2.31, 0.1]\n op = DummyOp(*x, wires=0)\n with pytest.raises(\n qml.operation.OperatorPropertyUndefined, match=\"DummyOp does not have parameter\"\n ):\n op.parameter_frequencies",
"def test_wrong_num_of_num_freqs_per_parameter(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The number of the frequency counts\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_param(param_def):\n assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}\n assert param_def.free == {'f': [0, 1]}\n assert param_def.dependent == {'d': '2 + mean([a, b])'}\n assert param_def.dynamic == {'study': {'e': 'distract / c'}}",
"def test_wrong_typed_num_freqs(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The numbers of frequencies are expected to be integers.\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_wrong_len_num_freqs(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The length of the provided numbers of frequencies\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def example(param):\n\tassert param >0 \n\t# do stuf here...",
"def test_getDefaultParameter() -> None:\n from resistics.config.defaults import getDefaultParameter\n\n name = getDefaultParameter(\"name\")\n assert name == \"default\"\n ncores = getDefaultParameter(\"ncores\")\n assert ncores == -1\n window = getDefaultParameter(\"Window\")\n assert window == {\n \"minwindows\": 5,\n \"windowfactor\": 2.0,\n \"minwindowsize\": 512,\n \"minoverlapsize\": 128,\n \"overlapfraction\": 0.25,\n \"windowsizes\": [],\n \"overlapsizes\": [],\n }\n spectra = getDefaultParameter(\"Spectra\")\n assert spectra == {\n \"specdir\": \"spectra\",\n \"applywindow\": True,\n \"windowfunc\": \"hann\",\n \"ncores\": -1,\n }\n statistics = getDefaultParameter(\"Statistics\")\n assert statistics == {\n \"ncores\": -1,\n \"stats\": [\"coherence\", \"transferFunction\"],\n \"remotestats\": [\"RR_coherence\", \"RR_transferFunction\"],\n }\n solver = getDefaultParameter(\"Solver\")\n assert solver == {\n \"ncores\": -1,\n \"smoothfunc\": \"hann\",\n \"smoothlen\": 9,\n \"intercept\": False,\n \"method\": \"cm\",\n \"OLS\": {},\n \"MM\": {\"weightfnc1\": \"huber\", \"weightfnc2\": \"bisquare\"},\n \"CM\": {},\n }",
"def test_DistMult_args():\n testing_function_with_args('distmult')",
"def test_passed_noDefaultValues(self):\n\n def func(a, b, c=1, d=2, e=3):\n pass\n\n self.assertEqual(self.checkPassed(func, 1, 2, e=7), dict(a=1, b=2, e=7))",
"def test_set_params_Reg_feature_selector():\n feature_selector = Reg_feature_selector()\n feature_selector.set_params(strategy=\"variance\")\n assert feature_selector.strategy == \"variance\"\n feature_selector.set_params(threshold=0.2)\n assert feature_selector.threshold == 0.2\n with pytest.warns(UserWarning) as record:\n feature_selector.set_params(wrong_strategy=\"wrong_strategy\")\n assert len(record) == 1",
"def test_params_module():\n # Get the inputs required by the Scales object\n (profile, disp_phases, z0) = get_sim_data()\n\n\n # Test that the governing parameters are computed correctly\n # First, test a single dispersed phase\n model = params.Scales(profile, disp_phases[1])\n check_get_variables(model, z0, 0.15, 0.21724144538674975,\n 0.001724100901081246, 0.22611661456807244, 0.15)\n\n # Second, try a list of dispersed phases, where the dominant phase is\n # not the first one\n particles = [disp_phases[1], disp_phases[0], disp_phases[2]]\n model = params.Scales(profile, particles)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Third, make sure we get the same answer as the previous case if the\n # particles are in a different order (i.e., the original order)\n model = params.Scales(profile, disp_phases)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Using the latest Scales object, check that the other methods return\n # the correct results. Since these methods only depend on the values\n # of B, N, and us computed by the get_variables() method, only one case\n # needs to be tested\n assert_approx_equal(model.h_T(z0), 346.40139518559153, significant=6)\n assert_approx_equal(model.h_P(z0), 627.57408319500291, significant=6)\n assert_approx_equal(model.h_S(z0, 0.15), 295.45365120553163,\n significant=6)\n assert_approx_equal(model.lambda_1(z0, 0), 0.74523735215223819,\n significant=6)\n assert_approx_equal(model.u_inf_crit(z0), 0.063723667111426671,\n significant=6)",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def test_error_get_parameter_shift_no_recipe(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n num_params = 1\n grad_recipe = (None,)\n\n op = DummyOp(0.1, wires=0)\n with pytest.raises(\n qml.operation.OperatorPropertyUndefined,\n match=\"The operation DummyOp does not have a parameter-shift recipe\",\n ):\n op.get_parameter_shift(0)",
"def test_RESCAL_args():\n testing_function_with_args('rescal')",
"def test_compute_prior_default():\n t = np.arange(0, 5, 0.001)\n c1 = 2 * np.cos(2 * 2 * np.pi * t + 0)\n c2 = 1.1 * np.cos(5 * 2 * np.pi * t + 1)\n S = c1 + c2\n\n kursl = KurslMethod()\n kursl.compute_prior(t, S)\n params = kursl.theta_init\n params[:, 1] = (params[:, 1] + 2 * np.pi) % (2 * np.pi)\n\n oscN = kursl.oscN\n paramN = kursl.paramN\n # Testing for number\n assert oscN == 2, \"2 oscillators\"\n assert paramN == 4, \"4 params per oscillator\"\n assert params.shape == (2, 4), \"Two oscillators (W, ph, A, K)\"\n\n # Testing for frequency\n assert abs(params[0, 0] - 5 * 2 * np.pi) < 0.05, \"Expected {} rad/s, Got {} [rad/s]\".format(\n 5 * 2 * np.pi, params[0, 0]\n )\n assert abs(params[1, 0] - 2 * 2 * np.pi) < 0.05, \"Expected {} rad/s, Got {} [rad/s]\".format(\n 2 * 2 * np.pi, params[1, 0]\n )\n\n # Testing for phase\n assert abs(params[0, 1] - 1) < 0.001, \"Expected phase {}, Got {}.\".format(1, params[0, 1])\n assert abs(params[1, 1] - 0) < 0.001, \"Expected phase {}, Got {}.\".format(0, params[1, 1])\n\n # Testing for amplitude\n assert abs(params[0, 2] - 1.1) < 0.1, \"Expected amp {}, Got {}.\".format(1.1, params[0, 2])\n assert abs(params[1, 2] - 2) < 0.1, \"Expected amp {}, Got {}.\".format(2, params[1, 2])\n\n # Testing for coupling\n assert params[0, 3] == 0, \"First->Second coupling should be 0\"\n assert params[1, 3] == 0, \"Second->First coupling should be 0\"",
"def test_some_parser_defaults(self):\n assert self.args.rate == 250.0\n assert self.args.gain == 1",
"def test_opdef_sig():\n from tensorflow.core.framework import op_def_pb2\n\n custom_opdef_tf = op_def_pb2.OpDef()\n custom_opdef_tf.name = \"MyOpDef\"\n\n arg1_tf = op_def_pb2.OpDef.ArgDef()\n arg1_tf.name = \"arg1\"\n arg1_tf.type_attr = \"T\"\n\n arg2_tf = op_def_pb2.OpDef.ArgDef()\n arg2_tf.name = \"arg2\"\n arg2_tf.type_attr = \"T\"\n\n custom_opdef_tf.input_arg.extend([arg1_tf, arg2_tf])\n\n attr1_tf = op_def_pb2.OpDef.AttrDef()\n attr1_tf.name = \"T\"\n attr1_tf.type = \"type\"\n\n attr2_tf = op_def_pb2.OpDef.AttrDef()\n attr2_tf.name = \"axis\"\n attr2_tf.type = \"int\"\n attr2_tf.default_value.i = 1\n\n custom_opdef_tf.attr.extend([attr1_tf, attr2_tf])\n\n opdef_sig, opdef_func = MetaOpDefLibrary.make_opdef_sig(custom_opdef_tf)\n\n import inspect\n\n # These are standard inputs\n assert opdef_sig.parameters[\"arg1\"].default == inspect._empty\n assert opdef_sig.parameters[\"arg2\"].default == inspect._empty\n # These are attributes that are sometimes required by the OpDef\n assert opdef_sig.parameters[\"axis\"].default == inspect._empty\n # The obligatory tensor name parameter\n assert opdef_sig.parameters[\"name\"].default is None",
"def test_defaults() -> None:\n\n def fun_defaults(name: str, num: int = 5) -> list[str]:\n \"\"\"Function arguments can have default values.\"\"\"\n ret = []\n for i in range(num):\n ret.append(name)\n return ret\n\n exp = [\"damon\", \"damon\"]\n\n assert [\"damon\", \"damon\"] == fun_defaults(name=\"damon\", num=2)\n\n assert [\"damon\", \"damon\", \"damon\", \"damon\", \"damon\"] == fun_defaults(name=\"damon\")",
"def test_staking_parameters_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an operation with default parameter frequencies and multiple parameters raises an error when calling parameter_frequencies.
|
def test_frequencies_default_multi_param(self):

    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_params = 3
        num_wires = 1
        grad_method = "A"

    x = [0.654, 2.31, 0.1]
    op = DummyOp(*x, wires=0)
    with pytest.raises(
        qml.operation.OperatorPropertyUndefined, match="DummyOp does not have parameter"
    ):
        op.parameter_frequencies
|
[
"def test_wrong_num_of_num_freqs_per_parameter(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The number of the frequency counts\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_frequencies_default_single_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n def generator(self):\n return -0.2 * qml.PauliX(wires=self.wires)\n\n x = 0.654\n op = DummyOp(x, wires=0)\n assert op.parameter_frequencies == (0.4,)",
"def test_wrong_typed_num_freqs(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The numbers of frequencies are expected to be integers.\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_wrong_len_num_freqs(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The length of the provided numbers of frequencies\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_frequency_validation():\n sines = [np.sin(f * 2.0 * np.pi * np.arange(0, 1, 1.0 / 78125)) for f in [32, 70]]\n driving_data = np.sum(np.vstack(sines), axis=0)\n with pytest.raises(\n RuntimeError, match=\"Did not manage to find driving peak in spectral search range\"\n ):\n f_drive_guess = 50\n estimate_driving_input_parameters(\n 78125, driving_data, f_drive_guess, window_factor=10, f_search=5, n_fit=1\n )\n\n with pytest.raises(RuntimeError, match=\"Peak is outside frequency search range\"):\n f_drive_guess = 37.1\n estimate_driving_input_parameters(\n 78125, driving_data, f_drive_guess, window_factor=10, f_search=5, n_fit=1\n )\n\n with pytest.raises(RuntimeError, match=\"Peak is outside frequency search range\"):\n f_drive_guess = 26.9\n estimate_driving_input_parameters(\n 78125, driving_data, f_drive_guess, window_factor=10, f_search=5, n_fit=1\n )",
"def test_varied_psf_too_many_parameters_fails():\n ref = simple_psf(lambda x, y: x + y)\n with pytest.raises(PSFParameterValidationError):\n varied_psf(ref)(lambda x, y, c: {'sigma': 0.1})",
"def test_varied_psf_too_few_parameters_fails():\n base = simple_psf(lambda x, y, sigma, mu: x + y)\n with pytest.raises(PSFParameterValidationError):\n varied_psf(base)(lambda: {'sigma': 0.1})",
"def test_too_many_input_parameters():\n with pytest.raises(TypeError):\n output = get_recommendations(\"Titanic\", \"2\", 2)",
"def test_invalid_hop_frequency(self):\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set_modulation(hop_freq_hz=-0.1)\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set_modulation(hop_freq_uhz=-0.1)\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set_modulation(hop_freq_hz=1, hop_freq_uhz=1)",
"def test_freq_cutoffs_wrong_order_raises():\n freq_cutoff_attrib = simple_attr(\"freq_cutoffs\")\n\n with pytest.raises(ValueError):\n vak.config.spect_params.freq_cutoffs_validator(\n instance=None, attribute=freq_cutoff_attrib, value=[10000, 500]\n )",
"def test_freq_cutoffs_not_two_values_raises():\n freq_cutoff_attrib = simple_attr(\"freq_cutoffs\")\n\n with pytest.raises(ValueError):\n vak.config.spect_params.freq_cutoffs_validator(\n instance=None, attribute=freq_cutoff_attrib, value=[0]\n )\n\n with pytest.raises(ValueError):\n vak.config.spect_params.freq_cutoffs_validator(\n instance=None, attribute=freq_cutoff_attrib, value=[0, 10, 100]\n )",
"def test_expectations_frequency(self, valid_profile):\n valid_profile['expectations'][0]['frequency'] = 'infrequently'\n with pytest.raises(FormatError):\n PipelineProfile(valid_profile)",
"def test_modify_parameter_no_occurrence(self):\n params = insightiq_api.Parameters(one=1)\n with self.assertRaises(KeyError):\n params.modify_parameter(name='one', new_value=2, occurrence=200)",
"def test_varied_psf_missing_x_fails():\n ref = simple_psf(lambda x, y: x + y)\n with pytest.raises(PSFParameterValidationError):\n varied_psf(ref)(lambda c, y: {'sigma': 0.1})",
"def test_set_params_Reg_feature_selector():\n feature_selector = Reg_feature_selector()\n feature_selector.set_params(strategy=\"variance\")\n assert feature_selector.strategy == \"variance\"\n feature_selector.set_params(threshold=0.2)\n assert feature_selector.threshold == 0.2\n with pytest.warns(UserWarning) as record:\n feature_selector.set_params(wrong_strategy=\"wrong_strategy\")\n assert len(record) == 1",
"def test_invalid_freq1(self):\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set(freq_hz=-1)",
"def test_invalid_freq2(self):\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set(freq_uhz=-1)",
"def test_assertSimilarFreqs_false(self):\n observed = [10,15,20,10,12,12,13]\n expected = [100,50,10,20,700,2,100]\n self.assertRaises(AssertionError, self.assertSimilarFreqs, \\\n observed, expected)\n self.assertRaises(AssertionError, self.assertSimilarFreqs, \\\n observed, expected, 0.2)\n self._set_suite_pvalue(0.001)\n self.assertRaises(AssertionError, self.assertSimilarFreqs, \\\n observed, expected)",
"def test_param(param_def):\n assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}\n assert param_def.free == {'f': [0, 1]}\n assert param_def.dependent == {'d': '2 + mean([a, b])'}\n assert param_def.dynamic == {'study': {'e': 'distract / c'}}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an operation with parameter frequencies that depend on its instantiated parameter values works correctly.
|
def test_frequencies_parameter_dependent(self, num_param):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operation"""
        num_params = num_param
        num_wires = 1
        grad_method = "A"

        @property
        def parameter_frequencies(self):
            x = self.data
            return [(0.2, _x) for _x in x]

    x = [0.654, 2.31][:num_param]
    op = DummyOp(*x, wires=0)
    f = op.parameter_frequencies
    for i in range(num_param):
        assert f[i] == (0.2, x[i])
|
[
"def test_frequencies_default_multi_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_params = 3\n num_wires = 1\n grad_method = \"A\"\n\n x = [0.654, 2.31, 0.1]\n op = DummyOp(*x, wires=0)\n with pytest.raises(\n qml.operation.OperatorPropertyUndefined, match=\"DummyOp does not have parameter\"\n ):\n op.parameter_frequencies",
"def test_params_module():\n # Get the inputs required by the Scales object\n (profile, disp_phases, z0) = get_sim_data()\n\n\n # Test that the governing parameters are computed correctly\n # First, test a single dispersed phase\n model = params.Scales(profile, disp_phases[1])\n check_get_variables(model, z0, 0.15, 0.21724144538674975,\n 0.001724100901081246, 0.22611661456807244, 0.15)\n\n # Second, try a list of dispersed phases, where the dominant phase is\n # not the first one\n particles = [disp_phases[1], disp_phases[0], disp_phases[2]]\n model = params.Scales(profile, particles)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Third, make sure we get the same answer as the previous case if the\n # particles are in a different order (i.e., the original order)\n model = params.Scales(profile, disp_phases)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Using the latest Scales object, check that the other methods return\n # the correct results. Since these methods only depend on the values\n # of B, N, and us computed by the get_variables() method, only one case\n # needs to be tested\n assert_approx_equal(model.h_T(z0), 346.40139518559153, significant=6)\n assert_approx_equal(model.h_P(z0), 627.57408319500291, significant=6)\n assert_approx_equal(model.h_S(z0, 0.15), 295.45365120553163,\n significant=6)\n assert_approx_equal(model.lambda_1(z0, 0), 0.74523735215223819,\n significant=6)\n assert_approx_equal(model.u_inf_crit(z0), 0.063723667111426671,\n significant=6)",
"def test_frequencies_default_single_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n def generator(self):\n return -0.2 * qml.PauliX(wires=self.wires)\n\n x = 0.654\n op = DummyOp(x, wires=0)\n assert op.parameter_frequencies == (0.4,)",
"def test_wrong_num_of_num_freqs_per_parameter(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The number of the frequency counts\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_param(param_def):\n assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}\n assert param_def.free == {'f': [0, 1]}\n assert param_def.dependent == {'d': '2 + mean([a, b])'}\n assert param_def.dynamic == {'study': {'e': 'distract / c'}}",
"def test_mutate(self):\n a = Alphabet('abc')**2\n m = Probs([0.5,0.25,0.25,0.1,0.8,0.1,0.3,0.6,0.1], a)\n #because of fp math in accumulate, can't predict boundaries exactly\n #so add/subtract eps to get the result we expect\n eps = 1e-6\n # a b b a c c a b c\n seq = array([0,1,1,0,2,2,0,1,2])\n random_vec = array([0,.01,.8-eps,1,1,.3,.05,.9+eps,.95])\n self.assertEqual(m.mutate(seq, random_vec), \\\n # a a b c c a a c c\n array([0,0,1,2,2,0,0,2,2]))\n #check that freq. distribution is about right\n seqs = array([m.mutate(seq) for i in range(1000)])\n #WARNING: bool operators return byte arrays, whose sums wrap at 256!\n zero_count = asarray(seqs == 0, 'int32')\n sums = sum(zero_count, axis=0)\n #expect: 500, 100, 100, 500, 300, 300, 500, 100, 300\n #std dev = sqrt(npq), which is sqrt(250), sqrt(90), sqrt(210)\n means = array([500, 100, 100, 500, 300, 300, 500, 100, 300])\n var = array([250, 90, 90, 250, 210, 210, 250, 90, 210])\n three_sd = 3 * sqrt(var)\n for obs, exp, sd in zip(sums, means, three_sd):\n assert exp - 2*sd < obs < exp + 2*sd",
"def test_measure_parameter(parameters, measure_param, sweep_values):\n p0, p1 = parameters\n m = measure_param[0]\n v0, v1 = sweep_values\n\n def test():\n sweep_object = Nest([ParameterSweep(p0, lambda: v0), ParameterWrapper(m), ParameterSweep(p1, lambda: v1)])\n parameter_table = sweep_object.parameter_table\n\n assert parameter_table.table_list[0][\"independent_parameters\"] == [\n (p0.full_name, p0.unit), (p1.full_name, p1.unit)\n ]\n assert parameter_table.table_list[0][\"dependent_parameters\"][0] == (m.full_name, m.unit)\n\n list(sweep_object)\n\n def compare():\n\n for value0 in v0:\n p0.set(value0)\n m()\n for value1 in v1:\n p1.set(value1)\n\n equivalence_test(test, compare)",
"def test_compute_prior_default():\n t = np.arange(0, 5, 0.001)\n c1 = 2 * np.cos(2 * 2 * np.pi * t + 0)\n c2 = 1.1 * np.cos(5 * 2 * np.pi * t + 1)\n S = c1 + c2\n\n kursl = KurslMethod()\n kursl.compute_prior(t, S)\n params = kursl.theta_init\n params[:, 1] = (params[:, 1] + 2 * np.pi) % (2 * np.pi)\n\n oscN = kursl.oscN\n paramN = kursl.paramN\n # Testing for number\n assert oscN == 2, \"2 oscillators\"\n assert paramN == 4, \"4 params per oscillator\"\n assert params.shape == (2, 4), \"Two oscillators (W, ph, A, K)\"\n\n # Testing for frequency\n assert abs(params[0, 0] - 5 * 2 * np.pi) < 0.05, \"Expected {} rad/s, Got {} [rad/s]\".format(\n 5 * 2 * np.pi, params[0, 0]\n )\n assert abs(params[1, 0] - 2 * 2 * np.pi) < 0.05, \"Expected {} rad/s, Got {} [rad/s]\".format(\n 2 * 2 * np.pi, params[1, 0]\n )\n\n # Testing for phase\n assert abs(params[0, 1] - 1) < 0.001, \"Expected phase {}, Got {}.\".format(1, params[0, 1])\n assert abs(params[1, 1] - 0) < 0.001, \"Expected phase {}, Got {}.\".format(0, params[1, 1])\n\n # Testing for amplitude\n assert abs(params[0, 2] - 1.1) < 0.1, \"Expected amp {}, Got {}.\".format(1.1, params[0, 2])\n assert abs(params[1, 2] - 2) < 0.1, \"Expected amp {}, Got {}.\".format(2, params[1, 2])\n\n # Testing for coupling\n assert params[0, 3] == 0, \"First->Second coupling should be 0\"\n assert params[1, 3] == 0, \"Second->First coupling should be 0\"",
"def test_wrong_len_num_freqs(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The length of the provided numbers of frequencies\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_single_qubit_parameters(self, init_state, op, func, theta, rep, tol):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(theta, wires=0)]\n dev.execute(queue, [], {})\n\n res = dev._state().numpy().flatten()\n expected = func(theta) @ state\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def test_wrong_typed_num_freqs(fun, param, num_freq):\n\n opt = RotosolveOptimizer()\n\n with pytest.raises(ValueError, match=\"The numbers of frequencies are expected to be integers.\"):\n opt.step(fun, *param, num_freqs=num_freq)",
"def test_dependent(param_def):\n param = {'a': 1, 'b': 2}\n param = param_def.eval_dependent(param)\n assert param == {'a': 1, 'b': 2, 'd': 3.5}",
"def test_ContinuousModel_multivariate():\n\n class MyModel(ContinuousModel):\n def __init__(self):\n self.weight = Parameter([5, 3], name=\"Weight\")\n self.bias = Parameter([1, 3], name=\"Bias\")\n self.std = ScaleParameter([1, 3], name=\"Std\")\n\n def __call__(self, x):\n return Normal(x @ self.weight() + self.bias(), self.std())\n\n # Instantiate the model\n model = MyModel()\n\n # Data\n x = np.random.randn(100, 5).astype(\"float32\")\n w = np.random.randn(5, 3).astype(\"float32\")\n y = x @ w + 1\n\n # Fit the model\n model.fit(x, y, batch_size=50, epochs=2, lr=0.01)\n\n # pred_dist_plot should not work with nonscalar output\n with pytest.raises(NotImplementedError):\n model.pred_dist_plot(x[:10, :], n=10)\n\n # predictive_prc should not work with nonscalar output\n with pytest.raises(NotImplementedError):\n model.predictive_prc(x[:10, :], y[:10, :], n=10)",
"def test_parameter_sweep(parameters, sweep_values):\n def test():\n p = parameters[0]\n v = sweep_values[0]\n\n sweep_object = ParameterSweep(p, lambda: v)\n parameter_table = sweep_object.parameter_table\n\n assert parameter_table.table_list[0][\"independent_parameters\"][0] == (\n p.full_name, p.unit\n )\n\n for i in ParameterSweep(p, lambda: v):\n assert i[p.name] == p()\n\n def compare():\n p = parameters[0]\n v = sweep_values[0]\n\n for value in v:\n p.set(value)\n\n equivalence_test(test, compare)",
"def test_parameter(beam, parameter):\n twiss_path = beam1_coupling_path() if \"coupling\" in beam else beam_path(beam)\n relative_error = 0.1\n randomize = [VALUES, ERRORS]\n\n results = fake_measurement(\n twiss=twiss_path,\n randomize=randomize,\n relative_errors=[relative_error],\n parameters=[parameter],\n seed=2022, # gaussian distribution test is sensitive to seed used!\n )\n\n assert len(results)\n assert all(name in results.keys() for name in OUTPUTNAMES_MAP[parameter])\n\n name_tester_map = {\n TOTAL_PHASE_NAME: _test_total_phase,\n PHASE_NAME: _test_phase,\n BETA_NAME: _test_beta,\n AMP_BETA_NAME: _test_beta,\n DISPERSION_NAME: _test_disp,\n NORM_DISP_NAME: _test_norm_disp,\n F1010_NAME[:-1]: _test_coupling,\n F1001_NAME[:-1]: _test_coupling,\n }\n\n for name, df in results.items():\n plane = parameter[-1]\n assert S in df.columns\n if plane in \"XY\":\n assert f\"{PHASE_ADV}{plane}{MDL}\" in df.columns\n name_tester_map[name[:-1]](df, plane, relative_error)",
"def test_chi_squared():\n assert frequency.chi_squared({'a': 2, 'b': 3}, {'a': 2, 'b': 3}) == 0",
"def test_probs(self):\n u = self.abUsage([1,3])\n self.assertEqual(u.probs(), self.abUsage([0.25,0.75]))",
"def test_measurement_function(parameters, measurements, sweep_values):\n p0, p1 = parameters\n m = measurements[0]\n v0, v1 = sweep_values\n\n def test():\n sweep_object = Nest([ParameterSweep(p0, lambda: v0), FunctionWrapper(m), ParameterSweep(p1, lambda: v1)])\n parameter_table = sweep_object.parameter_table\n assert parameter_table.table_list[0][\"independent_parameters\"] == [\n (p0.full_name, p0.unit), (p1.full_name, p1.unit)\n ]\n assert parameter_table.table_list[0][\"dependent_parameters\"][0] == (m.name, \"hash\")\n\n list(sweep_object)\n\n def compare():\n\n for value0 in v0:\n p0.set(value0)\n m()[0]()\n for value1 in v1:\n p1.set(value1)\n\n equivalence_test(test, compare)",
"def test_varied_psf_too_many_parameters_fails():\n ref = simple_psf(lambda x, y: x + y)\n with pytest.raises(PSFParameterValidationError):\n varied_psf(ref)(lambda x, y, c: {'sigma': 0.1})",
"def test_param_changer_changes(self):\n i = Island()\n loc = (1, 1)\n s = Herbivore(i, loc)\n old_param = s.parameters[\"F\"]\n s.param_changer({\"F\" : 20})\n new_param = s.parameters[\"F\"]\n\n assert old_param != new_param\n s.param_changer({\"F\": 10})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that control_wires defaults to an empty Wires object.
|
def test_control_wires(self):
class DummyOp(qml.operation.Operation):
r"""Dummy custom operation"""
num_wires = 1
grad_method = None
op = DummyOp(1.0, wires=0, id="test")
assert op.control_wires == qml.wires.Wires([])
|
[
"def test_check_wires(self, wires, target):\n res = check_wires(wires=wires)\n assert res == target",
"def test_uninitialized_setting(self):\n ...",
"def test_wise_config_constructor_defaults(self) -> None:\n wise_config = WiseConfig()\n\n self.assertIsNone(wise_config.weight)\n self.assertIsNone(wise_config.activation)",
"def test_default_work_empty():\n assert Work().empty()",
"def test_empty_default(self):\n assert self.reg.defaults.get(Type1) is None",
"def test_non_unique_wires(self):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(qml.wires.WireError, match=\"Wires must be unique\"):\n DummyOp(0.5, wires=[1, 1], do_queue=False)",
"def test_default_props():\n #==========================================================================\n # assert w_grid.active == True # timespan is as big as possible\n # assert w_grid.uncertain_duration == 3.0\n # assert w_grid.uncertain_time_delay == 0\n # assert w_grid.uncertain_speed_scale == 2\n # assert w_grid.uncertain_angle_scale == 0.4\n # assert w_grid.uncertain_angle_units == 'rad'\n #==========================================================================\n assert w_grid.wind_scale == 1\n assert w_grid.extrapolate == False\n assert w_grid.time_offset == 0\n\n _defaults(w_grid)",
"def test_initialization_owltools_default(self):\n\n owl_nets = OwlNets(kg_construct_approach='subclass',\n graph=self.graph,\n write_location=self.write_location,\n full_kg=self.kg_filename)\n\n self.assertEqual(owl_nets.owl_tools, './pkt_kg/libs/owltools')\n\n return None",
"def remove_wires(self):\n self.wires['wires'] = []\n self.wires['wires_started'] = []",
"def test_nothing_to_validate(self):\n with mn.model() as m:\n mn.constant('X7Allowed', False)\n mn.constant('X5Allowed', False)\n mn.constant('X4Allowed', False)\n\n self.assertEqual(m.validate_all(), {'success': True})",
"def test_empty(self):\n kit = Kit()\n\n self.assertFalse(kit.is_complete())\n self.assertFalse(kit.is_sufficient())\n self.assertFalse(kit.unpack('.'))\n self.assertEqual(kit.master_key(), None)\n self.assertEqual(kit.locker, None)\n self.assertEqual(kit.uuid, None)\n self.assertEqual(kit.participants, 0)\n self.assertEqual(kit.threshold, 0)\n self.assertEqual(kit.mac, None)\n self.assertEqual(kit.any_share, None)",
"def test_setup(self):\n assert self.cosm_trade_handler.setup() is None\n self.assert_quantity_in_outbox(0)",
"def test_no_lights_does_not_create(self):\n with assert_setup_component(0, \"light\"):\n assert setup.setup_component(\n self.hass, \"light\", {\"light\": {\"platform\": \"template\"}}\n )\n\n self.hass.block_till_done()\n self.hass.start()\n self.hass.block_till_done()\n\n assert self.hass.states.all() == []",
"def test_robot_not_triggered(self):\n self.robot.handler_signal.send('tests hi foo')\n self.assertEqual(self.robot.adapters['null'].responses, [])",
"def test_check_wires_exception(self, wires):\n with pytest.raises(ValueError, match=\"wires must be a positive integer\"):\n check_wires(wires=wires)",
"def remove_wires(self):\n self.wires['wires'] = []\n self.wires['wires_ended'] = []",
"def test_init_default(self):\n eater = DumplingEater()\n\n assert eater.name == 'nameless_eater'\n assert eater.chef_filter is None\n assert eater.hub_ws == 'ws://{}:{}'.format(HUB_HOST, HUB_OUT_PORT)",
"def test_configure_metrics_default_metrics_not_installed():\n with patch.object(PubSubSendMetrics, \"get_metrics\") as mocked:\n mocked.return_value = None\n\n graph = create_object_graph(\"example\", testing=True)\n assert_that(graph.pubsub_send_metrics.enabled, is_(equal_to(False)))",
"def clear_victories(self):\r\n GameSimulator.WON = 0",
"def test_blank_transaction_initialized(blank_tester):\n assert blank_tester.orig_usd is None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that is_hermitian defaults to False for an Operator
|
def test_is_hermitian(self):
class DummyOp(qml.operation.Operation):
r"""Dummy custom operation"""
num_wires = 1
grad_method = None
op = DummyOp(wires=0)
assert op.is_hermitian is False
|
[
"def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)",
"def test_operator_get_operator(self):\n pass",
"def assertCalcFalse(self, calc, context=None):\n context = context or {}\n self.assertFalse(calc.resolve(context))\n calc.negate = not calc.negate\n self.assert_(calc.resolve(context))",
"def test_operator_create_operator(self):\n pass",
"def test_returns_false_if_encapsulated_predicates_are_false(self):\n self.some_left_predicate.return_value = False\n self.some_right_predicate.return_value = False\n self.assertFalse(self.or_predicate(self.get_response_mock, self.request_mock))",
"def isOp(s):\n return getOp(s) != None",
"def test_returns_false_if_left_encapsulated_predicate_is_false(self):\n self.some_left_predicate.return_value = False\n self.assertFalse(self.and_predicate(self.get_response_mock, self.request_mock))",
"def booleanOps(*args, **kwargs):\n \n pass",
"async def is_(info):\n anode, bnode = info.args\n a, b = info.abstracts\n at = a.xtype()\n bt = b.xtype()\n if at is Nil or bt is Nil:\n return Constant(at is bt)\n elif at is NotImplementedType or bt is NotImplementedType:\n return Constant(at is bt)\n elif at is Bool and bt is Bool:\n return info.graph.apply(P.bool_eq, anode, bnode)\n elif at is Bool or bt is Bool:\n return Constant(False)\n else:\n raise MyiaTypeError(\n f'The operator \"is\" is only implemented for booleans ' +\n f'and singletons such as None or NotImplemented.'\n )",
"def test_negate_operator(self):\n actual = search_queries.negate_operator('=')\n self.assertEqual('!=', actual)\n\n actual = search_queries.negate_operator('!=')\n self.assertEqual('=', actual)\n\n actual = search_queries.negate_operator('<')\n self.assertEqual('>=', actual)\n\n actual = search_queries.negate_operator('<=')\n self.assertEqual('>', actual)\n\n actual = search_queries.negate_operator('>')\n self.assertEqual('<=', actual)\n\n actual = search_queries.negate_operator('>=')\n self.assertEqual('<', actual)",
"def _is_chainable(self, operator, lastArg=None):\n non_chaining_operators = [\"and\", \"or\", \"remote\", \"file\", \"re\",\"post\"]\n if (lastArg is not None and type(lastArg) == dict and\n '@value' not in lastArg and\n '@type' not in lastArg and\n 'value' not in lastArg\n ):\n for op in non_chaining_operators:\n if op in operator:\n return False\n return True\n else:\n return False",
"def run_operator_tests(self):\n return self.get_boolean('run_operator_tests')",
"def is_hermitian(self):\n for o1, o2 in combinations(self.factors, r=2):\n if qml.wires.Wires.shared_wires([o1.wires, o2.wires]):\n return False\n return all(op.is_hermitian for op in self.factors)",
"def _isOperator(self, token):\n token = token.strip()\n \n if(token == \"+\"):\n return True\n\n if(token == \"*\"):\n return True\n \n return False",
"def test_not(self):\n crit = qml.BooleanFn(lambda x: x < 4)\n ncrit = ~crit\n assert crit(-2) and not ncrit(-2)\n assert not crit(10) and ncrit(10)",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def test_negate_calls_not_predicate(self):\n with patch('uncertainty.conditions.NotPredicate') as not_predicate_mock:\n -self.predicate\n not_predicate_mock.assert_called_once_with(self.predicate)",
"def is_hermitian(self) -> bool:\n return is_hermitian(self.tensor(reshape_to_square_matrix=True))",
"def test_operator_post_authorization_for_operator(self):\n pass",
"def test_doesnt_call_right_encapsulated_predicate_if_left_is_false(self):\n self.some_left_predicate.return_value = False\n self.and_predicate(self.get_response_mock, self.request_mock)\n self.some_right_predicate.assert_not_called()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the return_type of an observable is initially None
|
def test_observable_return_type_none(self):
class DummyObserv(qml.operation.Observable):
r"""Dummy custom observable"""
num_wires = 1
grad_method = None
assert DummyObserv(0, wires=[1]).return_type is None
|
[
"def test_builtins_cast_return_none():\n assert m.return_none_string() is None\n assert m.return_none_char() is None\n assert m.return_none_bool() is None\n assert m.return_none_int() is None\n assert m.return_none_float() is None\n assert m.return_none_pair() is None",
"def test_return_type_none(self):\n @typechecked\n def foo() -> None:\n return 'a'\n\n exc = pytest.raises(TypeError, foo)\n assert str(exc.value) == 'type of the return value must be NoneType; got str instead'",
"def test_Nothing_observation(model):\n assert make_obs(ecole.observation.Nothing(), model) is None",
"def test_success_to_nothing():\n assert result_to_maybe(Success(None)) == Nothing",
"def test_valid_none():\n returned_value = object_._convert_relationship(value=None)\n\n assert returned_value is None",
"def not_none(x: Optional[Any]) -> bool:\n return x is not None",
"def is_none(x: Optional[Any]) -> bool:\n return x is None",
"def ll_assert_not_none(x):\n assert x is not None, \"ll_assert_not_none(%r)\" % (x,)\n return x",
"def accepts_none(fn):\n fn.accepts_none = True \n return fn",
"def is_not_none(x) -> bool:\n return x is not None",
"def _handle_optional(type_):\n if typing.get_origin(type_) is typing.Union:\n args = typing.get_args(type_)\n if len(args) == 2 and type(None) in args:\n return next(iter(set(args) - {type(None)}))\n return None",
"async def infer_type_not(engine, x):\n x_t = await x['type']\n if x_t != Bool():\n raise MyiaTypeError('Expected Bool for not.')\n return Bool()",
"def testNoneTypes(self):\n self.assertIsNone(getAtomType(self.mol5.atoms[0],\n self.mol5.getBonds(self.mol5.atoms[0])))\n self.assertIsNone(getAtomType(self.mol6.atoms[0],\n self.mol6.getBonds(self.mol6.atoms[0])))\n self.assertIsNone(getAtomType(self.mol7.atoms[0],\n self.mol7.getBonds(self.mol7.atoms[0])))\n self.assertIsNone(getAtomType(self.mol8.atoms[0],\n self.mol8.getBonds(self.mol8.atoms[0])))",
"def test_none(error = N_(u'Unexpected value')):\n def none(value, state = None):\n if value is None:\n return value, None\n if state is None:\n state = states.default_state\n return value, state._(error) if strings.is_basestring(error) else error\n return none",
"def Should_Not_Be_Type_None(var):\n if var is None:\n raise AssertionError(\"the variable passed was type NoneType\")\n return \"PASS\"",
"def has_outcome(self, outcome, null=True):\n raise NotImplementedError",
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def is_returned(self):\n\t\treturn self.return_time is not None",
"def _condnone(self, lhs, rhs):\n return type(lhs) == type(None) and type(rhs) == type(None)",
"def test_one_and_none(self):\n assert bu.one_and_none(None, \"snek\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the Observable class inherits from an Operator, not from an Operation
|
def test_observable_is_not_operation_but_operator(self):
assert issubclass(qml.operation.Observable, qml.operation.Operator)
assert not issubclass(qml.operation.Observable, qml.operation.Operation)
|
[
"def test_multiple_inheritance():\n\n class SomeBaseClass(object):\n pass\n\n class SomeBaseAndObservable(SomeBaseClass, Observable):\n def __init__(self):\n super(SomeBaseAndObservable, self).__init__()\n\n def test(self):\n self.trigger('some', True)\n\n def some_test(data):\n nose.assert_true(data)\n\n obj = SomeBaseAndObservable()\n obj.on('some', some_test)\n\n obj.test()",
"def test_command_eq_for_non_command_objects(self):\n assert Command(\"device\", \"command name\") != 1\n assert Command(\"device\", \"command name\") != object()",
"def _checkSpecialOperators(self, fromReading, toReading, args, options):\n # check options, don't overwrite existing operators\n for arg in args:\n if isinstance(arg, readingoperator.ReadingOperator):\n if arg.READING_NAME == fromReading \\\n and 'sourceOptions' in options:\n raise ValueError(\n \"source reading operator options given, \" \\\n + \"but a source reading operator already exists\")\n if arg.READING_NAME == toReading \\\n and 'targetOptions' in options:\n raise ValueError(\n \"target reading operator options given, \" \\\n + \"but a target reading operator already exists\")\n # create operators for options\n if 'sourceOptions' in options:\n readingOp = self._getReadingOperatorInstance(fromReading,\n **options['sourceOptions'])\n del options['sourceOptions']\n\n # add reading operator to converter\n if 'sourceOperators' not in options:\n options['sourceOperators'] = []\n options['sourceOperators'].append(readingOp)\n\n if 'targetOptions' in options:\n readingOp = self._getReadingOperatorInstance(toReading,\n **options['targetOptions'])\n del options['targetOptions']\n\n # add reading operator to converter\n if 'targetOperators' not in options:\n options['targetOperators'] = []\n options['targetOperators'].append(readingOp)",
"def is_broad(self, x: object, **kwargs):\n return x is ANYTHING",
"def test_subclass_of_base(self):\n self.assertTrue(issubclass(Square, Rectangle))",
"def inherits_from(obj, a_class):\n if isinstance(obj, a_class) and type(obj) is not a_class:\n\n return True\n else:\n return False",
"def test_base_class_pyloperator_creation(self, single_gate):\n with pytest.raises(TypeError):\n # should raise the following error [TypeError: Can't instantiate abstract class pyLOperator with abstract methods _num_qubits_, _qid_shape_, get_resouces, num_qubits]\n plo = po(single_gate)",
"def __ne__(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoType___ne__(self, type)",
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_direct_initialization_fails(self):\n with pytest.raises(\n TypeError, match=\"Can't instantiate abstract class CompositeOp with abstract methods\"\n ):\n _ = CompositeOp(*self.simple_operands) # pylint:disable=abstract-class-instantiated",
"def _cast_other(binary_op):\r\n def cast_op(self, other):\r\n \"\"\"A wrapped binary operator that can handle non-Expression arguments.\r\n \"\"\"\r\n other = self.cast_to_const(other)\r\n return binary_op(self, other)\r\n return cast_op",
"def _is_chainable(self, operator, lastArg=None):\n non_chaining_operators = [\"and\", \"or\", \"remote\", \"file\", \"re\",\"post\"]\n if (lastArg is not None and type(lastArg) == dict and\n '@value' not in lastArg and\n '@type' not in lastArg and\n 'value' not in lastArg\n ):\n for op in non_chaining_operators:\n if op in operator:\n return False\n return True\n else:\n return False",
"def is_op_quantizable(op: tf.Operation) -> bool:\n\n if op.outputs:\n if op.outputs[0].dtype in QUANT_ALLOWED_DTYPES:\n return True\n\n return False",
"def test_propagator_type_consistency(self):",
"def inherits_from(obj, a_class):\n if type(obj) != a_class:\n if isinstance(obj, a_class):\n return True\n return False",
"def test_op_successors_observables_only(self, opqueue_test_node):\n\n observable_successors = opqueue_test_node._op_successors(0, only=\"E\")\n\n assert opqueue_test_node.ops[0] not in observable_successors\n assert opqueue_test_node.ops[1] not in observable_successors\n assert opqueue_test_node.ops[4] in observable_successors",
"def ODataIsA (self):\n ################################################################\n # Allow derived types\n return Obit.ODataIsA(self.cast(myClass))",
"def test_constructor_with_operands(self):\n Xor(BoolVar(), TrafficLightVar())",
"def test_Place_is_subclass(self):\n self.assertTrue(issubclass(self.place.__class__, BaseModel), True)",
"def test_interface_typecheck_doesnt_inherit(self):\n class Zeroable(metaclass=TypeCheckableMeta):\n \"\"\"In most cases, this indicates a container that can be 'empty'.\"\"\"\n @abstractclassmethod\n def zero(cls):\n return NotImplemented\n\n @classmethod\n def __subclasshook__(cls, subclass):\n if cls is Zeroable:\n return meets_interface(subclass, Zeroable)\n return NotImplemented\n\n class Integer(int, Zeroable):\n @classmethod\n def zero(cls):\n return cls(0)\n\n class PositiveInteger(Integer):\n def __new__(cls, *args, **kwargs):\n self = super().__new__(cls, *args, **kwargs)\n if self <= 0:\n raise ValueError(\"Integer must be positive\")\n return self\n\n i1 = Integer(1)\n p1 = PositiveInteger(1)\n\n self.assertFalse(isinstance(i1, PositiveInteger))\n self.assertTrue(isinstance(p1, PositiveInteger))\n self.assertTrue(isinstance(i1, Integer))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the Observable class inherits from an Operator class as well
|
def test_observable_is_operation_as_well(self):
class DummyObserv(qml.operation.Observable, qml.operation.Operation):
r"""Dummy custom observable"""
num_wires = 1
grad_method = None
assert issubclass(DummyObserv, qml.operation.Operator)
assert issubclass(DummyObserv, qml.operation.Observable)
assert issubclass(DummyObserv, qml.operation.Operation)
|
[
"def test_multiple_inheritance():\n\n class SomeBaseClass(object):\n pass\n\n class SomeBaseAndObservable(SomeBaseClass, Observable):\n def __init__(self):\n super(SomeBaseAndObservable, self).__init__()\n\n def test(self):\n self.trigger('some', True)\n\n def some_test(data):\n nose.assert_true(data)\n\n obj = SomeBaseAndObservable()\n obj.on('some', some_test)\n\n obj.test()",
"def _checkSpecialOperators(self, fromReading, toReading, args, options):\n # check options, don't overwrite existing operators\n for arg in args:\n if isinstance(arg, readingoperator.ReadingOperator):\n if arg.READING_NAME == fromReading \\\n and 'sourceOptions' in options:\n raise ValueError(\n \"source reading operator options given, \" \\\n + \"but a source reading operator already exists\")\n if arg.READING_NAME == toReading \\\n and 'targetOptions' in options:\n raise ValueError(\n \"target reading operator options given, \" \\\n + \"but a target reading operator already exists\")\n # create operators for options\n if 'sourceOptions' in options:\n readingOp = self._getReadingOperatorInstance(fromReading,\n **options['sourceOptions'])\n del options['sourceOptions']\n\n # add reading operator to converter\n if 'sourceOperators' not in options:\n options['sourceOperators'] = []\n options['sourceOperators'].append(readingOp)\n\n if 'targetOptions' in options:\n readingOp = self._getReadingOperatorInstance(toReading,\n **options['targetOptions'])\n del options['targetOptions']\n\n # add reading operator to converter\n if 'targetOperators' not in options:\n options['targetOperators'] = []\n options['targetOperators'].append(readingOp)",
"def test_operator_get_operator(self):\n pass",
"def test_operator_create_operator(self):\n pass",
"def supports_aux_operators(self) -> bool:\n return True",
"def test_command_eq_for_non_command_objects(self):\n assert Command(\"device\", \"command name\") != 1\n assert Command(\"device\", \"command name\") != object()",
"def is_op_quantizable(op: tf.Operation) -> bool:\n\n if op.outputs:\n if op.outputs[0].dtype in QUANT_ALLOWED_DTYPES:\n return True\n\n return False",
"def test_constructor_with_operands(self):\n Xor(BoolVar(), TrafficLightVar())",
"def check_operator(operator, column_object):\n if operator not in OPERATORS:\n raise ValueError(f\"Operator {operator} is not supported.\")\n \n if not isinstance(column_object, Column):\n raise TypeError(f\"Type {type(column_object)} is not a Column.\")\n\n column_type = column_object.type.python_type\n\n if column_type not in TYPES:\n raise ValueError(f\"Column '{column_object.name}' \"\n f\"has an unsupported type, {column_type}.\")\n if operator in NUMERIC_OPERATORS and \\\n column_type not in COMPARABLE_TYPES:\n raise ValueError(f\"Column '{column_object.name}' \"\n f\"is not comparable with '{operator}'.\")",
"def test_base_class_pyloperator_creation(self, single_gate):\n with pytest.raises(TypeError):\n # should raise the following error [TypeError: Can't instantiate abstract class pyLOperator with abstract methods _num_qubits_, _qid_shape_, get_resouces, num_qubits]\n plo = po(single_gate)",
"def _is_chainable(self, operator, lastArg=None):\n non_chaining_operators = [\"and\", \"or\", \"remote\", \"file\", \"re\",\"post\"]\n if (lastArg is not None and type(lastArg) == dict and\n '@value' not in lastArg and\n '@type' not in lastArg and\n 'value' not in lastArg\n ):\n for op in non_chaining_operators:\n if op in operator:\n return False\n return True\n else:\n return False",
"def is_broad(self, x: object, **kwargs):\n return x is ANYTHING",
"def pipe(self, *operators: Callable[['Observable'], 'Observable']) -> 'Observable':\n from ..pipe import pipe\n return pipe(*operators)(self)",
"def test_op_successors_observables_only(self, opqueue_test_node):\n\n observable_successors = opqueue_test_node._op_successors(0, only=\"E\")\n\n assert opqueue_test_node.ops[0] not in observable_successors\n assert opqueue_test_node.ops[1] not in observable_successors\n assert opqueue_test_node.ops[4] in observable_successors",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def self(accessing_obj, accessed_obj, *args, **kwargs):\r\n return accessing_obj.typeclass == accessed_obj.typeclass",
"def test_type_check(self):\n first = CustomEntity(\"first_entity\")\n second = CustomEntity(\"second_entity\")\n # Check that we can connect first.out to second.in\n dg.plug(first.signal(\"out_double\"), second.signal(\"in_double\"))\n\n # Check that we can't connect first.out to second\n with self.assertRaises(TypeError) as cm_in:\n dg.plug(first.signal(\"out_double\"), second)\n self.assertEqual(\n str(cm_in.exception), ERR % (\"SignalTimeDependentDouble\", \"CustomEntity\")\n )\n\n # Check that we can't connect first to second.in\n with self.assertRaises(TypeError) as cm_out:\n dg.plug(first, second.signal(\"in_double\"))\n self.assertEqual(\n str(cm_out.exception), ERR % (\"CustomEntity\", \"SignalPtrDouble\")\n )",
"def _filter_operator_classes(\n self, class_def_nodes: List[ast.ClassDef], import_from_nodes: List[ast.ImportFrom]\n ) -> List[ast.ClassDef]:\n operator_classes = []\n classes_to_analyze = []\n\n # Get class names for package imports that match one of the following patterns,\n # indicating that this class does match a known Operator class as defined in\n # a provider package or core Airflow package\n regex_patterns = [\n re.compile(r\"airflow\\.providers\\.[a-zA-Z0-9_]+\\.operators\"), # airflow.providers.*.operators (provider)\n re.compile(r\"airflow\\.operators\\.\"), # airflow.operators.* (core Airflow package)\n ]\n operator_bases = [\"BaseOperator\"]\n for module in import_from_nodes:\n if any(regex.match(module.module) for regex in regex_patterns):\n operator_bases.extend([name.name for name in module.names])\n\n # Determine whether each class directly extends the BaseOperator or whether it\n # must be further analyzed for indirect extension\n for node in class_def_nodes:\n if not hasattr(node, \"bases\") or len(node.bases) == 0:\n # Class does not extend other classes; do not add to Operator list\n continue\n if any(base.id in operator_bases for base in node.bases):\n # At least one base class either directly extends the BaseOperator or\n # indirectly extends it from an Operator class imported from another package\n operator_classes.append(node)\n continue\n # This class doesn't extend the BaseOperator directly or from an imported module\n # and must be further analyzed to determine indirect extension\n classes_to_analyze.append(node)\n\n # Identify classes that indirectly extend the BaseOperator from Operator classes\n # defined in the same file\n analysis_incomplete = len(classes_to_analyze) != 0\n while analysis_incomplete:\n analysis_incomplete = False\n for node in classes_to_analyze:\n if any(base.id in [op_class.name for op_class in operator_classes] for base in node.bases):\n # This class directly extends an Operator class defined in this file\n operator_classes.append(node)\n classes_to_analyze.remove(node)\n\n # The classes still present in classes_to_analyze must be\n # re-analyzed with the addition of the new Operator class\n analysis_incomplete = True\n break\n\n return operator_classes",
"def test_subclass_of_base(self):\n self.assertTrue(issubclass(Square, Rectangle))",
"def check_that_operator_can_be_applied_to_produces_items(op, g1, g2):\n g1_tmp_copy = g1.spawn()\n g2_tmp_copy = g2.spawn()\n sample_item_1 = next(g1_tmp_copy)\n sample_item_2 = next(g2_tmp_copy)\n try:\n op(sample_item_1, sample_item_2)\n except TypeError:\n raise TypeError(f\"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} \"\n f\"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks that the TensorN operator was constructed correctly when multiple modes were specified.
|
def test_tensor_n_multiple_modes(self):
cv_obs = qml.TensorN(wires=[0, 1])
assert isinstance(cv_obs, qml.TensorN)
assert cv_obs.wires == Wires([0, 1])
assert cv_obs.ev_order is None
|
[
"def test_init_fail(self):\n # ------ the following tests should FAIL\n correct_shape = (2, 4, 8)\n size = reduce(lambda x, y: x * y, correct_shape)\n order = len(correct_shape)\n correct_data = np.ones(size).reshape(correct_shape)\n\n # ------ tests that Tensor object can be created only from numpy array\n # can not create from list\n with pytest.raises(TypeError):\n incorrect_data = [[1, 2, 3], [4, 5, 6]]\n Tensor(array=incorrect_data)\n\n # can not create from another Tensor\n with pytest.raises(TypeError):\n incorrect_data = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))\n Tensor(array=incorrect_data)\n\n # ------ tests for custom mode names being incorrectly defined\n # mode names are not of list type\n with pytest.raises(ModeError):\n incorrect_mode_names = {mode: \"{}-mode\".format(mode) for mode in range(order)}\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # not enough mode names\n with pytest.raises(ModeError):\n incorrect_mode_names = [\"{}-mode\".format(mode) for mode in range(order - 1)]\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # too many mode names\n with pytest.raises(ModeError):\n incorrect_mode_names = [\"{}-mode\".format(mode) for mode in range(order + 1)]\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # all mode names should be strings\n with pytest.raises(ModeError):\n incorrect_mode_names = [\"{}-mode\".format(mode) for mode in range(order)]\n incorrect_mode_names[0] = 0\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # ------ tests for custom state being incorrectly defined\n # custom state should be passed as a dict\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = [correct_normal_shape,\n correct_mode_order,\n correct_rtype]\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # custom state not fully defined\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=correct_mode_order)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # normal shape of custom state should be a tuple\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n incorrect_normal_shape = [I, J, K]\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=incorrect_normal_shape,\n mode_order=correct_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # normal shape of custom state is inconsistent with the shape of provided data\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n incorrect_normal_shape = (I+1, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=incorrect_normal_shape,\n mode_order=correct_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # mode order of custom state should be a !! TUPLE !! 
of lists\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = [[0], [1], [2]]\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # mode order of custom state should be a tuple of !! LISTS !!\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = (0, 1, 2)\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # number of list in mode order should correspond to the number of dimensions of provided data\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = ([0], [1, 2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J*K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # length of mode order of custom state is inconsistent with the normal shape\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = ([0], [1], [2, 3])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # length of normal shape of custom state is inconsistent with the length of provided mode names\n with pytest.raises(ModeError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n correct_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=correct_mode_order,\n rtype=correct_rtype)\n incorrect_mode_names = [\"frequency\", \"time\"]\n Tensor(array=correct_data, custom_state=correct_custom_state, mode_names=incorrect_mode_names)",
"def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)",
"def test_set_mode_names(self):\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_new_mode_names = {0: 'pixel-x',\n 1: 'pixel-y',\n 2: 'color'\n }\n tensor = Tensor(array=true_data)\n tensor.set_mode_names(true_new_mode_names)\n assert tensor.mode_names == list(true_new_mode_names.values())\n\n # ------ tests that should FAIL for new mode names being incorrectly defined for renaming\n with pytest.raises(ModeError):\n # too many mode names\n incorrect_new_mode_names = {mode: \"{}-mode\".format(mode) for mode in range(true_order + 1)}\n tensor.set_mode_names(mode_names=incorrect_new_mode_names)\n\n with pytest.raises(ModeError):\n # incorrect type of keys (not integers)\n incorrect_new_mode_names = {\"{}-mode\".format(mode): mode for mode in range(true_order)}\n tensor.set_mode_names(mode_names=incorrect_new_mode_names)\n\n with pytest.raises(ModeError):\n # key value exceeds the order of a tensor\n incorrect_new_mode_names = {mode: \"{}-mode\".format(mode) for mode in range(true_order - 2, true_order + 1)}\n tensor.set_mode_names(mode_names=incorrect_new_mode_names)\n\n with pytest.raises(ModeError):\n # key value is set to be negative\n incorrect_new_mode_names = {mode: \"{}-mode\".format(mode) for mode in range(-1, true_order - 1)}\n tensor.set_mode_names(mode_names=incorrect_new_mode_names)",
"def test_set_mode_index(self):\n shape = (2, 2, 2)\n true_order = len(shape)\n size = reduce(lambda x, y: x * y, shape)\n data = np.ones(size).reshape(shape)\n tensor = Tensor(array=data)\n\n # ------ tests that should FAIL for new mode index being incorrectly defined for renaming\n with pytest.raises(ModeError):\n # too many lists of indices provided\n mode_index = {i: [\"index\"] for i in range(len(shape)+1)}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # incorrect type of keys (not integers)\n mode_index = {\"index-name\": mode for mode in range(true_order)}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # key value exceeds the order of a tensor\n wrong_key = true_order + 1\n mode_index = {wrong_key : [\"idx\"]}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # key value exceeds the order of a tensor\n wrong_key = -1\n mode_index = {wrong_key : [\"idx\"]}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # not enough indices for the length of the mode\n mode_index = {0: [\"idx\"]}\n tensor.set_mode_index(mode_index=mode_index)",
"def __check_mode(m):\n if m not in range(0, 3):\n return False\n return True",
"def assert_number_of_modes(self, device):\n # Program subsystems may be created and destroyed during execution. The length\n # of the program registers represents the total number of modes that has ever existed.\n modes_total = len(self.reg_refs)\n\n if modes_total > device.modes:\n raise CircuitError(\n f\"This program contains {modes_total} modes, but the device '{device.target}' \"\n f\"only supports a {device.modes}-mode program.\"\n )",
"def test_set_mode_names(self):\n r1, r2 = 2, 3\n I, J, K = 2, 3, 4\n core_1 = np.arange(I * r1).reshape(I, r1)\n core_2 = np.arange(r1 * J * r2).reshape(r1, J, r2)\n core_3 = np.arange(r2 * K).reshape(r2, K)\n core_values = [core_1, core_2, core_3]\n ft_shape = (I, J, K)\n init_names = [\"country\", \"year\", \"month\"]\n mode_names = {i: name for i, name in enumerate(init_names)}\n\n tensor_tkd = TensorTT(core_values=core_values)\n tensor_tkd.set_mode_names(mode_names)\n tensor_tkd_true = TensorTT(core_values=core_values, mode_names=init_names)\n assert all([tensor_tkd.modes[i].name == tensor_tkd_true.modes[i].name for i in range(tensor_tkd.order)])",
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def check_spec_node(tensor):\n if tensor.op.tag:\n str_list = tensor.op.tag.split(\"|\")\n if ('elewise' not in str_list[0].split('_')) and (\n 'transpose' not in str_list[0].split('_')) \\\n and ('poolinginput' not in str_list[0].split('_')):\n return True\n return False\n if isinstance((tensor.op), tvm.tensor.ExternOp):\n return True\n return False",
"def test_mode(self):\n self.logger.debug('Starting unit_test on mode mode')\n modes = ['Modulation','Voltage1','Voltage2']\n for m in modes:\n self.inst.mode = m\n assert m == self.inst.mode\n self.logger.info('Mode assertion passed for mode: {}'.format(m))\n\n self.logger.info('Mode unit_test passed')",
"def _check_open_mode(self, node):\n try:\n mode_arg = utils.get_argument_from_call(node, position=1, keyword=\"mode\")\n except utils.NoSuchArgumentError:\n return\n if mode_arg:\n mode_arg = utils.safe_infer(mode_arg)\n if isinstance(mode_arg, astroid.Const) and not _check_mode_str(\n mode_arg.value\n ):\n self.add_message(\"bad-open-mode\", node=node, args=mode_arg.value)",
"def test_three_mode(self, tol):\n N = 3\n wires = range(N)\n\n theta = [0.321, 0.4523, 0.21321]\n phi = [0.234, 0.324, 0.234]\n varphi = [0.42342, 0.234, 0.1121]\n\n with qml.tape.OperationRecorder() as rec_rect:\n Interferometer(theta, phi, varphi, wires=wires)\n\n with qml.tape.OperationRecorder() as rec_tria:\n Interferometer(theta, phi, varphi, wires=wires)\n\n for rec in [rec_rect, rec_tria]:\n # test both meshes (both give identical results for the 3 mode case).\n assert len(rec.queue) == 6\n\n expected_bs_wires = [[0, 1], [1, 2], [0, 1]]\n\n for idx, op in enumerate(rec_rect.queue[:3]):\n assert isinstance(op, qml.Beamsplitter)\n assert op.parameters == [theta[idx], phi[idx]]\n assert op.wires == Wires(expected_bs_wires[idx])\n\n for idx, op in enumerate(rec.queue[3:]):\n assert isinstance(op, qml.Rotation)\n assert op.parameters == [varphi[idx]]\n assert op.wires == Wires([idx])",
"def _AssertOpShapeOk(self, Op):\n nSitesS = self.nSites\n nSitesU = self.nSitesU\n nGridK = nUnitCells = self.nUnitCells\n assert(nUnitCells * nSitesU == nSitesS)\n assert(Op.shape == (nUnitCells*nSitesU, nSitesU) or\n Op.shape == (nUnitCells,nSitesU,nSitesU))",
"def check_mode_set(master, mode):\n if mode not in master.mode_mapping():\n return False\n return True",
"def _is_multi_input_op_to_parse(module: torch.nn.Module):\n\n return isinstance(module, tuple(MULTI_INPUT_OPS_TO_PARSE))",
"def test_operation_1_multiple_targets(self):\n\n def op():\n qml.PauliZ(wires=2)\n qml.PauliY(wires=2)\n\n with pytest.raises(\n qml.QuantumFunctionError, match=\"MultipleTargets controlled is not supported.\"\n ):\n qml.is_commuting(qml.ctrl(op, control=[0, 1])(), qml.PauliX(wires=0))",
"def test_reset_circuit(self, num_weights, num_modes):\n # Create a state over 1 mode, 1 weight per mode\n example = circuit.BosonicModes(1, 1)\n\n # Reset with number of weights and modes and perform the same check\n # as test_circuit_init\n example.reset(num_modes, num_weights)\n tot_num_weights = num_weights**num_modes\n num_quad = 2 * num_modes\n assert example.nlen == num_modes\n assert np.isclose(sum(example.get_weights()), 1)\n assert example.is_vacuum(tol=1e-10)\n for i in range(num_modes):\n assert np.isclose(example.fidelity_vacuum([i]), 1)\n assert example.active == list(range(num_modes))\n assert example.get_mean().shape == (tot_num_weights, num_quad)\n assert example.get_covmat().shape == circuit.c_shape(num_modes, num_weights)\n to_xp_list = list(range(0, num_quad, 2)) + list(range(1, num_quad + 1, 2))\n from_xp_list = []\n for i in range(num_modes):\n from_xp_list += [i, i + num_modes]\n assert np.allclose(example.to_xp, np.array(to_xp_list))\n assert np.allclose(example.from_xp, np.array(from_xp_list))",
"def test_set_mode_index(self):\n ft_shape = (2, 3, 4) # define shape of the tensor in full form\n ml_rank = (2, 3, 4) # define multi-linear rank of a tensor in Tucker form\n core_size = reduce(lambda x, y: x * y, ml_rank)\n core_values = np.arange(core_size).reshape(ml_rank)\n true_orig_fmat_list = [np.arange(ft_shape[mode] * ml_rank[mode]).reshape(ft_shape[mode], ml_rank[mode]) for mode\n in range(len(ft_shape))]\n fmat_list = [fmat.copy() for fmat in true_orig_fmat_list]\n\n tensor_tkd = TensorTKD(fmat=fmat_list, core_values=core_values)\n mode_index = {0: [\"idx1\", \"idx2\"],\n 1: [\"idx1\", \"idx2\", \"idx3\"],\n 2: [\"idx1\", \"idx2\", \"idx3\", \"idx4\"]}\n tensor_tkd.set_mode_index(mode_index=mode_index)\n assert all([tensor_tkd.modes[i].index == mode_index[i] for i in range(tensor_tkd.order)])",
"def test_operation_2_multiple_targets(self):\n\n def op():\n qml.PauliZ(wires=2)\n qml.PauliY(wires=2)\n\n with pytest.raises(\n qml.QuantumFunctionError, match=\"MultipleTargets controlled is not supported.\"\n ):\n qml.is_commuting(qml.PauliX(wires=0), qml.ctrl(op, control=[0, 1])())",
"def arityCheck(self, opsTuple):\n return self.arity is None or self.arity == len(opsTuple)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks that instantiating a TensorN when passing a single mode as a keyword argument returns a NumberOperator.
|
def test_tensor_n_single_mode_wires_explicit(self):
cv_obs = qml.TensorN(wires=[0])
assert isinstance(cv_obs, qml.NumberOperator)
assert cv_obs.wires == Wires([0])
assert cv_obs.ev_order == 2
|
[
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def test_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"NumberOperator\"\n assert dev.supports_observable(gate_name)\n\n op = qml.NumberOperator\n sf_expectation = dev._observable_map[gate_name]\n wires = [0]\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(*args, wires=wires))\n\n assert np.allclose(\n circuit(), SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def _mode(\n preds: Tensor,\n target: Tensor,\n threshold: float,\n top_k: Optional[int],\n num_classes: Optional[int],\n multiclass: Optional[bool],\n) -> DataType:\n\n mode = _check_classification_inputs(\n preds,\n target,\n threshold=threshold,\n top_k=top_k,\n num_classes=num_classes,\n multiclass=multiclass,\n )\n return mode",
"def number_op(number):\n return Op(Tensor(\"$number\", [], content=number, exponent=1))",
"def test_operator_create_operator(self):\n pass",
"def check_spec_node(tensor):\n if tensor.op.tag:\n str_list = tensor.op.tag.split(\"|\")\n if ('elewise' not in str_list[0].split('_')) and (\n 'transpose' not in str_list[0].split('_')) \\\n and ('poolinginput' not in str_list[0].split('_')):\n return True\n return False\n if isinstance((tensor.op), tvm.tensor.ExternOp):\n return True\n return False",
"def test_operator_get_operator(self):\n pass",
"def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)",
"def is_op_quantizable(op: tf.Operation) -> bool:\n\n if op.outputs:\n if op.outputs[0].dtype in QUANT_ALLOWED_DTYPES:\n return True\n\n return False",
"def determine_mode(\n model_for_inference: TPModel,\n dataset: Dataset,\n validation_fn: Callable[[Any, Iterable[Any]], Tuple[float, Union[None, List[float], List[List[TTensor]]]]],\n ) -> bool:\n metric_mode = None\n\n data_item = dataset.get_data([0])\n # pylint: disable=W0703\n try:\n metric_value, values_for_each_item = validation_fn(model_for_inference, data_item)\n except Exception:\n metric_mode = False\n\n if metric_mode is not None:\n return metric_mode\n\n try:\n metric_value = metric_value if metric_value is None else float(metric_value)\n except Exception as ex:\n raise RuntimeError(\n f\"Metric value of {type(metric_value)} type was returned from the `validation_fn` \"\n \"but the float value is expected.\"\n ) from ex\n\n convert_to_float_possible = True\n if values_for_each_item is not None:\n # pylint: disable=W0703\n try:\n _ = float(values_for_each_item[0])\n except Exception:\n convert_to_float_possible = False\n\n # Analyze `metric_value` and `values_for_each_item` values:\n # +--------------+----------------------+-------------+\n # | metric_value | values_for_each_item | metric_mode |\n # +--------------+----------------------+-------------+\n # | float | None | True |\n # +--------------+----------------------+-------------+\n # | float | List[float] | True |\n # +--------------+----------------------+-------------+\n # | float | List[List[TTensor]] | False |\n # +--------------+----------------------+-------------+\n # | None | None | False |\n # +--------------+----------------------+-------------+\n # | None | List[float] | UNEXPECTED |\n # +--------------+----------------------+-------------+\n # | None | List[List[TTensor]] | False |\n # +--------------+----------------------+-------------+\n\n metric_mode = False\n if isinstance(metric_value, float) and (values_for_each_item is None or convert_to_float_possible):\n metric_mode = True\n elif values_for_each_item is not None and not isinstance(values_for_each_item[0], list):\n raise RuntimeError(\"Unexpected return value from provided validation function.\")\n\n return metric_mode",
"def _is_multi_input_op_to_parse(module: torch.nn.Module):\n\n return isinstance(module, tuple(MULTI_INPUT_OPS_TO_PARSE))",
"def test_has_matrix_true(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n @staticmethod\n def compute_matrix():\n return np.eye(2)\n\n assert MyOp.has_matrix\n assert MyOp(wires=0).has_matrix",
"def evaluates_to_number(self):\n return self.shape_for_testing == ()",
"def test_set_mode_index(self):\n shape = (2, 2, 2)\n true_order = len(shape)\n size = reduce(lambda x, y: x * y, shape)\n data = np.ones(size).reshape(shape)\n tensor = Tensor(array=data)\n\n # ------ tests that should FAIL for new mode index being incorrectly defined for renaming\n with pytest.raises(ModeError):\n # too many lists of indices provided\n mode_index = {i: [\"index\"] for i in range(len(shape)+1)}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # incorrect type of keys (not integers)\n mode_index = {\"index-name\": mode for mode in range(true_order)}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # key value exceeds the order of a tensor\n wrong_key = true_order + 1\n mode_index = {wrong_key : [\"idx\"]}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # key value exceeds the order of a tensor\n wrong_key = -1\n mode_index = {wrong_key : [\"idx\"]}\n tensor.set_mode_index(mode_index=mode_index)\n\n with pytest.raises(ModeError):\n # not enough indices for the length of the mode\n mode_index = {0: [\"idx\"]}\n tensor.set_mode_index(mode_index=mode_index)",
"def as_op(x):\n if isinstance(x, Op):\n return x\n\n return constant(x)",
"def _register_external_op_helper(op_name, supported=True):\n\n @tvm.ir.register_op_attr(op_name, \"target.dnnl\")\n def _func_wrapper(expr):\n args = expr.args\n if any([x.checked_type.dtype == \"int64\" for x in args]):\n logger.info(\"DNNL does not support int64.\")\n return False\n # DNNL does not support pooling with ceil_mode = True.\n if \"pool\" in op_name:\n attrs = dict(get_attrs(expr))\n if \"ceil_mode\" in attrs.keys() and attrs[\"ceil_mode\"]:\n return False\n return supported\n\n return _func_wrapper",
"def get_param_quantizer(op: tf.Operation, index: int) -> tf.Operation:\n\n # MatMul param is directly connected input node, get quantized readVarOp\n quantized_op = op.inputs[index].op\n # handle MatMuls where the param is fed via strided_slice or split op types\n if op.inputs[index].op.type in ['StridedSlice', 'Split']:\n matmul_input_op = op.inputs[index].op\n # get quantized readVarOp\n for inp in matmul_input_op.inputs:\n if inp.op.type in ['QcQuantize', 'QcQuantizeRecurrentParam', 'QcQuantizePerChannel']:\n quantized_op = inp.op\n\n return quantized_op",
"def _isOperator(self, token):\n token = token.strip()\n \n if(token == \"+\"):\n return True\n\n if(token == \"*\"):\n return True\n \n return False",
"def get_operation_type(layer, output_cache):\n\n wx_table = [\n [\"mult\", \"barrel\", \"mux\", \"mux\", \"mux\", \"fmult\"],\n [\"barrel\", \"adder\", \"mux\", \"xor\", \"mux\", \"fmult\"],\n [\"mux\", \"mux\", \"mux\", \"mux\", \"xor\", \"fmult\"],\n [\"mux\", \"xor\", \"mux\", \"xor\", \"xor\", \"fmult\"],\n [\"mux\", \"mux\", \"xor\", \"xor\", \"xor\", \"fmult\"],\n [\"fmult\", \"fmult\", \"fmult\", \"fmult\", \"fmult\", \"fmult\"],\n ]\n\n # check if this is a quantized layers (QDense, QConv, QDepthwise)\n if hasattr(layer, \"get_quantizers\"):\n w_quant = layer.get_quantizers()[0]\n w_mode, w_bits, w_sign = get_quant_mode(w_quant)\n if w_mode == \"float\":\n logging.warning(\"%s kernel is unquantized!\", layer.name)\n\n # for the input, get tensor input and search the cache that associates\n # the quantizer with a tensor\n if output_cache.get(layer.input.experimental_ref(), None) is not None:\n x_mode, x_bits, x_sign = get_quant_mode(\n output_cache.get(layer.input.experimental_ref()))\n if x_mode == \"float\":\n logging.warning(\"%s input is unquantized!\", layer.name)\n else:\n print(\"cannot determine presently model for {}\".format(layer.name))\n return \"null\", (w_mode, -1), (w_bits, -1), (w_sign, -1)\n mode = wx_table[w_mode][x_mode]\n return mode, (w_mode, x_mode), (w_bits, x_bits), (w_sign, x_sign)\n\n raise ValueError(\"Cannot find suitable quantization candidates for {}\".format(\n layer.name))",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the id attribute of an observable can be set.
|
def test_id(self):
    class DummyObserv(qml.operation.Observable):
        r"""Dummy custom observable"""
        num_wires = 1
        grad_method = None

    op = DummyObserv(1.0, wires=0, id="test")
    assert op.id == "test"
|
[
"def test_set_id(self):\n test_id = 5\n self.test_manager.set_id(test_id)\n self.assertEqual(self.test_manager.get_id(), test_id)\n self.test_manager.set_id(self.NO_ID)\n self.assertEqual(self.test_manager.get_id(), self.NO_ID)",
"def test_setValidid(self):\n object1 = Base(45)\n self.assertEqual(object1.id, 45)",
"def test_assign_atom_id(self):\n mol = Molecule(smiles='CCCC')\n mol.assign_atom_ids()\n self.assertTrue(mol.atom_ids_valid())",
"def test_id(self):\n\n self.assertEqual(self.r1.id, 1)\n self.assertEqual(self.r2.id, 2)\n self.assertEqual(self.r3.id, 3)\n self.assertEqual(self.r4.id, 9)",
"def test_property_id(base_property: Property):\n assert base_property.id == 'https://booking.com'",
"def test_task_id_change(generic_task):\n generic_task.set_task_id('Puf')\n assert generic_task.get_task_id() == 'Puf'",
"def test_setIDFunction(self):\n value = object()\n previous = util.setIDFunction(value)\n result = util.setIDFunction(previous)\n self.assertIdentical(value, result)",
"def test_setNoneid(self):\n object3 = Base(None)\n self.assertEqual(object3.id, 1)",
"def test_get_observable_ids(petab_problem): # pylint: disable=W0621\n assert set(petab_problem.get_observable_ids()) == {'observable_1'}",
"def test_read_only(self):\n with pytest.raises(AttributeError):\n self.message.id = 2",
"def test_has_id(self):\n test_pattern = re.compile('test1|test2', re.I)\n test_node = fragment_fromstring('<div/>')\n test_node.set('id', 'test2')\n\n self.assertTrue(check_node_attributes(test_pattern, test_node, 'id'))",
"def test_musicals_id_get(self):\n pass",
"def test_patch_obj_id_get(self):\n pass",
"def test_feature_request_id(new_feature_request):\n new_feature_request.id = 17\n assert isinstance(new_feature_request.get_id(), str)\n assert not isinstance(new_feature_request.get_id(), int)\n assert new_feature_request.get_id() == \"17\"",
"def test_if_BaseModel_instance_has_id(self):\n b = BaseModel()\n self.assertTrue(hasattr(b, \"id\"))",
"def testId(self):\n self.assertEqual(CAPTCHA_ID in self.ff1, True)\n self.assertNotEqual('captcha_field' in self.ff1, True)",
"def test_check_id(self):\n s1 = Square(10)\n s2 = Square(10)\n s3 = Square(10)\n self.assertGreater(s2.id, s1.id)\n self.assertGreater(s3.id, s2.id)",
"def test_exists_by_id(self, _id):",
"def test_lacks_id(self):\n test_pattern = re.compile('test1|test2', re.I)\n test_node = fragment_fromstring('<div/>')\n test_node.set('id', 'test4')\n self.assertFalse(check_node_attributes(test_pattern, test_node, 'id'))",
"def test_id_generated():\n msg = Message({'@type': TEST_TYPE})\n assert msg.type == TEST_TYPE\n assert msg.id is not None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the simplify method returns the same instance.
|
def test_simplify_method(self):
    class DummyObserv(qml.operation.Observable):
        r"""Dummy custom observable"""
        num_wires = 1
        grad_method = None

    op = DummyObserv(wires=0)
    sim_op = op.simplify()
    assert op is sim_op
|
[
"def test_simplify_unit(self, simplify_unit_test_case: SimplifyUnitTest\n ) -> None:\n # Arrange done in fixtures.\n # Act.\n simplified = unit_analysis.simplify(\n simplify_unit_test_case.to_simplify,\n simplify_unit_test_case.mock_type_factories)\n\n # Assert.\n # It should have created a new unit with the correct raw value.\n simplified_type = simplify_unit_test_case.mock_simplify_type\\\n .return_value\n assert simplified == simplified_type.return_value\n simplified_type.assert_called_once_with(\n pytest.approx(simplify_unit_test_case.expected_raw))",
"def test_simplify_type(self, simplify_type_test_case: SimplifyTypeTest\n ) -> None:\n # Arrange done in fixtures.\n # Act.\n simplified = unit_analysis.simplify(\n simplify_type_test_case.to_simplify,\n simplify_type_test_case.type_factories)\n\n # Assert.\n assert simplified == simplify_type_test_case.simplified",
"def test_simplify(self):\r\n self.assertTrue(str(Fraction(9, 27).simplify()) == \"1/3\")\r\n self.assertFalse(str(Fraction(9, 27).simplify()) == str(Fraction(2, 3)))\r\n self.assertTrue(str(Fraction(-9, -27).simplify()) == str(Fraction(-1, -3)))\r\n self.assertTrue(str(Fraction(9, -27).simplify()) == str(Fraction(-1, 3)))",
"def simplify(units):\n pass",
"def gentle_simplify(self):\n return self.apply_to_content(operator.methodcaller('gentle_simplify'))",
"def test_simplifier(self):\n self.pool, self.doThreadWork = deterministicPool()\n self.reactor, self.doReactorWork = deterministicReactorThreads()\n self.getter = FakeAddrInfoGetter()\n self.resolver = GAIResolver(self.reactor, lambda: self.pool,\n self.getter.getaddrinfo)\n simpleResolver = ComplexResolverSimplifier(self.resolver)\n self.getter.addResultForHost('example.com', ('192.168.3.4', 4321))\n success = simpleResolver.getHostByName('example.com')\n failure = simpleResolver.getHostByName('nx.example.com')\n self.doThreadWork()\n self.doReactorWork()\n self.doThreadWork()\n self.doReactorWork()\n self.assertEqual(self.failureResultOf(failure).type, DNSLookupError)\n self.assertEqual(self.successResultOf(success), '192.168.3.4')",
"def test_fraction_simplify(self):\n examples = [\n Fraction(P(A), One()),\n Fraction(P(A) * P(B), P(B)),\n ]\n for example in examples:\n with self.subTest(expr=example.to_y0()):\n self.assertIsInstance(example, Fraction)\n self.assert_complexity_le(example.simplify(), example)",
"def simplify_unit_test_case(cls, request: RequestType,\n unit_factory: UnitFactory,\n unit_type_factory: UnitTypeFactory,\n compound_unit_factory: CompoundUnitFactory\n ) -> SimplifyUnitTest:\n test_index = request.param\n\n unit1 = unit_factory(\"Unit1\", raw=2.0)\n unit2 = unit_factory(\"Unit2\", raw=3.0)\n\n # Create two units of the same type.\n type1 = unit_type_factory(\"UnitType1\")\n unit1_of_type1 = unit_factory(\"Type1Unit1\", raw=5.0,\n unit_type_class=type1)\n unit2_of_type1 = unit_factory(\"Type1Unit2\", raw=7.0,\n unit_type_class=type1)\n\n # Mock the standard values of these units, since they'll be used.\n unit1_of_type1.to_standard.return_value = unit_factory(\n \"Type1Unit1Standard\", raw=16.0)\n unit2_of_type1.to_standard.return_value = unit_factory(\n \"Type1Unit2Standard\", raw=8.0)\n\n mul_factory = functools.partial(compound_unit_factory, Operation.MUL)\n div_factory = functools.partial(compound_unit_factory, Operation.DIV)\n\n # Mock the version of simplify() for types. We simply do this by\n # registering an alternate version.\n mock_simplify_type = mock.Mock()\n unit_analysis.simplify.register(UnitType, mock_simplify_type)\n\n # Create a fake CompoundTypeFactories instance, which will never\n # actually be used.\n compound_type_factories = mock.Mock(spec=CompoundTypeFactories)\n simplify_test = functools.partial(\n cls.SimplifyUnitTest, mock_type_factories=compound_type_factories,\n mock_simplify_type=mock_simplify_type\n )\n\n simplify_tests = [\n # When no standardization is required.\n simplify_test(to_simplify=unit1, expected_raw=2.0),\n simplify_test(to_simplify=mul_factory(unit1, unit2),\n expected_raw=6.0),\n simplify_test(to_simplify=div_factory(mul_factory(unit1, unit2),\n unit2),\n expected_raw=2.0),\n # When standardization is required.\n simplify_test(to_simplify=mul_factory(unit1_of_type1,\n unit2_of_type1),\n expected_raw=128.0),\n simplify_test(to_simplify=div_factory(unit1_of_type1,\n unit2_of_type1),\n expected_raw=2.0),\n simplify_test(to_simplify=mul_factory(div_factory(unit1,\n unit1_of_type1),\n div_factory(unit2_of_type1,\n unit2)),\n expected_raw=(1.0 / 3.0)),\n # When we have multiple units of the same class. (It should not\n # standardize.)\n simplify_test(to_simplify=mul_factory(unit1_of_type1,\n unit1_of_type1),\n expected_raw=25.0)\n ]\n\n assert len(simplify_tests) == cls._NUM_SIMPLIFY_UNIT_TESTS\n yield simplify_tests[test_index]\n\n # Un-mock the simplify function so it can be used again.\n unit_analysis.simplify.register(UnitType, unit_analysis.simplify_type)",
"def simplify_type_test_case(cls, request: RequestType,\n unit_type_factory: UnitTypeFactory,\n compound_type_factory: CompoundTypeFactory,\n fake_compound_type_factory:\n FakeCompoundTypeFactory,\n unitless_type_factory: UnitlessTypeFactory,\n ) -> SimplifyTypeTest:\n test_index = request.param\n\n type1 = unit_type_factory(\"UnitType1\")\n type2 = unit_type_factory(\"UnitType2\")\n type3 = unit_type_factory(\"UnitType3\")\n type4 = unit_type_factory(\"UnitType4\")\n\n # Other instances of the same types.\n type1_other = unit_type_factory(\"UnitType1\")\n # Fake the standard_unit_class() method so it returns something\n # predictable.\n type1.standard_unit_class.return_value = type1\n type1_other.standard_unit_class.return_value = type1\n\n # We use fake CompoundUnitTypes for the output from simplify() and real\n # ones for the input.\n fake_mul = functools.partial(fake_compound_type_factory,\n Operation.MUL)\n fake_div = functools.partial(fake_compound_type_factory,\n Operation.DIV)\n real_mul = functools.partial(compound_type_factory, Operation.MUL)\n real_div = functools.partial(compound_type_factory, Operation.DIV)\n\n simplified1 = real_mul(type1, type2)\n simplified2 = real_div(real_mul(type1, type2), real_mul(type3, type4))\n simplified3 = real_div(type1, real_div(type2, type1))\n simplified4 = unitless_type_factory()\n simplified5 = real_div(unitless_type_factory(), type1)\n\n # All simplify tests should use the fake compound unit type factories.\n fake_type_factories = CompoundTypeFactories(mul=fake_mul, div=fake_div)\n simplify_test = functools.partial(cls.SimplifyTypeTest,\n type_factories=fake_type_factories)\n\n # The list of tests that we want to perform. We use ordered dict for\n # the attributes here because the iteration order can change the results\n # of un_flatten(), and we want them to be consistent.\n simplify_tests = [\n # When no simplification is necessary. 
In these cases, we should\n # just return the input.\n simplify_test(to_simplify=simplified1, simplified=simplified1),\n simplify_test(to_simplify=simplified2, simplified=simplified2),\n simplify_test(to_simplify=simplified3, simplified=simplified3),\n # Simple simplification cases.\n simplify_test(to_simplify=real_div(real_mul(type1, type2),\n real_mul(type2, type3)),\n simplified=fake_div(type1, type3)),\n simplify_test(to_simplify=real_div(real_mul(\n real_mul(type1, type1),\n type2,\n ), type1),\n simplified=fake_mul(type1, type2)),\n # Nested divisions.\n simplify_test(to_simplify=real_div(real_div(type1, type2),\n real_div(type3, type2)),\n simplified=fake_div(type1, type3)),\n simplify_test(to_simplify=real_div(real_div(\n type2, real_mul(type1, type1)),\n real_div(type3, type1)),\n simplified=fake_div(type2, fake_mul(type1, type3))),\n # Unitless values.\n simplify_test(to_simplify=simplified4, simplified=simplified4),\n simplify_test(to_simplify=real_mul(unitless_type_factory(),\n unitless_type_factory()),\n simplified=unitless_type_factory()),\n simplify_test(to_simplify=real_mul(unitless_type_factory(), type1),\n simplified=type1),\n simplify_test(to_simplify=simplified5, simplified=simplified5),\n simplify_test(to_simplify=real_div(type1, unitless_type_factory()),\n simplified=type1),\n simplify_test(to_simplify=real_div(type1, real_mul(type1, type2)),\n simplified=fake_div(unitless_type_factory(), type2)),\n # Cases with multiple instances of the same UnitType.\n simplify_test(to_simplify=real_mul(real_mul(type1, type2),\n real_mul(type1_other, type2)),\n simplified=fake_mul(fake_mul(type1, type1),\n fake_mul(type2, type2))),\n simplify_test(to_simplify=real_div(real_mul(type1, type2),\n real_mul(type1_other, type3)),\n simplified=fake_div(type2, type3)),\n simplify_test(to_simplify=real_div(real_mul(type1, type2),\n real_mul(type1_other, type2)),\n simplified=unitless_type_factory()),\n simplify_test(to_simplify=real_div(real_mul(type1,\n real_mul(type1, type1)),\n real_mul(type1, type1)),\n simplified=type1),\n simplify_test(to_simplify=real_div(type1,\n real_mul(type1_other, type2)),\n simplified=fake_div(unitless_type_factory(),\n type2))\n ]\n\n assert len(simplify_tests) == cls._NUM_SIMPLIFY_TYPE_TESTS\n return simplify_tests[test_index]",
"def simplify(*args):\n return _wali.simplify(*args)",
"def simplify(self):\n # TODO: Make primitives version of this method do course parameter\n # simplification based on courses\n\n # Not implemented yet\n pass",
"def _force_full_simplify(worker: AbstractWorker, obj: object) -> object:\n # check to see if there is a full simplifier\n # for this type. If there is, return the full simplified object.\n current_type = type(obj)\n if current_type in msgpack_global_state.forced_full_simplifiers:\n result = (\n msgpack_global_state.forced_full_simplifiers[current_type][0],\n msgpack_global_state.forced_full_simplifiers[current_type][1](worker, obj),\n )\n return result\n # If we already tried to find a full simplifier for this type but failed, we should\n # simplify it instead.\n elif current_type in msgpack_global_state.no_full_simplifiers_found:\n return _simplify(worker, obj)\n else:\n # If the object type is not in forced_full_simplifiers,\n # we check the classes that this object inherits from.\n # `inspect.getmro` give us all types this object inherits\n # from, including `type(obj)`. We can skip the type of the\n # object because we already tried this in the\n # previous step.\n classes_inheritance = inspect.getmro(type(obj))[1:]\n\n for inheritance_type in classes_inheritance:\n if inheritance_type in msgpack_global_state.forced_full_simplifiers:\n # Store the inheritance_type in forced_full_simplifiers so next\n # time we see this type serde will be faster.\n msgpack_global_state.forced_full_simplifiers[\n current_type\n ] = msgpack_global_state.forced_full_simplifiers[inheritance_type]\n result = (\n msgpack_global_state.forced_full_simplifiers[current_type][0],\n msgpack_global_state.forced_full_simplifiers[current_type][1](worker, obj),\n )\n return result\n\n # If there is not a full_simplifier for this\n # object, then we simplify it.\n msgpack_global_state.no_full_simplifiers_found.add(current_type)\n return _simplify(worker, obj)",
"def testSimpleInspectors(self):\n self.assertEqual(self.swap.payNominal(), self.pay_nominal)\n self.assertEqual(self.swap.payCurrency(), self.pay_currency)\n self.assertEqual(self.swap.paySpread(), self.pay_spread)\n self.assertEqual(self.swap.recNominal(), self.rec_nominal)\n self.assertEqual(self.swap.recCurrency(), self.rec_currency)\n self.assertEqual(self.swap.recSpread(), self.rec_spread)",
"def _simplify(obj):\n if type(obj) is not str and hasattr(obj, '__iter__') and len(obj) == 1:\n return obj[0]\n else:\n return obj",
"def test_identical_true(self):\n mol = Molecule(smiles='CCCC')\n mol.assign_atom_ids()\n mol_copy = mol.copy(deep=True)\n self.assertTrue(mol.is_isomorphic(mol_copy))\n self.assertTrue(mol.is_identical(mol_copy))",
"def simplify(self):\n new_numerator = self.numerator // self.euclid_gcd()\n new_denominator = self.denominator // self.euclid_gcd()\n return Fraction(new_numerator, new_denominator)",
"def simplify(circuit):\n circuit=removeZeroRotations(circuit)\n circuit,wires=removeDoubleCZ(circuit)\n circuit=combineRotations(circuit,wires)\n return circuit",
"def test_replicate_primitive(self):\n card = examples.Card(1, 'clubs')\n self.assertEqual(card.rank, 1)\n self.assertEqual(card.suit, 'clubs')\n card_copy = self.replicator.replicate(card)\n\n self.assertNotEqual(id(card), id(card_copy))\n self.assertEqual(card, card_copy)\n\n self.assertEqual(card.rank, card_copy.rank)\n self.assertEqual(card.suit, card_copy.suit)",
"def testSimpleInspectors(self):\n self.assertEqual(self.swap.type(), self.type)\n self.assertEqual(self.swap.fixedNominal(), self.nominal2)\n self.assertEqual(self.swap.fixedCurrency(), self.currency2)\n self.assertEqual(self.swap.fixedRate(), self.couponRate)\n self.assertEqual(self.swap.fixedDayCount(), self.tsDayCounter)\n self.assertEqual(self.swap.fixedPaymentBdc(), self.busDayConvention)\n self.assertEqual(self.swap.fixedPaymentLag(), self.payLag)\n self.assertEqual(self.swap.fixedPaymentCalendar(), self.calendar)\n self.assertEqual(self.swap.floatNominal(), self.nominal1)\n self.assertEqual(self.swap.floatCurrency(), self.currency1)\n self.assertEqual(self.swap.floatSpread(), self.floatSpread)\n self.assertEqual(self.swap.floatPaymentBdc(), self.busDayConvention)\n self.assertEqual(self.swap.floatPaymentLag(), self.payLag)\n self.assertEqual(self.swap.floatPaymentCalendar(), self.calendar)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that raising an Operator to a power that is not a number raises a ValueError.
|
def test_pow_method_with_non_numeric_power_raises_error(self):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom operator"""
        num_wires = 1

    with pytest.raises(ValueError, match="Cannot raise an Operator"):
        _ = DummyOp(wires=[0]) ** DummyOp(wires=[0])
|
[
"def test_power_except(self):\n chan = SuperOp(self.depol_sop(1))\n # Non-integer power raises error\n self.assertRaises(QiskitError, chan.power, 0.5)",
"def test_raise_error_fewer_than_2_operands(self):\n with pytest.raises(ValueError, match=\"Require at least two operators to combine;\"):\n _ = ValidOp(qml.PauliX(0))",
"def test_power(val: Union[Val, Real], power: Union[Val, Real], expected: Val):\n assert dataclasses.astuple(val ** power) == pytest.approx(dataclasses.astuple(expected))",
"def test_not_a_positive_number_error():\n calc = Calculator(-4)\n with pytest.raises(NotAPositiveNumber):\n result = calc.n_root(2)",
"def testNumberBadExponent( self ):\n self.assertTokenizingRaises( NumberBadExponent, '1e')\n self.assertTokenizingRaises( NumberBadExponent, '1ex')\n self.assertTokenizingRaises( NumberBadExponent, '1E')\n self.assertTokenizingRaises( NumberBadExponent, '1EX')\n self.assertTokenizingRaises( NumberBadExponent, '1Ex')\n self.assertTokenizingRaises( NumberBadExponent, '1eX')\n self.assertTokenizingRaises( NumberBadExponent, '1.134eX')\n self.assertTokenizingRaises( NumberBadExponent, '1.0eX')",
"def test_multiply_except(self):\n chan = SuperOp(self.sopI)\n self.assertRaises(QiskitError, chan.multiply, 's')\n self.assertRaises(QiskitError, chan.multiply, chan)",
"def test_no_exp_exceptions():\n AI(pmisc.number._no_exp, \"number\", number=None)\n AI(pmisc.number._no_exp, \"number\", number=True)\n AI(pmisc.number._no_exp, \"number\", number=\"a\")",
"def test_input_validation_two():\n with pytest.raises(NotANumber):\n calc = Calculator('four')",
"def testConvertionWithExponent(unit_database_custom_conversion) -> None:\n unit_database = unit_database_custom_conversion\n assert approx(abs(100 - unit_database.Convert(\"length\", [(\"m\", 1)], [(\"cm\", 1)], 1)), 5) == 0\n assert approx(abs(10000 - unit_database.Convert(\"length\", [(\"m\", 2)], [(\"cm\", 2)], 1)), 5) == 0\n\n # Doesn't make sense changing the exponent in the from and to\n with pytest.raises(ValueError):\n unit_database.Convert(\"length\", [(\"m\", 2)], [(\"m\", 1)], 1)",
"def test_hamiltonian_invalid_init_exception(self, coeffs, ops):\n with pytest.raises(ValueError, match=\"number of coefficients and operators does not match\"):\n qml.Hamiltonian(coeffs, ops)",
"def test_invalid_type():\n with pytest.raises(TypeError):\n # Test with string value\n assert calculate_E_min(B_degrees=\"blah\")\n with pytest.raises(ValueError):\n # Test with NaN value\n assert calculate_E_min(B_degrees=nan)\n with pytest.raises(ValueError):\n # Test with infinite value\n assert calculate_E_min(B_degrees=inf)",
"def test_build_ops_error():\n qubit = cirq.LineQubit.range(1)\n with pytest.raises(ValueError):\n cirq_utils.qubit_op_to_gate('W', qubit[0])",
"def testPower(self):\n f8 = self.f8\n self.assertTrue(f8(1, 1, 1) ** 2 == f8(1, 1, 0))",
"def test_convert_to_invalid_symbol(self):\n with self.assertRaises(ValueError):\n convert_value_to_standard_unit('3.141592 Pi', 'Xi')",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_invalid(self):\n\n expression = \"- 1 + 3\" # Invalid syntax\n\n self.assertNotEqual(eval(expression), PrefixOperation(expression).evaluate_expression())",
"def test_arithmetic_errors(self):\n H = qml.Hamiltonian([1], [qml.PauliZ(0)])\n A = [[1, 0], [0, -1]]\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H @ A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = A @ H\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H + A\n with pytest.raises(TypeError, match=\"can't multiply sequence by non-int\"):\n _ = H * A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H - A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H += A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H *= A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H -= A",
"def test_no_exp(num, ref):\n assert pmisc.number._no_exp(num) == ref",
"def test_mul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Cannot multiply Observable by\"):\n _ = \"dummy\" * qml.PauliX(0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the __add__ dunder method with two operators.
|
def test_sum_with_operator(self):
    sum_op = qml.PauliX(0) + qml.RX(1, 0)
    final_op = qml.op_sum(qml.PauliX(0), qml.RX(1, 0))
    # TODO: Use qml.equal when fixed.
    assert isinstance(sum_op, qml.ops.Sum)
    for s1, s2 in zip(sum_op.summands, final_op.summands):
        assert s1.name == s2.name
        assert s1.wires == s2.wires
        assert s1.data == s2.data
    assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)
|
[
"def test_sum_multi_wire_operator_with_scalar(self):\n sum_op = 5 + qml.CNOT(wires=[0, 1])\n final_op = qml.op_sum(\n qml.CNOT(wires=[0, 1]),\n qml.s_prod(5, qml.prod(qml.Identity(0), qml.Identity(1))),\n )\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def OpSum(*operators):\n if not operators:\n return OperatorSum()\n return OperatorSum(list(operators))",
"def test_sum_with_scalar(self):\n sum_op = 5 + qml.PauliX(0) + 0\n final_op = qml.op_sum(qml.PauliX(0), qml.s_prod(5, qml.Identity(0)))\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def test_right_hand_side_operations(self):\n operators = (\n (\"__add__\", operator.add, True),\n (\"__sub__\", operator.sub, False),\n (\"__mul__\", operator.mul, True),\n (\"__truediv__\", operator.truediv, False),\n (\"__floordiv__\", operator.floordiv, False),\n (\"__mod__\", operator.mod, False),\n (\"__pow__\", operator.pow, False),\n )\n tensor = ht.float32([[1, 4], [2, 3]])\n num = 3\n for attr, op, commutative in operators:\n try:\n func = tensor.__getattribute__(attr)\n except AttributeError:\n continue\n self.assertTrue(callable(func))\n res_1 = op(tensor, num)\n res_2 = op(num, tensor)\n if commutative:\n self.assertTrue(ht.equal(res_1, res_2))\n # TODO: Test with split tensors when binary operations are working properly for split tensors",
"def test_add(a, b):\n assert Surreal(a) + Surreal(b) == Surreal(a + b)",
"def calculate(self, op, a, b):\n if op == \"+\":\n return a + b\n elif op == \"-\":\n return a - b\n elif op == \"*\":\n return a * b\n elif op == \"/\":\n return a / b",
"def sum(a, b):\n return a + b",
"def add_checker(attrs, args, op_name):\n if op_name == \"sum\":\n if not isinstance(args[0].op, tvm.ir.op.Op):\n return False\n if args[0].op.name != \"add\":\n return False\n if tuple(get_shape(args[0])) != tuple(get_shape(args[1])):\n return False\n if op_name == \"bias_add\":\n if attrs is None:\n return False\n if not isinstance(args[0].op, tvm.ir.op.Op):\n return False\n if args[0].op.name != \"nn.conv2d\":\n return False\n channel = dict(attrs)[\"channels\"]\n const_shape = get_shape(args[1])\n if channel != reduce(lambda x, y: x * y, const_shape):\n return False\n return True",
"def testSum(self):\n f4 = self.f4\n self.assertTrue(f4(0, 1) + f4(1, 0) == f4(1, 1))",
"def test_overlapping_ops_property(self):\n valid_op = ValidOp(\n qml.sum(qml.PauliX(0), qml.PauliY(5), qml.PauliZ(10)),\n qml.sum(qml.PauliX(1), qml.PauliY(4), qml.PauliZ(6)),\n qml.prod(qml.PauliX(10), qml.PauliY(2), qml.PauliZ(7)),\n qml.PauliY(7),\n qml.prod(qml.PauliX(4), qml.PauliY(3), qml.PauliZ(8)),\n )\n overlapping_ops = [\n [\n qml.sum(qml.PauliX(0), qml.PauliY(5), qml.PauliZ(10)),\n qml.prod(qml.PauliX(10), qml.PauliY(2), qml.PauliZ(7)),\n qml.PauliY(7),\n ],\n [\n qml.sum(qml.PauliX(1), qml.PauliY(4), qml.PauliZ(6)),\n qml.prod(qml.PauliX(4), qml.PauliY(3), qml.PauliZ(8)),\n ],\n ]\n\n # TODO: Use qml.equal when supported for nested operators\n\n for list_op1, list_op2 in zip(overlapping_ops, valid_op.overlapping_ops):\n for op1, op2 in zip(list_op1, list_op2):\n assert op1.name == op2.name\n assert op1.wires == op2.wires\n assert op1.data == op2.data\n assert op1.arithmetic_depth == op2.arithmetic_depth",
"def test_adder_commute():\n num1 = 99\n num2 = 100\n num3 = 1\n\n added1 = adder.add(num1, adder.add(num2,num3))\n added2 = adder.add(adder.add(num1,num2), num3)\n\n assert added1 == added2",
"def _assert_arithmetic_operation_dunder_method_expression(\r\n self, any_value: AnyValue, result: VariableNameInterface,\r\n other: VariableNameInterface,\r\n expected_operator: str) -> None:\r\n expression: str = expression_file_util.get_current_expression()\r\n expected: str = (\r\n f'{result.variable_name} = {any_value.variable_name} '\r\n f'{expected_operator} '\r\n f'{other.variable_name};'\r\n )\r\n assert expected in expression",
"def __add__(self, other):\n return self.componentwise(other, operator.__add__)",
"def operate(term1: int, term2: int, op: str) -> int:\n if op == '+':\n return term1 + term2\n elif op == '*':\n return term1 * term2\n else:\n raise ValueError",
"def test_arithmetic(self):\n self.assertEqualRun(\"(+ (* 3 (- 2 1)) (/ 12 3))\", 7)\n self.assertEqualRun(\"(max 0 (min 100 50))\", 50)\n self.assertTrueRun(\"(eq? 7 7)\")\n self.assertTrueRun(\"(not (eq? 7 8))\")\n self.assertTrueRun(\"(< 7 8)\")\n self.assertTrueRun(\"(not (> 1 8))\")\n self.assertTrueRun(\"(<= 7 7)\")\n self.assertTrueRun(\"(not (>= 7 8))\")",
"def _keep_EUM_after_math_operation(self, other, func) -> bool:\n if hasattr(other, \"shape\") and hasattr(other, \"ndim\"):\n # other is array-like, so maybe we cannot keep EUM\n if func == np.subtract or func == np.sum:\n # +/-: we may want to keep EUM\n if isinstance(other, DataArray):\n if self.type == other.type and self.unit == other.unit:\n return True\n else:\n return False\n else:\n return True # assume okay, since no EUM\n return False\n\n # other is likely scalar, okay to keep EUM\n return True",
"def test_sum_series(arg, first, second, expected):\n assert sum_series(arg, first, second) == expected",
"def binary_op(cls, operator, a, b):\n return cls.binary_operators[operator](a, b)",
"def sum_entries(operator):\r\n return lo.LinOp(lo.SUM_ENTRIES, (1, 1), [operator], None)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the __add__ dunder method with a scalar value.
|
def test_sum_with_scalar(self):
    sum_op = 5 + qml.PauliX(0) + 0
    final_op = qml.op_sum(qml.PauliX(0), qml.s_prod(5, qml.Identity(0)))
    # TODO: Use qml.equal when fixed.
    assert isinstance(sum_op, qml.ops.Sum)
    for s1, s2 in zip(sum_op.summands, final_op.summands):
        assert s1.name == s2.name
        assert s1.wires == s2.wires
        assert s1.data == s2.data
    assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)
|
[
"def test_scale_sum(self):\n u = self.abUsage([1,3])\n self.assertEqual(u.scale_sum(12), self.abUsage([3.0, 9.0]))\n self.assertEqual(u.scale_sum(1), self.abUsage([0.25,0.75]))\n #default is sum to 1\n self.assertEqual(u.scale_sum(), self.abUsage([0.25,0.75]))",
"def test_sum_multi_wire_operator_with_scalar(self):\n sum_op = 5 + qml.CNOT(wires=[0, 1])\n final_op = qml.op_sum(\n qml.CNOT(wires=[0, 1]),\n qml.s_prod(5, qml.prod(qml.Identity(0), qml.Identity(1))),\n )\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def fsum(iterable):\n return 0.0",
"def testSum(self):\n f4 = self.f4\n self.assertTrue(f4(0, 1) + f4(1, 0) == f4(1, 1))",
"def sum(x):\n\treturn np.sum(x)",
"def _check_sum(self, weights):\n return np.sum(weights) - 1",
"def test_sum_array():\n assert myFunction.sum_array([1, 1, 1, 1, 1]) == 5, 'incorrect'\n assert myFunction.sum_array([3]) == 3, 'incorrect'\n assert myFunction.sum_array([-1, -1, -1]) == -3, 'incorrect'",
"def _reduceSum(self,tensor):\n if self._envType == \"DDP\":\n dist.reduce(tensor,0)\n return tensor",
"def test_sum_with_operator(self):\n sum_op = qml.PauliX(0) + qml.RX(1, 0)\n final_op = qml.op_sum(qml.PauliX(0), qml.RX(1, 0))\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def sum_(args):\n return sum(args)",
"def sum_sqr_vals(self):\n\treturn numpy.sum(numpy.square(self.data))",
"def test_add_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) + 2, self.OneType(3, 4, 5))\n self.assertEqual(2 + self.OneType(1, 2, 0), self.OneType(3, 4, 2))",
"def _scalar_summary(scalar: Union[int, bool, float], session: tf.Session,\n tag: str, step: Optional[int]=None) -> None:\n summary_writer = session._hooks[1]._summary_writer\n sum_val = tf.Summary.Value(tag=tag, simple_value=scalar)\n score_sum = tf.Summary(value=[sum_val])\n summary_writer.add_summary(score_sum, step)",
"def sum(sequence):\n return __builtin__.sum(sequence)",
"def test_sum(self):\n self.assertEqual(sumOfNumbers(1,2), 3)",
"def test_sum_entries(self):\r\n size = (5, 5)\r\n x = create_var(size)\r\n expr = sum_entries(x)\r\n self.assertEqual(expr.size, (1, 1))\r\n self.assertEqual(len(expr.args), 1)\r\n self.assertEqual(expr.type, lo.SUM_ENTRIES)",
"def test_sum_series(arg, first, second, expected):\n assert sum_series(arg, first, second) == expected",
"def __sum__(self):\n return sum(self.TAA)",
"def hasSum(self, column, assertion):\n function = jc.scala_function1(self.spark.sparkContext._gateway,\n assertion)\n jvmConstraint = self.jvmCheck.hasSum(\n column,\n function,\n getattr(self.jvmCheck, \"hasSum$default$3\")()\n )\n return Check(\n self.spark,\n self.level,\n self.description,\n jvmConstraint\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the __add__ dunder method with a multi-wire operator and a scalar value.
|
def test_sum_multi_wire_operator_with_scalar(self):
    sum_op = 5 + qml.CNOT(wires=[0, 1])
    final_op = qml.op_sum(
        qml.CNOT(wires=[0, 1]),
        qml.s_prod(5, qml.prod(qml.Identity(0), qml.Identity(1))),
    )
    # TODO: Use qml.equal when fixed.
    assert isinstance(sum_op, qml.ops.Sum)
    for s1, s2 in zip(sum_op.summands, final_op.summands):
        assert s1.name == s2.name
        assert s1.wires == s2.wires
        assert s1.data == s2.data
    assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)
|
[
"def test_sum_with_scalar(self):\n sum_op = 5 + qml.PauliX(0) + 0\n final_op = qml.op_sum(qml.PauliX(0), qml.s_prod(5, qml.Identity(0)))\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def test_sum_with_operator(self):\n sum_op = qml.PauliX(0) + qml.RX(1, 0)\n final_op = qml.op_sum(qml.PauliX(0), qml.RX(1, 0))\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def OpSum(*operators):\n if not operators:\n return OperatorSum()\n return OperatorSum(list(operators))",
"def test_add_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) + 2, self.OneType(3, 4, 5))\n self.assertEqual(2 + self.OneType(1, 2, 0), self.OneType(3, 4, 2))",
"def testSum(self):\n f4 = self.f4\n self.assertTrue(f4(0, 1) + f4(1, 0) == f4(1, 1))",
"def test_reduce_sum_len1(self):\n\n def inner_pipe(data):\n val = reduce_sum([data])\n res = add(val, PipeConstant.from_float(1))\n return res\n\n self.run_pipe(inner_pipe, list(range(40)), [i + 1 for i in range(40)])",
"def sum_entries(operator):\r\n return lo.LinOp(lo.SUM_ENTRIES, (1, 1), [operator], None)",
"def sum_(args):\n return sum(args)",
"def test_sum_array():\n assert myFunction.sum_array([1, 1, 1, 1, 1]) == 5, 'incorrect'\n assert myFunction.sum_array([3]) == 3, 'incorrect'\n assert myFunction.sum_array([-1, -1, -1]) == -3, 'incorrect'",
"def add_checker(attrs, args, op_name):\n if op_name == \"sum\":\n if not isinstance(args[0].op, tvm.ir.op.Op):\n return False\n if args[0].op.name != \"add\":\n return False\n if tuple(get_shape(args[0])) != tuple(get_shape(args[1])):\n return False\n if op_name == \"bias_add\":\n if attrs is None:\n return False\n if not isinstance(args[0].op, tvm.ir.op.Op):\n return False\n if args[0].op.name != \"nn.conv2d\":\n return False\n channel = dict(attrs)[\"channels\"]\n const_shape = get_shape(args[1])\n if channel != reduce(lambda x, y: x * y, const_shape):\n return False\n return True",
"def sum_op(*arrays):\n sum_result = np.full(arrays[0].shape, 0, dtype=numpy.int16)\n for array in arrays:\n valid_mask = ~np.isclose(array, TARGET_NODATA)\n sum_result[valid_mask] = sum_result[valid_mask] + array[valid_mask]\n\n return np.where(sum_result == 0, TARGET_NODATA, sum_result)",
"def test_sum_entries(self):\r\n size = (5, 5)\r\n x = create_var(size)\r\n expr = sum_entries(x)\r\n self.assertEqual(expr.size, (1, 1))\r\n self.assertEqual(len(expr.args), 1)\r\n self.assertEqual(expr.type, lo.SUM_ENTRIES)",
"def sum(a, b):\n return a + b",
"def test_right_hand_side_operations(self):\n operators = (\n (\"__add__\", operator.add, True),\n (\"__sub__\", operator.sub, False),\n (\"__mul__\", operator.mul, True),\n (\"__truediv__\", operator.truediv, False),\n (\"__floordiv__\", operator.floordiv, False),\n (\"__mod__\", operator.mod, False),\n (\"__pow__\", operator.pow, False),\n )\n tensor = ht.float32([[1, 4], [2, 3]])\n num = 3\n for attr, op, commutative in operators:\n try:\n func = tensor.__getattribute__(attr)\n except AttributeError:\n continue\n self.assertTrue(callable(func))\n res_1 = op(tensor, num)\n res_2 = op(num, tensor)\n if commutative:\n self.assertTrue(ht.equal(res_1, res_2))\n # TODO: Test with split tensors when binary operations are working properly for split tensors",
"def test_multiply_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))",
"def test_optimization_handles_scalar_intermediates(simple_drudge):\n\n dr = simple_drudge\n\n r = dr.r\n a, b, c = dr.ds[:3]\n\n u = IndexedBase('u')\n eps = IndexedBase('epsilon')\n t = IndexedBase('t')\n s = IndexedBase('s')\n\n targets = [dr.define(\n u, (a, r), (b, r),\n dr.sum((c, r), 8 * s[a, b] * eps[c] * t[a])\n - 8 * s[a, b] * eps[a] * t[a]\n )]\n eval_seq = optimize(targets)\n assert verify_eval_seq(eval_seq, targets)",
"def add_scalar_operator(cls, name, callable):\n cls._operators_scalar[name] = callable",
"def _mul_scalar(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def __add__(self, other):\n return self.componentwise(other, operator.__add__)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the __sub__, __rsub__ and __neg__ dunder methods.
|
def test_sub_rsub_and_neg_dunder_methods(self):
    sum_op = qml.PauliX(0) - 5
    sum_op_2 = -(5 - qml.PauliX(0))
    assert np.allclose(a=sum_op.matrix(), b=np.array([[-5, 1], [1, -5]]), rtol=0)
    assert np.allclose(a=sum_op.matrix(), b=sum_op_2.matrix(), rtol=0)
    neg_op = -qml.PauliX(0)
    assert np.allclose(a=neg_op.matrix(), b=np.array([[0, -1], [-1, 0]]), rtol=0)
|
[
"def test_rsub():\n\tcomplexnr = 2 - Complex(4, 5) - (9 + 2j)\n\tassert complexnr == Complex(-11, -7)",
"def test_subtract():\n calc = Calculator(5)\n assert calc.subtract(6) == -1",
"def _sub(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def test_subtract_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) - 2, self.OneType(-1, 0, 1))\n self.assertEqual(5 - self.OneType(1, 2, 0), self.OneType(4, 3, 5))",
"def __sub__(self, other):\n if isinstance(other, (tuple,list)):\n neg_other = (-other[0], -other[1])\n else:\n neg_other = -other \n return self.__add__(-neg_other)",
"def test_subtraction(self):\n Mod5 = IntegersModP(5)\n Mod11 = IntegersModP(11)\n\n polysOverQ = polynomials_over(Fraction).factory\n polysMod5 = polynomials_over(Mod5).factory\n polysMod11 = polynomials_over(Mod11).factory\n for p in [polysOverQ, polysMod5, polysMod11]:\n # subtraction\n assert p([1,-2,3]) == p([1,0,3]) - p([0,2])\n assert p([1,2,3]) == p([1,2,3]) - p([])\n assert p([-1,-2,-3]) == p([]) - p([1,2,3])",
"def Sub(a, b):\n assert (a >= b)\n return a - b",
"def __isub__(self, *args, **kwargs):\n return _decomp.SOM___isub__(self, *args, **kwargs)",
"def test_calculator_subtract(self):\n self.calculator = Calculator()\n self.calculator.subtract_number(1)\n assert self.calculator.get_result() == -1",
"def __rsub__(self, other):\n self._typecheck_other(other)\n try:\n return Ad_Var(other._val - self._val, other._ders - self._ders)\n except AttributeError:\n return Ad_Var(other - self._val, - self._ders) #self._ders",
"def testSubtractionOfSelf(self):\n self.time_sub(['10:10:10','10:10:10','00:00:00'])",
"def __sub__(self, *args):\n return _vnl_vectorPython.vnl_vectorUS___sub__(self, *args)",
"def __sub__(self, other):\n obj = self._to_complex(other)\n return self.__add__(-obj)",
"def __sub__(self, other: int|Mod) -> Mod:\n other_mod = self.type_check(other)\n self.mod_check(other_mod)\n return Mod(self.value - other_mod.value, self.mod_value)",
"def __sub__(self, other):\n self._typecheck_other(other)\n try:\n return Ad_Var(self._val - other._val, self._ders - other._ders)\n except AttributeError:\n return Ad_Var(self._val - other, self._ders)",
"def __sub__(self, other):\r\n return self.difference(other)",
"def __sub__(self, *args):\n return _vnl_vectorPython.vnl_vectorSC___sub__(self, *args)",
"def __sub__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexLD___sub__(self, *args)",
"def __sub__(self, *args):\n return _vnl_vectorPython.vnl_vectorUL___sub__(self, *args)",
"def __sub__(self, *args):\n return _vnl_vectorPython.vnl_vectorSL___sub__(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the __mul__ dunder method raises an error when using an unsupported object.
|
def test_mul_with_not_supported_object_raises_error(self):
    with pytest.raises(ValueError, match="Cannot multiply Observable by"):
        _ = "dummy" * qml.PauliX(0)
|
[
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def test_multiply_except(self):\n chan = SuperOp(self.sopI)\n self.assertRaises(QiskitError, chan.multiply, 's')\n self.assertRaises(QiskitError, chan.multiply, chan)",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_mul():\n\tcomplexnr1 = Complex(1, 1)\n\tcomplexnr2 = Complex(2, 2)\n\tassert complexnr1*complexnr2 == Complex(0, 4)",
"def test_mul(a, b):\n assert Surreal(a) * Surreal(b) == Surreal(a * b)",
"def test_multiply_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))",
"def testMul(self):\n f25 = self.f25\n a = f25(1, 2)\n b = f25(1, 1)\n self.assertEqual(a * b, f25(-1, 1))",
"def test_my_mul():\n assert my_mul(2, 7) == 14\n assert my_mul(9, 9) == 81",
"def test_pow_method_with_non_numeric_power_raises_error(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(ValueError, match=\"Cannot raise an Operator\"):\n _ = DummyOp(wires=[0]) ** DummyOp(wires=[0])",
"def test_mul(self):\n self.assertEqual(Fraction(3, 8), Fraction(3, 4)*Fraction(1, 2))\n self.assertEqual(Fraction(-3, 8), Fraction(-3, 4)*Fraction(1, 2))\n self.assertEqual(Fraction(-3, 8), Fraction(3, -4)*Fraction(1, 2))\n self.assertEqual(Fraction(3, -8), Fraction(3, -4)*Fraction(1, 2))\n self.assertEqual(Fraction(3, -8), Fraction(-3, 4)*Fraction(1, 2))\n self.assertEqual(Fraction(0, 1), Fraction(-3, 4)*Fraction(0))\n self.assertEqual(Fraction(1, 4), Fraction(-1, 2)*Fraction(-1, 2))",
"def __mul__(self, other):\n if isinstance(other, PolynomialInterface):\n return self.ring_mul(other)\n else:\n try:\n return self.scalar_mul(other)\n except TypeError:\n return NotImplemented",
"def test_arithmetic_errors(self):\n H = qml.Hamiltonian([1], [qml.PauliZ(0)])\n A = [[1, 0], [0, -1]]\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H @ A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = A @ H\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H + A\n with pytest.raises(TypeError, match=\"can't multiply sequence by non-int\"):\n _ = H * A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H - A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H += A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H *= A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H -= A",
"def test_multiply():\n calc = Calculator(2)\n assert calc.multiply(2) == 4",
"def test_mul_elemwise(self):\r\n self.assertEquals(mul_elemwise([1, -1], self.x).sign, u.Sign.UNKNOWN_KEY)\r\n self.assertEquals(mul_elemwise([1, -1], self.x).curvature, u.Curvature.AFFINE_KEY)\r\n self.assertEquals(mul_elemwise([1, -1], self.x).size, (2, 1))\r\n pos_param = Parameter(2, sign=\"positive\")\r\n neg_param = Parameter(2, sign=\"negative\")\r\n self.assertEquals(mul_elemwise(pos_param, pos_param).sign, u.Sign.POSITIVE_KEY)\r\n self.assertEquals(mul_elemwise(pos_param, neg_param).sign, u.Sign.NEGATIVE_KEY)\r\n self.assertEquals(mul_elemwise(neg_param, neg_param).sign, u.Sign.POSITIVE_KEY)\r\n\r\n self.assertEquals(mul_elemwise(neg_param, square(self.x)).curvature, u.Curvature.CONCAVE_KEY)\r\n\r\n # Test promotion.\r\n self.assertEquals(mul_elemwise([1, -1], 1).size, (2, 1))\r\n self.assertEquals(mul_elemwise(1, self.C).size, self.C.size)\r\n\r\n with self.assertRaises(Exception) as cm:\r\n mul_elemwise(self.x, [1, -1])\r\n self.assertEqual(str(cm.exception),\r\n \"The first argument to mul_elemwise must be constant.\")",
"def test_power_except(self):\n chan = SuperOp(self.depol_sop(1))\n # Non-integer power raises error\n self.assertRaises(QiskitError, chan.power, 0.5)",
"def _mul(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def __mul__(self, other):\n if isinstance(other, IrreducibleRepresentation):\n if self.pg == other.pg:\n product = self._irrep_product(other)\n if self.degenerate and other.degenerate:\n # The product of two degenerate irrep's is a reducible representation\n # int() weg, dafür // -> from future import division\n # eleganter als skalarprodukt schreibbar?\n ai = [int(1/self.pg.order * sum([i*j*k for i,j,k in zip(self.pg.symop_multiplicity, irrep.irrep, product)])) for irrep in self.pg.elements.values()]\n product_irreps = [irrep for a, irrep in zip(ai, self.pg.elements.values()) if a != 0]\n return product_irreps\n else:\n return IrreducibleRepresentation(pg=self.pg, irrep=product, degenerate=(self.degenerate or other.degenerate))\n else:\n raise NotImplementedError('no automatic lowering of symmetry: cannot multiply irreps of non-identical point groups {} and {}.'.format(self.pg, other.pg))\n elif isinstance(other, list):\n # besser: self * list_product(other) = self * reduce(mul, other)\n # was der konvention entspricht, dass in a x (b x c) die klammer zuerst ausgerechnet wird\n all_products = list(flatten([self*irrep for irrep in other]))\n all_products.sort()\n return all_products\n else:\n raise TypeError('IrreducibleRepresentation can only be multiplied with (list of) IrreducibleRepresentation')",
"def __mul__(self, other):\n try:\n return Scalar(self._val*other._val, self._der*other._val+self._val*other._der)\n except AttributeError:\n return self.__rmul__(other)",
"def _mul_scalar(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the __matmul__ dunder method raises an error when using an unsupported object.
|
def test_matmul_with_not_supported_object_raises_error(self):
    with pytest.raises(ValueError, match="Can only perform tensor products between operators."):
        _ = qml.PauliX(0) @ "dummy"
|
[
"def test_mul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Cannot multiply Observable by\"):\n _ = \"dummy\" * qml.PauliX(0)",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_multiply_except(self):\n chan = SuperOp(self.sopI)\n self.assertRaises(QiskitError, chan.multiply, 's')\n self.assertRaises(QiskitError, chan.multiply, chan)",
"def test_matmul(self, matrices):\n # Instantiate the 10x10 matrix and test matrix multiplication\n square_mat = chap5.Matrix(matrices.square)\n square_np = np.array(matrices.square)\n square_matmul = (square_mat @ square_mat)._matrix\n square_np_result = square_np @ square_np\n # Compare to the Numpy result of multiplying the matrix times itself\n assert (np.array(square_matmul) == square_np_result).all()\n # Instantiate a 5x10 and 10x5 matrix as Matrix class and Numpy array\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n half_row_np = np.array(matrices.half_row)\n half_col_np = np.array(matrices.half_col)\n # Matrix multiplication amongst the 10x10, 5x10, and 10x5 matrices\n result1 = half_row_mat @ half_col_mat # (5x10) @ (10x5)\n exp_result1 = half_row_np @ half_col_np # (5x10) @ (10x5)\n result2 = half_col_mat @ half_row_mat # (10x5) @ (5x10)\n exp_result2 = half_col_np @ half_row_np # (10x5) @ (5x10)\n result3 = half_row_mat @ square_mat # (5x10) @ (10x10)\n exp_result3 = half_row_np @ square_np # (5x10) @ (10x10)\n result4 = square_mat @ half_col_mat # (10x10) @ (10x5)\n exp_result4 = square_np @ half_col_np # (10x10) @ (10x5)\n assert (np.array(result1._matrix) == exp_result1).all()\n assert (np.array(result2._matrix) == exp_result2).all()\n assert (np.array(result3._matrix) == exp_result3).all()\n assert (np.array(result4._matrix) == exp_result4).all()",
"def test_mul():\n\tcomplexnr1 = Complex(1, 1)\n\tcomplexnr2 = Complex(2, 2)\n\tassert complexnr1*complexnr2 == Complex(0, 4)",
"def testMul(self):\n f25 = self.f25\n a = f25(1, 2)\n b = f25(1, 1)\n self.assertEqual(a * b, f25(-1, 1))",
"def test_decomposition_raises_error(self):\n op = ValidOp(*self.simple_operands)\n\n with pytest.raises(DecompositionUndefinedError):\n op.decomposition()",
"def __matmul__(self, a):\n if isinstance(a, tm):\n return tm(self.TM @ a.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(self.TM @ a)",
"def test_classic_2x2(self):\r\n # problem\r\n A = [[0, 1], [1, 0]]\r\n B = [[2, 3], [3, 2]]\r\n\r\n # solution\r\n answer = [[3, 2], [2, 3]]\r\n\r\n # test\r\n C = matrix_multiply(A, B)\r\n self.assertEqual(C, answer)",
"def test_incompatible_dimensions(self, matrices):\n # Instantiate 5x10, 10x5, and 10x10 matrices as Matrix class\n square_mat = chap5.Matrix(matrices.square)\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n # Verify Matrix class raises AssertionError for incompatible dimensions\n with pytest.raises(AssertionError):\n half_row_mat + half_col_mat # (5x10) + (10x5)\n with pytest.raises(AssertionError):\n half_col_mat + half_row_mat # (10x5) + (5x10)\n with pytest.raises(AssertionError):\n half_col_mat @ square_mat # (10x5) @ (10x10)\n with pytest.raises(AssertionError):\n square_mat @ half_row_mat # (10x10) @ (5x10)",
"def __mul__(self, other):\n if isinstance(other, (int, float)):\n return Matrix(self.row_n, self.col_n, [a * other for a_row in self.data for a in a_row])\n elif isinstance(other, Matrix):\n return Matrix(self.row_n, other.col_n, [self.dot_product(a_row, b_col) for a_row in self.data for b_col in other.columns])\n else:\n print(self.OP_ERROR)\n return Matrix()",
"def test_mul(a, b):\n assert Surreal(a) * Surreal(b) == Surreal(a * b)",
"def test_arithmetic_errors(self):\n H = qml.Hamiltonian([1], [qml.PauliZ(0)])\n A = [[1, 0], [0, -1]]\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H @ A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = A @ H\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H + A\n with pytest.raises(TypeError, match=\"can't multiply sequence by non-int\"):\n _ = H * A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H - A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H += A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H *= A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H -= A",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def __rmatmul__(self, a):\n if isinstance(a, tm):\n return tm(a.TM @ self.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(a @ self.TM)\n return tm(a * self.TAA)",
"def test_dot(self):\n\n # If no arrays, return 0\n self.assertAllClose(linalg.dot(),\n 0)\n # If only one array, return itself\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]]),\n [[1,2,3],\n [4,5,6]])\n # Basic test of two arrays: (2,3) * (3,2)\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]],\n [[7,8],\n [9,1],\n [2,3]]),\n [[31,19],\n [85,55]])\n # Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]],\n [[7,8],\n [9,1],\n [2,3]],\n [[4],\n [5]],\n [[6,7]]),\n [[1314,1533],\n [3690,4305]])\n\n # Test broadcasting: (2,2,2) * (2,2,2,2)\n self.assertAllClose(linalg.dot([[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[9,1],\n [2,3]],\n [[4,5],\n [6,7]]]]),\n [[[[ 7, 10],\n [ 15, 22]],\n\n [[ 67, 78],\n [ 91, 106]]],\n\n\n [[[ 13, 7],\n [ 35, 15]],\n\n [[ 56, 67],\n [ 76, 91]]]])\n\n # Inconsistent shapes: (2,3) * (2,3)\n self.assertRaises(ValueError,\n linalg.dot,\n [[1,2,3],\n [4,5,6]],\n [[1,2,3],\n [4,5,6]])\n # Other axes do not broadcast: (2,2,2) * (3,2,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[1,2],\n [3,4]],\n [[5,6],\n [7,8]],\n [[9,1],\n [2,3]]])\n # Do not broadcast matrix axes: (2,1) * (3,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [[1],\n [2]],\n [[1,2,3],\n [4,5,6]])\n # Do not accept less than 2-D arrays: (2) * (2,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [1,2],\n [[1,2,3],\n [4,5,6]])",
"def __mul__(self, matrix):",
"def test_reconstruct_not_raised(self, *shapes):\n self.assert_exception_is_not_raised(matting.reconstruct, shapes)",
"def test_matrix_dot_matrix():\n A = np.linspace(1, 12, 12, dtype=FTYPE).reshape(3, 4)\n B = np.linspace(1, 12, 12, dtype=FTYPE).reshape(4, 3)\n C = np.ones((3, 3), dtype=FTYPE)\n\n matrix_dot_matrix_guf(A, B, C)\n\n test = C\n ref = np.dot(A, B).astype(FTYPE)\n assert np.allclose(test, ref, **ALLCLOSE_KW), f\"test:\\n{test}\\n!= ref:\\n{ref}\"\n\n logging.info(\"<< PASS : test_matrix_dot_matrix >>\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that inv updates the inverse property in place during queuing.
|
def test_inv_queuing(self):
    class DummyOp(qml.operation.Operation):
        r"""Dummy custom Operation"""
        num_wires = 1

    with qml.tape.QuantumTape() as tape:
        op = DummyOp(wires=[0]).inv()
        assert op.inverse is True

    assert op.inverse is True
|
[
"def test_inv(self):\n\n operation = CirqOperation(\n lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]\n )\n\n assert not operation.is_inverse\n\n operation.inv()\n\n assert operation.is_inverse\n\n operation.inv()\n\n assert not operation.is_inverse",
"def test_operation_inverse_defined(self, qnode_for_inverse):\n assert qnode_for_inverse.qtape.operations[0].name == \"RZ.inv\"\n assert qnode_for_inverse.qtape.operations[0].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[0].__class__, qml.operation.Operation)\n assert qnode_for_inverse.qtape.operations[1].name == \"RZ\"\n assert not qnode_for_inverse.qtape.operations[1].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[1].__class__, qml.operation.Operation)",
"def test_local_inversion(invertible_xform, to_invert, im, dict_key=None):\n im_item = im if dict_key is None else im[dict_key]\n if not isinstance(im_item, MetaTensor):\n return\n im_ref = copy.deepcopy(im)\n im_inv = invertible_xform.inverse(to_invert)\n if dict_key:\n im_inv = im_inv[dict_key]\n im_ref = im_ref[dict_key]\n np.testing.assert_array_equal(im_inv.applied_operations, [])\n assert_allclose(im_inv.shape, im_ref.shape)\n assert_allclose(im_inv.affine, im_ref.affine, atol=1e-3, rtol=1e-3)",
"def test_custom_inverse():\n\n p = models.Polynomial1D(1, c0=-2, c1=3)\n # A trivial inverse for a trivial polynomial\n inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))\n\n with pytest.raises(NotImplementedError):\n p.inverse\n\n p.inverse = inv\n\n x = np.arange(100)\n\n assert_allclose(x, p(p.inverse(x)))\n assert_allclose(x, p.inverse(p(x)))\n\n p.inverse = None\n\n with pytest.raises(NotImplementedError):\n p.inverse",
"def verify_inverse_transform(x, x_inv):\n assert x.equals(x_inv)",
"def inverse(self, inv):\n if not isinstance(inv, bool):\n raise ValueError(f'The recover entry {inv} is not of type bool.')\n self._inverse = inv",
"def test_custom_inverse_reset():\n\n class TestModel(Model):\n n_inputs = 0\n outputs = ('y',)\n\n @property\n def inverse(self):\n return models.Shift()\n\n @staticmethod\n def evaluate():\n return 0\n\n # The above test model has no meaning, nor does its inverse--this just\n # tests that setting an inverse and resetting to the default inverse works\n\n m = TestModel()\n assert isinstance(m.inverse, models.Shift)\n\n m.inverse = models.Scale()\n assert isinstance(m.inverse, models.Scale)\n\n del m.inverse\n assert isinstance(m.inverse, models.Shift)",
"def test_compute_mne_inverse():\n\n setno = 0\n snr = 3.0\n lambda2 = 1.0 / snr**2\n dSPM = True\n\n res = mne.compute_inverse(fname_data, setno, fname_inv, lambda2, dSPM,\n baseline=(None, 0))\n\n assert np.all(res['sol'] > 0)\n assert np.all(res['sol'] < 35)",
"def test_inverse(self):\n permlist = []\n for n in range(1, 10):\n for _ in range(1, 10*n):\n permlist.append(randperm(n))\n\n for perm in permlist:\n e = rangefunc(range(len(perm)))\n self.assertSequenceEqual(e, perm * perm.inverse)\n self.assertSequenceEqual(e, perm.inverse * perm)\n self.assertSequenceEqual(perm, perm.inverse.inverse)",
"def test_queue_remove(self):\n q1 = self.party.enqueue_song(self.user, 't123')\n q2 = self.party.enqueue_song(self.user, 't456')\n q2.upvote(self.user2)\n next_entry = self.party.dequeue_next_song()\n self.assertEquals(next_entry, q2)\n self.party.save(self.redis)\n p = Party.get(self.redis, self.party.id)\n self.assertEquals(p.queue[0].id, q1.id)",
"def test_request_inverse_transform():\n rng = np.random.default_rng(0)\n A2B = pt.random_transform(rng)\n\n tm = TransformManager()\n tm.add_transform(\"A\", \"B\", A2B)\n A2B_2 = tm.get_transform(\"A\", \"B\")\n assert_array_almost_equal(A2B, A2B_2)\n\n B2A = tm.get_transform(\"B\", \"A\")\n B2A_2 = pt.invert_transform(A2B)\n assert_array_almost_equal(B2A, B2A_2)",
"def test_update_inbox_replier(self):\n pass",
"def test_io_inverse():\n fwd = mne.read_inverse_operator(fname_inv)",
"def __inv_sub_bytes(self):\n for i in range(4):\n for j in range(self.nb):\n self.state[i][j] = self.is_box[self.state[i][j]]",
"def testTransactedTransacts(self):\n s = store.Store()\n i = TransactedMethodItem(store=s, value=\"unchanged\")\n exc = self.assertRaises(Exception, i.method, 'a', 'b', 'c')\n self.assertEqual(exc.args, (\"TransactedMethodItem.method test exception\",))\n self.assertEqual(i.value, \"unchanged\")",
"def test__put_revoked_into():\n for input_, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'revoked': False}),\n (True, False, {'revoked': True}),\n ):\n data = put_revoked_into(input_, {}, defaults)\n vampytest.assert_eq(data, expected_output)",
"def test_zmq_api_queue_item_update_1(re_manager, replace): # noqa F811\n\n resp1, _ = zmq_single_request(\"queue_item_add\", {\"item\": _plan1, \"user\": _user, \"user_group\": _user_group})\n assert resp1[\"success\"] is True\n assert resp1[\"qsize\"] == 1\n assert resp1[\"item\"][\"name\"] == _plan1[\"name\"]\n assert resp1[\"item\"][\"args\"] == _plan1[\"args\"]\n assert resp1[\"item\"][\"user\"] == _user\n assert resp1[\"item\"][\"user_group\"] == _user_group\n assert \"item_uid\" in resp1[\"item\"]\n\n plan = resp1[\"item\"]\n uid = plan[\"item_uid\"]\n\n plan_changed = plan.copy()\n plan_new_args = [[\"det1\"]]\n plan_changed[\"args\"] = plan_new_args\n\n user_replaced = \"Different User\"\n params = {\"item\": plan_changed, \"user\": user_replaced, \"user_group\": _user_group}\n if replace is not None:\n params[\"replace\"] = replace\n\n status1 = get_queue_state()\n\n resp2, _ = zmq_single_request(\"queue_item_update\", params)\n assert resp2[\"success\"] is True\n assert resp2[\"qsize\"] == 1\n assert resp2[\"item\"][\"name\"] == _plan1[\"name\"]\n assert resp2[\"item\"][\"args\"] == plan_new_args\n assert resp2[\"item\"][\"user\"] == user_replaced\n assert resp2[\"item\"][\"user_group\"] == _user_group\n assert \"item_uid\" in resp2[\"item\"]\n if replace:\n assert resp2[\"item\"][\"item_uid\"] != uid\n else:\n assert resp2[\"item\"][\"item_uid\"] == uid\n\n status2 = get_queue_state()\n assert status2[\"plan_queue_uid\"] != status1[\"plan_queue_uid\"]\n assert status2[\"plan_history_uid\"] == status1[\"plan_history_uid\"]\n\n resp3, _ = zmq_single_request(\"queue_get\")\n assert resp3[\"items\"] != []\n assert len(resp3[\"items\"]) == 1\n assert resp3[\"items\"][0] == resp2[\"item\"]\n assert resp3[\"running_item\"] == {}\n assert resp3[\"plan_queue_uid\"] == status2[\"plan_queue_uid\"]",
"def testCTFInverse(self):\n m=4\n n=3\n k=2\n A = crandom.random((m,k))\n ctfInvA = CTFinverse(A)\n npA = la.pinv(A.to_nparray())\n for i in range(len(npA)):\n for j in range(len(npA[0])):\n assert(abs(ctfInvA[i][j] - npA[i][j]) < .00000000001)\n print(\"Inverse Works!\")",
"def test_unroutable(self):\n session = self.session\n #create an exchange with an alternate defined\n session.exchange_declare(exchange=\"secondary\", type=\"fanout\")\n session.exchange_declare(exchange=\"primary\", type=\"direct\", alternate_exchange=\"secondary\")\n\n #declare, bind (to the alternate exchange) and consume from a queue for 'returned' messages\n session.queue_declare(queue=\"returns\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"returns\", exchange=\"secondary\")\n session.message_subscribe(destination=\"a\", queue=\"returns\")\n session.message_flow(destination=\"a\", unit=session.credit_unit.message, value=0xFFFFFFFF)\n session.message_flow(destination=\"a\", unit=session.credit_unit.byte, value=0xFFFFFFFF)\n returned = session.incoming(\"a\")\n\n #declare, bind (to the primary exchange) and consume from a queue for 'processed' messages\n session.queue_declare(queue=\"processed\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"processed\", exchange=\"primary\", binding_key=\"my-key\")\n session.message_subscribe(destination=\"b\", queue=\"processed\")\n session.message_flow(destination=\"b\", unit=session.credit_unit.message, value=0xFFFFFFFF)\n session.message_flow(destination=\"b\", unit=session.credit_unit.byte, value=0xFFFFFFFF)\n processed = session.incoming(\"b\")\n\n #publish to the primary exchange\n #...one message that makes it to the 'processed' queue:\n dp=self.session.delivery_properties(routing_key=\"my-key\")\n session.message_transfer(destination=\"primary\", message=Message(dp, \"Good\"))\n #...and one that does not:\n dp=self.session.delivery_properties(routing_key=\"unused-key\")\n session.message_transfer(destination=\"primary\", message=Message(dp, \"Bad\"))\n\n #delete the exchanges\n session.exchange_delete(exchange=\"primary\")\n session.exchange_delete(exchange=\"secondary\")\n\n #verify behaviour\n self.assertEqual(\"Good\", processed.get(timeout=1).body)\n self.assertEqual(\"Bad\", returned.get(timeout=1).body)\n self.assertEmpty(processed)\n self.assertEmpty(returned)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the inverse of operations is not currently supported on the default gaussian device
|
def test_inverse_operations_not_supported(self):
    dev1 = qml.device("default.gaussian", wires=2)

    @qml.qnode(dev1)
    def mean_photon_gaussian(mag_alpha, phase_alpha, phi):
        qml.Displacement(mag_alpha, phase_alpha, wires=0)
        qml.Rotation(phi, wires=0).inv()
        return qml.expval(qml.NumberOperator(0))

    with pytest.raises(
        qml.DeviceError,
        match=r"inverse of gates are not supported on device default\.gaussian",
    ):
        mean_photon_gaussian(0.015, 0.02, 0.005)
|
[
"def test_gaussian(self):\n self.logTestName()\n res = self.H.is_gaussian()\n self.assertFalse(res)",
"def test_gaussian(self):\n self.logTestName()\n res = self.H.is_gaussian()\n self.assertTrue(res)",
"def test_inverse(self):\n\n\t\tgsm = GSM(3, 10)\n\t\tgsm.initialize('cauchy')\n\n\t\t# generate test data\n\t\tsamples = gsm.sample(100)\n\n\t\trg = RadialGaussianization(gsm)\n\n\t\t# reconstructed samples\n\t\tsamples_ = rg.inverse(rg(samples))\n\n\t\t# distance between norm and reconstructed norm\n\t\tdist = abs(sqrt(sum(square(samples_))) - sqrt(sum(square(samples))))\n\n\t\tself.assertTrue(all(dist < 1E-6))\n\n\t\t###\n\n\t\t# test one-dimensional GSM\n\t\tgsm = GSM(1, 7)\n\t\tgsm.initialize('cauchy')\n\n\t\t# generate test data\n\t\tsamples = gsm.sample(100)\n\n\t\trg = RadialGaussianization(gsm)\n\n\t\t# reconstructed samples\n\t\tsamples_rg = rg.inverse(rg(samples))\n\n\t\t# distance between norm and reconstructed norm\n\t\tdist = abs(sqrt(sum(square(samples_rg))) - sqrt(sum(square(samples))))\n\n\t\tself.assertTrue(all(dist < 1E-6))",
"def test_unsupported_gates(self):\n self.logTestName()\n\n dev = qml.device('strawberryfields.gaussian', wires=2)\n gates = set(dev._operation_map.keys())\n all_gates = qml.ops._cv__ops__\n\n for g in all_gates - gates:\n op = getattr(qml.ops, g)\n\n if op.num_wires <= 0:\n wires = [0]\n else:\n wires = list(range(op.num_wires))\n\n @qml.qnode(dev)\n def circuit(*x):\n x = prep_par(x, op)\n op(*x, wires=wires)\n\n if issubclass(op, qml.operation.CV):\n return qml.expval(qml.X(0))\n else:\n return qml.expval(qml.PauliZ(0))\n\n with self.assertRaisesRegex(qml.DeviceError,\n \"Gate {} not supported on device strawberryfields.gaussian\".format(g)):\n x = np.random.random([op.num_params])\n circuit(*x)",
"def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))",
"def test(x_norm, x_unnorm):\n # NOTE: closes over x_np & x_norm_correct_np\n assert x_norm.dtype == x_norm_correct_np.dtype\n assert x_unnorm.dtype == x_np.dtype\n assert np.allclose(x_norm, x_norm_correct_np)\n assert not np.allclose(x_norm, x_np)\n assert np.all(np.max(x_norm, axis=(0,1)) > 1)\n assert np.all(np.max(x_norm, axis=(0,1)) < 255 - means)\n assert np.all(np.min(x_norm, axis=(0,1)) < 0)\n assert np.all(np.min(x_norm, axis=(0,1)) > 0 - means)\n assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)",
"def test_two_gaussian_potential_no_trigger(self):\n\n trigger = two_gaussian_potential(coords[0])[2]\n self.assertFalse(trigger)",
"def test_gaussian_args(self):\n self.logTestName()\n\n with self.assertRaisesRegex(TypeError, \"missing 1 required positional argument: 'wires'\"):\n dev = qml.device('strawberryfields.gaussian')",
"def __check_output_distr(self):\n if self.decoder_output_distr not in [\n 'gaussian', 'bernoulli']:\n raise Exception(\n 'output neither gaussian nor bernoulli.')",
"def testGaussian(self):\n random.seed(42)\n\n us = UniformSample()\n for _ in range(300):\n us.update(random.gauss(42.0, 13.0))\n self.assertAlmostEqual(us.mean, 43.143067271195235, places=5)\n self.assertAlmostEqual(us.stddev, 13.008553229943168, places=5)\n\n us.clear()\n for _ in range(30000):\n us.update(random.gauss(0.0012, 0.00005))\n self.assertAlmostEqual(us.mean, 0.0012015284549517493, places=5)\n self.assertAlmostEqual(us.stddev, 4.9776450250869146e-05, places=5)",
"def test_stable_global_norm_unchanged(self):\n tf.set_random_seed(1234)\n tensors = [tf.random_uniform([3] * i, -10.0, 10.0) for i in range(6)]\n gnorm = tf.global_norm(tensors)\n precond_gnorm = tfgan_losses._numerically_stable_global_norm(tensors)\n\n with self.test_session(use_gpu=True) as sess:\n # spot check closeness on more than one sample.\n for _ in range(10):\n gnorm_np, precond_gnorm_np = sess.run([gnorm, precond_gnorm])\n self.assertNear(gnorm_np, precond_gnorm_np, 1e-5)",
"def test_gate_not_defined(self):\n xir_prog = xir.Program()\n xir_prog.add_statement(xir.Statement(\"np\", [1, 2, 3], (0,)))\n\n with pytest.raises(NameError, match=\"operation 'np' not defined\"):\n io.to_program(xir_prog)",
"def test(x_norm, x_unnorm):\n # NOTE: closes over x_np & x_norm_correct_np\n assert x_norm.dtype == x_norm_correct_np.dtype\n assert x_unnorm.dtype == x_np.dtype\n assert np.allclose(x_norm, x_norm_correct_np)\n assert not np.allclose(x_norm, x_np)\n assert np.all(np.max(x_norm, axis=(0,1)) <= 1)\n assert np.all(np.max(x_norm, axis=(0,1)) > 0)\n assert np.all(np.min(x_norm, axis=(0,1)) >= -1)\n assert np.all(np.min(x_norm, axis=(0,1)) < 0)\n assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)",
"def test_operation_not_supporting_analytic_gradient(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.Hermitian(np.diag([x, 0]), 0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"analytic gradient method cannot be used with\"):\n node.jacobian(0.5, method=\"A\")",
"def test_stable_global_norm_unchanged(self):\n tf.compat.v1.set_random_seed(1234)\n tensors = [tf.random.uniform([3] * i, -10.0, 10.0) for i in range(6)]\n gnorm = tf.linalg.global_norm(tensors)\n precond_gnorm = numerically_stable_global_norm(tensors)\n\n with self.cached_session() as sess:\n for _ in range(10): # spot check closeness on more than one sample.\n gnorm_np, precond_gnorm_np = sess.run([gnorm, precond_gnorm])\n self.assertNear(gnorm_np, precond_gnorm_np, 1e-4)",
"def test_gaussian_node(self):\n means = [0.0, 0.5, 1.0]\n stds = [1.0, 2.0, 3.0]\n gauss0 = GaussianNode(mean=means[0], std=stds[0], scope=0)\n gauss1 = GaussianNode(mean=means[1], std=stds[1], scope=1)\n gauss2 = GaussianNode(mean=means[2], std=stds[2], scope=2)\n sample1 = np.array([1, 2, 3])\n sample2 = np.array([10, 20, 30])\n x = torch.Tensor([sample1, sample2])\n\n # Get results\n res_gauss0 = gauss0(x)\n res_gauss1 = gauss1(x)\n res_gauss2 = gauss2(x)\n\n # Expect results from normal distributions\n normal0 = torch.distributions.Normal(loc=means[0], scale=stds[0])\n normal1 = torch.distributions.Normal(loc=means[1], scale=stds[1])\n normal2 = torch.distributions.Normal(loc=means[2], scale=stds[2])\n\n exp_gauss0 = normal0.log_prob(torch.Tensor([1, 10]))\n exp_gauss1 = normal1.log_prob(torch.Tensor([2, 20]))\n exp_gauss2 = normal2.log_prob(torch.Tensor([3, 30]))\n\n # Assertions\n self.assertEqual(len(res_gauss0.tolist()), 2)\n self.assertEqual(len(res_gauss1.tolist()), 2)\n self.assertEqual(len(res_gauss2.tolist()), 2)\n\n # Assert that results are numerically equal\n self.assertTrue(np.isclose(res_gauss0.tolist(), exp_gauss0, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss1.tolist(), exp_gauss1, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss2.tolist(), exp_gauss2, atol=DELTA).all())",
"def test_normalize_else():\n hops = HOPS(\n sys_param,\n noise_param=noise_param,\n hierarchy_param=hier_param,\n eom_param=eom_param,\n integration_param=integrator_param,\n )\n hops.initialize([2, 3])\n hops.basis.eom.normalized = False\n norm = hops.normalize([2, 3])\n known_norm = [2, 3]\n assert np.allclose(norm, known_norm)",
"def test_gaussian_profile(): \n\n # check sigma input\n obj = galsim.Gaussian(sigma=sigma)\n image_galsim_sigma = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_sigma = gf.lightprofiles.gaussian(sigma=[sigma], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check batch input\n obj1 = galsim.Gaussian(sigma=sigma)\n obj2 = galsim.Gaussian(sigma=sigma*2)\n image_galsim_batch1 = obj1.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galsim_batch2 = obj2.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galsim_batch = np.stack([image_galsim_batch1, image_galsim_batch2], axis=0)\n image_galflow_batch = gf.lightprofiles.gaussian(sigma=[sigma, sigma*2], nx=stamp_size, ny=stamp_size)\n\n # check half_light_radius input\n obj = galsim.Gaussian(half_light_radius=hlr)\n image_galsim_hlr = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_hlr = gf.lightprofiles.gaussian(half_light_radius=[hlr], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check fwhm input\n obj = galsim.Gaussian(fwhm=fwhm)\n image_galsim_fwhm = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_fwhm = gf.lightprofiles.gaussian(fwhm=[fwhm], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check fwhm input\n obj = galsim.Gaussian(fwhm=fwhm)\n image_galsim_scale = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=scale, method='no_pixel').array\n image_galflow_scale = gf.lightprofiles.gaussian(fwhm=[fwhm], nx=stamp_size, ny=stamp_size, scale=scale)[0,...]\n\n # check flux input\n obj = galsim.Gaussian(fwhm=fwhm, flux=flux)\n image_galsim_flux = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_flux = gf.lightprofiles.gaussian(fwhm=[fwhm], flux=[flux], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check even and odd stamp sizes\n obj = galsim.Gaussian(fwhm=fwhm, flux=flux)\n image_galsim_size = obj.drawImage(nx=stamp_size, ny=stamp_size+1, scale=1., method='no_pixel').array\n image_galflow_size = gf.lightprofiles.gaussian(fwhm=[fwhm], flux=[flux], nx=stamp_size, ny=stamp_size+1)[0,...]\n\n assert_allclose(image_galsim_sigma, image_galflow_sigma, atol=1e-5)\n assert_allclose(image_galsim_batch, image_galflow_batch, atol=1e-5)\n assert_allclose(image_galsim_hlr, image_galflow_hlr, atol=1e-5)\n assert_allclose(image_galsim_fwhm, image_galflow_fwhm, atol=1e-5)\n assert_allclose(image_galsim_scale, image_galflow_scale, rtol=1e-5)\n assert_allclose(image_galsim_flux, image_galflow_flux, atol=1e-5)\n assert_allclose(image_galsim_size, image_galflow_size, atol=1e-5)",
"def test_normalize_if():\n hops = HOPS(\n sys_param,\n noise_param=noise_param,\n hierarchy_param=hier_param,\n eom_param=eom_param,\n integration_param=integrator_param,\n )\n hops.initialize([2, 3])\n hops.basis.eom.normalized = True\n norm = hops.normalize([2, 3])\n known_norm = [0.5547002, 0.83205029]\n assert np.allclose(norm, known_norm)",
"def test_SMEB():\n testing_function('sme', bilinear=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Provides a QNode for the subsequent tests of inv
|
def qnode_for_inverse(self, mock_device):
    def circuit(x):
        qml.RZ(x, wires=[1]).inv()
        qml.RZ(x, wires=[1]).inv().inv()
        return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))

    node = qml.QNode(circuit, mock_device)
    node.construct([1.0], {})
    return node
|
[
"def test_inv_queuing(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom Operation\"\"\"\n num_wires = 1\n\n with qml.tape.QuantumTape() as tape:\n op = DummyOp(wires=[0]).inv()\n assert op.inverse is True\n\n assert op.inverse is True",
"def test_operation_inverse_defined(self, qnode_for_inverse):\n assert qnode_for_inverse.qtape.operations[0].name == \"RZ.inv\"\n assert qnode_for_inverse.qtape.operations[0].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[0].__class__, qml.operation.Operation)\n assert qnode_for_inverse.qtape.operations[1].name == \"RZ\"\n assert not qnode_for_inverse.qtape.operations[1].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[1].__class__, qml.operation.Operation)",
"def zero_inv(Q):\n\n temp = np.diag([Q[2,2], Q[3,3], Q[4,4]])\n temp = inv(temp)\n return np.diag([Q[0,0], Q[1,1], temp[0,0], temp[1,1],temp[2,2]])",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def define_projectq_gate(self):",
"def test_inv(self):\n\n operation = CirqOperation(\n lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]\n )\n\n assert not operation.is_inverse\n\n operation.inv()\n\n assert operation.is_inverse\n\n operation.inv()\n\n assert not operation.is_inverse",
"def qubit(self, vertex):\n raise NotImplementedError(\"Not implemented on backend\" + type(self).backend)",
"def invertQTransform(tr):\n try:\n det = tr.determinant()\n detr = 1.0 / det # let singular matrices raise ZeroDivisionError\n inv = tr.adjoint()\n inv *= detr\n return inv\n except ZeroDivisionError:\n return _pinv_fallback(tr)",
"def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])",
"def negate( quat ):\n return quat * -1.0",
"def __ne__(self, pQua2):\n return _almathswig.Quaternion___ne__(self, pQua2)",
"def __init__(self, q=list()):\n self.q = q",
"def createQ(self, state):\n\n ########### \n ## TO DO ##\n ###########\n # When learning, check if the 'state' is not in the Q-table\n # If it is not, create a new dictionary for that state\n # Then, for each action available, set the initial Q-value to 0\n if not state in self.Q:\n print(\"inserting new state {}\",state)\n self.Q.update({state:{}})\n for action in self.valid_actions:\n self.Q[state][action]=0.0\n return",
"def add_qubit(self):\n z0 = StabilizerState([[0, 1]])\n self.__init__(self.tensor_product(z0))",
"def query( self ):\n if ( self.childCount() ):\n q = Q()\n for i in range(self.childCount()):\n q &= self.child(i).query()\n return q\n else:\n op_name = projex.text.classname(self.text(2))\n q = Q(str(self.text(1)))\n q.setOperatorType(Q.Op[op_name])\n q.setValueString(str(self.text(3)))\n return q",
"def Q_inv_rotate_V(Q, V):\n r = R.from_quat(Q[:, [1, 2, 3, 0]])\n return r.inv().apply(V)",
"def tf_to_pyqt(q):\n return pyqt.Quaternion(w=q.w, x=q.x, y=q.y, z=q.z)",
"def q_learn(initial_Q,initial_state,transition,\n num_episodes,gamma, alpha, epsilon=0.1): \n \n \"\"\" \n Your code\n \"\"\"\n \n return Q, steps, rewards",
"def test_repr():\n op = qml.FlipSign([0, 1], wires=(\"a\", \"b\"))\n expected = \"FlipSign([0, 1], wires=['a', 'b'])\"\n assert repr(op) == expected"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the inverse of an operation is added to the QNode queue and the operation is an instance of the original class
|
def test_operation_inverse_defined(self, qnode_for_inverse):
    assert qnode_for_inverse.qtape.operations[0].name == "RZ.inv"
    assert qnode_for_inverse.qtape.operations[0].inverse
    assert issubclass(qnode_for_inverse.qtape.operations[0].__class__, qml.operation.Operation)
    assert qnode_for_inverse.qtape.operations[1].name == "RZ"
    assert not qnode_for_inverse.qtape.operations[1].inverse
    assert issubclass(qnode_for_inverse.qtape.operations[1].__class__, qml.operation.Operation)
|
[
"def test_inv_queuing(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom Operation\"\"\"\n num_wires = 1\n\n with qml.tape.QuantumTape() as tape:\n op = DummyOp(wires=[0]).inv()\n assert op.inverse is True\n\n assert op.inverse is True",
"def test_different_queue_operation_inside(self):\n with qml.tape.QuantumTape() as tape1:\n with qml.tape.QuantumTape() as tape2:\n op1 = qml.PauliZ(0)\n op2 = qml.apply(op1, tape1)\n\n assert tape1.operations == [tape2, op2]\n assert tape2.operations == [op1]",
"def test_remove_not_in_queue(self):\n\n with Queue() as q1:\n op1 = qml.PauliZ(0)\n op2 = qml.PauliZ(1)\n q1.append(op1)\n q1.append(op2)\n\n with Queue() as q2:\n q2.append(op1)\n\n with pytest.raises(ValueError, match=\"not in list\"):\n q2.remove(op2)",
"def test_different_queue_operation_outside(self):\n op = qml.PauliZ(0)\n\n with qml.tape.QuantumTape() as tape1:\n with qml.tape.QuantumTape() as tape2:\n qml.apply(op, tape1)\n\n assert tape1.operations == [tape2, op]\n assert tape2.operations == []",
"def test_remove_not_in_queue(self):\n\n with AnnotatedQueue() as q1:\n op1 = qml.PauliZ(0)\n op2 = qml.PauliZ(1)\n q1.append(op1)\n q1.append(op2)\n\n with AnnotatedQueue() as q2:\n q2.append(op1)\n with pytest.raises(KeyError):\n q2.remove(op2)",
"def test_default_queue_operation_inside(self):\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliZ(0)\n op2 = qml.apply(op1)\n\n assert tape.operations == [op1, op2]",
"def test_queuing_defined_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n T = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n T.queue()\n\n assert len(tape.queue) == 1\n assert tape.queue[0] is T\n\n assert tape._queue[T] == {\"owns\": (op1, op2)}",
"def test_arbitrary_obj(self):\n\n objs = [5, \"hi\", 1.2, np.einsum, lambda x: x + 1]\n with Queue() as q:\n for obj in objs:\n q.append(obj)\n assert q.queue == objs\n\n with q:\n for _ in range(len(objs)):\n obj = objs.pop()\n q.remove(obj)\n assert q.queue == objs",
"def test_append_tensor_ops(self):\n\n with Queue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_append_qubit_observables(self):\n with Queue() as q:\n # wire repetition is deliberate, Queue contains no checks/logic\n # for circuits\n ops = [\n qml.Hadamard(wires=0),\n qml.PauliX(wires=1),\n qml.PauliY(wires=1),\n qml.Hermitian(np.ones([2, 2]), wires=7),\n ]\n assert q.queue == ops",
"def test_queue_idx(self):\n op = ValidOp(*self.simple_operands)\n assert op.queue_idx is None",
"def test_operation_ordering(self, opqueue_test_node):\n\n assert opqueue_test_node.ops[0].name == \"RX\"\n assert opqueue_test_node.ops[1].name == \"CNOT\"\n assert opqueue_test_node.ops[2].name == \"RY\"\n assert opqueue_test_node.ops[3].name == \"RZ\"\n assert opqueue_test_node.ops[4].name == \"PauliX\"\n assert opqueue_test_node.ops[5].name == \"PauliZ\"",
"def test_enqueue_empty_queue_new_head_tail(new_q_1):\n assert new_q_1._container.head is new_q_1._container.tail",
"def test_append_annotating_object(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n\n assert q.queue == [A, B, tensor_op]\n assert q._get_info(A) == {\"owner\": tensor_op}\n assert q._get_info(B) == {\"owner\": tensor_op}\n assert q._get_info(tensor_op) == {\"owns\": (A, B)}",
"def _operate(self, operation, other, qty=None, reverse=False, opname=None):\n op = lambda a,b: operation(b, a) if reverse else operation(a, b)\n\n # Perform operation\n v = None\n otherName = 'const'\n data = self.data[:]\n if type(other) == float or type(other) == int:\n v = op(data, other)\n otherName = str(other)\n elif type(other) == np.ndarray:\n if self.data.shape != other.shape:\n raise OutputException(\"Mismatching dimensions of operands: {} and {}.\".format(self.data.shape, other.shape))\n \n v = op(data, other)\n elif self.data.shape == other.data.shape or (np.isscalar(data) or np.isscalar(other.data)):\n v = op(data, other.data)\n\n # If different types, we locate the closest\n # common ancestor and convert to that type\n if type(other) != qty:\n classes = [type(self).mro(), type(other).mro()]\n for x in classes[0]:\n if all(x in mro for mro in classes):\n qty = x\n break\n\n print('Closest ancestor: {}'.format(type(qty)))\n\n\n otherName = other.name\n else:\n raise OutputException(\"Unsupported type of operand: {}\".format(type(other)))\n\n # Determine new name\n def nametransform(n):\n if ' ' in n: return '('+n+')'\n else: return n\n\n name1 = nametransform(self.name)\n name2 = nametransform(otherName)\n\n if reverse:\n name1, name2 = name2, name1\n\n newname = self.name\n if opname is not None:\n if type(opname) == str:\n newname = '{} {} {}'.format(name1, opname, name2)\n else:\n newname = opname(self.name, otherName)\n\n # Construct new object\n if qty is None:\n return UnknownQuantity(name=newname, data=v, grid=self.grid, output=self.output, attr={'description': '', 'equation': newname})\n else:\n return qty(name=newname, data=v, grid=self.grid, output=self.output, attr={'description': '', 'equation': newname})",
"def test_append_tensor_ops(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_operator_create_operator(self):\n pass",
"def test_queuing(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n T = Tensor(op1, op2)\n\n assert len(tape.queue) == 3\n assert tape.queue[0] is op1\n assert tape.queue[1] is op2\n assert tape.queue[2] is T\n\n assert tape._queue[op1] == {\"owner\": T}\n assert tape._queue[op2] == {\"owner\": T}\n assert tape._queue[T] == {\"owns\": (op1, op2)}",
"def test_operator_update_operator(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test construction of a tensor product
|
def test_construct(self):
    X = qml.PauliX(0)
    Y = qml.PauliY(2)
    T = Tensor(X, Y)
    assert T.obs == [X, Y]

    T = Tensor(T, Y)
    assert T.obs == [X, Y, Y]

    with pytest.raises(
        ValueError, match="Can only perform tensor products between observables"
    ):
        Tensor(T, qml.CNOT(wires=[0, 1]))
|
[
"def test_prod_(self):\n self.run_test(\"\"\"\n def np_prod_(a):\n return a.prod()\"\"\",\n numpy.arange(10),\n np_prod_=[NDArray[int,:]])",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_dot_product(self):\n vec_a = Vec3(1, 2, 3)\n vec_b = Vec3(0, 1, 3)\n dot_product = vec_a * vec_b\n\n self.assertEqual(11, dot_product, \"Asserting dot_product\")",
"def test_product_node(self):\n # Init product node\n id0 = IdentityLeaf(scope=0)\n id1 = IdentityLeaf(scope=1)\n id2 = IdentityLeaf(scope=2)\n prod = ProductNode(children=[id0, id1, id2])\n\n # Define input: Two samples with three features\n sample1 = np.array([1, 2, 3])\n sample2 = np.array([10, 20, 30])\n x = torch.Tensor([sample1, sample2])\n\n # Get product node result\n result = prod(x)\n\n # Product in logspace is sum\n expected_result = [np.sum(np.log(sample1)), np.sum(np.log(sample2))]\n\n # Assertions\n self.assertEqual(len(result.tolist()), 2)\n self.assertTrue(np.isclose(result.tolist(), expected_result, atol=DELTA).all())",
"def test_tensor(self, j):\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n with self.assertWarns(DeprecationWarning):\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.tensor(pauli2)\n with self.assertWarns(DeprecationWarning):\n target = PauliTable.from_labels([i + j for i in labels1 for j in labels2])\n self.assertEqual(value, target)",
"def tensor_product(vec1, vec2):\n # reshape np.array([1, 2, 3]) -> np.array([[1,], [2,], [3,]]).astype(np.float32)\n a = vec1.reshape(vec1.shape[0], 1).astype(np.float32)\n b = vec2.astype(np.float32)\n \n # copy the arrays to the graphics memory\n a_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)\n b_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b)\n \n # generate zeros array that has tha shape of resulting matrix\n r = np.zeros(shape=(a.shape[0], b.shape[0])).astype(np.float32)\n # create write buffer for the resulting matrix in the graphics memory\n res_g = cl.Buffer(ctx, mf.WRITE_ONLY, r.nbytes)\n \n # run the prg shader\n prg.tensor(queue, r.shape, None, np.int32(len(a)), a_g, b_g, res_g)\n \n # create a variable for the result\n res_np = np.empty_like(r)\n # move the result from the graphics memory to the system memory\n cl.enqueue_copy(queue, res_np, res_g)\n return res_np",
"def test_classic_2x2(self):\r\n # problem\r\n A = [[0, 1], [1, 0]]\r\n B = [[2, 3], [3, 2]]\r\n\r\n # solution\r\n answer = [[3, 2], [2, 3]]\r\n\r\n # test\r\n C = matrix_multiply(A, B)\r\n self.assertEqual(C, answer)",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_dot(self):\n\n # If no arrays, return 0\n self.assertAllClose(linalg.dot(),\n 0)\n # If only one array, return itself\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]]),\n [[1,2,3],\n [4,5,6]])\n # Basic test of two arrays: (2,3) * (3,2)\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]],\n [[7,8],\n [9,1],\n [2,3]]),\n [[31,19],\n [85,55]])\n # Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]],\n [[7,8],\n [9,1],\n [2,3]],\n [[4],\n [5]],\n [[6,7]]),\n [[1314,1533],\n [3690,4305]])\n\n # Test broadcasting: (2,2,2) * (2,2,2,2)\n self.assertAllClose(linalg.dot([[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[9,1],\n [2,3]],\n [[4,5],\n [6,7]]]]),\n [[[[ 7, 10],\n [ 15, 22]],\n\n [[ 67, 78],\n [ 91, 106]]],\n\n\n [[[ 13, 7],\n [ 35, 15]],\n\n [[ 56, 67],\n [ 76, 91]]]])\n\n # Inconsistent shapes: (2,3) * (2,3)\n self.assertRaises(ValueError,\n linalg.dot,\n [[1,2,3],\n [4,5,6]],\n [[1,2,3],\n [4,5,6]])\n # Other axes do not broadcast: (2,2,2) * (3,2,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[1,2],\n [3,4]],\n [[5,6],\n [7,8]],\n [[9,1],\n [2,3]]])\n # Do not broadcast matrix axes: (2,1) * (3,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [[1],\n [2]],\n [[1,2,3],\n [4,5,6]])\n # Do not accept less than 2-D arrays: (2) * (2,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [1,2],\n [[1,2,3],\n [4,5,6]])",
"def test_tensor_initialization():\n LOGGER.info(\"Testing invalid tensor initializations.\")\n for t in invalid_tensors:\n err = None\n tensor = None\n try:\n tensor = tj.tensor(t)\n except ValueError as e:\n err = e\n\n assert isinstance(err, ValueError),\\\n \"%s is supposed to be invalid -- %s : %s\" % (t, tensor, err)\n\n LOGGER.info(\"Testing valid tensor initializations.\")\n for t in valid_tensors:\n err = None\n try:\n tj.tensor(t)\n except Exception as e:\n err = e\n\n assert err is None,\\\n \"%s is supposed to be a valid tensor -- %s\" % (t, err)",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def test_dot_product(self):\n vector1 = Vector(*self.test_vector)\n vector2 = Vector(*self.test_vector_alternate)\n\n dot_product = sum(\n x * y for x, y in zip(self.test_vector, self.test_vector_alternate)\n )\n\n self.assertEqual(dot_product, vector1.dot(vector2))\n self.assertEqual(dot_product, vector2.dot(vector1))",
"def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n n_wires = 3\n dev = device(n_wires)\n skip_if(dev, {\"inverse_operations\": False})\n\n rnd_state = init_state(3)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.analytic))",
"def test_small(self):\n self.assert_tensor_equal(\n [[[0, 0], [0, 1]], [[1, 0], [1, 1]]],\n index_tensor([2, 2]))",
"def gen_test_tensor_cpd():\n return TensorCPD(*gen_test_data())",
"def testPdot(self):\n \n #Try some random matrices \n numRuns = 10 \n for i in range(numRuns): \n m = numpy.random.randint(1, 50) \n n = numpy.random.randint(1, 50) \n p = numpy.random.randint(1, 50) \n \n A = numpy.random.rand(m, n)\n B = numpy.random.rand(n, p)\n v = numpy.random.rand(n, p)\n \n C = A.dot(B)\n \n Ahat = csarray(A, storagetype=\"row\")\n Chat = Ahat.pdot(B)\n \n nptst.assert_array_almost_equal(C, Chat, 3)\n \n u = A.dot(v)\n uHat = Ahat.pdot(v)\n nptst.assert_array_almost_equal(u, uHat)",
"def test_init(self):\n ft_shape = (3, 4, 5) # define shape of the tensor in full form\n R = 2 # define Kryskal rank of a tensor in CP form\n core_values = np.ones(R)\n true_orig_fmat_list = [np.arange(orig_dim * R).reshape(orig_dim, R) for orig_dim in ft_shape]\n fmat_list = [fmat.copy() for fmat in true_orig_fmat_list]\n true_mode_names = [\"mode-0\", \"mode-1\", \"mode-2\"]\n\n tensor_cpd = TensorCPD(fmat=fmat_list, core_values=core_values)\n assert isinstance(tensor_cpd.fmat, list)\n assert tensor_cpd.order == len(fmat_list)\n assert isinstance(tensor_cpd.rank, tuple)\n assert tensor_cpd.rank == (R,)\n assert isinstance(tensor_cpd._core_values, np.ndarray)\n np.testing.assert_array_equal(tensor_cpd._core_values, core_values)\n assert tensor_cpd._core_values is not core_values\n assert tensor_cpd.mode_names == true_mode_names\n assert tensor_cpd.ft_shape == ft_shape\n\n # ------ tests for factor matrices\n for mode, fmat in enumerate(tensor_cpd.fmat):\n # check that values are the same but there are not references\n np.testing.assert_array_equal(fmat, fmat_list[mode])\n assert fmat is not fmat_list[mode]\n\n # check that changes to the matrices have no affect on the TensorCPD\n # (double check for not being references)\n fmat_list[mode] = fmat_list[mode] * 2\n np.testing.assert_array_equal(fmat, true_orig_fmat_list[mode])\n assert fmat is not true_orig_fmat_list[mode]\n\n # ------ tests for core\n true_core = np.array([[[1., 0.],\n [0., 0.]],\n\n [[0., 0.],\n [0., 1.]]]\n )\n assert isinstance(tensor_cpd.core, Tensor)\n np.testing.assert_array_equal(tensor_cpd.core.data, true_core)",
"def bipartite_op_tensor_product(A, B):\n return np.matrix(np.tensordot(A, B, axes=([],[])).\n transpose((0,2,1,3)).reshape(4, 4))",
"def test_cross_entropy():\n net = CategoricalCrossEntropy()\n probs_b = Tensor([0.3, 0.1, 0.6], dtype=dtype.float32)\n probs_a = Tensor([0.7, 0.2, 0.1], dtype=dtype.float32)\n ans = net(probs_b, probs_a)\n assert isinstance(ans, Tensor)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the queuing of a Tensor object.
|
def test_queuing_defined_outside(self):
    op1 = qml.PauliX(0)
    op2 = qml.PauliY(1)
    T = Tensor(op1, op2)

    with qml.tape.QuantumTape() as tape:
        T.queue()

    assert len(tape.queue) == 1
    assert tape.queue[0] is T
    assert tape._queue[T] == {"owns": (op1, op2)}
|
[
"def test_queuing(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n T = Tensor(op1, op2)\n\n assert len(tape.queue) == 3\n assert tape.queue[0] is op1\n assert tape.queue[1] is op2\n assert tape.queue[2] is T\n\n assert tape._queue[op1] == {\"owner\": T}\n assert tape._queue[op2] == {\"owner\": T}\n assert tape._queue[T] == {\"owns\": (op1, op2)}",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_append_tensor_ops(self):\n\n with Queue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_append_tensor_ops(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def test_put_queue(self):\n from time import sleep\n testitem = {\n 'a': 'dictionary'\n }\n pool = None\n try:\n pool = self.ThreadPool(TestThread,initial_size=1, keywordarg=\"string\")\n pool.put_queue(testitem)\n \n sleep(1)\n thread_ref = pool._thread_list[0] #use internal thread ref to test with\n self.assertEqual(thread_ref.outputvalue,testitem)\n finally:\n if pool is not None: pool.safe_terminate()",
"def test_empty_dequeue(empty_q):\n assert empty_q.dequeue() is False",
"def test_append_annotating_object(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n\n assert q.queue == [A, B, tensor_op]\n assert q._get_info(A) == {\"owner\": tensor_op}\n assert q._get_info(B) == {\"owner\": tensor_op}\n assert q._get_info(tensor_op) == {\"owns\": (A, B)}",
"def test_arbitrary_obj(self):\n\n objs = [5, \"hi\", 1.2, np.einsum, lambda x: x + 1]\n with Queue() as q:\n for obj in objs:\n q.append(obj)\n assert q.queue == objs\n\n with q:\n for _ in range(len(objs)):\n obj = objs.pop()\n q.remove(obj)\n assert q.queue == objs",
"def test_default_queue_measurements_outside(self, obs):\n op = qml.expval(obs)\n\n with qml.tape.QuantumTape() as tape:\n qml.apply(op)\n\n assert tape.measurements == [op]",
"def test_enqueue(self):\n self.fail()",
"def test_default_queue_operation_inside(self):\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliZ(0)\n op2 = qml.apply(op1)\n\n assert tape.operations == [op1, op2]",
"def assert_is_post_batch(self, x: TensorLike) -> None:",
"def test_if_queue_enqueue_and_pop_one_item(empty_queue):\n empty_queue.enqueue(1)\n assert empty_queue.dequeue() == 1",
"def test_enqueue_empty_queue_new_head_tail(new_q_1):\n assert new_q_1._container.head is new_q_1._container.tail",
"def assert_is_pre_batch(self, x: TensorLike) -> None:",
"def test_length_of_full_queue(value_queue):\n assert value_queue.size() == 3",
"def test_dequeue_empty():\n test_list = que_.Queue()\n with pytest.raises(IndexError):\n test_list.dequeue()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the queuing of a Tensor object.
|
def test_queuing(self):
    with qml.tape.QuantumTape() as tape:
        op1 = qml.PauliX(0)
        op2 = qml.PauliY(1)
        T = Tensor(op1, op2)

    assert len(tape.queue) == 3
    assert tape.queue[0] is op1
    assert tape.queue[1] is op2
    assert tape.queue[2] is T

    assert tape._queue[op1] == {"owner": T}
    assert tape._queue[op2] == {"owner": T}
    assert tape._queue[T] == {"owns": (op1, op2)}
|
[
"def test_queuing_defined_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n T = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n T.queue()\n\n assert len(tape.queue) == 1\n assert tape.queue[0] is T\n\n assert tape._queue[T] == {\"owns\": (op1, op2)}",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_append_tensor_ops(self):\n\n with Queue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_append_tensor_ops(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def test_put_queue(self):\n from time import sleep\n testitem = {\n 'a': 'dictionary'\n }\n pool = None\n try:\n pool = self.ThreadPool(TestThread,initial_size=1, keywordarg=\"string\")\n pool.put_queue(testitem)\n \n sleep(1)\n thread_ref = pool._thread_list[0] #use internal thread ref to test with\n self.assertEqual(thread_ref.outputvalue,testitem)\n finally:\n if pool is not None: pool.safe_terminate()",
"def test_empty_dequeue(empty_q):\n assert empty_q.dequeue() is False",
"def test_append_annotating_object(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n\n assert q.queue == [A, B, tensor_op]\n assert q._get_info(A) == {\"owner\": tensor_op}\n assert q._get_info(B) == {\"owner\": tensor_op}\n assert q._get_info(tensor_op) == {\"owns\": (A, B)}",
"def test_arbitrary_obj(self):\n\n objs = [5, \"hi\", 1.2, np.einsum, lambda x: x + 1]\n with Queue() as q:\n for obj in objs:\n q.append(obj)\n assert q.queue == objs\n\n with q:\n for _ in range(len(objs)):\n obj = objs.pop()\n q.remove(obj)\n assert q.queue == objs",
"def test_default_queue_measurements_outside(self, obs):\n op = qml.expval(obs)\n\n with qml.tape.QuantumTape() as tape:\n qml.apply(op)\n\n assert tape.measurements == [op]",
"def test_enqueue(self):\n self.fail()",
"def test_default_queue_operation_inside(self):\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliZ(0)\n op2 = qml.apply(op1)\n\n assert tape.operations == [op1, op2]",
"def assert_is_post_batch(self, x: TensorLike) -> None:",
"def test_if_queue_enqueue_and_pop_one_item(empty_queue):\n empty_queue.enqueue(1)\n assert empty_queue.dequeue() == 1",
"def test_enqueue_empty_queue_new_head_tail(new_q_1):\n assert new_q_1._container.head is new_q_1._container.tail",
"def assert_is_pre_batch(self, x: TensorLike) -> None:",
"def test_length_of_full_queue(value_queue):\n assert value_queue.size() == 3",
"def test_dequeue_empty():\n test_list = que_.Queue()\n with pytest.raises(IndexError):\n test_list.dequeue()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the tensor-specific matmul method updates queuing metadata.
|
def test_queuing_tensor_matmul(self):
    with qml.tape.QuantumTape() as tape:
        op1 = qml.PauliX(0)
        op2 = qml.PauliY(1)
        t = Tensor(op1, op2)

        op3 = qml.PauliZ(2)
        t2 = t @ op3

    assert tape._queue[t2] == {"owns": (op1, op2, op3)}
    assert tape._queue[op3] == {"owner": t2}
|
[
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def mul(self, matrix):",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def __mul__(self, matrix):",
"def element_mul(self, matrix):",
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def __mul__(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, assure_tensor(other))",
"def test_matmul(self, matrices):\n # Instantiate the 10x10 matrix and test matrix multiplication\n square_mat = chap5.Matrix(matrices.square)\n square_np = np.array(matrices.square)\n square_matmul = (square_mat @ square_mat)._matrix\n square_np_result = square_np @ square_np\n # Compare to the Numpy result of multiplying the matrix times itself\n assert (np.array(square_matmul) == square_np_result).all()\n # Instantiate a 5x10 and 10x5 matrix as Matrix class and Numpy array\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n half_row_np = np.array(matrices.half_row)\n half_col_np = np.array(matrices.half_col)\n # Matrix multiplication amongst the 10x10, 5x10, and 10x5 matrices\n result1 = half_row_mat @ half_col_mat # (5x10) @ (10x5)\n exp_result1 = half_row_np @ half_col_np # (5x10) @ (10x5)\n result2 = half_col_mat @ half_row_mat # (10x5) @ (5x10)\n exp_result2 = half_col_np @ half_row_np # (10x5) @ (5x10)\n result3 = half_row_mat @ square_mat # (5x10) @ (10x10)\n exp_result3 = half_row_np @ square_np # (5x10) @ (10x10)\n result4 = square_mat @ half_col_mat # (10x10) @ (10x5)\n exp_result4 = square_np @ half_col_np # (10x10) @ (10x5)\n assert (np.array(result1._matrix) == exp_result1).all()\n assert (np.array(result2._matrix) == exp_result2).all()\n assert (np.array(result3._matrix) == exp_result3).all()\n assert (np.array(result4._matrix) == exp_result4).all()",
"def test_multiply_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))",
"def test_mul_funcs(self):\r\n n = 10\r\n x = Variable(n)\r\n obj = Minimize(norm(x, 1))\r\n constraints = [x >= 2]\r\n prob = Problem(obj, constraints)\r\n data, dims = prob.get_problem_data(solver=SCS)\r\n A = data[\"A\"]\r\n objective, constr_map = prob.canonicalize()\r\n dims = prob._format_for_solver(constr_map, SCS)\r\n\r\n all_ineq = constr_map[s.EQ] + constr_map[s.LEQ]\r\n var_offsets, var_sizes, x_length = prob._get_var_offsets(objective,\r\n all_ineq)\r\n opts = {}\r\n constraints = constr_map[s.EQ] + constr_map[s.LEQ]\r\n constraints = prune_constants(constraints)\r\n Amul, ATmul = iterative.get_mul_funcs(constraints, dims,\r\n var_offsets, var_sizes,\r\n x_length)\r\n vec = np.array(range(x_length))\r\n # A*vec\r\n result = np.zeros(A.shape[0])\r\n Amul(vec, result)\r\n self.assertItemsAlmostEqual(A*vec, result)\r\n Amul(vec, result)\r\n self.assertItemsAlmostEqual(2*A*vec, result)\r\n # A.T*vec\r\n vec = np.array(range(A.shape[0]))\r\n result = np.zeros(A.shape[1])\r\n ATmul(vec, result)\r\n self.assertItemsAlmostEqual(A.T*vec, result)\r\n ATmul(vec, result)\r\n self.assertItemsAlmostEqual(2*A.T*vec, result)",
"def __rmatmul__(self, a):\n if isinstance(a, tm):\n return tm(a.TM @ self.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(a @ self.TM)\n return tm(a * self.TAA)",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_multiplication():\n\n # Same units\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"cm\")\n a3 = [4 * cm, 5 * cm, 6 * cm]\n answer = unyt_array([4, 10, 18], \"cm**2\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # different units, same dimension\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"m\")\n a3 = [4 * m, 5 * m, 6 * m]\n answer1 = unyt_array([400, 1000, 1800], \"cm**2\")\n answer2 = unyt_array([0.04, 0.10, 0.18], \"m**2\")\n answer3 = unyt_array([4, 10, 18], \"cm*m\")\n\n operate_and_compare(a1, a2, operator.mul, answer1)\n operate_and_compare(a2, a1, operator.mul, answer2)\n operate_and_compare(a1, a3, operator.mul, answer1)\n operate_and_compare(a3, a1, operator.mul, answer2)\n operate_and_compare(a1, a2, np.multiply, answer3)\n operate_and_compare(a2, a1, np.multiply, answer3)\n operate_and_compare(a1, a3, np.multiply, answer3)\n operate_and_compare(a3, a1, np.multiply, answer3)\n\n # different dimensions\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"g\")\n a3 = [4 * g, 5 * g, 6 * g]\n answer = unyt_array([4, 10, 18], \"cm*g\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # One dimensionless, one unitful\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = unyt_array([4, 10, 18], \"cm\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # Both dimensionless quantities\n a1 = unyt_array([1, 2, 3])\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = unyt_array([4, 10, 18])\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # With np.multiply.reduce\n a = unyt_array([1.0, 2.0, 3.0], \"cm\")\n answer = unyt_quantity(6.0, \"cm**3\")\n assert_equal(np.multiply.reduce(a), answer)\n a = unyt_array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], \"cm\")\n answer = unyt_array([6.0, 120.0], \"cm**3\")\n assert_equal(np.multiply.reduce(a, axis=1), answer)",
"def testMul(self):\n f25 = self.f25\n a = f25(1, 2)\n b = f25(1, 1)\n self.assertEqual(a * b, f25(-1, 1))",
"def matmul_resources(self, op):\n\t\tinputs = op.inputs\n\t\tleft = inputs[0]\n\t\tright = inputs[1]\n\t\t\n\t\tif left.op.type == \"Const\":\n\t\t\tmatrix = self.sess.run(left) if not op.get_attr(\"transpose_a\") else self.sess.run(left).transpose()\n\t\telse:\n\t\t\tmatrix = self.sess.run(right).transpose() if not op.get_attr(\"transpose_b\") else self.sess.run(right)\n\t\treturn (matrix,)",
"def __matmul__(self, a):\n if isinstance(a, tm):\n return tm(self.TM @ a.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(self.TM @ a)",
"def test_mul():\n\tcomplexnr1 = Complex(1, 1)\n\tcomplexnr2 = Complex(2, 2)\n\tassert complexnr1*complexnr2 == Complex(0, 4)",
"def test_Image_inplace_scalar_multiply():\n for i in range(ntypes):\n # First try using the dictionary-type Image init\n image1 = galsim.Image(ref_array.astype(types[i]))\n image2 = galsim.Image((2 * ref_array).astype(types[i]))\n image1 *= 2\n np.testing.assert_array_equal(image1.array, image2.array,\n err_msg=\"Inplace scalar multiply in Image class (dictionary \"\n +\"call) does not match reference for dtype = \"+str(types[i]))\n\n image3 = galsim.Image(ref_array.astype(types[i]))\n image3 *= 2.\n np.testing.assert_allclose(image3.array, image1.array)\n\n # Then try using the eval command to mimic use via ImageD, ImageF etc.\n image_init_func = eval(\"galsim.Image\"+tchar[i])\n slice_array = large_array.copy().astype(types[i])[::3,::2]\n image1 = image_init_func(slice_array)\n image2 = image_init_func((2 * ref_array).astype(types[i]))\n image1 *= 2\n np.testing.assert_array_equal(image1.array, image2.array,\n err_msg=\"Inplace scalar multiply in Image class does\"\n +\" not match reference for dtype = \"+str(types[i]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests the tensor-specific matmul method when components are defined outside the queuing context.
|
def test_queuing_tensor_matmul_components_outside(self):
op1 = qml.PauliX(0)
op2 = qml.PauliY(1)
t1 = Tensor(op1, op2)
with qml.tape.QuantumTape() as tape:
op3 = qml.PauliZ(2)
t2 = t1 @ op3
assert len(tape._queue) == 2
assert tape._queue[op3] == {"owner": t2}
assert tape._queue[t2] == {"owns": (op1, op2, op3)}
|
[
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def mul(self, matrix):",
"def matmul_resources(self, op):\n\t\tinputs = op.inputs\n\t\tleft = inputs[0]\n\t\tright = inputs[1]\n\t\t\n\t\tif left.op.type == \"Const\":\n\t\t\tmatrix = self.sess.run(left) if not op.get_attr(\"transpose_a\") else self.sess.run(left).transpose()\n\t\telse:\n\t\t\tmatrix = self.sess.run(right).transpose() if not op.get_attr(\"transpose_b\") else self.sess.run(right)\n\t\treturn (matrix,)",
"def element_mul(self, matrix):",
"def __mul__(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, assure_tensor(other))",
"def test_mul_funcs(self):\r\n n = 10\r\n x = Variable(n)\r\n obj = Minimize(norm(x, 1))\r\n constraints = [x >= 2]\r\n prob = Problem(obj, constraints)\r\n data, dims = prob.get_problem_data(solver=SCS)\r\n A = data[\"A\"]\r\n objective, constr_map = prob.canonicalize()\r\n dims = prob._format_for_solver(constr_map, SCS)\r\n\r\n all_ineq = constr_map[s.EQ] + constr_map[s.LEQ]\r\n var_offsets, var_sizes, x_length = prob._get_var_offsets(objective,\r\n all_ineq)\r\n opts = {}\r\n constraints = constr_map[s.EQ] + constr_map[s.LEQ]\r\n constraints = prune_constants(constraints)\r\n Amul, ATmul = iterative.get_mul_funcs(constraints, dims,\r\n var_offsets, var_sizes,\r\n x_length)\r\n vec = np.array(range(x_length))\r\n # A*vec\r\n result = np.zeros(A.shape[0])\r\n Amul(vec, result)\r\n self.assertItemsAlmostEqual(A*vec, result)\r\n Amul(vec, result)\r\n self.assertItemsAlmostEqual(2*A*vec, result)\r\n # A.T*vec\r\n vec = np.array(range(A.shape[0]))\r\n result = np.zeros(A.shape[1])\r\n ATmul(vec, result)\r\n self.assertItemsAlmostEqual(A.T*vec, result)\r\n ATmul(vec, result)\r\n self.assertItemsAlmostEqual(2*A.T*vec, result)",
"def __mul__(self, matrix):",
"def vm_impl_mat_mul(self):\n\n def vm_impl(x, w):\n x = x.asnumpy()\n w = w.asnumpy()\n if self.transpose_a:\n x = x.transpose()\n if self.transpose_b:\n w = w.transpose()\n z = x @ w\n return Tensor(z)\n\n return vm_impl",
"def test_mul():\n\tcomplexnr1 = Complex(1, 1)\n\tcomplexnr2 = Complex(2, 2)\n\tassert complexnr1*complexnr2 == Complex(0, 4)",
"def testMul(self):\n f25 = self.f25\n a = f25(1, 2)\n b = f25(1, 1)\n self.assertEqual(a * b, f25(-1, 1))",
"def matmul(mat_list):\n if len(mat_list) <= 1:\n prod = mat_list[0]\n else:\n prod = reduce(np.matmul, mat_list)\n return prod",
"def test_multiplication():\n\n # Same units\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"cm\")\n a3 = [4 * cm, 5 * cm, 6 * cm]\n answer = unyt_array([4, 10, 18], \"cm**2\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # different units, same dimension\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"m\")\n a3 = [4 * m, 5 * m, 6 * m]\n answer1 = unyt_array([400, 1000, 1800], \"cm**2\")\n answer2 = unyt_array([0.04, 0.10, 0.18], \"m**2\")\n answer3 = unyt_array([4, 10, 18], \"cm*m\")\n\n operate_and_compare(a1, a2, operator.mul, answer1)\n operate_and_compare(a2, a1, operator.mul, answer2)\n operate_and_compare(a1, a3, operator.mul, answer1)\n operate_and_compare(a3, a1, operator.mul, answer2)\n operate_and_compare(a1, a2, np.multiply, answer3)\n operate_and_compare(a2, a1, np.multiply, answer3)\n operate_and_compare(a1, a3, np.multiply, answer3)\n operate_and_compare(a3, a1, np.multiply, answer3)\n\n # different dimensions\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"g\")\n a3 = [4 * g, 5 * g, 6 * g]\n answer = unyt_array([4, 10, 18], \"cm*g\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # One dimensionless, one unitful\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = unyt_array([4, 10, 18], \"cm\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # Both dimensionless quantities\n a1 = unyt_array([1, 2, 3])\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = unyt_array([4, 10, 18])\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # With np.multiply.reduce\n a = unyt_array([1.0, 2.0, 3.0], \"cm\")\n answer = unyt_quantity(6.0, \"cm**3\")\n assert_equal(np.multiply.reduce(a), answer)\n a = unyt_array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], \"cm\")\n answer = unyt_array([6.0, 120.0], \"cm**3\")\n assert_equal(np.multiply.reduce(a, axis=1), answer)",
"def __mul__(self, *args, **kwargs):\n return _decomp.SOM___mul__(self, *args, **kwargs)",
"def test_mul_elemwise(self):\r\n c = [[1, -1], [2, -2]]\r\n expr = mul_elemwise(c, self.A)\r\n obj = Minimize(normInf(expr))\r\n p = Problem(obj, [self.A == 5])\r\n result = p.solve()\r\n self.assertAlmostEqual(result, 10)\r\n self.assertItemsAlmostEqual(expr.value, [5, -5] + [10, -10])\r\n\r\n # Test with a sparse matrix.\r\n import cvxopt\r\n interface = intf.get_matrix_interface(cvxopt.spmatrix)\r\n c = interface.const_to_matrix([1,2])\r\n expr = mul_elemwise(c, self.x)\r\n obj = Minimize(normInf(expr))\r\n p = Problem(obj, [self.x == 5])\r\n result = p.solve()\r\n self.assertAlmostEqual(result, 10)\r\n self.assertItemsAlmostEqual(expr.value, [5, 10])\r\n\r\n # Test promotion.\r\n c = [[1, -1], [2, -2]]\r\n expr = mul_elemwise(c, self.a)\r\n obj = Minimize(normInf(expr))\r\n p = Problem(obj, [self.a == 5])\r\n result = p.solve()\r\n self.assertAlmostEqual(result, 10)\r\n self.assertItemsAlmostEqual(expr.value, [5, -5] + [10, -10])",
"def matmul(\n inp1: Tensor,\n inp2: Tensor,\n transpose_a=False,\n transpose_b=False,\n compute_mode=\"default\",\n) -> Tensor:\n return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests tensor-specific rmatmul updates queuing metadata.
|
def test_queuing_tensor_rmatmul(self):
with qml.tape.QuantumTape() as tape:
op1 = qml.PauliX(0)
op2 = qml.PauliY(1)
t1 = op1 @ op2
op3 = qml.PauliZ(3)
t2 = op3 @ t1
assert tape._queue[op3] == {"owner": t2}
assert tape._queue[t2] == {"owns": (op3, op1, op2)}
|
[
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_hamiltonian_rmatmul(self, H1, H2, H):\n assert H.compare(H1.__rmatmul__(H2))",
"def __rmatmul__(self, a):\n if isinstance(a, tm):\n return tm(a.TM @ self.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(a @ self.TM)\n return tm(a * self.TAA)",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def test_update_matrices(self):\n crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)\n\n new_s = np.array([[7, 8, 9]])\n new_reward = 20\n new_d = np.array([[13, 14, 15]])\n\n expected_S = np.vstack((self.S_2x3, new_s))\n expected_C_diag = np.append(np.diagonal(self.C_2x2), 1.0 / new_reward)\n expected_C = np.diag(expected_C_diag)\n expected_D = np.vstack((self.D_2x3, new_d))\n\n crkr.update_matrices(new_s, new_reward, new_d)\n\n assert_true(np.allclose(expected_S, crkr.S))\n assert_true(np.allclose(expected_C, crkr.C))\n assert_true(np.allclose(expected_D, crkr.D))",
"def test_matmul(self, matrices):\n # Instantiate the 10x10 matrix and test matrix multiplication\n square_mat = chap5.Matrix(matrices.square)\n square_np = np.array(matrices.square)\n square_matmul = (square_mat @ square_mat)._matrix\n square_np_result = square_np @ square_np\n # Compare to the Numpy result of multiplying the matrix times itself\n assert (np.array(square_matmul) == square_np_result).all()\n # Instantiate a 5x10 and 10x5 matrix as Matrix class and Numpy array\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n half_row_np = np.array(matrices.half_row)\n half_col_np = np.array(matrices.half_col)\n # Matrix multiplication amongst the 10x10, 5x10, and 10x5 matrices\n result1 = half_row_mat @ half_col_mat # (5x10) @ (10x5)\n exp_result1 = half_row_np @ half_col_np # (5x10) @ (10x5)\n result2 = half_col_mat @ half_row_mat # (10x5) @ (5x10)\n exp_result2 = half_col_np @ half_row_np # (10x5) @ (5x10)\n result3 = half_row_mat @ square_mat # (5x10) @ (10x10)\n exp_result3 = half_row_np @ square_np # (5x10) @ (10x10)\n result4 = square_mat @ half_col_mat # (10x10) @ (10x5)\n exp_result4 = square_np @ half_col_np # (10x10) @ (10x5)\n assert (np.array(result1._matrix) == exp_result1).all()\n assert (np.array(result2._matrix) == exp_result2).all()\n assert (np.array(result3._matrix) == exp_result3).all()\n assert (np.array(result4._matrix) == exp_result4).all()",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def check_elementwise_random(op='sum', shape=(1, 3, 224, 224)):\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n if op == 'sum':\n sym = a + b\n elif op == 'sub':\n sym = a - b\n elif op == 'mul':\n sym = a * b\n\n a_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n b_data = mx.ndarray.random.uniform(shape=shape, ctx=mx.gpu())\n\n executor = sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y = executor.forward(is_train=False, a=a_data, b=b_data)\n trt_sym = sym.get_backend_symbol('TensorRT')\n original_precision_value = mx.contrib.tensorrt.get_use_fp16()\n try:\n mx.contrib.tensorrt.set_use_fp16(True)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt = executor.forward(is_train=False, a=a_data, b=b_data)\n mx.contrib.tensorrt.set_use_fp16(False)\n executor = trt_sym.simple_bind(ctx=mx.gpu(), a=shape, b=shape,\n grad_req='null', force_rebind=True)\n y_trt_fp32 = executor.forward(is_train=False, a=a_data, b=b_data)\n assert_almost_equal(y[0].asnumpy(), y_trt[0].asnumpy(), 1e-1, 1e-2)\n assert_almost_equal(y[0].asnumpy(), y_trt_fp32[0].asnumpy(), 1e-4, 1e-4)\n finally:\n mx.contrib.tensorrt.set_use_fp16(original_precision_value)",
"def batch_matmul_strategy_rocm(attrs, inputs, out_type, target):\n strategy = _op.OpStrategy()\n strategy.add_implementation(\n wrap_compute_batch_matmul(topi.cuda.batch_matmul),\n wrap_topi_schedule(topi.cuda.schedule_batch_matmul),\n name=\"batch_matmul.cuda\",\n plevel=10,\n )\n if target.kind.name == \"rocm\" and \"rocblas\" in target.libs:\n assert out_type.dtype == inputs[0].dtype, \"Mixed precision not supported.\"\n strategy.add_implementation(\n wrap_compute_batch_matmul(topi.rocm.batch_matmul_rocblas),\n wrap_topi_schedule(topi.rocm.schedule_batch_matmul_rocblas),\n name=\"batch_matmul_rocblas.rocm\",\n plevel=12,\n )\n return strategy",
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def matmul_resources(self, op):\n\t\tinputs = op.inputs\n\t\tleft = inputs[0]\n\t\tright = inputs[1]\n\t\t\n\t\tif left.op.type == \"Const\":\n\t\t\tmatrix = self.sess.run(left) if not op.get_attr(\"transpose_a\") else self.sess.run(left).transpose()\n\t\telse:\n\t\t\tmatrix = self.sess.run(right).transpose() if not op.get_attr(\"transpose_b\") else self.sess.run(right)\n\t\treturn (matrix,)",
"def test_matrix_tuple_multiplication(self):\n\n M = matrices.Matrix(4, 4)\n M.set_row(0, [1, 2, 3, 4])\n M.set_row(1, [2, 4, 4, 2])\n M.set_row(2, [8, 6, 4, 1])\n M.set_row(3, [0, 0, 0, 1])\n\n t = tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 1, 2, 3, 1)\n\n t2 = M * t\n\n self.assertEqual(t2, tuples.Tuple(\n [\"x\", \"y\", \"z\", \"w\"], 18, 24, 33, 1))",
"def mul(self, matrix):",
"def __matmul__(self, a):\n if isinstance(a, tm):\n return tm(self.TM @ a.TM)\n else:\n if isinstance(a, np.ndarray):\n return tm(self.TM @ a)",
"def vm_impl_mat_mul(self):\n\n def vm_impl(x, w):\n x = x.asnumpy()\n w = w.asnumpy()\n if self.transpose_a:\n x = x.transpose()\n if self.transpose_b:\n w = w.transpose()\n z = x @ w\n return Tensor(z)\n\n return vm_impl",
"def test_multiplication():\n\n # Same units\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"cm\")\n a3 = [4 * cm, 5 * cm, 6 * cm]\n answer = unyt_array([4, 10, 18], \"cm**2\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # different units, same dimension\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"m\")\n a3 = [4 * m, 5 * m, 6 * m]\n answer1 = unyt_array([400, 1000, 1800], \"cm**2\")\n answer2 = unyt_array([0.04, 0.10, 0.18], \"m**2\")\n answer3 = unyt_array([4, 10, 18], \"cm*m\")\n\n operate_and_compare(a1, a2, operator.mul, answer1)\n operate_and_compare(a2, a1, operator.mul, answer2)\n operate_and_compare(a1, a3, operator.mul, answer1)\n operate_and_compare(a3, a1, operator.mul, answer2)\n operate_and_compare(a1, a2, np.multiply, answer3)\n operate_and_compare(a2, a1, np.multiply, answer3)\n operate_and_compare(a1, a3, np.multiply, answer3)\n operate_and_compare(a3, a1, np.multiply, answer3)\n\n # different dimensions\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = unyt_array([4, 5, 6], \"g\")\n a3 = [4 * g, 5 * g, 6 * g]\n answer = unyt_array([4, 10, 18], \"cm*g\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # One dimensionless, one unitful\n a1 = unyt_array([1, 2, 3], \"cm\")\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = unyt_array([4, 10, 18], \"cm\")\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # Both dimensionless quantities\n a1 = unyt_array([1, 2, 3])\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = unyt_array([4, 10, 18])\n\n operate_and_compare(a1, a2, operator.mul, answer)\n operate_and_compare(a2, a1, operator.mul, answer)\n operate_and_compare(a1, a3, operator.mul, answer)\n operate_and_compare(a3, a1, operator.mul, answer)\n operate_and_compare(a1, a2, np.multiply, answer)\n operate_and_compare(a2, a1, np.multiply, answer)\n operate_and_compare(a1, a3, np.multiply, answer)\n operate_and_compare(a3, a1, np.multiply, answer)\n\n # With np.multiply.reduce\n a = unyt_array([1.0, 2.0, 3.0], \"cm\")\n answer = unyt_quantity(6.0, \"cm**3\")\n assert_equal(np.multiply.reduce(a), answer)\n a = unyt_array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], \"cm\")\n answer = unyt_array([6.0, 120.0], \"cm**3\")\n assert_equal(np.multiply.reduce(a, axis=1), answer)",
"def gear_mulmatrix_op(mA, mB, target=False, transform='srt'):\n node = pm.createNode(\"mgear_mulMatrix\")\n for m, mi in zip([mA, mB], ['matrixA', 'matrixB']):\n if isinstance(m, datatypes.Matrix):\n pm.setAttr(node.attr(mi), m)\n else:\n pm.connectAttr(m, node.attr(mi))\n if target:\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node + \".output\", dm_node + \".inputMatrix\")\n if 't' in transform:\n pm.connectAttr(dm_node + \".outputTranslate\",\n target.attr(\"translate\"), f=True)\n if 'r' in transform:\n pm.connectAttr(dm_node + \".outputRotate\",\n target.attr(\"rotate\"), f=True)\n if 's' in transform:\n pm.connectAttr(dm_node + \".outputScale\",\n target.attr(\"scale\"), f=True)\n\n return node",
"def tf_matmul(x, y, name=None):\n if x.ndim == y.ndim == 1:\n x_ = tf.reshape(x, shape=(1, -1))\n y_ = tf.reshape(y, shape=(-1, 1))\n return tf.matmul(x_, y_, name=name)\n else:\n return tf.matmul(x, y, name=name)",
"def matmul(\n inp1: Tensor,\n inp2: Tensor,\n transpose_a=False,\n transpose_b=False,\n compute_mode=\"default\",\n) -> Tensor:\n return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that Tensors are labelled as expected
|
def test_label(self):
x = qml.PauliX(0)
y = qml.PauliZ(2)
T = Tensor(x, y)
assert T.label() == "X@Z"
assert T.label(decimals=2) == "X@Z"
assert T.label(base_label=["X0", "Z2"]) == "X0@Z2"
with pytest.raises(ValueError, match=r"Tensor label requires"):
T.label(base_label="nope")
|
[
"def test_label():\n label_path = pjoin(data_path, \"label\", \"lh.BA1.label\")\n label = read_label(label_path)\n # XXX : test more\n assert_true(np.all(label > 0))\n\n labels, scalars = read_label(label_path, True)\n assert_true(np.all(labels == label))\n assert_true(len(labels) == len(scalars))",
"def test_tensor(self, j):\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n with self.assertWarns(DeprecationWarning):\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.tensor(pauli2)\n with self.assertWarns(DeprecationWarning):\n target = PauliTable.from_labels([i + j for i in labels1 for j in labels2])\n self.assertEqual(value, target)",
"def test_predict(self):\n assert 2 == 2",
"def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)",
"def test_labels_round_trip(self):\n target = [\"III\", \"IXZ\", \"XYI\", \"ZZZ\"]\n with self.assertWarns(DeprecationWarning):\n value = PauliTable.from_labels(target).to_labels()\n self.assertEqual(value, target)",
"def test_relabel():\n nb_classes = 3\n inputs = np.array([[True, False, False], [False, True, False], [True, False, True]])\n expected_0 = np.array([True, False, True])\n expected_1 = np.array([False, True, False])\n expected_2 = np.array([False, False, True])\n\n assert np.array_equal(relabel(inputs, 0, nb_classes), expected_0)\n assert np.array_equal(relabel(inputs, 1, nb_classes), expected_1)\n assert np.array_equal(relabel(inputs, 2, nb_classes), expected_2)",
"def test_from_labels_1q(self):\n labels = [\"I\", \"Z\", \"Z\", \"X\", \"Y\"]\n array = np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]], dtype=bool\n )\n with self.assertWarns(DeprecationWarning):\n target = PauliTable(array)\n value = PauliTable.from_labels(labels)\n self.assertEqual(target, value)",
"def test_map_1(self):\n annotations = self.get_annotations(0)\n detections = self.get_detections_from(annotations)\n self.assertEqual(1., float(self.map(annotations, detections)[0]))",
"def test_api_v3_labels_post(self):\n pass",
"def test_small(self):\n self.assert_tensor_equal(\n [[[0, 0], [0, 1]], [[1, 0], [1, 1]]],\n index_tensor([2, 2]))",
"def get_fake_label(b: int) -> Tensor:\n return rand(b) * (0.3 - 0.0) + 0.0",
"def test_init_fail(self):\n # ------ the following tests should FAIL\n correct_shape = (2, 4, 8)\n size = reduce(lambda x, y: x * y, correct_shape)\n order = len(correct_shape)\n correct_data = np.ones(size).reshape(correct_shape)\n\n # ------ tests that Tensor object can be created only from numpy array\n # can not create from list\n with pytest.raises(TypeError):\n incorrect_data = [[1, 2, 3], [4, 5, 6]]\n Tensor(array=incorrect_data)\n\n # can not create from another Tensor\n with pytest.raises(TypeError):\n incorrect_data = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))\n Tensor(array=incorrect_data)\n\n # ------ tests for custom mode names being incorrectly defined\n # mode names are not of list type\n with pytest.raises(ModeError):\n incorrect_mode_names = {mode: \"{}-mode\".format(mode) for mode in range(order)}\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # not enough mode names\n with pytest.raises(ModeError):\n incorrect_mode_names = [\"{}-mode\".format(mode) for mode in range(order - 1)]\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # too many mode names\n with pytest.raises(ModeError):\n incorrect_mode_names = [\"{}-mode\".format(mode) for mode in range(order + 1)]\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # all mode names should be strings\n with pytest.raises(ModeError):\n incorrect_mode_names = [\"{}-mode\".format(mode) for mode in range(order)]\n incorrect_mode_names[0] = 0\n Tensor(array=correct_data, mode_names=incorrect_mode_names)\n\n # ------ tests for custom state being incorrectly defined\n # custom state should be passed as a dict\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = [correct_normal_shape,\n correct_mode_order,\n correct_rtype]\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # custom state not fully defined\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=correct_mode_order)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # normal shape of custom state should be a tuple\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n incorrect_normal_shape = [I, J, K]\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=incorrect_normal_shape,\n mode_order=correct_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # normal shape of custom state is inconsistent with the shape of provided data\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n incorrect_normal_shape = (I+1, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=incorrect_normal_shape,\n mode_order=correct_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # mode order of custom state should be a !! TUPLE !! 
of lists\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = [[0], [1], [2]]\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # mode order of custom state should be a tuple of !! LISTS !!\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = (0, 1, 2)\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # number of list in mode order should correspond to the number of dimensions of provided data\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = ([0], [1, 2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J*K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # length of mode order of custom state is inconsistent with the normal shape\n with pytest.raises(StateError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n incorrect_mode_order = ([0], [1], [2, 3])\n correct_rtype = \"T\"\n\n incorrect_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=incorrect_mode_order,\n rtype=correct_rtype)\n Tensor(array=correct_data, custom_state=incorrect_custom_state)\n\n # length of normal shape of custom state is inconsistent with the length of provided mode names\n with pytest.raises(ModeError):\n I, J, K = 2, 4, 8\n correct_data = np.ones(I * J * K).reshape(I, J, K)\n correct_normal_shape = (I, J, K)\n correct_mode_order = ([0], [1], [2])\n correct_rtype = \"T\"\n correct_custom_state = dict(normal_shape=correct_normal_shape,\n mode_order=correct_mode_order,\n rtype=correct_rtype)\n incorrect_mode_names = [\"frequency\", \"time\"]\n Tensor(array=correct_data, custom_state=correct_custom_state, mode_names=incorrect_mode_names)",
"def test_initialization(data_and_labels):\n labeled_data = LabeledData(*data_and_labels)\n\n assert hasattr(labeled_data, \"_data\")\n assert hasattr(labeled_data, \"_label\")",
"def testWithNoLabel(self):\n record_defaults = [\n constant_op.constant([], dtypes.int32),\n constant_op.constant([], dtypes.int64),\n constant_op.constant([], dtypes.float32),\n constant_op.constant([], dtypes.float64),\n constant_op.constant([], dtypes.string)\n ]\n\n column_names = [\"col%d\" % i for i in range(5)]\n inputs = [[\",\".join(x for x in column_names), \"0,1,2,3,4\", \"5,6,7,8,9\"], [\n \",\".join(x for x in column_names), \"10,11,12,13,14\", \"15,16,17,18,19\"\n ]]\n expected_output = [[0, 1, 2, 3, b\"4\"], [5, 6, 7, 8, b\"9\"],\n [10, 11, 12, 13, b\"14\"], [15, 16, 17, 18, b\"19\"]]\n\n self._test_dataset(\n inputs,\n expected_output=expected_output,\n expected_keys=column_names,\n column_names=column_names,\n batch_size=1,\n num_epochs=1,\n shuffle=False,\n header=True,\n column_defaults=record_defaults,\n )",
"def test_attr_reference(self):\n tensor_data = np.random.rand(2, 2, 4, 4).astype(np.float32)\n with Graph(\"test_graph\", \"Reference\") as test_graph:\n input_tensor = Tensor(data_layout=types_pb2.NHWC, tensor_data=tensor_data)\n act = input_data(input_tensor, \"input\")\n graph_proto, tensor_data_array = test_graph.to_proto()\n self.assertEqual(graph_proto.backend, \"Reference\")\n node = get_node_proto(graph_proto, \"input\")\n self.assertEqual(node.input_tensors[0].data_type, types_pb2.Float32)\n self.assertEqual(node.input_tensors[0].shape.dims, [2, 2, 4, 4])\n self.assertEqual(node.input_tensors[0].shape.layout, types_pb2.NHWC)\n self.assertEqual(node.input_tensors[0].shape.alignment, 0)\n tensor_data_proto = get_tensor_data(\n tensor_data_array, node.input_tensors[0].name)\n self.assertEqual(tensor_data_proto.float_data, list(tensor_data.flatten()))\n self.assertEqual(len(tensor_data_proto.half_data), 0)\n self.assertEqual(len(tensor_data_proto.double_data), 0)\n self.assertEqual(len(tensor_data_proto.int_data), 0)\n self.assertEqual(len(tensor_data_proto.int64_data), 0)",
"def test_from_labels_5q(self):\n labels = [5 * \"I\", 5 * \"X\", 5 * \"Y\", 5 * \"Z\"]\n array = np.array(\n [10 * [False], 5 * [True] + 5 * [False], 10 * [True], 5 * [False] + 5 * [True]],\n dtype=bool,\n )\n with self.assertWarns(DeprecationWarning):\n target = PauliTable(array)\n value = PauliTable.from_labels(labels)\n self.assertEqual(target, value)",
"def test_update_labels():\n allure.dynamic.label('user_label', 'very cool')\n pass",
"def test_to_labels_1q(self):\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable(\n np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]],\n dtype=bool,\n )\n )\n target = [\"I\", \"Z\", \"Z\", \"X\", \"Y\"]\n value = pauli.to_labels()\n self.assertEqual(value, target)",
"def assert_is_model_tensor(self, x: TensorLike) -> None:",
"def test_labels_round_trip_array(self):\n labels = [\"III\", \"IXZ\", \"XYI\", \"ZZZ\"]\n target = np.array(labels)\n with self.assertWarns(DeprecationWarning):\n value = PauliTable.from_labels(labels).to_labels(array=True)\n self.assertTrue(np.all(value == target))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that multiplying a tensor by an observable produces a tensor
|
def test_multiply_tensor_obs(self):
X = qml.PauliX(0)
Y = qml.Hadamard(2)
Z = qml.PauliZ(1)
t = X @ Y
t = t @ Z
assert isinstance(t, Tensor)
assert t.obs == [X, Y, Z]
|
[
"def test_subscribe_tensors_on_different_devices(self):\n c1 = constant_op.constant(10)\n c2 = constant_op.constant(20)\n\n with ops.device('cpu:0'):\n add = math_ops.add(c1, c2)\n\n with ops.device('cpu:1'):\n mul = math_ops.multiply(c1, c2)\n\n def sub(t):\n return t\n\n add_sub = subscribe.subscribe(\n add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n mul_sub = subscribe.subscribe(\n mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n # Expect the identity tensors injected by subscribe to have been created\n # on the same device as their original tensors.\n self.assertNotEqual(add_sub.device, mul_sub.device)\n self.assertEqual(add.device, add_sub.device)\n self.assertEqual(mul.device, mul_sub.device)",
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def __mul__(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, assure_tensor(other))",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def mul(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, other)",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def vm_impl_mul(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n return Tensor(x * y)\n\n return vm_impl",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def vm_impl_mat_mul(self):\n\n def vm_impl(x, w):\n x = x.asnumpy()\n w = w.asnumpy()\n if self.transpose_a:\n x = x.transpose()\n if self.transpose_b:\n w = w.transpose()\n z = x @ w\n return Tensor(z)\n\n return vm_impl",
"def test_tensor(self, j):\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n with self.assertWarns(DeprecationWarning):\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.tensor(pauli2)\n with self.assertWarns(DeprecationWarning):\n target = PauliTable.from_labels([i + j for i in labels1 for j in labels2])\n self.assertEqual(value, target)",
"def test_train_static_multiplication():\n\n train_retry(\n epoch_count=4000,\n expected_interpolation_loss=0.0001,\n expected_extrapolation_loss=0.0001,\n learning_rate=0.05,\n task=lambda a, b: a * b,\n )",
"def test_multiply_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))",
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_ContinuousModel_multivariate():\n\n class MyModel(ContinuousModel):\n def __init__(self):\n self.weight = Parameter([5, 3], name=\"Weight\")\n self.bias = Parameter([1, 3], name=\"Bias\")\n self.std = ScaleParameter([1, 3], name=\"Std\")\n\n def __call__(self, x):\n return Normal(x @ self.weight() + self.bias(), self.std())\n\n # Instantiate the model\n model = MyModel()\n\n # Data\n x = np.random.randn(100, 5).astype(\"float32\")\n w = np.random.randn(5, 3).astype(\"float32\")\n y = x @ w + 1\n\n # Fit the model\n model.fit(x, y, batch_size=50, epochs=2, lr=0.01)\n\n # pred_dist_plot should not work with nonscalar output\n with pytest.raises(NotImplementedError):\n model.pred_dist_plot(x[:10, :], n=10)\n\n # predictive_prc should not work with nonscalar output\n with pytest.raises(NotImplementedError):\n model.predictive_prc(x[:10, :], y[:10, :], n=10)",
"def __call__(self, t):\n return self.a(t) * self.b(t)",
"def test_mul(a, b):\n assert Surreal(a) * Surreal(b) == Surreal(a * b)",
"def multiply(S, T):\r\n return youngtableau(wordFromMatrix(multiplyMatrix(S.matrix, T.matrix)))",
"def test_append_tensor_ops(self):\n\n with Queue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that multiplying a tensor by a tensor produces a tensor
|
def test_multiply_tensor_tensor(self):
X = qml.PauliX(0)
Y = qml.PauliY(2)
Z = qml.PauliZ(1)
H = qml.Hadamard(3)
t1 = X @ Y
t2 = Z @ H
t = t2 @ t1
assert isinstance(t, Tensor)
assert t.obs == [Z, H, X, Y]
|
[
"def __mul__(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, assure_tensor(other))",
"def mul(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, other)",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def _transpose_mul(self, a, b):\n return tf.transpose(tf.multiply(tf.transpose(a), b))",
"def vm_impl_mat_mul(self):\n\n def vm_impl(x, w):\n x = x.asnumpy()\n w = w.asnumpy()\n if self.transpose_a:\n x = x.transpose()\n if self.transpose_b:\n w = w.transpose()\n z = x @ w\n return Tensor(z)\n\n return vm_impl",
"def vm_impl_mul(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n return Tensor(x * y)\n\n return vm_impl",
"def cTensorMultiply():\n code_text = \"\"\"\n KERNEL void cTensorMultiply(\n const unsigned int batch, // batch \n const unsigned int dim, // dimensions\n GLOBAL_MEM const unsigned int *Nd, // In batch mode, Nd*batch but is calculated outside the kernel\n GLOBAL_MEM const unsigned int *Nd_elements, // Number of elements to move along the dimension = strides / itemsize\n GLOBAL_MEM const float *invNd_elements, // float: inverse of the Nd_elements, which aims for fast division // batch mode: Nd_elements / batch\n GLOBAL_MEM const float *vec, // Real, vector, length sum Nd[dimid]\n GLOBAL_MEM float2 *outdata, \n const unsigned int div) \n {\n const unsigned int gid=get_global_id(0); \n const unsigned int pid = (float)gid / (float)batch;\n // const unsigned int bat = gid - pid * batch;\n \n unsigned int group;\n unsigned int Nd_indx_shift = 0;\n float mul = 1.0; \n unsigned int res = pid; \n \n for (unsigned int dimid = 0; dimid < dim; dimid ++){\n group = (float)res * invNd_elements[dimid]; // The index along the axis\n res = res - group * Nd_elements[dimid];\n \n const unsigned int N = Nd[dimid]; \n \n mul = mul * vec[group + Nd_indx_shift];\n \n Nd_indx_shift = Nd_indx_shift + N;\n }\n \n if (div == 1){\n // for (unsigned int bat = 0; bat < batch; bat ++ )\n // {\n float2 tmp = outdata[gid];\n tmp.x = tmp.x / mul;\n tmp.y = tmp.y / mul;\n outdata[gid] = tmp;\n // };\n };\n if (div == 0){\n // for (unsigned int bat = 0; bat < batch; bat ++ )\n // {\n float2 tmp = outdata[gid];\n tmp.x = tmp.x * mul;\n tmp.y = tmp.y * mul;\n outdata[gid] = tmp;\n // };\n }; \n \n };\n \"\"\" \n return code_text",
"def mul(tensorA, tensorB, a_axis, b_axis):\r\n with tf.name_scope('contraction') as scope:\r\n if isinstance(a_axis, int):\r\n a_axis = [a_axis]\r\n if isinstance(b_axis, int):\r\n b_axis = [b_axis]\r\n A = t2mat(tensorA, a_axis, -1)\r\n B = t2mat(tensorB, b_axis, -1)\r\n mat_dot = tf.matmul(A, B, transpose_a=True)\r\n back_shape = [tensorA.get_shape()[_].value for _ in range(tensorA.get_shape().ndims) if _ not in a_axis] + \\\r\n [tensorB.get_shape()[_].value for _ in range(tensorB.get_shape().ndims) if _ not in b_axis]\r\n return tf.reshape(mat_dot, back_shape)",
"def test_multiply_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))",
"def test_tensor(self, j):\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n with self.assertWarns(DeprecationWarning):\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.tensor(pauli2)\n with self.assertWarns(DeprecationWarning):\n target = PauliTable.from_labels([i + j for i in labels1 for j in labels2])\n self.assertEqual(value, target)",
"def tf_matmul(x, y, name=None):\n if x.ndim == y.ndim == 1:\n x_ = tf.reshape(x, shape=(1, -1))\n y_ = tf.reshape(y, shape=(-1, 1))\n return tf.matmul(x_, y_, name=name)\n else:\n return tf.matmul(x, y, name=name)",
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def test_mul(a, b):\n assert Surreal(a) * Surreal(b) == Surreal(a * b)",
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def matmul(\n inp1: Tensor,\n inp2: Tensor,\n transpose_a=False,\n transpose_b=False,\n compute_mode=\"default\",\n) -> Tensor:\n return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode)",
"def mul(self, matrix):",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def test_subscribe_tensors_on_different_devices(self):\n c1 = constant_op.constant(10)\n c2 = constant_op.constant(20)\n\n with ops.device('cpu:0'):\n add = math_ops.add(c1, c2)\n\n with ops.device('cpu:1'):\n mul = math_ops.multiply(c1, c2)\n\n def sub(t):\n return t\n\n add_sub = subscribe.subscribe(\n add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n mul_sub = subscribe.subscribe(\n mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n # Expect the identity tensors injected by subscribe to have been created\n # on the same device as their original tensors.\n self.assertNotEqual(add_sub.device, mul_sub.device)\n self.assertEqual(add.device, add_sub.device)\n self.assertEqual(mul.device, mul_sub.device)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that multiplying a tensor in-place produces a tensor
|
def test_multiply_tensor_in_place(self):
X = qml.PauliX(0)
Y = qml.PauliY(2)
Z = qml.PauliZ(1)
H = qml.Hadamard(3)
t = X
t @= Y
t @= Z @ H
assert isinstance(t, Tensor)
assert t.obs == [X, Y, Z, H]
|
[
"def __mul__(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, assure_tensor(other))",
"def vm_impl_mat_mul(self):\n\n def vm_impl(x, w):\n x = x.asnumpy()\n w = w.asnumpy()\n if self.transpose_a:\n x = x.transpose()\n if self.transpose_b:\n w = w.transpose()\n z = x @ w\n return Tensor(z)\n\n return vm_impl",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def mul(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, other)",
"def vm_impl_mul(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n return Tensor(x * y)\n\n return vm_impl",
"def _transpose_mul(self, a, b):\n return tf.transpose(tf.multiply(tf.transpose(a), b))",
"def cTensorMultiply():\n code_text = \"\"\"\n KERNEL void cTensorMultiply(\n const unsigned int batch, // batch \n const unsigned int dim, // dimensions\n GLOBAL_MEM const unsigned int *Nd, // In batch mode, Nd*batch but is calculated outside the kernel\n GLOBAL_MEM const unsigned int *Nd_elements, // Number of elements to move along the dimension = strides / itemsize\n GLOBAL_MEM const float *invNd_elements, // float: inverse of the Nd_elements, which aims for fast division // batch mode: Nd_elements / batch\n GLOBAL_MEM const float *vec, // Real, vector, length sum Nd[dimid]\n GLOBAL_MEM float2 *outdata, \n const unsigned int div) \n {\n const unsigned int gid=get_global_id(0); \n const unsigned int pid = (float)gid / (float)batch;\n // const unsigned int bat = gid - pid * batch;\n \n unsigned int group;\n unsigned int Nd_indx_shift = 0;\n float mul = 1.0; \n unsigned int res = pid; \n \n for (unsigned int dimid = 0; dimid < dim; dimid ++){\n group = (float)res * invNd_elements[dimid]; // The index along the axis\n res = res - group * Nd_elements[dimid];\n \n const unsigned int N = Nd[dimid]; \n \n mul = mul * vec[group + Nd_indx_shift];\n \n Nd_indx_shift = Nd_indx_shift + N;\n }\n \n if (div == 1){\n // for (unsigned int bat = 0; bat < batch; bat ++ )\n // {\n float2 tmp = outdata[gid];\n tmp.x = tmp.x / mul;\n tmp.y = tmp.y / mul;\n outdata[gid] = tmp;\n // };\n };\n if (div == 0){\n // for (unsigned int bat = 0; bat < batch; bat ++ )\n // {\n float2 tmp = outdata[gid];\n tmp.x = tmp.x * mul;\n tmp.y = tmp.y * mul;\n outdata[gid] = tmp;\n // };\n }; \n \n };\n \"\"\" \n return code_text",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_inverse_transformation(self, seed_pair, repeat):\n dim = 128\n x = tf.random.uniform((dim,))\n transformed_x = compression_utils.randomized_hadamard_transform(\n x, seed_pair=seed_pair, repeat=repeat)\n reverted_x = compression_utils.inverse_randomized_hadamard_transform(\n transformed_x, original_dim=dim, seed_pair=seed_pair, repeat=repeat)\n x, reverted_x = self.evaluate([x, reverted_x])\n self.assertAllClose(x, reverted_x)",
"def lazy_mul(a, b):\r\n return a * b",
"def matmul(\n inp1: Tensor,\n inp2: Tensor,\n transpose_a=False,\n transpose_b=False,\n compute_mode=\"default\",\n) -> Tensor:\n return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode)",
"def test_queuing_tensor_rmatmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n\n t1 = op1 @ op2\n\n op3 = qml.PauliZ(3)\n\n t2 = op3 @ t1\n\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op3, op1, op2)}",
"def test_tensor(self, j):\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n with self.assertWarns(DeprecationWarning):\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.tensor(pauli2)\n with self.assertWarns(DeprecationWarning):\n target = PauliTable.from_labels([i + j for i in labels1 for j in labels2])\n self.assertEqual(value, target)",
"def mul(tensorA, tensorB, a_axis, b_axis):\r\n with tf.name_scope('contraction') as scope:\r\n if isinstance(a_axis, int):\r\n a_axis = [a_axis]\r\n if isinstance(b_axis, int):\r\n b_axis = [b_axis]\r\n A = t2mat(tensorA, a_axis, -1)\r\n B = t2mat(tensorB, b_axis, -1)\r\n mat_dot = tf.matmul(A, B, transpose_a=True)\r\n back_shape = [tensorA.get_shape()[_].value for _ in range(tensorA.get_shape().ndims) if _ not in a_axis] + \\\r\n [tensorB.get_shape()[_].value for _ in range(tensorB.get_shape().ndims) if _ not in b_axis]\r\n return tf.reshape(mat_dot, back_shape)",
"def test_multiply_scalar(self):\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))",
"def mul(self, matrix):",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def tf_matmul(x, y, name=None):\n if x.ndim == y.ndim == 1:\n x_ = tf.reshape(x, shape=(1, -1))\n y_ = tf.reshape(y, shape=(-1, 1))\n return tf.matmul(x_, y_, name=name)\n else:\n return tf.matmul(x, y, name=name)",
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an exception is raised if an observable is multiplied by an operation
|
def test_operation_multiply_invalid(self):
    X = qml.PauliX(0)
    Y = qml.CNOT(wires=[0, 1])
    Z = qml.PauliZ(0)

    with pytest.raises(
        ValueError, match="Can only perform tensor products between observables"
    ):
        T = X @ Z
        T @ Y

    with pytest.raises(
        ValueError, match="Can only perform tensor products between observables"
    ):
        T = X @ Z
        4 @ T
|
[
"def test_multiply_except(self):\n chan = SuperOp(self.sopI)\n self.assertRaises(QiskitError, chan.multiply, 's')\n self.assertRaises(QiskitError, chan.multiply, chan)",
"def test_mul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Cannot multiply Observable by\"):\n _ = \"dummy\" * qml.PauliX(0)",
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_operations_after_observables(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n qml.RY(0.5, wires=[0])\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"gates must precede\"):\n node(0.5)",
"def test_power_except(self):\n chan = SuperOp(self.depol_sop(1))\n # Non-integer power raises error\n self.assertRaises(QiskitError, chan.power, 0.5)",
"def test_pow_method_with_non_numeric_power_raises_error(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(ValueError, match=\"Cannot raise an Operator\"):\n _ = DummyOp(wires=[0]) ** DummyOp(wires=[0])",
"def test_observable_order_violated(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0)), ex\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_return_of_non_observable(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), 0.3\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"must return either\"):\n node(0.5)",
"def test_estimation_cost_error(norm, error):\n with pytest.raises(ValueError, match=\"must be greater than zero\"):\n qml.resource.DoubleFactorization.estimation_cost(norm, error)",
"def test_observable_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_raise_error_fewer_than_2_operands(self):\n with pytest.raises(ValueError, match=\"Require at least two operators to combine;\"):\n _ = ValidOp(qml.PauliX(0))",
"def test_exceptions(self):\n NUM_PROD = 1\n NUM_CONS = 1\n\n p_args = []\n c_args = []\n\n for i in range(NUM_PROD):\n p_args.append({\"add\": None})\n\n for i in range(NUM_CONS):\n c_args.append({\"add\": None})\n\n c = Calculon(prod_function, p_args, False, cons_function, c_args, True)\n result = c.start()",
"def test_single_excitation_unitary_exceptions(self, weight, single_wires, msg_match):\n dev = qml.device(\"default.qubit\", wires=5)\n\n def circuit(weight=weight):\n SingleExcitationUnitary(weight=weight, wires=single_wires)\n return qml.expval(qml.PauliZ(0))\n\n qnode = qml.QNode(circuit, dev)\n\n with pytest.raises(ValueError, match=msg_match):\n qnode(weight=weight)",
"def test_build_ops_error():\n qubit = cirq.LineQubit.range(1)\n with pytest.raises(ValueError):\n cirq_utils.qubit_op_to_gate('W', qubit[0])",
"def test_operation_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 2])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_double_excitation_unitary_exceptions(self, weight, wires1, wires2, msg_match):\n dev = qml.device(\"default.qubit\", wires=10)\n\n def circuit(weight=weight):\n DoubleExcitationUnitary(weight=weight, wires1=wires1, wires2=wires2)\n return qml.expval(qml.PauliZ(0))\n\n qnode = qml.QNode(circuit, dev)\n\n with pytest.raises(ValueError, match=msg_match):\n qnode(weight=weight)",
"def test_perform_error():\n intent = Error(ValueError(\"foo\"))\n with raises(ValueError):\n sync_perform(TypeDispatcher({Error: perform_error}), Effect(intent))",
"def test_inv_error(self):\n\n operation = CirqOperation(\n lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]\n )\n operation.parametrize(0.1, 0.2, 0.3)\n\n with pytest.raises(\n qml.DeviceError, match=\"CirqOperation can't be inverted after it was parametrized\"\n ):\n operation.inv()",
"def test_arithmetic_errors(self):\n H = qml.Hamiltonian([1], [qml.PauliZ(0)])\n A = [[1, 0], [0, -1]]\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H @ A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = A @ H\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H + A\n with pytest.raises(TypeError, match=\"can't multiply sequence by non-int\"):\n _ = H * A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n _ = H - A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H += A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H *= A\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n H -= A"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the correct eigenvalues are returned for the Tensor
|
def test_eigvals(self):
    X = qml.PauliX(0)
    Y = qml.PauliY(2)
    t = Tensor(X, Y)
    assert np.array_equal(t.eigvals(), np.kron([1, -1], [1, -1]))

    # test that the eigvals are now cached and not recalculated
    assert np.array_equal(t._eigvals_cache, t.eigvals())
|
[
"def test_eigen_caching(self):\n diag_op = ValidOp(*self.simple_operands)\n eig_decomp = diag_op.eigendecomposition\n\n eig_vecs = eig_decomp[\"eigvec\"]\n eig_vals = eig_decomp[\"eigval\"]\n\n eigs_cache = diag_op._eigs[diag_op.hash]\n cached_vecs = eigs_cache[\"eigvec\"]\n cached_vals = eigs_cache[\"eigval\"]\n\n assert np.allclose(eig_vals, cached_vals)\n assert np.allclose(eig_vecs, cached_vecs)",
"def test_eigenvals_calcfunction(\n configure, # pylint: disable=unused-argument\n sample,\n):\n eigenvals_calcfunction = CalculationFactory(\n 'tbmodels.calcfunctions.eigenvals'\n )\n tb_model = DataFactory('singlefile')(file=sample('model.hdf5'))\n\n k_mesh = DataFactory('array.kpoints')()\n k_mesh.set_kpoints_mesh([4, 4, 4], offset=[0, 0, 0])\n\n res = eigenvals_calcfunction(tb_model=tb_model, kpoints=k_mesh)\n assert isinstance(res, DataFactory('array.bands'))\n assert res.get_array('bands').shape == (64, 14)",
"def eigen():\n global vecs, vals, _arr\n vals, vecs = la.eig(_arr)",
"def test_eigenvals(\n configure_with_daemon, # pylint: disable=unused-argument\n sample,\n get_tbmodels_process_builder\n):\n builder = get_tbmodels_process_builder('tbmodels.eigenvals')\n\n builder.tb_model = DataFactory('singlefile')(file=sample('model.hdf5'))\n\n k_mesh = DataFactory('array.kpoints')()\n k_mesh.set_kpoints_mesh([4, 4, 4], offset=[0, 0, 0])\n builder.kpoints = k_mesh\n\n output = run(builder)\n assert isinstance(output['bands'], DataFactory('array.bands'))",
"def test_getE():\n\n from pydft.schrodinger import _getE\n from numpy.matlib import randn\n\n s = [6,6,4]\n R = np.array([[6,0,0],[0,6,0],[0,0,6]])\n W = np.array(randn(np.prod(s), 4) + 1j*randn(np.prod(s), 4))\n\n out = _getE(s,R,W)\n\n assert np.allclose(out.imag,0)",
"def computeStatsEigen(self):\n # TO-DO: figure out why this op has delays (possibly moving\n # eigenvectors around?)\n with tf.device('/cpu:0'):\n def removeNone(tensor_list):\n local_list = []\n for item in tensor_list:\n if item is not None:\n local_list.append(item)\n return local_list\n\n def copyStats(var_list):\n print(\"copying stats to buffer tensors before eigen decomp\")\n redundant_stats = {}\n copied_list = []\n for item in var_list:\n if item is not None:\n if item not in redundant_stats:\n if self._use_float64:\n redundant_stats[item] = tf.cast(\n tf.identity(item), tf.float64)\n else:\n redundant_stats[item] = tf.identity(item)\n copied_list.append(redundant_stats[item])\n else:\n copied_list.append(None)\n return copied_list\n #stats = [copyStats(self.fStats), copyStats(self.bStats)]\n #stats = [self.fStats, self.bStats]\n\n stats_eigen = self.stats_eigen\n computedEigen = {}\n eigen_reverse_lookup = {}\n updateOps = []\n # sync copied stats\n # with tf.control_dependencies(removeNone(stats[0]) +\n # removeNone(stats[1])):\n with tf.control_dependencies([]):\n for stats_var in stats_eigen:\n if stats_var not in computedEigen:\n eigens = tf.self_adjoint_eig(stats_var)\n e = eigens[0]\n Q = eigens[1]\n if self._use_float64:\n e = tf.cast(e, tf.float32)\n Q = tf.cast(Q, tf.float32)\n updateOps.append(e)\n updateOps.append(Q)\n computedEigen[stats_var] = {'e': e, 'Q': Q}\n eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']\n eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']\n\n self.eigen_reverse_lookup = eigen_reverse_lookup\n self.eigen_update_list = updateOps\n\n if KFAC_DEBUG:\n self.eigen_update_list = [item for item in updateOps]\n with tf.control_dependencies(updateOps):\n updateOps.append(tf.Print(tf.constant(\n 0.), [tf.convert_to_tensor('computed factor eigen')]))\n\n return updateOps",
"def eigenvalues(self):\n if self._eigenvalues is None:\n self._eigenvalues = self.K.linalg.eigvalsh(self.matrix)\n return self._eigenvalues",
"def eigenvalues(M):\n return roots(characteristic_polynomial(M))",
"def test_Ea(self):\n self.assertAlmostEqual(self.surfarr.Ea.value_si * 0.001, self.Ea, 6)",
"def test_eigval(file):\n filedir = \"__testfiles__/\" + file\n mass = sol.input_reader(filedir)[0]\n x_min, x_max = sol.input_reader(filedir)[1][:2]\n length = x_max - x_min\n eigmin, eigmax = sol.input_reader(filedir)[4:6]\n # getting the eigenvalues for the specific problem from data or equations\n eigvallist = []\n if file == \"test_infpot.txt\":\n for nn in range(eigmin, eigmax + 1):\n eigval = (4 * np.pi**2) / (8 * mass * length**2) * nn**2\n eigvallist.append(eigval)\n elif file == \"test_harmonic.txt\":\n for nn in range(eigmin - 1, eigmax):\n eigval = 1 / 2 * (nn + 1 / 2)\n eigvallist.append(eigval)\n elif file == \"test_pot.txt\":\n eigvallist = np.loadtxt(\"__unittestfiles__/test_pot_energy.dat\")\n elif file == \"test_dualpot_lin.txt\":\n eigvallist = np.loadtxt\\\n (\"__unittestfiles__/test_dualpot_lin_energy.dat\")\n elif file == \"test_dualpot_cspline.txt\":\n eigvallist = np.loadtxt\\\n (\"__unittestfiles__/test_dualpot_cspline_energy.dat\")\n elif file == \"test_asympot.txt\":\n eigvallist = np.loadtxt(\"__unittestfiles__/test_asympot_energy.dat\")\n else:\n eigvallist = np.ones((1, eigmax - eigmin + 1))\n eigvalarray = np.array(eigvallist)\n sol.run(filedir, \"__output__\")\n testeigarray = np.loadtxt(\"__output__/energies.dat\")\n assert np.all(np.abs(eigvalarray - testeigarray) < ERROR)",
"def CalcEigenValVec(T):\n #get eigenvalues and eigenvectors\n eigvals, eigvec = np.linalg.eig(T)\n\n #sort by eigenvalues\n #put eigenvalues and and eigenvectors together in one list and sort this one\n valvec = []\n for i in range(len(eigvals)):\n valvec.append([eigvals[i], np.transpose(eigvec)[i].tolist()])\n\n #sort eigenvalues and eigenvectors by eigenvalues\n valvec.sort(lambda x, y: -cmp(x[0], y[0])) # sort from large to small\n\n for i in range(len(valvec)):\n eigvals[i] = valvec[i][0]\n eigvec[i] = valvec[i][1]\n\n return eigvals, eigvec",
"def comparisonEigenvalues(iterations):\n A = np.random.rand(100,100)\n B = A.T.dot(A)\n B = (B+B.T)/2.0\n\n #QR algorithm\n eigenvaluesQR = np.diagonal(QRAlgorithm(B, iterations))\n eigenvaluesQR = np.sort(eigenvaluesQR)\n eigenvaluesQR = eigenvaluesQR[::-1] #Reverse the array to get larger value first\n #print(eigenvaluesQR[::-1])\n\n #numpy\n eigenvaluesNumpy, eigenVectors = np.linalg.eig(B)\n #print(eigenvaluesNumpy)\n\n #Computes difference\n difference = np.sqrt(np.sum(np.square(eigenvaluesNumpy-eigenvaluesQR)))\n print(\"Difference between eigenvalues : %f\" % difference)",
"def eig(mat, qrTol):\n\n n = len(mat)\n\n PTOTAL = np.identity(n)\n\n for k in range(1, n-1):\n s = 0.0\n for j in range(k+1, n+1):\n s += mat[j-1, k-1]**2\n alpha = -np.sign(mat[k, k-1])*np.sqrt(s)\n\n r = np.sqrt(0.5*alpha**2 - 0.5*alpha*mat[k, k-1])\n\n # Create the w vector\n w = np.zeros([n])\n w[k] = (mat[k, k-1] - alpha)/(2*r)\n\n for j in range(k+2, n+1):\n w[j-1] = mat[j-1, k-1]/(2*r)\n\n # The Householder reflector P_k\n P = np.identity(n) - 2*np.outer(w, w.T)\n\n # Apply the similarity transformation\n mat = np.dot(P, np.dot(mat, P))\n\n # Save the eigenvectors\n # U = P_n*...*P_2*P_1*I\n PTOTAL = np.dot(PTOTAL, P)\n\n H = mat\n\n \"\"\"\n Here begins the QR section.\n\n The QR method is used to transform a symmetric matrix to a diagonal\n matrix using similarity transformations so the eigenvalues and\n eigenvectors can be obtained.\n \"\"\"\n\n k = 0\n QTOTAL = np.identity(n)\n flag = False\n while flag is False:\n qr = qr_by_givens(H) # Factor H = QR\n # qr = np.linalg.qr(H)\n Q = qr[0]\n R = qr[1]\n\n H = np.dot(R, Q)\n QTOTAL = np.dot(QTOTAL, Q) # Save the eigenvectors\n\n k += 1\n\n # Check if all the off-diagonal elements are ~0\n flag = True\n for i in range(n):\n for j in range(n):\n if i != j and abs(H[i, j]) > qrTol:\n flag = False\n break\n\n # The eigenvalues are the diagonal elements of\n D = np.diagonal(H)\n\n # The eigenvectors are the columns of\n V = np.dot(PTOTAL, QTOTAL)\n\n return D, V",
"def test_minor_symmetry_tensor(self):\n if not available:\n self.skipTest(reason)\n e_tensor = PyEshelbyTensor(6.0, 5.0, 4.0, 0.3)\n\n for indx in product([0, 1, 2], repeat=4):\n val1 = e_tensor(indx[0], indx[1], indx[2], indx[3])\n\n val2 = e_tensor(indx[0], indx[1], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[2], indx[3])\n self.assertAlmostEqual(val1, val2)",
"def check_stability(self, verbose=True):\n (eigenvalues, eigenvectors) = np.linalg.eig(self.Cvoigt)\n if not (np.amin(eigenvalues) > 0.0):\n print 'Eigenvalues:', eigenvalues\n raise ValueError('Elastic tensor is not stable to small strains (Voigt matrix is not positive definite) !')\n if verbose: print 'Stability checked! Eigenvalues:', eigenvalues\n return",
"def test_tensor_scalar_attributes():\n ### DEFINING ANALYTICAL VALUES ###\n evals = np.array([2., 1., 0.])\n a = 1. / np.sqrt(2)\n #evec[:,j] is pair with eval[j]\n evecs = np.array([[a, 0, -a], [a, 0, a], [0, 1., 0]])\n D = np.array([[1., 1., 0], [1., 1., 0], [0, 0, 1.]])\n FA = np.sqrt(1./2*(1+4+1)/(1+4+0)) # 0.7745966692414834\n MD = 1.\n\n ### CALCULATE ESTIMATE VALUES ###\n dummy_data = np.ones((1,10)) #single voxel\n dummy_gtab = np.zeros((10,3))\n dummy_bval = np.zeros((10,))\n tensor = dti.Tensor(dummy_data,dummy_bval,dummy_gtab)\n tensor.model_params = np.r_['-1,2', evals, evecs.ravel()]\n\n ### TESTS ###\n assert_almost_equal(np.abs(np.dot(evecs[:, 2],\n tensor[0].evecs[:, 2].T)), 1.,\n msg = \"Calculation of third eigenvector is not right\")\n assert_array_almost_equal(D, tensor[0].D, err_msg = \"Recovery of self diffusion tensor from eig not adaquate\")\n assert_almost_equal(FA, tensor.fa(), msg = \"Calculation of FA of self diffusion tensor is not adequate\")\n assert_almost_equal(MD, tensor.md(), msg = \"Calculation of MD of self diffusion tensor is not adequate\")\n assert_equal(True, tensor.mask.all())\n\n #assert_equal(m_list.shape, n_list.shape)\n #assert_equal(m_list.ndim, 2)\n #assert_equal(m_list.shape, (45,1))\n #assert_true(np.all(np.abs(m_list) <= n_list))\n #assert_array_equal(n_list % 2, 0)\n #assert_raises(ValueError, qball.sph_harm_ind_list, 1)",
"def testA2KR(self):\n\n for i in range(50):\n with self.subTest(i = i):\n A = np.random.rand(3, 3)\n K, R = geoCV.camera.A2KR(A)\n\n self.assertLessEqual(np.linalg.norm(A - K.dot(R)), 1e-3)",
"def test_eigen_pts(self):\n line = ElementaryLine([0.0, 1.2, 0.8])\n eigen_pts = line.eigen_points\n\n self.assertTrue(eigen_pts.has_barrier)\n self.assertTupleEqual(eigen_pts.A, (0.0, 0.0))\n self.assertTupleEqual(eigen_pts.B, (1.0, 0.0))\n self.assertTupleEqual(eigen_pts.C, (1.5151515151515151, 1.2008062953822003))\n self.assertTupleEqual(eigen_pts.D, (2.0, 0.80000000000000004))\n self.assertTupleEqual(eigen_pts.E, (3.0, 0.80000000000000004))\n\n line = ElementaryLine([0.0, 0.8])\n eigen_pts = line.eigen_points\n\n self.assertFalse(eigen_pts.has_barrier)\n self.assertTupleEqual(eigen_pts.A, (0.0, 0.0))\n self.assertTupleEqual(eigen_pts.B, (1.0, 0.0))\n self.assertTupleEqual(eigen_pts.C, (2.0, 0.80000000000000004))\n self.assertTupleEqual(eigen_pts.D, (2.0, 0.80000000000000004))\n self.assertTupleEqual(eigen_pts.E, (3.0, 0.80000000000000004))",
"def ev(A):\n\n # Compute to Hessenberg and return Q matrix\n Q = hessenbergQ(A)\n\n # Computing eigenvalues and eigenvectors of H\n V = hessenberg_ev(A)\n\n # Converting into eigenvalues of A\n V = Q.dot(V)\n\n return V",
"def eig(S):\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n\n idx = eigenvalues.argsort()[::-1]\n eigenvalues = eigenvalues[idx]\n eigenvectors = eigenvectors[:, idx]\n\n return eigenvalues, eigenvectors # <-- EDIT THIS to return eigenvalues and corresp eigenvectors"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the correct eigenvalues are returned for the Tensor containing a Hermitian observable
|
def test_eigvals_hermitian(self, tol):
    X = qml.PauliX(0)
    hamiltonian = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
    Herm = qml.Hermitian(hamiltonian, wires=[1, 2])
    t = Tensor(X, Herm)

    d = np.kron(np.array([1.0, -1.0]), np.array([-1.0, 1.0, 1.0, 1.0]))
    t = t.eigvals()
    assert np.allclose(t, d, atol=tol, rtol=0)
|
[
"def test_eigenvals_calcfunction(\n configure, # pylint: disable=unused-argument\n sample,\n):\n eigenvals_calcfunction = CalculationFactory(\n 'tbmodels.calcfunctions.eigenvals'\n )\n tb_model = DataFactory('singlefile')(file=sample('model.hdf5'))\n\n k_mesh = DataFactory('array.kpoints')()\n k_mesh.set_kpoints_mesh([4, 4, 4], offset=[0, 0, 0])\n\n res = eigenvals_calcfunction(tb_model=tb_model, kpoints=k_mesh)\n assert isinstance(res, DataFactory('array.bands'))\n assert res.get_array('bands').shape == (64, 14)",
"def test_eigen_caching(self):\n diag_op = ValidOp(*self.simple_operands)\n eig_decomp = diag_op.eigendecomposition\n\n eig_vecs = eig_decomp[\"eigvec\"]\n eig_vals = eig_decomp[\"eigval\"]\n\n eigs_cache = diag_op._eigs[diag_op.hash]\n cached_vecs = eigs_cache[\"eigvec\"]\n cached_vals = eigs_cache[\"eigval\"]\n\n assert np.allclose(eig_vals, cached_vals)\n assert np.allclose(eig_vecs, cached_vecs)",
"def test_eigenvals(\n configure_with_daemon, # pylint: disable=unused-argument\n sample,\n get_tbmodels_process_builder\n):\n builder = get_tbmodels_process_builder('tbmodels.eigenvals')\n\n builder.tb_model = DataFactory('singlefile')(file=sample('model.hdf5'))\n\n k_mesh = DataFactory('array.kpoints')()\n k_mesh.set_kpoints_mesh([4, 4, 4], offset=[0, 0, 0])\n builder.kpoints = k_mesh\n\n output = run(builder)\n assert isinstance(output['bands'], DataFactory('array.bands'))",
"def test_getE():\n\n from pydft.schrodinger import _getE\n from numpy.matlib import randn\n\n s = [6,6,4]\n R = np.array([[6,0,0],[0,6,0],[0,0,6]])\n W = np.array(randn(np.prod(s), 4) + 1j*randn(np.prod(s), 4))\n\n out = _getE(s,R,W)\n\n assert np.allclose(out.imag,0)",
"def eigenvalues(M):\n return roots(characteristic_polynomial(M))",
"def eigen():\n global vecs, vals, _arr\n vals, vecs = la.eig(_arr)",
"def ev(A):\n\n # Compute to Hessenberg and return Q matrix\n Q = hessenbergQ(A)\n\n # Computing eigenvalues and eigenvectors of H\n V = hessenberg_ev(A)\n\n # Converting into eigenvalues of A\n V = Q.dot(V)\n\n return V",
"def hessenberg_ev(H):\n m, n = H.shape\n assert(m == n)\n assert(np.linalg.norm(H[np.tril_indices(m, -2)]) < 1.0e-6)\n ee, V = np.linalg.eig(H)\n return ee, V",
"def test_hamiltonian_error(self):\n\n n_wires = 2\n dev = qml.device(\"default.qubit\", wires=n_wires)\n\n hamiltonian = np.array([[1, 1], [1, 1]])\n\n @qml.qnode(dev)\n def circuit():\n ApproxTimeEvolution(hamiltonian, 2, 3)\n return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_wires)]\n\n with pytest.raises(ValueError, match=\"hamiltonian must be of type pennylane.Hamiltonian\"):\n circuit()",
"def computeStatsEigen(self):\n # TO-DO: figure out why this op has delays (possibly moving\n # eigenvectors around?)\n with tf.device('/cpu:0'):\n def removeNone(tensor_list):\n local_list = []\n for item in tensor_list:\n if item is not None:\n local_list.append(item)\n return local_list\n\n def copyStats(var_list):\n print(\"copying stats to buffer tensors before eigen decomp\")\n redundant_stats = {}\n copied_list = []\n for item in var_list:\n if item is not None:\n if item not in redundant_stats:\n if self._use_float64:\n redundant_stats[item] = tf.cast(\n tf.identity(item), tf.float64)\n else:\n redundant_stats[item] = tf.identity(item)\n copied_list.append(redundant_stats[item])\n else:\n copied_list.append(None)\n return copied_list\n #stats = [copyStats(self.fStats), copyStats(self.bStats)]\n #stats = [self.fStats, self.bStats]\n\n stats_eigen = self.stats_eigen\n computedEigen = {}\n eigen_reverse_lookup = {}\n updateOps = []\n # sync copied stats\n # with tf.control_dependencies(removeNone(stats[0]) +\n # removeNone(stats[1])):\n with tf.control_dependencies([]):\n for stats_var in stats_eigen:\n if stats_var not in computedEigen:\n eigens = tf.self_adjoint_eig(stats_var)\n e = eigens[0]\n Q = eigens[1]\n if self._use_float64:\n e = tf.cast(e, tf.float32)\n Q = tf.cast(Q, tf.float32)\n updateOps.append(e)\n updateOps.append(Q)\n computedEigen[stats_var] = {'e': e, 'Q': Q}\n eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']\n eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']\n\n self.eigen_reverse_lookup = eigen_reverse_lookup\n self.eigen_update_list = updateOps\n\n if KFAC_DEBUG:\n self.eigen_update_list = [item for item in updateOps]\n with tf.control_dependencies(updateOps):\n updateOps.append(tf.Print(tf.constant(\n 0.), [tf.convert_to_tensor('computed factor eigen')]))\n\n return updateOps",
"def test_minor_symmetry_tensor(self):\n if not available:\n self.skipTest(reason)\n e_tensor = PyEshelbyTensor(6.0, 5.0, 4.0, 0.3)\n\n for indx in product([0, 1, 2], repeat=4):\n val1 = e_tensor(indx[0], indx[1], indx[2], indx[3])\n\n val2 = e_tensor(indx[0], indx[1], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[2], indx[3])\n self.assertAlmostEqual(val1, val2)",
"def test_analytic_hellinger_nlls(self):\n self.cost_func = HellingerNLLSCostFunc(self.fitting_problem)\n self.cost_func.jacobian = self.jacobian\n self.cost_func.hessian = self.hessian\n eval_result, _ = self.cost_func.hes_res(params=self.params)\n actual_hessian = grad2_r_hellinger(self.fitting_problem.data_x)\n\n self.assertTrue(np.isclose(actual_hessian, eval_result).all())",
"def eigenvalues(self):\n if self._eigenvalues is None:\n self._eigenvalues = self.K.linalg.eigvalsh(self.matrix)\n return self._eigenvalues",
"def test_Ea(self):\n self.assertAlmostEqual(self.surfarr.Ea.value_si * 0.001, self.Ea, 6)",
"def test_turbomole_parse_hessian(h2o_nprhessian):\n hessian = parse_hessian(h2o_nprhessian)\n assert hessian.shape == (9, 9)\n eigvals, _ = np.linalg.eigh(hessian)\n assert eigvals[-1] == pytest.approx(1.12157030e00)",
"def eigen_basis(self, hx = 0.):\n\t\treturn self.hamiltonian_cont.eigh(time=hx)",
"def check_hess(f):\n #try:\n # np.linalg.cholesky(f.hess(f.min))\n #except:\n # return False\n #else:\n # return True\n return np.all(np.linalg.eigvals(f.hess(f.min)) >= 0.0)\n #return True",
"def test_eigval(file):\n filedir = \"__testfiles__/\" + file\n mass = sol.input_reader(filedir)[0]\n x_min, x_max = sol.input_reader(filedir)[1][:2]\n length = x_max - x_min\n eigmin, eigmax = sol.input_reader(filedir)[4:6]\n # getting the eigenvalues for the specific problem from data or equations\n eigvallist = []\n if file == \"test_infpot.txt\":\n for nn in range(eigmin, eigmax + 1):\n eigval = (4 * np.pi**2) / (8 * mass * length**2) * nn**2\n eigvallist.append(eigval)\n elif file == \"test_harmonic.txt\":\n for nn in range(eigmin - 1, eigmax):\n eigval = 1 / 2 * (nn + 1 / 2)\n eigvallist.append(eigval)\n elif file == \"test_pot.txt\":\n eigvallist = np.loadtxt(\"__unittestfiles__/test_pot_energy.dat\")\n elif file == \"test_dualpot_lin.txt\":\n eigvallist = np.loadtxt\\\n (\"__unittestfiles__/test_dualpot_lin_energy.dat\")\n elif file == \"test_dualpot_cspline.txt\":\n eigvallist = np.loadtxt\\\n (\"__unittestfiles__/test_dualpot_cspline_energy.dat\")\n elif file == \"test_asympot.txt\":\n eigvallist = np.loadtxt(\"__unittestfiles__/test_asympot_energy.dat\")\n else:\n eigvallist = np.ones((1, eigmax - eigmin + 1))\n eigvalarray = np.array(eigvallist)\n sol.run(filedir, \"__output__\")\n testeigarray = np.loadtxt(\"__output__/energies.dat\")\n assert np.all(np.abs(eigvalarray - testeigarray) < ERROR)",
"def test_lih_energy():\n eref = -8.877719570384043\n norb = 6\n nalpha = 2\n nbeta = 2\n nele = nalpha + nbeta\n h1e, h2e, lih_ground = build_lih_data.build_lih_data('energy')\n\n elec_hamil = restricted_hamiltonian.RestrictedHamiltonian((h1e, h2e))\n wfn = Wavefunction([[nele, nalpha - nbeta, norb]])\n wfn.set_wfn(strategy='from_data',\n raw_data={(nele, nalpha - nbeta): lih_ground})\n\n ecalc = wfn.expectationValue(elec_hamil)\n assert round(abs(eref - ecalc), 8) == 0",
"def test_compute_mne_inverse():\n\n setno = 0\n snr = 3.0\n lambda2 = 1.0 / snr**2\n dSPM = True\n\n res = mne.compute_inverse(fname_data, setno, fname_inv, lambda2, dSPM,\n baseline=(None, 0))\n\n assert np.all(res['sol'] > 0)\n assert np.all(res['sol'] < 35)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the correct eigenvalues are returned for the Tensor containing an Identity
|
def test_eigvals_identity(self, tol):
    X = qml.PauliX(0)
    Iden = qml.Identity(1)
    t = Tensor(X, Iden)

    d = np.kron(np.array([1.0, -1.0]), np.array([1.0, 1.0]))
    t = t.eigvals()
    assert np.allclose(t, d, atol=tol, rtol=0)
|
[
"def test_eigen_caching(self):\n diag_op = ValidOp(*self.simple_operands)\n eig_decomp = diag_op.eigendecomposition\n\n eig_vecs = eig_decomp[\"eigvec\"]\n eig_vals = eig_decomp[\"eigval\"]\n\n eigs_cache = diag_op._eigs[diag_op.hash]\n cached_vecs = eigs_cache[\"eigvec\"]\n cached_vals = eigs_cache[\"eigval\"]\n\n assert np.allclose(eig_vals, cached_vals)\n assert np.allclose(eig_vecs, cached_vecs)",
"def eigen():\n global vecs, vals, _arr\n vals, vecs = la.eig(_arr)",
"def eigenvalues(M):\n return roots(characteristic_polynomial(M))",
"def test_eigenvals_calcfunction(\n configure, # pylint: disable=unused-argument\n sample,\n):\n eigenvals_calcfunction = CalculationFactory(\n 'tbmodels.calcfunctions.eigenvals'\n )\n tb_model = DataFactory('singlefile')(file=sample('model.hdf5'))\n\n k_mesh = DataFactory('array.kpoints')()\n k_mesh.set_kpoints_mesh([4, 4, 4], offset=[0, 0, 0])\n\n res = eigenvals_calcfunction(tb_model=tb_model, kpoints=k_mesh)\n assert isinstance(res, DataFactory('array.bands'))\n assert res.get_array('bands').shape == (64, 14)",
"def eigenvalues(self):\n if self._eigenvalues is None:\n self._eigenvalues = self.K.linalg.eigvalsh(self.matrix)\n return self._eigenvalues",
"def test_getE():\n\n from pydft.schrodinger import _getE\n from numpy.matlib import randn\n\n s = [6,6,4]\n R = np.array([[6,0,0],[0,6,0],[0,0,6]])\n W = np.array(randn(np.prod(s), 4) + 1j*randn(np.prod(s), 4))\n\n out = _getE(s,R,W)\n\n assert np.allclose(out.imag,0)",
"def test_tensor_scalar_attributes():\n ### DEFINING ANALYTICAL VALUES ###\n evals = np.array([2., 1., 0.])\n a = 1. / np.sqrt(2)\n #evec[:,j] is pair with eval[j]\n evecs = np.array([[a, 0, -a], [a, 0, a], [0, 1., 0]])\n D = np.array([[1., 1., 0], [1., 1., 0], [0, 0, 1.]])\n FA = np.sqrt(1./2*(1+4+1)/(1+4+0)) # 0.7745966692414834\n MD = 1.\n\n ### CALCULATE ESTIMATE VALUES ###\n dummy_data = np.ones((1,10)) #single voxel\n dummy_gtab = np.zeros((10,3))\n dummy_bval = np.zeros((10,))\n tensor = dti.Tensor(dummy_data,dummy_bval,dummy_gtab)\n tensor.model_params = np.r_['-1,2', evals, evecs.ravel()]\n\n ### TESTS ###\n assert_almost_equal(np.abs(np.dot(evecs[:, 2],\n tensor[0].evecs[:, 2].T)), 1.,\n msg = \"Calculation of third eigenvector is not right\")\n assert_array_almost_equal(D, tensor[0].D, err_msg = \"Recovery of self diffusion tensor from eig not adaquate\")\n assert_almost_equal(FA, tensor.fa(), msg = \"Calculation of FA of self diffusion tensor is not adequate\")\n assert_almost_equal(MD, tensor.md(), msg = \"Calculation of MD of self diffusion tensor is not adequate\")\n assert_equal(True, tensor.mask.all())\n\n #assert_equal(m_list.shape, n_list.shape)\n #assert_equal(m_list.ndim, 2)\n #assert_equal(m_list.shape, (45,1))\n #assert_true(np.all(np.abs(m_list) <= n_list))\n #assert_array_equal(n_list % 2, 0)\n #assert_raises(ValueError, qball.sph_harm_ind_list, 1)",
"def test_eigenvals(\n configure_with_daemon, # pylint: disable=unused-argument\n sample,\n get_tbmodels_process_builder\n):\n builder = get_tbmodels_process_builder('tbmodels.eigenvals')\n\n builder.tb_model = DataFactory('singlefile')(file=sample('model.hdf5'))\n\n k_mesh = DataFactory('array.kpoints')()\n k_mesh.set_kpoints_mesh([4, 4, 4], offset=[0, 0, 0])\n builder.kpoints = k_mesh\n\n output = run(builder)\n assert isinstance(output['bands'], DataFactory('array.bands'))",
"def test_minor_symmetry_tensor(self):\n if not available:\n self.skipTest(reason)\n e_tensor = PyEshelbyTensor(6.0, 5.0, 4.0, 0.3)\n\n for indx in product([0, 1, 2], repeat=4):\n val1 = e_tensor(indx[0], indx[1], indx[2], indx[3])\n\n val2 = e_tensor(indx[0], indx[1], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[3], indx[2])\n self.assertAlmostEqual(val1, val2)\n\n val2 = e_tensor(indx[1], indx[0], indx[2], indx[3])\n self.assertAlmostEqual(val1, val2)",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def testCTFInverse(self):\n m=4\n n=3\n k=2\n A = crandom.random((m,k))\n ctfInvA = CTFinverse(A)\n npA = la.pinv(A.to_nparray())\n for i in range(len(npA)):\n for j in range(len(npA[0])):\n assert(abs(ctfInvA[i][j] - npA[i][j]) < .00000000001)\n print(\"Inverse Works!\")",
"def comparisonEigenvalues(iterations):\n A = np.random.rand(100,100)\n B = A.T.dot(A)\n B = (B+B.T)/2.0\n\n #QR algorithm\n eigenvaluesQR = np.diagonal(QRAlgorithm(B, iterations))\n eigenvaluesQR = np.sort(eigenvaluesQR)\n eigenvaluesQR = eigenvaluesQR[::-1] #Reverse the array to get larger value first\n #print(eigenvaluesQR[::-1])\n\n #numpy\n eigenvaluesNumpy, eigenVectors = np.linalg.eig(B)\n #print(eigenvaluesNumpy)\n\n #Computes difference\n difference = np.sqrt(np.sum(np.square(eigenvaluesNumpy-eigenvaluesQR)))\n print(\"Difference between eigenvalues : %f\" % difference)",
"def __left_eigvec( A ):\n def __apply_left( vec ):\n # Reshape vec\n vec = np.reshape( vec, (A[0].shape[1], A[0].shape[1]) )\n\n # Contract as if transfer matrix\n vec = np.tensordot( vec, A[0], axes=(0,1) ) # (lt lb)(d lt rt) -> (lb d rt)\n vec = np.tensordot( vec, np.conjugate(A[0]), axes=((0,1),(1,0)) ) #(lb d rt)(d lb rb) -> (rt rb)\n\n if len(A) > 1:\n for s in range(1,len(A)):\n vec = np.tensordot( vec, A[s], axes=(0,1) )\n vec = np.tensordot( vec, np.conjugate(A[s]), axes=((0,1),(1,0)) )\n\n return np.reshape( vec, A[-1].shape[2]*A[-1].shape[2] )\n\n E = LinearOperator( (A[-1].shape[2]*A[-1].shape[2], A[0].shape[1]*A[0].shape[1]), matvec = __apply_left, dtype=np.complex128 )\n\n # Hermitian initial guess!\n init = np.random.rand( A[0].shape[1], A[0].shape[1] ) + 1j*np.random.rand( A[0].shape[1], A[0].shape[1] )\n init = 0.5*(init + np.conjugate(np.transpose(init)))\n init = np.reshape( init, A[0].shape[1]*A[0].shape[1] )\n\n ev, eigvec = sp.sparse.linalg.eigs(E, k=1, which='LM', v0=init, maxiter=1e4)\n return ev, np.array(np.reshape(eigvec, (A[-1].shape[2], A[-1].shape[2])))",
"def __right_eigvec( A ):\n\n def __apply_right( vec ):\n # Reshape vec\n vec = np.reshape( vec, (A[-1].shape[2],A[-1].shape[2]) )\n\n # Contract as if transfer matrix\n vec = np.tensordot( vec, A[-1], axes=(0,2) ) # (rt rb)(d lt rt) -> (rb d lt)\n vec = np.tensordot( vec, np.conjugate(A[-1]), axes=( (0,1),(2,0) ) ) # (rb d lt)(d lb rb) -> (lt lb)\n\n if len(A) > 1:\n for s in range(len(A)-2,-1,-1):\n vec = np.tensordot( vec, A[s], axes=(0,2) )\n vec = np.tensordot( vec, np.conjugate(A[s]), axes=( (0,1),(2,0)) )\n\n return np.reshape( vec, A[0].shape[1]*A[0].shape[1] )\n\n E = LinearOperator( (A[0].shape[1]*A[0].shape[1], A[-1].shape[2]*A[-1].shape[2]), matvec = __apply_right, dtype=np.complex128 )\n\n # Hermitian initial guess!\n init = np.random.rand( A[-1].shape[2], A[-1].shape[2] ) + 1j*np.random.rand( A[-1].shape[2], A[-1].shape[2] )\n init = 0.5*(init + np.conjugate(np.transpose(init)))\n init = np.reshape( init, A[-1].shape[2]*A[-1].shape[2] )\n\n ev, eigvec = sp.sparse.linalg.eigs(E, k=1, which='LM', v0=init, maxiter=1e4)\n return ev, np.array(np.reshape(eigvec, (A[0].shape[1], A[0].shape[1])))",
"def eig(mat, qrTol):\n\n n = len(mat)\n\n PTOTAL = np.identity(n)\n\n for k in range(1, n-1):\n s = 0.0\n for j in range(k+1, n+1):\n s += mat[j-1, k-1]**2\n alpha = -np.sign(mat[k, k-1])*np.sqrt(s)\n\n r = np.sqrt(0.5*alpha**2 - 0.5*alpha*mat[k, k-1])\n\n # Create the w vector\n w = np.zeros([n])\n w[k] = (mat[k, k-1] - alpha)/(2*r)\n\n for j in range(k+2, n+1):\n w[j-1] = mat[j-1, k-1]/(2*r)\n\n # The Householder reflector P_k\n P = np.identity(n) - 2*np.outer(w, w.T)\n\n # Apply the similarity transformation\n mat = np.dot(P, np.dot(mat, P))\n\n # Save the eigenvectors\n # U = P_n*...*P_2*P_1*I\n PTOTAL = np.dot(PTOTAL, P)\n\n H = mat\n\n \"\"\"\n Here begins the QR section.\n\n The QR method is used to transform a symmetric matrix to a diagonal\n matrix using similarity transformations so the eigenvalues and\n eigenvectors can be obtained.\n \"\"\"\n\n k = 0\n QTOTAL = np.identity(n)\n flag = False\n while flag is False:\n qr = qr_by_givens(H) # Factor H = QR\n # qr = np.linalg.qr(H)\n Q = qr[0]\n R = qr[1]\n\n H = np.dot(R, Q)\n QTOTAL = np.dot(QTOTAL, Q) # Save the eigenvectors\n\n k += 1\n\n # Check if all the off-diagonal elements are ~0\n flag = True\n for i in range(n):\n for j in range(n):\n if i != j and abs(H[i, j]) > qrTol:\n flag = False\n break\n\n # The eigenvalues are the diagonal elements of\n D = np.diagonal(H)\n\n # The eigenvectors are the columns of\n V = np.dot(PTOTAL, QTOTAL)\n\n return D, V",
"def CalcEigenValVec(T):\n #get eigenvalues and eigenvectors\n eigvals, eigvec = np.linalg.eig(T)\n\n #sort by eigenvalues\n #put eigenvalues and and eigenvectors together in one list and sort this one\n valvec = []\n for i in range(len(eigvals)):\n valvec.append([eigvals[i], np.transpose(eigvec)[i].tolist()])\n\n #sort eigenvalues and eigenvectors by eigenvalues\n valvec.sort(lambda x, y: -cmp(x[0], y[0])) # sort from large to small\n\n for i in range(len(valvec)):\n eigvals[i] = valvec[i][0]\n eigvec[i] = valvec[i][1]\n\n return eigvals, eigvec",
"def eig(S):\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n\n idx = eigenvalues.argsort()[::-1]\n eigenvalues = eigenvalues[idx]\n eigenvectors = eigenvectors[:, idx]\n\n return eigenvalues, eigenvectors # <-- EDIT THIS to return eigenvalues and corresp eigenvectors",
"def principal_stresses(stress_tensor):\n return np.linalg.eigvals(stress_tensor)",
"def computeStatsEigen(self):\n # TO-DO: figure out why this op has delays (possibly moving\n # eigenvectors around?)\n with tf.device('/cpu:0'):\n def removeNone(tensor_list):\n local_list = []\n for item in tensor_list:\n if item is not None:\n local_list.append(item)\n return local_list\n\n def copyStats(var_list):\n print(\"copying stats to buffer tensors before eigen decomp\")\n redundant_stats = {}\n copied_list = []\n for item in var_list:\n if item is not None:\n if item not in redundant_stats:\n if self._use_float64:\n redundant_stats[item] = tf.cast(\n tf.identity(item), tf.float64)\n else:\n redundant_stats[item] = tf.identity(item)\n copied_list.append(redundant_stats[item])\n else:\n copied_list.append(None)\n return copied_list\n #stats = [copyStats(self.fStats), copyStats(self.bStats)]\n #stats = [self.fStats, self.bStats]\n\n stats_eigen = self.stats_eigen\n computedEigen = {}\n eigen_reverse_lookup = {}\n updateOps = []\n # sync copied stats\n # with tf.control_dependencies(removeNone(stats[0]) +\n # removeNone(stats[1])):\n with tf.control_dependencies([]):\n for stats_var in stats_eigen:\n if stats_var not in computedEigen:\n eigens = tf.self_adjoint_eig(stats_var)\n e = eigens[0]\n Q = eigens[1]\n if self._use_float64:\n e = tf.cast(e, tf.float32)\n Q = tf.cast(Q, tf.float32)\n updateOps.append(e)\n updateOps.append(Q)\n computedEigen[stats_var] = {'e': e, 'Q': Q}\n eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']\n eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']\n\n self.eigen_reverse_lookup = eigen_reverse_lookup\n self.eigen_update_list = updateOps\n\n if KFAC_DEBUG:\n self.eigen_update_list = [item for item in updateOps]\n with tf.control_dependencies(updateOps):\n updateOps.append(tf.Print(tf.constant(\n 0.), [tf.convert_to_tensor('computed factor eigen')]))\n\n return updateOps",
"def _get_precondition_gradient_eigen(self):\n raise NotImplementedError('Use inv method for embedding layers')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the diagonalizing gate set numerically diagonalizes the tensor observable
|
def test_diagonalizing_gates_numerically_diagonalizes(self, tol):
    # create a tensor observable acting on consecutive wires
    H = np.diag([1, 2, 3, 4])
    O = qml.PauliX(0) @ qml.PauliY(1) @ qml.Hermitian(H, [2, 3])

    O_mat = O.matrix()
    diag_gates = O.diagonalizing_gates()

    # group the diagonalizing gates based on what wires they act on
    U_list = []
    for _, g in itertools.groupby(diag_gates, lambda x: x.wires.tolist()):
        # extract the matrices of each diagonalizing gate
        mats = [i.matrix() for i in g]

        # Need to revert the order in which the matrices are applied such that they adhere to the order
        # of matrix multiplication
        # E.g. for PauliY: [PauliZ(wires=self.wires), S(wires=self.wires), Hadamard(wires=self.wires)]
        # becomes Hadamard @ S @ PauliZ, where @ stands for matrix multiplication
        mats = mats[::-1]

        if len(mats) > 1:
            # multiply all unitaries together before appending
            mats = [multi_dot(mats)]

        # append diagonalizing unitary for specific wire to U_list
        U_list.append(mats[0])

    # since the test is assuming consecutive wires for each observable
    # in the tensor product, it is sufficient to Kronecker product
    # the entire list.
    U = reduce(np.kron, U_list)

    res = U @ O_mat @ U.conj().T
    expected = np.diag(O.eigvals())

    # once diagonalized by U, the result should be a diagonal
    # matrix of the eigenvalues.
    assert np.allclose(res, expected, atol=tol, rtol=0)
|
[
"def test_diagonalizing_gates_non_overlapping(self):\n diag_op = ValidOp(qml.PauliZ(wires=0), qml.Identity(wires=1))\n assert diag_op.diagonalizing_gates() == []",
"def test_diagonalizing_gates_overlapping(self):\n diag_op = ValidOp(qml.S(0), qml.PauliX(0))\n diagonalizing_gates = diag_op.diagonalizing_gates()\n\n assert len(diagonalizing_gates) == 1\n diagonalizing_mat = diagonalizing_gates[0].matrix()\n\n true_mat = np.eye(2)\n\n assert np.allclose(diagonalizing_mat, true_mat)",
"def overload_diag(inputs: torch.Tensor):\r\n inputs[:, 0, :, :, :] = inputs[:, 0, :, :, :] + EPSILON\r\n inputs[:, 4, :, :, :] = inputs[:, 4, :, :, :] + EPSILON\r\n inputs[:, 8, :, :, :] = inputs[:, 8, :, :, :] + EPSILON\r\n\r\n return inputs",
"def _batch_diagonal(tensor: torch.Tensor) -> torch.Tensor:\n return torch.diagonal(tensor, dim1=-2, dim2=-1).unsqueeze(1)",
"def test_add_diagonals_simple():\n a = np.array([\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [1, 2, 3, 4]\n ])\n b = np.array([\n [1, 2, 3, 4],\n [5, 6, 7, 8]\n ])\n expected_output = np.array([\n [2, 4, 6, 8],\n [10, 12, 14, 16],\n [1, 2, 3, 4]\n ])\n output = _banded_utils._add_diagonals(a, b)\n\n assert_array_equal(output, expected_output)",
"def is_diagonalizable(self):\n # TODO\n pass",
"def test_diff_1_diags(data_size, lower_only):\n diagonal_data = _banded_utils._diff_1_diags(data_size, lower_only)\n\n diff_matrix = _banded_utils.difference_matrix(data_size, 1)\n diag_matrix = (diff_matrix.T @ diff_matrix).todia()\n actual_diagonal_data = diag_matrix.data[::-1]\n if lower_only:\n actual_diagonal_data = actual_diagonal_data[1:]\n\n assert_array_equal(diagonal_data, actual_diagonal_data)",
"def zero_diag(mat: torch.Tensor) -> torch.Tensor:\n return mat - torch.diag(mat.diag())",
"def test_get_diagonal_hamiltonian():\n diag = numpy.zeros((5,), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_hamiltonian.Diagonal(diag, e_0)\n test2 = fqe.get_diagonal_hamiltonian(diag, e_0)\n\n assert test == test2",
"def make_matrix_set_diag_tests(options):\n\n test_parameters = [\n {\n \"input_diag_shapes\": [([3, 3], [3]), ([2, 3], [2]), ([2, 4,\n 4], [2, 4]),\n ([3, 4, 5, 6], [3, 4, 5])],\n \"input_dtype\": [tf.int32, tf.float32, tf.uint8],\n },\n ]\n\n def build_graph(parameters):\n input_shape = parameters[\"input_diag_shapes\"][0]\n diag_shape = parameters[\"input_diag_shapes\"][1]\n input_tensor = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"], name=\"input\", shape=input_shape)\n diag_tensor = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"], name=\"diagonal\", shape=diag_shape)\n outs = tf.linalg.set_diag(input_tensor, diag_tensor)\n return [input_tensor, diag_tensor], [outs]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_shape = parameters[\"input_diag_shapes\"][0]\n diag_shape = parameters[\"input_diag_shapes\"][1]\n input_values = create_tensor_data(parameters[\"input_dtype\"], input_shape)\n diag_values = create_tensor_data(parameters[\"input_dtype\"], diag_shape)\n return [input_values, diag_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values, diag_values])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)",
"def test_diagouter():\n from pydft.schrodinger import _diagouter\n\n A = np.random.normal(0,5,(10,3)) + np.random.normal(0,5,(10,3))*1j\n B = np.random.normal(0,5,(10,3)) + np.random.normal(0,5,(10,3))*1j\n out = np.dot(A,np.conj(B.T))\n assert np.allclose(_diagouter(A,B),np.diag(out))",
"def test_add_diagonals_fails():\n a = np.array([\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [1, 2, 3, 4]\n ])\n b = np.array([\n [1, 2, 3, 4],\n [5, 6, 7, 8]\n ])\n\n # row mismatch is not a multiple of 2 when lower_only=False\n with pytest.raises(ValueError):\n _banded_utils._add_diagonals(a, b, lower_only=False)\n\n # mismatched number of columns\n with pytest.raises(ValueError):\n _banded_utils._add_diagonals(a[:, 1:], b)",
"def _off_diagonal(x: torch.Tensor) -> torch.Tensor:\n n, m = x.shape\n assert n == m\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()",
"def is_diagonal_matrix(tensor: ndarray):\n assert len(tensor.shape) == 2\n i, j = tensor.shape\n assert i == j\n test = tensor.reshape(-1)[:-1].reshape(i-1, j+1)\n return ~np.any(test[:, 1:])",
"def sweep_diagonal(self):\n self._sampler_obs.reset()\n\n # Burnout phase\n self._sampler_obs.generate_samples(self._n_discard)\n\n # Generate samples and store them\n self._samples_obs = self._sampler_obs.generate_samples(\n self._n_samples_node_obs, samples=self._samples_obs\n )\n\n self._obs_samples_valid = True",
"def test_random_p_matrix_diag_vector(self):\n for i in range(NUM_TESTS):\n diag = [0, 0.2, 0.6, 1.0]\n p = Probs.random(RnaPairs, diag)._data\n for i, d, row in zip(range(4), diag, p):\n self.assertFloatEqual(sum(row), 1.0)\n self.assertEqual(row[i], diag[i])",
"def test_metric_matrix_is_block_diagonal(self, metric_args, base_point):\n metric = self.Metric(*metric_args)\n result = metric.metric_matrix(base_point)\n individual_metric_matrices = [metric.matrix for metric in metric_args[0]]\n expected = reduce(gs.kron, individual_metric_matrices)\n self.assertAllClose(result, expected)",
"def test_fixNegsDiag(self):\n q = Rates([[-6,2,2,2],[-6,-2,4,4],[2,2,-6,2],[4,4,-2,-6]], RnaPairs)\n m = q.fixNegsDiag()._data\n self.assertEqual(m,array([[-6,2,2,2],[0,-8,4,4],[2,2,-6,2],[4,4,0,-8]]))",
"def diagonal(a, *parms):\n return a.diagonal()",
"def _approx_diag(self):\n if self.size(-2) != self.size(-1):\n raise NotImplementedError(\n \"diag does not make sense when matrix is not square\"\n )\n\n # calling approx diag\n with torch.set_grad_enabled(True):\n loss = self.criterion(self.model(self.data), self.target)\n\n ones_list = []\n for param in self.model.parameters():\n ones_list.append(torch.ones_like(param))\n\n # this may not strictly be an upper bound because J^T 1 may not\n # be a good approximator of \\sum_p |df/d\\theta_p|\n # J^T 1 = \\sum_j J_{ij} (returns a n dimensional vector)\n jac_sum_by_point = Rop(loss, self.model.parameters(), ones_list)[0]\n\n if self.num_outputs == 1:\n jac_sum_by_point = jac_sum_by_point.squeeze(-1)\n elif len(jac_sum_by_point.shape) > 1:\n jac_sum_by_point = jac_sum_by_point.t()\n\n # squares the n dimensional vector\n return jac_sum_by_point.pow(2.0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an exception is raised if a wire_order is passed to the matrix method
|
def test_matrix_wire_order_not_implemented(self):
    O = qml.PauliX(0) @ qml.PauliY(1)
    with pytest.raises(NotImplementedError, match="wire_order"):
        O.matrix(wire_order=[1, 0])
|
[
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def test_inv_3d_blockmatrix_odd(self):\n matrix = np.random.rand(3, 5, 5)\n with self.assertRaises(ValueError):\n inversematrix = geometry.inv_3d_blockmatrix(matrix)",
"def test_reconstruct_not_raised(self, *shapes):\n self.assert_exception_is_not_raised(matting.reconstruct, shapes)",
"def _check_matrix(data_matrix: List[List]):\n if len(data_matrix) < 3:\n raise IndexError(\"Not enough rows to process file\")\n\n if len(data_matrix[0]) != 6:\n raise IndexError(\"Row 1 has not the right amount of columns\")\n if len(data_matrix[1]) != 8:\n raise IndexError(\"Row 1 has not the right amount of columns\")\n if len(data_matrix[2]) != 5:\n raise IndexError(\"Row 1 has not the right amount of columns\")",
"def test_incompatible_dimensions(self, matrices):\n # Instantiate 5x10, 10x5, and 10x10 matrices as Matrix class\n square_mat = chap5.Matrix(matrices.square)\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n # Verify Matrix class raises AssertionError for incompatible dimensions\n with pytest.raises(AssertionError):\n half_row_mat + half_col_mat # (5x10) + (10x5)\n with pytest.raises(AssertionError):\n half_col_mat + half_row_mat # (10x5) + (5x10)\n with pytest.raises(AssertionError):\n half_col_mat @ square_mat # (10x5) @ (10x10)\n with pytest.raises(AssertionError):\n square_mat @ half_row_mat # (10x10) @ (5x10)",
"def test_difference_matrix_order_neg():\n with pytest.raises(ValueError):\n _banded_utils.difference_matrix(10, diff_order=-2)",
"def test_operation_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 2])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def test_purity_non_density_matrix():\n rho = np.array([[1, 2], [3, 4]])\n\n with np.testing.assert_raises(ValueError):\n purity(rho)",
"def test_normalize_bad_axis():\n X = np.array([[1, 2], [0, 1], [1, 1]])\n assert_raises(ValueError, normalize_matrix_on_axis, X, axis=3)",
"def test_observable_on_nonexistant_wire(self, operable_mock_device_2_wires):\n\n operable_mock_device_2_wires.num_wires = 2\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)",
"def test_create_input_matrix(self):\n input_matrix = create_input_matrix(self.log_return_dataframe, 'angular')\n self.check_angular_distance(input_matrix)\n # An incorrect sub type raises Value Error\n self.assertRaises(ValueError, create_input_matrix, self.log_return_dataframe, 'invalid matrix subtype')",
"def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def test_create_zero_matrix_bad_inputs(self):\n expected = []\n bad_inputs = [[], {}, (), '', 9.22, -1, 0, -6, None, True]\n for bad_input in bad_inputs:\n actual_left = create_zero_matrix(bad_input, 1)\n actual_right = create_zero_matrix(1, bad_input)\n self.assertEqual(expected, actual_left)\n self.assertEqual(expected, actual_right)",
"def test_no_wire_order_returns_base_matrix(self):\n res = qml.operation.expand_matrix(self.base_matrix_2, wires=[0, 2])\n assert np.allclose(self.base_matrix_2, res)",
"def test_invalid_mesh_exception(self):\n dev = qml.device(\"default.gaussian\", wires=2)\n varphi = [0.42342, 0.234]\n\n @qml.qnode(dev)\n def circuit(varphi, mesh=None):\n Interferometer(theta=[0.21], phi=[0.53], varphi=varphi, mesh=mesh, wires=[0, 1])\n return qml.expval(qml.NumberOperator(0))\n\n with pytest.raises(ValueError, match=\"did not recognize mesh\"):\n circuit(varphi, mesh=\"a\")",
"def testDistanceMatrixForUnknownFeature(self):\n csv = StringIO('Ignored, A, B, C\\n'\n 'name1, 1, 2, 3\\n'\n 'name2, 4, 5, 6\\n')\n m = Matrix(csv)\n error = \"^'XXX'$\"\n assertRaisesRegex(self, KeyError, error, m.distanceMatrix, 'XXX')",
"def test_valid_morphology(self):\n\n # We're using vertices with inconsistent dimensions here, which Numpy\n # does not like.\n # Ignore the VisibleDeprecationWarning that numpy throws.\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", \"Creating an ndarray from ragged nested sequences\"\n )\n vertices = [[0, 0, 0], [1, 1]]\n connectivity = [-1, 0]\n self.assertRaises(Exception, am.ArrayMorphology, vertices, connectivity)\n\n vertices = [[0, 0, 0], [1, 1, 1]]\n connectivity = [-1, 0, 0]\n self.assertRaises(AssertionError, am.ArrayMorphology, vertices, connectivity)\n\n vertices = [[0, 0, 0], [1, 1, 1]]\n connectivity = []\n self.assertRaises(AssertionError, am.ArrayMorphology, vertices, connectivity)",
"def test_inv_3d_blockmatrix_nonquadratic(self):\n matrix = np.random.rand(3, 4, 6)\n with self.assertRaises(ValueError):\n inversematrix = geometry.inv_3d_blockmatrix(matrix)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that a warning is raised if the wires that the factors in the tensor product act on partially overlap.
|
def test_tensor_matrix_partial_wires_overlap_warning(self, tol):
H = np.diag([1, 2, 3, 4])
O1 = qml.PauliX(0) @ qml.Hermitian(H, [0, 1])
O2 = qml.Hermitian(H, [0, 1]) @ qml.PauliY(1)
for O in (O1, O2):
with pytest.warns(UserWarning, match="partially overlapping"):
O.matrix()
|
[
"def test_dimension_warning(self):\n np.random.seed(0)\n X = np.random.rand(3, 10)\n with pytest.warns(UserWarning, match=\"has more columns than rows\") as w:\n linmdtw.linmdtw(X, X)\n with pytest.warns(UserWarning, match=\"has more columns than rows\") as w:\n linmdtw.dtw_brute_backtrace(X, X)",
"def testWarnings(self):\n radialTransform = afwGeom.RadialXYTransform([0, 2.0, 3.0])\n wcs = afwImage.DistortedTanWcs(self.tanWcs, radialTransform)\n self.assertRaises(UserWarning, approximateWcs, wcs=wcs, bbox=self.bbox, order=2)",
"def check_illegal(self):\n for i in range(self.__sample_size):\n j = 0\n while j < self.__dimension.get_dim_size():\n if not (self.get_region(j)[0] < self.__population[i].get_feature(j) < self.get_region(j)[1]):\n break\n else:\n j += 1\n if j == self.__dimension.get_dim_size():\n return False\n return True",
"def check_collisions(self):\n\t\tpass",
"def _admissible(self, x: np.ndarray) -> bool:\n return np.all(x <= self.ub) and np.all(x >= self.lb)",
"def consistency_check(self):\n for _row in self.lattice:\n assert len(_row) == self.col_dim\n assert callable(self.neighbor_function)\n assert callable(self.weight_function)",
"def is_lossless(self):\n\n return np.allclose(np.abs(self.s[0, 0])**2 + np.abs(self.s[0, 1])**2, np.ones_like(self.f))",
"def test_multiple_constraints_with_units(self):\n constraint_dict = {\n \"name\": \"probability_of_precipitation_rate_above_threshold\",\n self.threshold_coord: lambda cell: any(np.isclose(cell.point, [0.03])),\n }\n constr = iris.Constraint(**constraint_dict)\n cube = apply_extraction(self.precip_cube, constr, self.units_dict)\n self.assertIsInstance(cube, iris.cube.Cube)\n reference_data = self.precip_cube.data[0, :, :]\n self.assertArrayEqual(cube.data, reference_data)",
"def _check_constraints(self):\n for emb in (self.instance.entity_embeddings, self.instance.relation_embeddings):\n assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1.0, a_tol=1.0e-06)",
"def test_non_commuting_overlapping_targets(self):\n op1 = qml.ops.op_math.Controlled(qml.PauliZ(3), control_wires=(0, 1, 2))\n op2 = qml.ops.op_math.Controlled(qml.RX(1.2, 3), control_wires=(0, 1))\n assert not qml.is_commuting(op1, op2)\n assert not qml.is_commuting(op2, op1)",
"def test_known_bad_pairs(self):\n for start, goal, iternum in self.should_fail:\n self.assertFalse(mu_test(start, goal, iternum))",
"def test_warning_direction():\n with pytest.warns(UserWarning):\n get_wind_components(3,361)",
"def _check_constraints(self):\n for emb in (self.instance.entity_embeddings, self.instance.relation_embeddings):\n assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1.0, a_tol=EPSILON)",
"def test_warning_direction():\n with pytest.warns(UserWarning):\n get_wind_components(4.,554.)",
"def test_tensor_matrix_too_large_warning(self, tol):\n O = qml.PauliX(0) @ qml.PauliX(1) @ qml.PauliX(0)\n with pytest.warns(UserWarning, match=\"The size of the returned matrix\"):\n O.matrix()",
"def NO_test(s, A, W):\n\n\n for w in W.extreme_points:\n scalar_utility_s = Utils.dot(w, s)\n for t in A:\n scalar_utility_t = Utils.dot(w, t)\n if scalar_utility_s - scalar_utility_t < 0:\n return False\n return True",
"def test_PlasmaPyWarning_subclassing(warning):\n with pytest.warns(PlasmaPyWarning, message=(\n f\"Problem with subclassing of {warning}\")):\n warnings.warn(\"Electrons are WEIRD.\", warning)",
"def _check_constraints(self):\n assert all_in_bounds(self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1), high=1.0, a_tol=EPSILON)",
"def test_auxiliary_loss_consistency(self):\n\n model = MyObjectDetectionWithMatchingModel()\n outputs, batch, indices = fake_model_outputs_batch(num_boxes=4)\n\n # Test loss function in the pmapped setup:\n loss_function_pmapped = jax.pmap(model.loss_function, axis_name='batch')\n\n indices_replicated = jax_utils.replicate(\n # Fake matching for the final output + 2 aux outputs:\n [indices] * 3)\n outputs_replicated, batch_replicated = (jax_utils.replicate(outputs),\n jax_utils.replicate(batch))\n _, metrics_dict = loss_function_pmapped(outputs_replicated,\n batch_replicated,\n indices_replicated)\n\n metrics_dict = jax_utils.unreplicate(metrics_dict)\n\n for key in ['loss_class', 'loss_bbox', 'loss_giou']:\n for i in range(NUM_AUX_OUTPUTS):\n self.assertAlmostEqual(\n metrics_dict[key + '_unscaled'],\n metrics_dict[key + f'_aux_{i}_unscaled'],\n places=5)\n self.assertAlmostEqual(\n metrics_dict[key], metrics_dict[key + f'_aux_{i}'], places=5)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that a warning is raised if a wire occurs in multiple factors of the tensor product, leading to a wrongly sized matrix.
|
def test_tensor_matrix_too_large_warning(self, tol):
O = qml.PauliX(0) @ qml.PauliX(1) @ qml.PauliX(0)
with pytest.warns(UserWarning, match="The size of the returned matrix"):
O.matrix()
|
[
"def test_dimension_warning(self):\n np.random.seed(0)\n X = np.random.rand(3, 10)\n with pytest.warns(UserWarning, match=\"has more columns than rows\") as w:\n linmdtw.linmdtw(X, X)\n with pytest.warns(UserWarning, match=\"has more columns than rows\") as w:\n linmdtw.dtw_brute_backtrace(X, X)",
"def test_tensor_matrix_partial_wires_overlap_warning(self, tol):\n H = np.diag([1, 2, 3, 4])\n O1 = qml.PauliX(0) @ qml.Hermitian(H, [0, 1])\n O2 = qml.Hermitian(H, [0, 1]) @ qml.PauliY(1)\n\n for O in (O1, O2):\n with pytest.warns(UserWarning, match=\"partially overlapping\"):\n O.matrix()",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_error_on_multidim_tensors(metric_class=RelativeSquaredError):\n metric = metric_class()\n with pytest.raises(\n ValueError,\n match=r\"Expected both prediction and target to be 1D or 2D tensors, but received tensors with dimension .\",\n ):\n metric(torch.randn(10, 20, 5), torch.randn(10, 20, 5))",
"def print_matrix_warning(matIn1, matIn2):\n\n\tprint(\"Matrix dims incorrect, please provide correct input matrices\")\n\tprint(\"Should be:\")\n\tprint(\"matIn1: nXm\")\n\tprint(\"matIn2: mXr\")\n\tprint(\"Dims provided: \")\n\tprint(\"matIn1:\", matIn1.shape[0],\"X\",matIn1.shape[1])\n\tprint(\"matIn2:\", matIn2.shape[0],\"X\",matIn2.shape[1])",
"def check_mul(self):\n if self.bits >> 25 & 1 == 0:\n if self.bits >> 7 & 1 == 1:\n if self.bits >> 4 & 1 == 1:\n return True",
"def test_incompatible_dimensions(self, matrices):\n # Instantiate 5x10, 10x5, and 10x10 matrices as Matrix class\n square_mat = chap5.Matrix(matrices.square)\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n # Verify Matrix class raises AssertionError for incompatible dimensions\n with pytest.raises(AssertionError):\n half_row_mat + half_col_mat # (5x10) + (10x5)\n with pytest.raises(AssertionError):\n half_col_mat + half_row_mat # (10x5) + (5x10)\n with pytest.raises(AssertionError):\n half_col_mat @ square_mat # (10x5) @ (10x10)\n with pytest.raises(AssertionError):\n square_mat @ half_row_mat # (10x10) @ (5x10)",
"def test_classified_correctly():\n w_0 = np.array([[0], [0]])\n b_0 = 0\n for i in range(n):\n assert(not classified_correctly(w_0, b_0, i))",
"def test_init_fail(self):\n # ------ the following tests should FAIL\n ft_shape = (5, 6, 7) # define shape of the tensor in full form\n ml_rank = (2, 3, 4) # define multi-linear rank of a tensor in Tucker form\n correct_core_values = np.ones(ml_rank)\n correct_fmat = [np.ones([ft_shape[mode], ml_rank[mode]]) for mode in range(len(ft_shape))]\n\n # core_values should be in form of numpy array\n with pytest.raises(TypeError):\n incorrect_core_values = list(correct_core_values)\n TensorTKD(fmat=correct_fmat, core_values=incorrect_core_values)\n\n # factor matrices should be passed as a list\n with pytest.raises(TypeError):\n incorrect_fmat = tuple(correct_fmat)\n TensorTKD(fmat=incorrect_fmat, core_values=correct_core_values)\n\n # all factor matrices should be in form of numpy array\n with pytest.raises(TypeError):\n incorrect_fmat = [fmat.copy() for fmat in correct_fmat]\n mode = 0\n incorrect_fmat[mode] = list(incorrect_fmat[mode])\n TensorTKD(fmat=incorrect_fmat, core_values=correct_core_values)\n\n # all factor matrices should be a 2-dimensional numpy array\n with pytest.raises(TensorTopologyError):\n incorrect_fmat = [fmat.copy() for fmat in correct_fmat]\n mode = 0\n incorrect_fmat[mode] = np.ones([ft_shape[mode], ml_rank[mode], 2])\n TensorTKD(fmat=incorrect_fmat, core_values=correct_core_values)\n\n # Not enough factor matrices for the specified core tensor\n with pytest.raises(TensorTopologyError):\n incorrect_core_values = np.ones(correct_core_values.shape + (2,))\n TensorTKD(fmat=correct_fmat, core_values=incorrect_core_values)\n\n # number of columns of some factor matrices does not match the size of the corresponding mode of the core\n with pytest.raises(TensorTopologyError):\n incorrect_fmat = [fmat.copy() for fmat in correct_fmat]\n incorrect_fmat[0] = incorrect_fmat[0].T\n TensorTKD(fmat=incorrect_fmat, core_values=correct_core_values)",
"def test_init_fail(self):\n # ------ the following tests should FAIL\n\n ft_shape = (3, 4, 5) # define shape of the tensor in full form\n R = 2 # define Kryskal rank of a tensor in CP form\n correct_core_values = np.ones(R)\n correct_fmat = [np.arange(orig_dim * R).reshape(orig_dim, R) for orig_dim in ft_shape]\n\n # core_values should be in form of numpy array\n with pytest.raises(TypeError):\n incorrect_core_values = list(correct_core_values)\n TensorCPD(fmat=correct_fmat, core_values=incorrect_core_values)\n\n # factor matrices should be passed as a list\n with pytest.raises(TypeError):\n incorrect_fmat = tuple(correct_fmat)\n TensorCPD(fmat=incorrect_fmat, core_values=correct_core_values)\n\n # all factor matrices should be in form of numpy array\n with pytest.raises(TypeError):\n incorrect_fmat = [fmat.copy() for fmat in correct_fmat]\n incorrect_fmat[0] = list(incorrect_fmat[0])\n TensorCPD(fmat=incorrect_fmat, core_values=correct_core_values)\n\n # all factor matrices should be a 2-dimensional numpy array\n with pytest.raises(TensorTopologyError):\n incorrect_fmat = [fmat.copy() for fmat in correct_fmat]\n incorrect_fmat[0] = np.ones([2, 2, 2])\n TensorCPD(fmat=incorrect_fmat, core_values=correct_core_values)\n\n # too many (or not enough) `core_values` for `fmat`\n with pytest.raises(TensorTopologyError):\n incorrect_core_values = np.ones(correct_core_values.size + 1)\n TensorCPD(fmat=correct_fmat, core_values=incorrect_core_values)\n\n # dimension all factor matrices should have the same number of columns\n with pytest.raises(TensorTopologyError):\n incorrect_fmat = [fmat.copy() for fmat in correct_fmat]\n incorrect_fmat[0] = incorrect_fmat[0].T\n TensorCPD(fmat=incorrect_fmat, core_values=correct_core_values)",
"def test_diff_penalty_diagonals_datasize_too_small():\n with pytest.raises(ValueError):\n _banded_utils.diff_penalty_diagonals(0)\n with pytest.raises(ValueError):\n _banded_utils.diff_penalty_diagonals(-1)",
"def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))",
"def consistency_check(self):\n for _row in self.lattice:\n assert len(_row) == self.col_dim\n assert callable(self.neighbor_function)\n assert callable(self.weight_function)",
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def test_classic_2x2(self):\r\n # problem\r\n A = [[0, 1], [1, 0]]\r\n B = [[2, 3], [3, 2]]\r\n\r\n # solution\r\n answer = [[3, 2], [2, 3]]\r\n\r\n # test\r\n C = matrix_multiply(A, B)\r\n self.assertEqual(C, answer)",
"def QACheckMatrixSize(dataBody):\r\n if dataBody.shape[0] != 512:\r\n # raise ValueError('Matrix has wrong size, it is not 512')\r\n print('Matrix has wrong size, it is not 512')\r\n if dataBody.shape[1] != 512:\r\n # raise ValueError('Matrix has wrong size, it is not 512')\r\n print('Matrix has wrong size, it is not 512')",
"def test_purity_non_density_matrix():\n rho = np.array([[1, 2], [3, 4]])\n\n with np.testing.assert_raises(ValueError):\n purity(rho)",
"def test_multiple_featuresets_and_featurehasher_throws_warning(self):\n # make a simple config file for feature hasher warning test\n values_to_fill_dict = {\n \"experiment_name\": \"test_warning_multiple_featuresets\",\n \"train_directory\": train_dir,\n \"task\": \"train\",\n \"grid_search\": \"false\",\n \"objectives\": \"['f1_score_micro']\",\n \"learners\": \"['LogisticRegression']\",\n \"featuresets\": \"[['test_input_3examples_1', 'test_input_3examples_2']]\",\n \"featureset_names\": \"['feature_hasher']\",\n \"suffix\": \".jsonlines\",\n \"logs\": output_dir,\n \"models\": output_dir,\n \"feature_hasher\": \"true\",\n \"hasher_features\": \"4\",\n }\n\n config_template_path = config_dir / \"test_warning_multiple_featuresets.template.cfg\"\n\n config_path = fill_in_config_options(\n config_template_path, values_to_fill_dict, \"feature_hasher\"\n )\n\n # run the experiment\n print(config_path)\n run_configuration(config_path, quiet=True, local=True)\n\n # test if it throws any warning\n logfile_path = (\n output_dir / \"test_warning_multiple_featuresets_feature_hasher_LogisticRegression.log\"\n )\n with open(logfile_path) as f:\n warning_pattern = re.compile(\n r\"Since there are multiple feature files, feature hashing applies\"\n r\" to each specified feature file separately.\"\n )\n matches = re.findall(warning_pattern, f.read())\n self.assertEqual(len(matches), 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the non_identity_obs property returns a list that contains no Identity instances.
|
def test_non_identity_obs(self, tensor_observable, expected):
O = tensor_observable
for idx, obs in enumerate(O.non_identity_obs):
assert type(obs) == type(expected[idx])
assert obs.wires == expected[idx].wires
|
[
"def test_list_of_unallocated_people(self):\r\n self.assertIsNotNone(self.amity.get_a_list_of_unallocated_people())",
"def get_not_verified_nins_list(request, user):\n active_nins = user.get_nins()\n nins = []\n verifications = request.db.verifications\n not_verified_nins = verifications.find({\n 'model_name': 'norEduPersonNIN',\n 'user_oid': user.get_id(),\n }, sort=[('timestamp', 1)])\n if active_nins:\n active_nin = active_nins[-1]\n nin_found = False\n for nin in not_verified_nins:\n if active_nin == nin['obj_id']:\n nin_found = True\n elif nin_found and not nin['verified']:\n nins.append(nin['obj_id'])\n else:\n for nin in not_verified_nins:\n if not nin['verified']:\n nins.append(nin['obj_id'])\n # As we no longer remove verification documents make the list items unique\n return list(set(nins))",
"def test_no_data_no_evidence(self):\n annotated_indicator = self.get_annotated_indicator()\n self.assertEqual(annotated_indicator.results_with_evidence_count, 0)",
"def test_no_identities(mock_tools):\n mock_tools.subprocess.check_output.return_value = security_result(\"no-identities\")\n\n simulators = get_identities(mock_tools, \"codesigning\")\n\n assert simulators == {}",
"def test_Nothing_observation(model):\n assert make_obs(ecole.observation.Nothing(), model) is None",
"def test_get_unseen(self):\n pass",
"def testAccessEmptyTable(self):\n results = [(idx,) for idx in self.manager.snimpyEmptyDescr]\n self.assertEqual(results, [])",
"def test_empty(self):\n s = IntegerSet()\n self.assertEqual(s.cardinality(), 0)\n self.assertNotIn(1337, s)\n L = list(s)\n self.assertEqual(L, [])\n self.assertFalse(s)\n self.assertEqual(0, len(s))",
"def has_no_entities(self):\n return not any(self._entities)",
"def test_id_list(ontology):\n result = ontology.id_list('MONDO')\n assert result\n\n result2 = ontology.id_list('A BAD ONTOLOGY')\n assert not result2",
"def test_identity_is_unique() -> None:\n notifications: set[str] = set()\n for _ in range(1000):\n notifications.add(Notification(\"test\").identity)\n assert len(notifications) == 1000",
"def test_none(self):\n res = get_areas_in_filter(self.fid)\n self.assertEqual(res, [])",
"def test_no_invoice_true(base_store: Store, helpers):\n\n # GIVEN a database with a case and one no_invoice sample\n new_case = add_case(helpers, base_store)\n sample = helpers.add_sample(base_store, no_invoice=True)\n base_store.relate_sample(new_case, sample, \"unknown\")\n assert sample.no_invoice\n\n # WHEN getting active cases\n cases = base_store.cases()\n\n # THEN cases should contain zero samples to invoice\n assert cases\n for case in cases:\n assert case.get(\"samples_to_invoice\") == 0",
"def testIterateWhenThereAreNoEntities(self):\n query = _Entity.query().order(_Entity.index)\n entities = [entity for entity in Iterate(query, batch_run=False)]\n self.assertEqual(entities, [])",
"def testEmpty(self):\n assert Iter.empty(Iter.map(lambda x: x, iter([])))",
"def test_initialization_state_object_properties_keep_list(self):\n\n self.assertIn('http://purl.obolibrary.org/obo/RO_0000086', self.owl_nets.keep_properties)\n self.assertIn('http://www.w3.org/2000/01/rdf-schema#subClassOf', self.owl_nets.keep_properties)\n\n return None",
"def test_one_data_no_evidence(self):\n indicator = self.get_indicator()\n self.add_data(indicator)\n annotated_indicator = self.get_annotated_indicator(indicator)\n self.assertEqual(annotated_indicator.results_with_evidence_count, 0)",
"def skip_simulation_of_units_with_empty_inlets(self):\n return bst.Unit._skip_simulation_when_inlets_are_empty",
"def test_empty_list(self, fake_app, authenticated_user):\n result = fake_app.get(self.url)\n assert result.json == {'elements': []}",
"def test_oci_session_find_vcns_not_found(self):\n\n _c_list = self.setUpSession().find_vcns('_do_not_exits__')\n self.assertTrue(len(_c_list) == 0, 'Wrong list length returned, should be empty.')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the prune method returns the expected Tensor or single non-Tensor Observable.
|
def test_prune(self, tensor_observable, expected):
O = tensor_observable
O_expected = expected
O_pruned = O.prune()
assert type(O_pruned) == type(expected)
assert O_pruned.wires == expected.wires
|
[
"def test_not_break_torch(self):\n length = 5\n a = torch.zeros(length)\n b = torch.zeros(length)\n self.assertEqual(len(a == b), length)\n self.assertTrue(torch.all(a == b))\n\n c = Tensor(torch.ones(5))\n # If Tensor is either argument, it uses the equality method that returns bool.\n self.assertNotEqual(c, a)\n self.assertNotEqual(a, c)",
"def prune(self, threshold=1e-3):\n\n pass",
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_cp_dropout():\n shape = (10, 11, 12)\n rank = 8\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='CP')\n tensor = tensor_dropout(tensor, 1)\n weights = tensor().weights\n assert (len(weights) == (1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n weights = tensor().weights\n assert (len(weights) == rank)",
"def test_get_prune_tree_style(): # ***Incomplete test\n ##########################\n # Arrange.\n \n\n ##########################\n # Act.\n #x = get_prune_tree_style()\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_noise_no_trend(self):\n self.assertFalse(self.data_item.is_noise(20))\n self.assertFalse(self.data_item.is_noise(20.1))\n self.assertFalse(self.data_item.is_noise(10))",
"def test_return_of_non_observable(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), 0.3\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"must return either\"):\n node(0.5)",
"def try_prune(self) -> None:\n min_imp = 1e5\n min_unit = self.mutable_units[0]\n for unit in self.mutable_units:\n if unit.mutable_channel.activated_channels > 1:\n imp = unit.importance()\n if imp.isnan().any():\n if dist.get_rank() == 0:\n print_log(\n f'{unit.name} detects nan in importance, this pruning skips.' # noqa\n )\n return\n if imp.min() < min_imp:\n min_imp = imp.min().item()\n min_unit = unit\n if min_unit.try_to_prune_min_channel():\n if dist.get_rank() == 0:\n print_log(\n f'{min_unit.name} prunes a channel with min imp = {min_imp}' # noqa\n )",
"def test_tt_dropout():\n shape = (10, 11, 12)\n # Use the same rank for all factors\n rank = 4\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='TT')\n tensor = tensor_dropout(tensor, 1)\n factors = tensor().factors\n for f in factors:\n assert (f.shape[0] == f.shape[-1] == 1)\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n factors = tensor().factors\n for i, f in enumerate(factors):\n if i:\n assert (f.shape[0] == rank)\n else: # boundary conditions: first and last rank are equal to 1\n assert (f.shape[-1] == rank)",
"def _concrete_dropout(self, x: Tensor) -> Tensor:\n\n eps = 1e-7\n tmp = 0.1\n\n self.p = torch.sigmoid(self.p_logit)\n u_noise = torch.rand_like(x)\n\n drop_prob = (torch.log(self.p + eps) -\n torch.log(1 - self.p + eps) +\n torch.log(u_noise + eps) -\n torch.log(1 - u_noise + eps))\n\n drop_prob = torch.sigmoid(drop_prob / tmp)\n\n random_tensor = 1 - drop_prob\n retain_prob = 1 - self.p\n\n x = torch.mul(x, random_tensor) / retain_prob\n\n return x",
"def should_prune(self) -> bool:\n\n return False",
"def test_tucker_dropout():\n shape = (10, 11, 12)\n rank = (7, 8, 9)\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='Tucker')\n tensor = tensor_dropout(tensor, 1)\n core = tensor().core\n assert (tl.shape(core) == (1, 1, 1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n core = tensor().core\n assert (tl.shape(core) == rank)",
"def test_Nothing_observation(model):\n assert make_obs(ecole.observation.Nothing(), model) is None",
"def test_one_count(self):\n self.assert_tensor_equal(\n pick(sample_tensor(3, 3), 1, seed=self.random_seed),\n self.sample_tensor_subtensors([0]))",
"def prune(model: tf.keras.Model, \n prop: float,\n initial_weights: Dict[str, np.array],\n criterion: str = \"lf\",\n) -> tf.keras.Model:\n\n weights = {w.name:w for w in model.variables}\n # Filter kernel weights (names)\n kernel_names = [w.name for w in model.variables if (\"_bias\" not in w.name) and (\"_mask\" not in w.name)]\n \n for w_name in kernel_names:\n wf = weights[w_name].numpy()\n wi = initial_weights[w_name]\n scores = pruning_criterion(wf, wi, prop, criterion)\n if w_name != kernel_names[-1]:\n quantile = np.quantile(scores, prop)\n else:\n # Prune the last layer at half of prop (as in the original paper).\n quantile = np.quantile(scores, prop/2)\n\n new_mask = scores > quantile\n weights[w_name+\"_mask:0\"].assign(new_mask)\n \n return model",
"def prune(self, t, default_mask=None, importance_scores=None):\n if importance_scores is not None:\n assert (\n importance_scores.shape == t.shape\n ), \"importance_scores should have the same shape as tensor t\"\n else:\n importance_scores = t\n default_mask = default_mask if default_mask is not None else paddle.ones_like(t)\n return t * self.compute_mask(importance_scores, default_mask=default_mask)",
"def test_prune(self):\n self.assertEqual(0, self.cache.get_cache_size())\n\n # Add a bunch of images to the cache. The max cache size for the cache\n # is set to 5KB and each image is 1K. We use 11 images in this test.\n # The first 10 are added to and retrieved from cache in the same order.\n # Then, the 11th image is added to cache but not retrieved before we\n # prune. We should see only 5 images left after pruning, and the\n # images that are least recently accessed should be the ones pruned...\n for x in range(10):\n FIXTURE_FILE = io.BytesIO(FIXTURE_DATA)\n self.assertTrue(self.cache.cache_image_file(x, FIXTURE_FILE))\n\n self.assertEqual(10 * units.Ki, self.cache.get_cache_size())\n\n # OK, hit the images that are now cached...\n for x in range(10):\n buff = io.BytesIO()\n with self.cache.open_for_read(x) as cache_file:\n for chunk in cache_file:\n buff.write(chunk)\n\n # Add a new image to cache.\n # This is specifically to test the bug: 1438564\n FIXTURE_FILE = io.BytesIO(FIXTURE_DATA)\n self.assertTrue(self.cache.cache_image_file(99, FIXTURE_FILE))\n\n self.cache.prune()\n\n self.assertEqual(5 * units.Ki, self.cache.get_cache_size())\n\n # Ensure images 0, 1, 2, 3, 4 & 5 are not cached anymore\n for x in range(0, 6):\n self.assertFalse(self.cache.is_cached(x),\n \"Image %s was cached!\" % x)\n\n # Ensure images 6, 7, 8 and 9 are still cached\n for x in range(6, 10):\n self.assertTrue(self.cache.is_cached(x),\n \"Image %s was not cached!\" % x)\n\n # Ensure the newly added image, 99, is still cached\n self.assertTrue(self.cache.is_cached(99), \"Image 99 was not cached!\")",
"def prune(root, X_prune, y_prune):\n accuracy = calculate_accuracy(root, X_prune, y_prune)\n # print('Accuracy on pruning set before pruning: ', accuracy) # Enable to print before pruning accuracy\n Q = Queue()\n q_list = [] # to be able to check if items are in que\n\n # put all leaves in que\n for node in PreOrderIter(root):\n if node.is_leaf:\n Q.put(node)\n q_list.append(node)\n\n while not Q.empty():\n node = Q.get()\n q_list.remove(node)\n if not node.is_root:\n parent = node.parent\n if not q_list.__contains__(parent):\n Q.put(parent)\n q_list.append(parent)\n if not node.is_leaf:\n # find sum of errors on descendants\n descendants = node.descendants\n sum_error_descendants = 0 # R\n for des in descendants:\n sum_error_descendants += des.error\n if sum_error_descendants >= node.error: # if R >= E\n if not len(node.children) == 1:\n node.name = node.M[0]\n node.children = []\n node.node_type = 'class'",
"def test_op_successors_observables_only(self, opqueue_test_node):\n\n observable_successors = opqueue_test_node._op_successors(0, only=\"E\")\n\n assert opqueue_test_node.ops[0] not in observable_successors\n assert opqueue_test_node.ops[1] not in observable_successors\n assert opqueue_test_node.ops[4] in observable_successors",
"def test_prune_constants(self):\r\n x = Variable(2)\r\n A = np.matrix(\"1 2; 3 4\")\r\n constraints = (A*x <= 2).canonical_form[1]\r\n pruned = prune_constants(constraints)\r\n prod = mul(pruned[0].expr, {})\r\n self.assertItemsAlmostEqual(prod, np.zeros(A.shape[0]))\r\n\r\n # Test no-op\r\n constraints = (0*x <= 2).canonical_form[1]\r\n pruned = prune_constants(constraints)\r\n prod = mul(pruned[0].expr, {x.id: 1})\r\n self.assertItemsAlmostEqual(prod, np.zeros(A.shape[0]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that pruning a tensor to a tensor in a tape context registers the pruned tensor as owned by the measurement, and turns the original tensor into an orphan without an owner.
|
def test_prune_while_queueing_return_tensor(self):
with qml.tape.QuantumTape() as tape:
# we assign operations to variables here so we can compare them below
a = qml.PauliX(wires=0)
b = qml.PauliY(wires=1)
c = qml.Identity(wires=2)
T = qml.operation.Tensor(a, b, c)
T_pruned = T.prune()
m = qml.expval(T_pruned)
ann_queue = tape._queue
# the pruned tensor became the owner of Paulis
assert ann_queue[a]["owner"] == T_pruned
assert ann_queue[b]["owner"] == T_pruned
# the Identity is still owned by the original Tensor
assert ann_queue[c]["owner"] == T
# the original tensor still owns all three observables
# but is not owned by a measurement
assert ann_queue[T]["owns"] == (a, b, c)
assert not hasattr(ann_queue[T], "owner")
# the pruned tensor is owned by the measurement
# and owns the two Paulis
assert ann_queue[T_pruned]["owner"] == m
assert ann_queue[T_pruned]["owns"] == (a, b)
assert ann_queue[m]["owns"] == T_pruned
|
[
"def prune(tp):\n if isinstance(tp, TypeVariable):\n if tp.instance is not None:\n tp.instance = prune(tp.instance)\n return tp.instance\n return tp",
"def test_tucker_dropout():\n shape = (10, 11, 12)\n rank = (7, 8, 9)\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='Tucker')\n tensor = tensor_dropout(tensor, 1)\n core = tensor().core\n assert (tl.shape(core) == (1, 1, 1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n core = tensor().core\n assert (tl.shape(core) == rank)",
"def test_change_owner_does_not_remove_non_empty_mountpoint(self):\n pool = StoragePool(reactor, create_zfs_pool(self),\n FilePath(self.mktemp()))\n volume = Volume(uuid=u\"my-uuid\", name=u\"volume\", _pool=pool)\n new_volume = Volume(uuid=u\"other-uuid\", name=u\"volume\", _pool=pool)\n original_mount = volume.get_filesystem().get_path()\n d = pool.create(volume)\n\n def created_filesystems(igonred):\n filesystem_name = volume.get_filesystem().name\n subprocess.check_call(['zfs', 'unmount', filesystem_name])\n # Create a file hiding under the original mount point\n original_mount.child('file').setContent('content')\n # Remount the volume at the original mount point as a legacy mount.\n subprocess.check_call(['zfs', 'set', 'mountpoint=legacy',\n filesystem_name])\n subprocess.check_call(['mount', '-t', 'zfs', filesystem_name,\n original_mount.path])\n return pool.change_owner(volume, new_volume)\n d.addCallback(created_filesystems)\n\n self.assertFailure(d, OSError)\n\n def changed_owner(filesystem):\n self.assertEqual(original_mount.child('file').getContent(),\n b'content')\n d.addCallback(changed_owner)\n return d",
"def promote_test(self):\n t = self.t.copy()\n t[0].delete(promote=True)\n\n tgt = IdTree.fromstring('''(ROOT (DT The) (NP (NN Boy)))''')\n self.assertTrue(t.similar(tgt))\n\n t2 = self.t.copy()\n t2[0].delete(promote=False)\n\n tgt = IdTree.fromstring('''(ROOT)''')\n self.assertTrue(t2.similar(tgt))\n\n t3 = self.t.copy()\n t3[0][1].delete(promote=True)\n tgt=IdTree.fromstring('''(ROOT (NP (DT The) (NN Boy)))''')\n\n self.assertTrue(t3.similar(tgt))\n\n t4 = self.t.copy()\n t4[0][1].delete(promote=False)\n tgt=IdTree.fromstring('''(ROOT (NP (DT The)))''')\n self.assertTrue(t4.similar(tgt))",
"def _skip_tensor(self, op_id, out_tensor, user_included,\n user_excluded):\n\n # Skips a tensor if the tensor has a non-numeric type.\n # Note: we cannot use check_ops.is_numeric_tensor(out_tensor)\n # because it also excludes tensors with dtypes, bool, and\n # float32_ref, which we actually want to trace.\n non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,\n dtypes.string])\n if out_tensor.dtype in non_numeric_tensor_types:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_NON_NUMERIC_TENSOR)\n return True\n # Skip a tensor if it feeds a special while loop op.\n if [consumer for consumer in out_tensor.consumers() if\n TensorTracer.while_loop_op(consumer)]:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_FEEDS_WHILELOOP_OP)\n return True\n if user_included:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_USER_INCLUDED)\n return False\n if user_excluded:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_USER_EXCLUDED)\n return True\n if not out_tensor.get_shape().is_fully_defined():\n # If trace mode is nan-inf, norm or max, then the tensor will be reduced\n # to a scalar before the outside compilation call.\n if self._parameters.trace_mode in [\n tensor_tracer_flags.TRACE_MODE_NAN_INF,\n tensor_tracer_flags.TRACE_MODE_NORM,\n tensor_tracer_flags.TRACE_MODE_MAX_ABS\n ]:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_TENSOR_GET_TRACED)\n return False\n else:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_DYNAMIC_SHAPE)\n return True\n rank = len(out_tensor.shape)\n if rank < 1:\n # scalar\n if self._parameters.trace_scalar_ops:\n if TensorTracer.unsafe_scalar_trace(out_tensor.op):\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_UNSAFE_SCALAR)\n return True\n else:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_SCALAR_GET_TRACED)\n return False\n else:\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_SKIP_SCALAR)\n return True\n else:\n # tensor\n self._instrument_records[out_tensor.name] = TensorTracer.reason(\n op_id, _REASON_TENSOR_GET_TRACED)\n return False",
"def test_identity_permutation_tape(self):\n\n with qml.tape.QuantumTape() as tape:\n Permute([0, \"a\", \"c\", \"d\"], wires=[0, \"a\", \"c\", \"d\"])\n\n assert len(tape.operations) == 0",
"def _clean_isolated(self, obstruction: GriddedPerm) -> GriddedPerm:\n cells_to_remove: Set[Cell] = set()\n for factor in obstruction.factors():\n if self._griddedperm_implied_by_some_requirement(factor):\n cells_to_remove.update(factor.pos)\n if cells_to_remove:\n obstruction = obstruction.remove_cells(cells_to_remove)\n return obstruction",
"def test_tt_dropout():\n shape = (10, 11, 12)\n # Use the same rank for all factors\n rank = 4\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='TT')\n tensor = tensor_dropout(tensor, 1)\n factors = tensor().factors\n for f in factors:\n assert (f.shape[0] == f.shape[-1] == 1)\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n factors = tensor().factors\n for i, f in enumerate(factors):\n if i:\n assert (f.shape[0] == rank)\n else: # boundary conditions: first and last rank are equal to 1\n assert (f.shape[-1] == rank)",
"def test_copy(self):\n r1, r2 = 2, 3\n I, J, K = 4, 5, 6\n core_1 = np.arange(I * r1).reshape(I, r1)\n core_2 = np.arange(r1 * J * r2).reshape(r1, J, r2)\n core_3 = np.arange(r2 * K).reshape(r2, K)\n core_values = [core_1, core_2, core_3]\n ft_shape = (I, J, K)\n tensor_tt = TensorTT(core_values=core_values)\n\n tensor_tt_copy = tensor_tt.copy()\n\n # tests that the values are the same but not a reference\n assert tensor_tt_copy is not tensor_tt\n assert tensor_tt_copy.ft_shape is not tensor_tt.ft_shape\n assert tensor_tt_copy.ft_shape == tensor_tt.ft_shape\n assert tensor_tt_copy.rank == tensor_tt.rank\n assert tensor_tt_copy.order == tensor_tt.order\n\n assert tensor_tt_copy._core_values is not tensor_tt._core_values\n for i in range(tensor_tt_copy.order):\n assert tensor_tt_copy._core_values[i] is not tensor_tt._core_values[i]\n np.testing.assert_array_equal(tensor_tt_copy._core_values[i], core_values[i])\n np.testing.assert_array_equal(tensor_tt_copy._core_values[i], tensor_tt._core_values[i])\n assert tensor_tt_copy.core(i) is not tensor_tt.core(i)\n np.testing.assert_array_equal(tensor_tt_copy.core(i).data, tensor_tt.core(i).data)\n\n assert tensor_tt_copy.cores is not tensor_tt.cores",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def test_cp_dropout():\n shape = (10, 11, 12)\n rank = 8\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='CP')\n tensor = tensor_dropout(tensor, 1)\n weights = tensor().weights\n assert (len(weights) == (1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n weights = tensor().weights\n assert (len(weights) == rank)",
"def test_queuing_defined_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n T = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n T.queue()\n\n assert len(tape.queue) == 1\n assert tape.queue[0] is T\n\n assert tape._queue[T] == {\"owns\": (op1, op2)}",
"def test_owner_no_ownership(self):\n self.assert_ownership(True)",
"def test_targetsuccessive_identity_advanced_removal(self):\n\n # ┌───┐┌───┐ »\n # q_0: ┤ H ├┤ X ├───────■─────────────────────────────■───────────────────■──»\n # ├───┤└─┬─┘ │ │ │ »\n # q_1: ┤ H ├──■─────────■─────────────────────────────■───────────────────■──»\n # ├───┤┌───┐ │ ┌───┐ │ │ »\n # q_2: ┤ H ├┤ X ├───────┼──┤ X ├──■──────────────■────┼───────────────────┼──»\n # ├───┤└─┬─┘ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐ ┌─┴─┐»\n # q_3: ┤ H ├──■────■──┤ X ├──■────■──────────────■──┤ X ├──■─────────■──┤ X ├»\n # ├───┤┌───┐ │ └───┘ │ ┌───┐ │ └───┘ │ │ └───┘»\n # q_4: ┤ H ├┤ X ├──┼──────────────┼──┤ X ├──■────┼─────────┼─────────┼───────»\n # ├───┤└─┬─┘┌─┴─┐ ┌─┴─┐└─┬─┘ │ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ »\n # q_5: ┤ H ├──■──┤ X ├──────────┤ X ├──■────■──┤ X ├─────┤ X ├──■──┤ X ├─────»\n # └───┘ └───┘ └───┘ ┌─┴─┐└───┘ └───┘┌─┴─┐├───┤ »\n # q_6: ───────────────────────────────────┤ X ├───────────────┤ X ├┤ X ├─────»\n # └───┘ └───┘└───┘ »\n # q_7: ──────────────────────────────────────────────────────────────────────»\n # »\n # « ┌───┐┌───┐ »\n # «q_0: ──────────────────────■──┤ X ├┤ X ├──■─────────────────────────────■──»\n # « │ └─┬─┘└─┬─┘ │ │ »\n # «q_1: ──────────────────────■────■────■────■─────────────────────────────■──»\n # « ┌───┐ │ │ │ ┌───┐ │ »\n # «q_2: ──■─────────■──┤ X ├──┼─────────┼────┼──┤ X ├──■──────────────■────┼──»\n # « │ │ └─┬─┘┌─┴─┐ │ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐»\n # «q_3: ──■─────────■────■──┤ X ├───────┼──┤ X ├──■────■──────────────■──┤ X ├»\n # « │ ┌───┐ │ └───┘ │ └───┘ │ │ ┌───┐ │ └───┘»\n # «q_4: ──┼──┤ X ├──┼───────────────────┼─────────┼────┼──┤ X ├──■────┼───────»\n # « ┌─┴─┐└─┬─┘┌─┴─┐ │ │ ┌─┴─┐└─┬─┘ │ ┌─┴─┐ »\n # «q_5: ┤ X ├──■──┤ X ├─────────────────┼─────────┼──┤ X ├──■────■──┤ X ├─────»\n # « └───┘ └───┘ │ │ └───┘ │ │ └───┘ »\n # «q_6: ────────────────────────────────■─────────■─────────■────■────────────»\n # « ┌─┴─┐ »\n # «q_7: ───────────────────────────────────────────────────────┤ X ├──────────»\n # « └───┘ »\n # «\n # «q_0: ───────────────\n # «\n # «q_1: ───────────────\n # « ┌───┐\n # «q_2: ─────┤ X ├─────\n # « └─┬─┘\n # «q_3: ──■────■───────\n # « │ ┌───┐\n # «q_4: ──┼──┤ X ├─────\n # « ┌─┴─┐└─┬─┘\n # «q_5: ┤ X ├──■────■──\n # « └───┘ │\n # «q_6: ────────────■──\n # « ┌─┴─┐\n # «q_7: ──────────┤ X ├\n # « └───┘\n circuit = QuantumCircuit(8)\n circuit.h(0)\n circuit.h(1)\n circuit.h(2)\n circuit.h(3)\n circuit.h(4)\n circuit.h(5)\n for i in range(3):\n circuit.cx(i * 2 + 1, i * 2)\n circuit.cx(3, 5)\n for i in range(2):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(i * 2 + 3, i * 2 + 2)\n circuit.ccx(4, 5, 6)\n for i in range(1, -1, -1):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(3, 5)\n circuit.cx(5, 6)\n circuit.cx(3, 5)\n circuit.x(6)\n for i in range(2):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n for i in range(1, -1, -1):\n circuit.cx(i * 2 + 3, i * 2 + 2)\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(1, 0)\n circuit.ccx(6, 1, 0)\n circuit.ccx(0, 1, 3)\n circuit.ccx(6, 3, 2)\n circuit.ccx(2, 3, 5)\n circuit.ccx(6, 5, 4)\n circuit.append(XGate().control(3), [4, 5, 6, 7], [])\n for i in range(1, -1, -1):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(3, 5)\n for i in range(1, 3):\n circuit.cx(i * 2 + 1, i * 2)\n circuit.ccx(5, 6, 7)\n\n # ┌───┐┌───┐ »\n # q_0: ┤ H ├┤ X ├───────■─────────────────────────────■───────────────────■──»\n # ├───┤└─┬─┘ │ │ │ »\n # q_1: ┤ H ├──■─────────■─────────────────────────────■───────────────────■──»\n # ├───┤┌───┐ │ ┌───┐ │ │ »\n # q_2: ┤ H ├┤ X ├───────┼──┤ X 
├──■──────────────■────┼───────────────────┼──»\n # ├───┤└─┬─┘ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐ ┌─┴─┐»\n # q_3: ┤ H ├──■────■──┤ X ├──■────■──────────────■──┤ X ├──■─────────■──┤ X ├»\n # ├───┤┌───┐ │ └───┘ │ ┌───┐ │ └───┘ │ │ └───┘»\n # q_4: ┤ H ├┤ X ├──┼──────────────┼──┤ X ├──■────┼─────────┼─────────┼───────»\n # ├───┤└─┬─┘┌─┴─┐ ┌─┴─┐└─┬─┘ │ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ »\n # q_5: ┤ H ├──■──┤ X ├──────────┤ X ├──■────■──┤ X ├─────┤ X ├──■──┤ X ├─────»\n # └───┘ └───┘ └───┘ ┌─┴─┐└───┘ └───┘┌─┴─┐├───┤ »\n # q_6: ───────────────────────────────────┤ X ├───────────────┤ X ├┤ X ├─────»\n # └───┘ └───┘└───┘ »\n # q_7: ──────────────────────────────────────────────────────────────────────»\n # »\n # « ┌───┐┌───┐ »\n # «q_0: ──────────────────────■──┤ X ├┤ X ├──■────────────────────────■───────»\n # « │ └─┬─┘└─┬─┘ │ │ »\n # «q_1: ──────────────────────■────■────■────■────────────────────────■───────»\n # « ┌───┐ │ │ │ ┌───┐ │ »\n # «q_2: ──■─────────■──┤ X ├──┼─────────┼────┼──┤ X ├──■─────────■────┼───────»\n # « │ │ └─┬─┘┌─┴─┐ │ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐ »\n # «q_3: ──■─────────■────■──┤ X ├───────┼──┤ X ├──■────■─────────■──┤ X ├──■──»\n # « │ ┌───┐ │ └───┘ │ └───┘ │ │ ┌───┐ │ └───┘ │ »\n # «q_4: ──┼──┤ X ├──┼───────────────────┼─────────┼────┼──┤ X ├──┼─────────┼──»\n # « ┌─┴─┐└─┬─┘┌─┴─┐ │ │ ┌─┴─┐└─┬─┘┌─┴─┐ ┌─┴─┐»\n # «q_5: ┤ X ├──■──┤ X ├─────────────────┼─────────┼──┤ X ├──■──┤ X ├─────┤ X ├»\n # « └───┘ └───┘ │ │ └───┘ │ └───┘ └───┘»\n # «q_6: ────────────────────────────────■─────────■─────────■─────────────────»\n # « »\n # «q_7: ──────────────────────────────────────────────────────────────────────»\n # « »\n # «\n # «q_0: ─────\n # «\n # «q_1: ─────\n # « ┌───┐\n # «q_2: ┤ X ├\n # « └─┬─┘\n # «q_3: ──■──\n # « ┌───┐\n # «q_4: ┤ X ├\n # « └─┬─┘\n # «q_5: ──■──\n # «\n # «q_6: ─────\n # «\n # «q_7: ─────\n # «\n expected = QuantumCircuit(8)\n expected.h(0)\n expected.h(1)\n expected.h(2)\n expected.h(3)\n expected.h(4)\n expected.h(5)\n for i in range(3):\n expected.cx(i * 2 + 1, i * 2)\n expected.cx(3, 5)\n for i in range(2):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(i * 2 + 3, i * 2 + 2)\n expected.ccx(4, 5, 6)\n for i in range(1, -1, -1):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(3, 5)\n expected.cx(5, 6)\n expected.cx(3, 5)\n expected.x(6)\n for i in range(2):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n for i in range(1, -1, -1):\n expected.cx(i * 2 + 3, i * 2 + 2)\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(1, 0)\n expected.ccx(6, 1, 0)\n expected.ccx(0, 1, 3)\n expected.ccx(6, 3, 2)\n expected.ccx(2, 3, 5)\n expected.ccx(6, 5, 4)\n for i in range(1, -1, -1):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(3, 5)\n for i in range(1, 3):\n expected.cx(i * 2 + 1, i * 2)\n\n stv = Statevector.from_label(\"0\" * circuit.num_qubits)\n self.assertEqual(stv & circuit, stv & expected)\n\n pass_ = HoareOptimizer(size=5)\n result = pass_.run(circuit_to_dag(circuit))\n\n self.assertEqual(result, circuit_to_dag(expected))",
"def test_01_remove_last_owner_of_resource(self):\n scratching = self.scratching\n dog = self.dog\n self.assertTrue(dog.uaccess.owns_resource(scratching))\n self.assertEqual(scratching.raccess.owners.count(), 1)\n\n # try to downgrade your own privilege\n with self.assertRaises(PermissionDenied) as cm:\n dog.uaccess.share_resource_with_user(\n scratching, dog, PrivilegeCodes.VIEW)\n self.assertEqual(\n str(cm.exception),\n 'Cannot remove sole owner of resource')",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_removal_of_shallow_interms(simple_drudge):\n\n dr = simple_drudge\n\n r = dr.r\n a, b, c, d = dr.ds[:4]\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n z = IndexedBase('z')\n u = IndexedBase('u')\n\n targets = [\n dr.define(\n u, (a, r), (b, r), (c, r),\n dr.sum((d, r), x[a, d] * y[b, d] * z[c, d])\n )\n ]\n\n for i in [True, False]:\n eval_seq = optimize(targets, remove_shallow=i)\n verify_eval_seq(eval_seq, targets)\n assert len(eval_seq) == (\n 1 if i else 2\n )\n continue",
"def hint_xobj_notheld(actionsystem, action) :\n @actionsystem.verify(action)\n @docstring(\"Makes \"+repr(action)+\" more logical if object x is not held by the actor. Added by hint_xobj_notheld.\")\n def _verify_xobj_notheld(actor, x, ctxt, **kwargs) :\n if not ctxt.world.query_relation(Has(actor, x)) :\n return VeryLogicalOperation()",
"def test_not_break_torch(self):\n length = 5\n a = torch.zeros(length)\n b = torch.zeros(length)\n self.assertEqual(len(a == b), length)\n self.assertTrue(torch.all(a == b))\n\n c = Tensor(torch.ones(5))\n # If Tensor is either argument, it uses the equality method that returns bool.\n self.assertNotEqual(c, a)\n self.assertNotEqual(a, c)",
"def test_terminal_nodes_with_partial_run_and_programatically_skipped(self):\n # Check the expected skipped and terminal nodes.\n self._example_gen.execution_options.skip.SetInParent()\n self._chore_a.execution_options.skip.SetInParent()\n self._chore_b.execution_options.skip.SetInParent()\n self._evaluator.execution_options.skip.SetInParent()\n\n # Mark trainer as programatically skipped, not as part of the partial run.\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline\n )\n with pipeline_state:\n with pipeline_state.node_state_update_context(\n task_lib.NodeUid.from_node(self._pipeline, self._trainer)\n ) as node_state:\n assert node_state.is_programmatically_skippable()\n node_state.update(\n pstate.NodeState.SKIPPED,\n status_lib.Status(\n code=status_lib.Code.OK,\n message='Node skipped by client request.',\n ),\n )\n node_states_dict = pipeline_state.get_node_states_dict()\n\n expected_skipped_node_ids = {\n 'my_example_gen',\n 'chore_a',\n 'chore_b',\n 'my_evaluator',\n 'my_trainer',\n }\n self.assertEqual(\n expected_skipped_node_ids, sptg._skipped_node_ids(node_states_dict)\n )\n layers = sptg._topsorted_layers(self._pipeline)\n\n # Check that parent nodes of terminal skipped nodes are terminal\n expected_terminal_nodes = {'my_transform', 'my_example_validator'}\n self.assertSetEqual(\n expected_terminal_nodes,\n sptg._terminal_node_ids(layers, expected_skipped_node_ids),\n )\n # All downstream nodes of transform are marked as skipped, so it's\n # considered a terminal node.\n self.assertEqual(\n {\n self._transform.node_info.id,\n self._example_validator.node_info.id,\n },\n sptg._terminal_node_ids(layers, expected_skipped_node_ids),\n )\n\n # Start executing the pipeline:\n test_utils.fake_cached_example_gen_run(\n self._mlmd_connection, self._example_gen\n )\n self._run_next(False, expect_nodes=[self._stats_gen])\n self._run_next(False, expect_nodes=[self._schema_gen])\n\n # Trigger PAUSE on transform so it doesn't get run next.\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline\n )\n with pipeline_state:\n with pipeline_state.node_state_update_context(\n task_lib.NodeUid.from_node(self._pipeline, self._transform)\n ) as node_state:\n assert node_state.is_stoppable()\n node_state.update(\n pstate.NodeState.STOPPING,\n status_lib.Status(\n code=status_lib.Code.CANCELLED,\n message='Cancellation requested by client.',\n ),\n )\n\n # Let example_validator \"finish running\".\n self._run_next(False, expect_nodes=[self._example_validator])\n\n # All tasks that can be run have been run, assume nothing happens since\n # transform is paused.\n tasks = self._generate(False, True)\n self.assertEmpty(tasks)\n\n # Pause the pipeline\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline\n )\n with pipeline_state:\n pipeline_state.initiate_stop(\n status_lib.Status(\n code=status_lib.Code.CANCELLED,\n message='Cancellation requested by client.',\n ),\n )\n # All tasks that can be run have been run, assume nothing happens since\n # transform is paused.\n tasks = self._generate(False, True)\n self.assertEmpty(tasks)\n\n # Unpause just pipeline and transform and make sure pipeline will not\n # finalize.\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline\n )\n with pipeline_state:\n pipeline_state.initiate_resume()\n\n tasks = 
self._generate(False, True)\n self.assertEmpty(tasks)\n\n # Unpause transform and make sure pipeline can continue as expected.\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline\n )\n with pipeline_state:\n with pipeline_state.node_state_update_context(\n task_lib.NodeUid.from_node(self._pipeline, self._transform)\n ) as node_state:\n node_state.update(\n pstate.NodeState.STARTED,\n status_lib.Status(\n code=status_lib.Code.OK,\n ),\n )\n\n self._run_next(False, expect_nodes=[self._transform])\n # All runnable nodes executed, finalization task should be produced.\n [finalize_task] = self._generate(False, True)\n self.assertIsInstance(finalize_task, task_lib.FinalizePipelineTask)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that pruning a tensor to an observable in a tape context registers the pruned observable as owned by the measurement, and turns the original tensor into an orphan without an owner.
|
def test_prune_while_queueing_return_obs(self):
with qml.tape.QuantumTape() as tape:
a = qml.PauliX(wires=0)
c = qml.Identity(wires=2)
T = qml.operation.Tensor(a, c)
T_pruned = T.prune()
m = qml.expval(T_pruned)
ann_queue = tape._queue
# the pruned tensor is the Pauli observable
assert T_pruned == a
# pruned tensor/Pauli is owned by the measurement
# since the entry in the dictionary got updated
# when the pruned tensor's owner was memorized
assert ann_queue[a]["owner"] == m
# the Identity is still owned by the original Tensor
assert ann_queue[c]["owner"] == T
# the original tensor still owns both observables
# but is not owned by a measurement
assert ann_queue[T]["owns"] == (a, c)
assert not hasattr(ann_queue[T], "owner")
# the measurement owns the Pauli/pruned tensor
assert ann_queue[m]["owns"] == T_pruned
|
[
"def prune(tp):\n if isinstance(tp, TypeVariable):\n if tp.instance is not None:\n tp.instance = prune(tp.instance)\n return tp.instance\n return tp",
"def test_owner_no_ownership(self):\n self.assert_ownership(True)",
"def hint_xobj_notheld(actionsystem, action) :\n @actionsystem.verify(action)\n @docstring(\"Makes \"+repr(action)+\" more logical if object x is not held by the actor. Added by hint_xobj_notheld.\")\n def _verify_xobj_notheld(actor, x, ctxt, **kwargs) :\n if not ctxt.world.query_relation(Has(actor, x)) :\n return VeryLogicalOperation()",
"def _clean_isolated(self, obstruction: GriddedPerm) -> GriddedPerm:\n cells_to_remove: Set[Cell] = set()\n for factor in obstruction.factors():\n if self._griddedperm_implied_by_some_requirement(factor):\n cells_to_remove.update(factor.pos)\n if cells_to_remove:\n obstruction = obstruction.remove_cells(cells_to_remove)\n return obstruction",
"def test_change_owner_does_not_remove_non_empty_mountpoint(self):\n pool = StoragePool(reactor, create_zfs_pool(self),\n FilePath(self.mktemp()))\n volume = Volume(uuid=u\"my-uuid\", name=u\"volume\", _pool=pool)\n new_volume = Volume(uuid=u\"other-uuid\", name=u\"volume\", _pool=pool)\n original_mount = volume.get_filesystem().get_path()\n d = pool.create(volume)\n\n def created_filesystems(igonred):\n filesystem_name = volume.get_filesystem().name\n subprocess.check_call(['zfs', 'unmount', filesystem_name])\n # Create a file hiding under the original mount point\n original_mount.child('file').setContent('content')\n # Remount the volume at the original mount point as a legacy mount.\n subprocess.check_call(['zfs', 'set', 'mountpoint=legacy',\n filesystem_name])\n subprocess.check_call(['mount', '-t', 'zfs', filesystem_name,\n original_mount.path])\n return pool.change_owner(volume, new_volume)\n d.addCallback(created_filesystems)\n\n self.assertFailure(d, OSError)\n\n def changed_owner(filesystem):\n self.assertEqual(original_mount.child('file').getContent(),\n b'content')\n d.addCallback(changed_owner)\n return d",
"def test_Nothing_observation(model):\n assert make_obs(ecole.observation.Nothing(), model) is None",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def promote_test(self):\n t = self.t.copy()\n t[0].delete(promote=True)\n\n tgt = IdTree.fromstring('''(ROOT (DT The) (NP (NN Boy)))''')\n self.assertTrue(t.similar(tgt))\n\n t2 = self.t.copy()\n t2[0].delete(promote=False)\n\n tgt = IdTree.fromstring('''(ROOT)''')\n self.assertTrue(t2.similar(tgt))\n\n t3 = self.t.copy()\n t3[0][1].delete(promote=True)\n tgt=IdTree.fromstring('''(ROOT (NP (DT The) (NN Boy)))''')\n\n self.assertTrue(t3.similar(tgt))\n\n t4 = self.t.copy()\n t4[0][1].delete(promote=False)\n tgt=IdTree.fromstring('''(ROOT (NP (DT The)))''')\n self.assertTrue(t4.similar(tgt))",
"def test_targetsuccessive_identity_advanced_removal(self):\n\n # ┌───┐┌───┐ »\n # q_0: ┤ H ├┤ X ├───────■─────────────────────────────■───────────────────■──»\n # ├───┤└─┬─┘ │ │ │ »\n # q_1: ┤ H ├──■─────────■─────────────────────────────■───────────────────■──»\n # ├───┤┌───┐ │ ┌───┐ │ │ »\n # q_2: ┤ H ├┤ X ├───────┼──┤ X ├──■──────────────■────┼───────────────────┼──»\n # ├───┤└─┬─┘ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐ ┌─┴─┐»\n # q_3: ┤ H ├──■────■──┤ X ├──■────■──────────────■──┤ X ├──■─────────■──┤ X ├»\n # ├───┤┌───┐ │ └───┘ │ ┌───┐ │ └───┘ │ │ └───┘»\n # q_4: ┤ H ├┤ X ├──┼──────────────┼──┤ X ├──■────┼─────────┼─────────┼───────»\n # ├───┤└─┬─┘┌─┴─┐ ┌─┴─┐└─┬─┘ │ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ »\n # q_5: ┤ H ├──■──┤ X ├──────────┤ X ├──■────■──┤ X ├─────┤ X ├──■──┤ X ├─────»\n # └───┘ └───┘ └───┘ ┌─┴─┐└───┘ └───┘┌─┴─┐├───┤ »\n # q_6: ───────────────────────────────────┤ X ├───────────────┤ X ├┤ X ├─────»\n # └───┘ └───┘└───┘ »\n # q_7: ──────────────────────────────────────────────────────────────────────»\n # »\n # « ┌───┐┌───┐ »\n # «q_0: ──────────────────────■──┤ X ├┤ X ├──■─────────────────────────────■──»\n # « │ └─┬─┘└─┬─┘ │ │ »\n # «q_1: ──────────────────────■────■────■────■─────────────────────────────■──»\n # « ┌───┐ │ │ │ ┌───┐ │ »\n # «q_2: ──■─────────■──┤ X ├──┼─────────┼────┼──┤ X ├──■──────────────■────┼──»\n # « │ │ └─┬─┘┌─┴─┐ │ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐»\n # «q_3: ──■─────────■────■──┤ X ├───────┼──┤ X ├──■────■──────────────■──┤ X ├»\n # « │ ┌───┐ │ └───┘ │ └───┘ │ │ ┌───┐ │ └───┘»\n # «q_4: ──┼──┤ X ├──┼───────────────────┼─────────┼────┼──┤ X ├──■────┼───────»\n # « ┌─┴─┐└─┬─┘┌─┴─┐ │ │ ┌─┴─┐└─┬─┘ │ ┌─┴─┐ »\n # «q_5: ┤ X ├──■──┤ X ├─────────────────┼─────────┼──┤ X ├──■────■──┤ X ├─────»\n # « └───┘ └───┘ │ │ └───┘ │ │ └───┘ »\n # «q_6: ────────────────────────────────■─────────■─────────■────■────────────»\n # « ┌─┴─┐ »\n # «q_7: ───────────────────────────────────────────────────────┤ X ├──────────»\n # « └───┘ »\n # «\n # «q_0: ───────────────\n # «\n # «q_1: ───────────────\n # « ┌───┐\n # «q_2: ─────┤ X ├─────\n # « └─┬─┘\n # «q_3: ──■────■───────\n # « │ ┌───┐\n # «q_4: ──┼──┤ X ├─────\n # « ┌─┴─┐└─┬─┘\n # «q_5: ┤ X ├──■────■──\n # « └───┘ │\n # «q_6: ────────────■──\n # « ┌─┴─┐\n # «q_7: ──────────┤ X ├\n # « └───┘\n circuit = QuantumCircuit(8)\n circuit.h(0)\n circuit.h(1)\n circuit.h(2)\n circuit.h(3)\n circuit.h(4)\n circuit.h(5)\n for i in range(3):\n circuit.cx(i * 2 + 1, i * 2)\n circuit.cx(3, 5)\n for i in range(2):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(i * 2 + 3, i * 2 + 2)\n circuit.ccx(4, 5, 6)\n for i in range(1, -1, -1):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(3, 5)\n circuit.cx(5, 6)\n circuit.cx(3, 5)\n circuit.x(6)\n for i in range(2):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n for i in range(1, -1, -1):\n circuit.cx(i * 2 + 3, i * 2 + 2)\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(1, 0)\n circuit.ccx(6, 1, 0)\n circuit.ccx(0, 1, 3)\n circuit.ccx(6, 3, 2)\n circuit.ccx(2, 3, 5)\n circuit.ccx(6, 5, 4)\n circuit.append(XGate().control(3), [4, 5, 6, 7], [])\n for i in range(1, -1, -1):\n circuit.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n circuit.cx(3, 5)\n for i in range(1, 3):\n circuit.cx(i * 2 + 1, i * 2)\n circuit.ccx(5, 6, 7)\n\n # ┌───┐┌───┐ »\n # q_0: ┤ H ├┤ X ├───────■─────────────────────────────■───────────────────■──»\n # ├───┤└─┬─┘ │ │ │ »\n # q_1: ┤ H ├──■─────────■─────────────────────────────■───────────────────■──»\n # ├───┤┌───┐ │ ┌───┐ │ │ »\n # q_2: ┤ H ├┤ X ├───────┼──┤ X 
├──■──────────────■────┼───────────────────┼──»\n # ├───┤└─┬─┘ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐ ┌─┴─┐»\n # q_3: ┤ H ├──■────■──┤ X ├──■────■──────────────■──┤ X ├──■─────────■──┤ X ├»\n # ├───┤┌───┐ │ └───┘ │ ┌───┐ │ └───┘ │ │ └───┘»\n # q_4: ┤ H ├┤ X ├──┼──────────────┼──┤ X ├──■────┼─────────┼─────────┼───────»\n # ├───┤└─┬─┘┌─┴─┐ ┌─┴─┐└─┬─┘ │ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ »\n # q_5: ┤ H ├──■──┤ X ├──────────┤ X ├──■────■──┤ X ├─────┤ X ├──■──┤ X ├─────»\n # └───┘ └───┘ └───┘ ┌─┴─┐└───┘ └───┘┌─┴─┐├───┤ »\n # q_6: ───────────────────────────────────┤ X ├───────────────┤ X ├┤ X ├─────»\n # └───┘ └───┘└───┘ »\n # q_7: ──────────────────────────────────────────────────────────────────────»\n # »\n # « ┌───┐┌───┐ »\n # «q_0: ──────────────────────■──┤ X ├┤ X ├──■────────────────────────■───────»\n # « │ └─┬─┘└─┬─┘ │ │ »\n # «q_1: ──────────────────────■────■────■────■────────────────────────■───────»\n # « ┌───┐ │ │ │ ┌───┐ │ »\n # «q_2: ──■─────────■──┤ X ├──┼─────────┼────┼──┤ X ├──■─────────■────┼───────»\n # « │ │ └─┬─┘┌─┴─┐ │ ┌─┴─┐└─┬─┘ │ │ ┌─┴─┐ »\n # «q_3: ──■─────────■────■──┤ X ├───────┼──┤ X ├──■────■─────────■──┤ X ├──■──»\n # « │ ┌───┐ │ └───┘ │ └───┘ │ │ ┌───┐ │ └───┘ │ »\n # «q_4: ──┼──┤ X ├──┼───────────────────┼─────────┼────┼──┤ X ├──┼─────────┼──»\n # « ┌─┴─┐└─┬─┘┌─┴─┐ │ │ ┌─┴─┐└─┬─┘┌─┴─┐ ┌─┴─┐»\n # «q_5: ┤ X ├──■──┤ X ├─────────────────┼─────────┼──┤ X ├──■──┤ X ├─────┤ X ├»\n # « └───┘ └───┘ │ │ └───┘ │ └───┘ └───┘»\n # «q_6: ────────────────────────────────■─────────■─────────■─────────────────»\n # « »\n # «q_7: ──────────────────────────────────────────────────────────────────────»\n # « »\n # «\n # «q_0: ─────\n # «\n # «q_1: ─────\n # « ┌───┐\n # «q_2: ┤ X ├\n # « └─┬─┘\n # «q_3: ──■──\n # « ┌───┐\n # «q_4: ┤ X ├\n # « └─┬─┘\n # «q_5: ──■──\n # «\n # «q_6: ─────\n # «\n # «q_7: ─────\n # «\n expected = QuantumCircuit(8)\n expected.h(0)\n expected.h(1)\n expected.h(2)\n expected.h(3)\n expected.h(4)\n expected.h(5)\n for i in range(3):\n expected.cx(i * 2 + 1, i * 2)\n expected.cx(3, 5)\n for i in range(2):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(i * 2 + 3, i * 2 + 2)\n expected.ccx(4, 5, 6)\n for i in range(1, -1, -1):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(3, 5)\n expected.cx(5, 6)\n expected.cx(3, 5)\n expected.x(6)\n for i in range(2):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n for i in range(1, -1, -1):\n expected.cx(i * 2 + 3, i * 2 + 2)\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(1, 0)\n expected.ccx(6, 1, 0)\n expected.ccx(0, 1, 3)\n expected.ccx(6, 3, 2)\n expected.ccx(2, 3, 5)\n expected.ccx(6, 5, 4)\n for i in range(1, -1, -1):\n expected.ccx(i * 2, i * 2 + 1, i * 2 + 3)\n expected.cx(3, 5)\n for i in range(1, 3):\n expected.cx(i * 2 + 1, i * 2)\n\n stv = Statevector.from_label(\"0\" * circuit.num_qubits)\n self.assertEqual(stv & circuit, stv & expected)\n\n pass_ = HoareOptimizer(size=5)\n result = pass_.run(circuit_to_dag(circuit))\n\n self.assertEqual(result, circuit_to_dag(expected))",
"def test_copy(self):\n r1, r2 = 2, 3\n I, J, K = 4, 5, 6\n core_1 = np.arange(I * r1).reshape(I, r1)\n core_2 = np.arange(r1 * J * r2).reshape(r1, J, r2)\n core_3 = np.arange(r2 * K).reshape(r2, K)\n core_values = [core_1, core_2, core_3]\n ft_shape = (I, J, K)\n tensor_tt = TensorTT(core_values=core_values)\n\n tensor_tt_copy = tensor_tt.copy()\n\n # tests that the values are the same but not a reference\n assert tensor_tt_copy is not tensor_tt\n assert tensor_tt_copy.ft_shape is not tensor_tt.ft_shape\n assert tensor_tt_copy.ft_shape == tensor_tt.ft_shape\n assert tensor_tt_copy.rank == tensor_tt.rank\n assert tensor_tt_copy.order == tensor_tt.order\n\n assert tensor_tt_copy._core_values is not tensor_tt._core_values\n for i in range(tensor_tt_copy.order):\n assert tensor_tt_copy._core_values[i] is not tensor_tt._core_values[i]\n np.testing.assert_array_equal(tensor_tt_copy._core_values[i], core_values[i])\n np.testing.assert_array_equal(tensor_tt_copy._core_values[i], tensor_tt._core_values[i])\n assert tensor_tt_copy.core(i) is not tensor_tt.core(i)\n np.testing.assert_array_equal(tensor_tt_copy.core(i).data, tensor_tt.core(i).data)\n\n assert tensor_tt_copy.cores is not tensor_tt.cores",
"def test_memory_saving_invertible_model_wrapper(device, coupling, keep_input):\n\n if device == 'cpu':\n pytest.skip('Unreliable metrics, should be fixed.')\n\n if device == 'cuda' and not torch.cuda.is_available():\n pytest.skip('This test requires a GPU to be available')\n\n gc.disable()\n gc.collect()\n\n with torch.set_grad_enabled(True):\n dims = [2, 10, 10, 10]\n depth = 5\n\n xx = torch.rand(*dims, device=device, dtype=torch.float32).requires_grad_()\n ytarget = torch.rand(*dims, device=device, dtype=torch.float32)\n\n # same convolution test\n network = SubModuleStack(SubModule(in_filters=5, out_filters=5), depth=depth, keep_input=keep_input, coupling=coupling,\n implementation_fwd=-1, implementation_bwd=-1)\n network.to(device)\n network.train()\n network.zero_grad()\n optim = torch.optim.RMSprop(network.parameters())\n optim.zero_grad()\n mem_start = 0 if not device == 'cuda' else \\\n torch.cuda.memory_allocated() / float(1024 ** 2)\n\n y = network(xx)\n gc.collect()\n mem_after_forward = torch.cuda.memory_allocated() / float(1024 ** 2)\n loss = torch.nn.MSELoss()(y, ytarget)\n optim.zero_grad()\n loss.backward()\n optim.step()\n gc.collect()\n # mem_after_backward = torch.cuda.memory_allocated() / float(1024 ** 2)\n gc.enable()\n\n memuse = float(np.prod(dims + [depth, 4, ])) / float(1024 ** 2)\n\n measured_memuse = mem_after_forward - mem_start\n if keep_input:\n assert measured_memuse >= memuse\n else:\n assert measured_memuse < 1\n # assert math.floor(mem_after_backward - mem_start) >= 9",
"def test_queuing_defined_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n T = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n T.queue()\n\n assert len(tape.queue) == 1\n assert tape.queue[0] is T\n\n assert tape._queue[T] == {\"owns\": (op1, op2)}",
"def test_tucker_dropout():\n shape = (10, 11, 12)\n rank = (7, 8, 9)\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='Tucker')\n tensor = tensor_dropout(tensor, 1)\n core = tensor().core\n assert (tl.shape(core) == (1, 1, 1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n core = tensor().core\n assert (tl.shape(core) == rank)",
"async def test_remove_isolate(\n isolate_id,\n mongo,\n snapshot,\n test_otu,\n test_sequence,\n static_time,\n tmp_path,\n data_layer,\n):\n test_otu[\"isolates\"].append(\n {\n \"default\": False,\n \"id\": \"bar\",\n \"source_type\": \"isolate\",\n \"source_name\": \"A\",\n }\n )\n\n await gather(\n mongo.otus.insert_one(test_otu), mongo.sequences.insert_one(test_sequence)\n )\n\n await data_layer.otus.remove_isolate(\"6116cba1\", isolate_id, \"bob\")\n\n assert (\n await asyncio.gather(\n mongo.otus.find_one(),\n mongo.history.find_one(),\n mongo.sequences.find().to_list(None),\n )\n == snapshot\n )",
"def test_not_break_torch(self):\n length = 5\n a = torch.zeros(length)\n b = torch.zeros(length)\n self.assertEqual(len(a == b), length)\n self.assertTrue(torch.all(a == b))\n\n c = Tensor(torch.ones(5))\n # If Tensor is either argument, it uses the equality method that returns bool.\n self.assertNotEqual(c, a)\n self.assertNotEqual(a, c)",
"def test_cp_dropout():\n shape = (10, 11, 12)\n rank = 8\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='CP')\n tensor = tensor_dropout(tensor, 1)\n weights = tensor().weights\n assert (len(weights) == (1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n weights = tensor().weights\n assert (len(weights) == rank)",
"def clean_isolated(\n self, obstructions: Tuple[GriddedPerm, ...], gp: GriddedPerm\n ) -> Set[GriddedPerm]:\n cleaned_obs: Set[GriddedPerm] = set()\n for ob in obstructions:\n cells_to_remove: Set[Cell] = set()\n for factor in ob.factors():\n if self._griddedperm_implied_by_requirement(factor, (gp,)):\n cells_to_remove.update(factor.pos)\n if cells_to_remove:\n cleaned_obs.add(ob.remove_cells(cells_to_remove))\n return cleaned_obs",
"def test_not_commuting_one_target_not_commute_with_ctrl(self):\n op1 = qml.ops.op_math.Controlled(qml.PauliX(3), control_wires=0)\n op2 = qml.ops.op_math.Controlled(qml.PauliZ(2), control_wires=3)\n assert not qml.is_commuting(op1, op2)\n assert not qml.is_commuting(op2, op1)",
"def test_prune_objects(self):\n # create objects to add to map\n rng_list = [12.512, 44, 50]\n bearing_list = [-22, 81.5, 2]\n type_list = [ObjectType.BUOY, ObjectType.BOAT, ObjectType.BUOY]\n\n update_hist_list = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], \\\n [1, 0, 1, 0, 1, 0, 1, 0, 1, 1], \\\n [1, 0, 0, 1, 0, 0, 0, 0, 1, 0]]\n\n # loop through objects and add to map\n for ii, (rng, bearing, obj_type) in enumerate(zip(rng_list, bearing_list, type_list)):\n obj = Object(bearing, rng, time_in_millis(), objectType = obj_type)\n obj.updateHist = update_hist_list[ii]\n self.map.object_list.append(obj)\n\n # create local copy of original object list\n orig_object_list = self.map.object_list\n\n # call prune objects\n self.map.prune_objects()\n\n # create truth object list\n truth_obj_list = orig_object_list[0:2]\n\n # ensure that only the first two objects remain in the object list\n self.assertEqual(truth_obj_list, self.map.object_list)",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the correct sparse matrix representation is used when the custom wires swap the order.
|
def test_sparse_matrix_swapped_wires(self):
t = qml.PauliX(0) @ qml.PauliZ(1)
s = t.sparse_matrix(wires=[1, 0])
assert np.allclose(s.data, [1, 1, -1, -1])
assert np.allclose(s.indices, [1, 0, 3, 2])
assert np.allclose(s.indptr, [0, 1, 2, 3, 4])
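
As a sanity check on the expected CSR arrays: with wire order [1, 0], wire 1 takes the most significant position, so the tensor PauliX(0) @ PauliZ(1) renders as Z kron X. A minimal NumPy/SciPy sketch of the same check, provided purely for illustration and independent of the PennyLane API:

import numpy as np
from scipy.sparse import csr_matrix

X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])

# Wire order [1, 0] puts wire 1 in the most significant slot,
# so PauliX(0) @ PauliZ(1) renders as Z kron X.
s = csr_matrix(np.kron(Z, X))
assert np.allclose(s.data, [1, 1, -1, -1])
assert np.allclose(s.indices, [1, 0, 3, 2])
assert np.allclose(s.indptr, [0, 1, 2, 3, 4])

Each row of Z kron X holds exactly one nonzero entry, which is why indptr increases by one per row.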
|
[
"def test_sparse_matrix_extra_wire(self):\n\n t = qml.PauliX(0) @ qml.PauliZ(1)\n s = t.sparse_matrix(wires=[0, 1, 2])\n\n assert s.shape == (8, 8)\n assert np.allclose(s.data, [1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0])\n assert np.allclose(s.indices, [4, 5, 6, 7, 0, 1, 2, 3])\n assert np.allclose(s.indptr, [0, 1, 2, 3, 4, 5, 6, 7, 8])",
"def test_to_matrix_2q_sparse(self):\n labels = [\"IX\", \"II\", \"ZY\", \"YZ\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_matrix_iter_sparse(self):\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli.matrix_iter(sparse=True)):\n self.assertTrue(isinstance(i, csr_matrix))\n self.assertTrue(np.all(i.toarray() == pauli_mat(labels[idx])))",
"def test_sparse_format(self):\n\n coeffs = [-0.25, 0.75]\n obs = [\n qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]),\n qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]),\n ]\n H = qml.Hamiltonian(coeffs, obs)\n\n sparse_matrix = H.sparse_matrix()\n\n assert isinstance(sparse_matrix, scipy.sparse.csr_matrix)",
"def test_to_matrix_5q_sparse(self):\n labels = [\"XXXYY\", \"IXIZY\", \"ZYXIX\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_to_matrix_1q_sparse(self):\n labels = [\"X\", \"I\", \"Z\", \"Y\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def test_csc_matrix():\n printer = AmiciCxxCodePrinter()\n matrix = sp.Matrix([[1, 0], [2, 3]])\n (\n symbol_col_ptrs,\n symbol_row_vals,\n sparse_list,\n symbol_list,\n sparse_matrix,\n ) = printer.csc_matrix(\n matrix,\n rownames=[sp.Symbol(\"a1\"), sp.Symbol(\"a2\")],\n colnames=[sp.Symbol(\"b1\"), sp.Symbol(\"b2\")],\n )\n\n assert symbol_col_ptrs == [0, 2, 3]\n assert symbol_row_vals == [0, 1, 1]\n assert sparse_list == sp.Matrix([[1], [2], [3]])\n assert symbol_list == [\"da1_db1\", \"da2_db1\", \"da2_db2\"]\n assert str(sparse_matrix) == \"Matrix([[da1_db1, 0], [da2_db1, da2_db2]])\"",
"def test_sparse_csr_check():\n dense = _dense_matrix_example()\n shape, data, channels, spikes_ptr = _sparse_matrix_example()\n\n # Dense to sparse conversion not implemented yet.\n with raises(NotImplementedError):\n csr_matrix(dense)\n\n # Need the three sparse components and the shape.\n with raises(ValueError):\n csr_matrix(data=data, channels=channels)\n with raises(ValueError):\n csr_matrix(data=data, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(data=data, channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape,\n data=data[:-1], channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, channels=[[0]])\n with raises(ValueError):\n csr_matrix(shape=shape, spikes_ptr=[0])\n with raises(ValueError):\n csr_matrix(shape=(4, 5, 6), data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros((2, 2)))\n with raises(ValueError):\n csr_matrix(shape=shape, data=np.zeros((100)), channels=channels,\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros(100),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros(100))\n\n # This one should pass.\n sparse = csr_matrix(shape=shape,\n data=data, channels=channels, spikes_ptr=spikes_ptr)\n assert isinstance(sparse, SparseCSR)\n ae(sparse.shape, shape)\n ae(sparse._data, data)\n ae(sparse._channels, channels)\n ae(sparse._spikes_ptr, spikes_ptr)",
"def test_encodeSynSparse2(self):\n\n syn = lil_matrix([1, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 4])\n synIds = np.array(syn.rows[0])\n synVals = np.array(syn.data[0])\n\n synapseEncoder = SynapseEncoder(self.numWeightBits, 2, 'sparse', True)\n synapseEncoder.encode(synIds, synVals, 0, 0)\n\n synEntries = synapseEncoder.getSynEntries()\n synFmts = synapseEncoder.getSynFmts()\n\n numBits = []\n numPrefixBits = []\n numSynBits = []\n for synEntry in synEntries:\n numBits.append(synEntry.numSynBits + synEntry.numPrefixBits)\n numPrefixBits.append(synEntry.numPrefixBits)\n numSynBits.append(synEntry.numSynBits)\n\n self.assertEqual(len(synEntries), 2)\n self.assertEqual(len(synFmts), 2)\n\n if self.verbose:\n synEntries[0].print()\n\n self.assertEqual(synEntries[0].prefixOffset, 0)\n self.assertEqual(tuple(synEntries[0].idxs), (0, 5))\n self.assertEqual(tuple(synEntries[0].weights), (1, 2))\n self.assertEqual(synEntries[0].synFmtId, 0)\n self.assertEqual(synEntries[0].numSyn, 2)\n self.assertEqual(numPrefixBits[0], 10)\n self.assertEqual(numSynBits[0], 28)\n self.assertEqual(numBits[0], 38)\n\n self.assertEqual(synFmts[0].numIdxBits, 6)\n self.assertEqual(synFmts[0].numSkipBits, 0)\n self.assertEqual(synFmts[0].numWgtBits, 8)\n\n if self.verbose:\n synEntries[1].print()\n\n self.assertEqual(synEntries[1].prefixOffset, 0)\n self.assertEqual(tuple(synEntries[1].idxs), (6, 11))\n self.assertEqual(tuple(synEntries[1].weights), (3, 4))\n self.assertEqual(synEntries[1].synFmtId, 1)\n self.assertEqual(synEntries[1].numSyn, 2)\n self.assertEqual(numPrefixBits[0], 10)\n self.assertEqual(numSynBits[0], 28)\n self.assertEqual(numBits[0], 38)\n\n self.assertEqual(synFmts[1].numIdxBits, 6)\n self.assertEqual(synFmts[1].numSkipBits, 0)\n self.assertEqual(synFmts[1].numWgtBits, 8)",
"def test_read_write_indexed_matrix():\n\n print(\"Simple real sparse matrix\")\n M = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])\n Id = scipy.sparse.eye(3, format='coo')\n M = scipy.sparse.kron(M, Id)\n filename = tempfilename()\n qdyn.io.write_indexed_matrix(M, filename)\n print_file(filename)\n O = qdyn.io.read_indexed_matrix(filename)\n assert identical_matrices(M, O)\n os.unlink(filename)\n print(\"\")\n\n print(\"Complex sparse matrix\")\n filename = tempfilename()\n M2 = make_hermitian((M + 0.5j * M).toarray())\n qdyn.io.write_indexed_matrix(M2, filename)\n print_file(filename)\n O2 = qdyn.io.read_indexed_matrix(filename)\n assert identical_matrices(M2, O2)\n os.unlink(filename)\n print(\"\")\n\n print(\"Complex non-Hermitian sparse matrix\")\n filename = tempfilename()\n M3 = (M + 0.5j * M).toarray()\n qdyn.io.write_indexed_matrix(M3, filename, hermitian=False)\n print_file(filename)\n O3 = qdyn.io.read_indexed_matrix(filename, expand_hermitian=False)\n assert identical_matrices(M3, O3)\n os.unlink(filename)\n print(\"\")",
"def test_double_sparse(self):\n dname = os.path.dirname(os.path.abspath(__file__))\n mf = mf_c(label='water', cd=dname)\n pb = mf.pb\n v_dab_array = pb.get_dp_vertex_array()\n nnn = v_dab_array.size\n vds = pb.get_dp_vertex_doubly_sparse(axis=0)\n self.assertEqual(vds.shape, v_dab_array.shape)\n self.assertTrue(abs(vds.toarray()-v_dab_array).sum()/nnn<1e-14)\n vds = pb.get_dp_vertex_doubly_sparse(axis=1)\n self.assertTrue(abs(vds.toarray()-v_dab_array).sum()/nnn<1e-14)\n vds = pb.get_dp_vertex_doubly_sparse(axis=2)\n self.assertTrue(abs(vds.toarray()-v_dab_array).sum()/nnn<1e-14)",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def test_similarity_shape(self):\n self._test_similarity_shape()\n try:\n self._test_similarity_shape_sparse()\n except TypeError:\n # computation of kernel is not supported on sparse matrices\n pass",
"def testCalculateInternalSMatrix(self):\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n\n l0 = 2.7;\n k0 = 2.3271;\n kx = 1.00063;\n ky = 0.424741;\n\n er = [2.0, 1.0];\n ur = [1.0, 3.0];\n L = [0.25*l0, 0.5*l0];\n Wg = complexIdentity(2);\n Vg = complexArray([\n [0 - 0.4250j, 0 - 1.1804j],\n [0 + 2.0013j, 0 + 0.4250j]]);\n\n i = 0;\n SiCalculated = calculateInternalSMatrix(kx, ky, er[i], ur[i], k0, L[i], Wg, Vg);\n\n SiActual = complexZeros((2,2,2,2));\n SiActual[0,0] = complexArray([\n [0.0039 - 0.0006j, -0.0398 + 0.0060j],\n [-0.0398 + 0.0060j, 0.0808 - 0.0121j]]);\n SiActual[0,1] = complexArray([\n [0.1490 + 0.9880j, 0.0005 + 0.0017j],\n [0.0005 + 0.0017j, 0.1480 + 0.9848j]]);\n SiActual[1,0] = complexArray([\n [0.1490 + 0.9880j, 0.0005 + 0.0017j],\n [0.0005 + 0.0017j, 0.1480 + 0.9848j]]);\n SiActual[1,1] = complexArray([\n [0.0039 - 0.0006j, -0.0398 + 0.0060j],\n [-0.0398 + 0.0060j, 0.0808 - 0.0121j]]);\n\n assertAlmostEqual(SiActual, SiCalculated, absoluteTolerance, relativeTolerance);\n\n i = 1;\n SiCalculated = calculateInternalSMatrix(kx, ky, er[i], ur[i], k0, L[i], Wg, Vg);\n SiActual[0,0] = complexArray([\n [0.6997 - 0.2262j, 0.0517 - 0.0014j],\n [0.0517-0.0014j, 0.5998 - 0.2235j]]);\n SiActual[0,1] = complexArray([\n [-0.2093 - 0.6406j, 0.0311 + 0.0390j],\n [0.0311 + 0.0390j, -0.2693 - 0.7160j]]);\n SiActual[1,0] = complexArray([\n [-0.2093 - 0.6406j, 0.0311 + 0.0390j],\n [0.0311 + 0.0390j, -0.2693 - 0.7160j]]);\n SiActual[1,1] = complexArray([\n [0.6997 - 0.2262j, 0.0517 - 0.0014j],\n [0.0517-0.0014j, 0.5998 - 0.2235j]]);\n\n assertAlmostEqual(SiActual, SiCalculated, absoluteTolerance, relativeTolerance);",
"def test_csc_matrix_empty():\n printer = AmiciCxxCodePrinter()\n matrix = sp.Matrix()\n (\n symbol_col_ptrs,\n symbol_row_vals,\n sparse_list,\n symbol_list,\n sparse_matrix,\n ) = printer.csc_matrix(matrix, rownames=[], colnames=[])\n\n assert symbol_col_ptrs == []\n assert symbol_row_vals == []\n assert sparse_list == sp.Matrix(0, 0, [])\n assert symbol_list == []\n assert str(sparse_matrix) == \"Matrix(0, 0, [])\"",
"def check_matrix_equality(self, conn, w):\n pos = conn.positive\n neg = conn.negative\n wPos = pos.getConnectionState('weight')\n wNeg = neg.getConnectionState('weight')\n ww = wPos + wNeg\n self.assertEqual(np.array_equal(ww, w), True)",
"def test_superlu_2_dense(small_superlu):\n superlu_mat = small_superlu\n\n dense_mat = LinearAlgebraTools.superlu_sparse_2_dense(superlu_mat)\n comparison_mat = numpy.loadtxt(os.path.join(os.path.dirname(__file__),\n 'sparse_mat_1.txt'))\n assert numpy.allclose(dense_mat,comparison_mat)",
"def to_sparse(self, sparseDims): # real signature unknown; restored from __doc__\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the correct sparse matrix representation is used when the custom wires add an extra wire with an implied identity operation.
|
def test_sparse_matrix_extra_wire(self):
t = qml.PauliX(0) @ qml.PauliZ(1)
s = t.sparse_matrix(wires=[0, 1, 2])
assert s.shape == (8, 8)
assert np.allclose(s.data, [1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0])
assert np.allclose(s.indices, [4, 5, 6, 7, 0, 1, 2, 3])
assert np.allclose(s.indptr, [0, 1, 2, 3, 4, 5, 6, 7, 8])
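
The same Kronecker reasoning explains the expected arrays here: the extra wire 2 contributes an implied identity factor, so the matrix is X kron Z kron I, an 8 x 8 permutation-like matrix with one signed entry per row. A minimal NumPy/SciPy sketch, again for illustration only:

import numpy as np
from scipy.sparse import csr_matrix

X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])
I = np.eye(2)

# Wire order [0, 1, 2], with the identity implied on wire 2.
s = csr_matrix(np.kron(np.kron(X, Z), I))
assert s.shape == (8, 8)
assert np.allclose(s.data, [1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0])
assert np.allclose(s.indices, [4, 5, 6, 7, 0, 1, 2, 3])
assert np.allclose(s.indptr, [0, 1, 2, 3, 4, 5, 6, 7, 8])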
|
[
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def test_sparse_format(self):\n\n coeffs = [-0.25, 0.75]\n obs = [\n qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]),\n qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]),\n ]\n H = qml.Hamiltonian(coeffs, obs)\n\n sparse_matrix = H.sparse_matrix()\n\n assert isinstance(sparse_matrix, scipy.sparse.csr_matrix)",
"def test_sparse_matrix_swapped_wires(self):\n\n t = qml.PauliX(0) @ qml.PauliZ(1)\n s = t.sparse_matrix(wires=[1, 0])\n\n assert np.allclose(s.data, [1, 1, -1, -1])\n assert np.allclose(s.indices, [1, 0, 3, 2])\n assert np.allclose(s.indptr, [0, 1, 2, 3, 4])",
"def test_matrix_iter_sparse(self):\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli.matrix_iter(sparse=True)):\n self.assertTrue(isinstance(i, csr_matrix))\n self.assertTrue(np.all(i.toarray() == pauli_mat(labels[idx])))",
"def test_to_matrix_5q_sparse(self):\n labels = [\"XXXYY\", \"IXIZY\", \"ZYXIX\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_to_matrix_1q_sparse(self):\n labels = [\"X\", \"I\", \"Z\", \"Y\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_sparse_csr_check():\n dense = _dense_matrix_example()\n shape, data, channels, spikes_ptr = _sparse_matrix_example()\n\n # Dense to sparse conversion not implemented yet.\n with raises(NotImplementedError):\n csr_matrix(dense)\n\n # Need the three sparse components and the shape.\n with raises(ValueError):\n csr_matrix(data=data, channels=channels)\n with raises(ValueError):\n csr_matrix(data=data, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(data=data, channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape,\n data=data[:-1], channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, channels=[[0]])\n with raises(ValueError):\n csr_matrix(shape=shape, spikes_ptr=[0])\n with raises(ValueError):\n csr_matrix(shape=(4, 5, 6), data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros((2, 2)))\n with raises(ValueError):\n csr_matrix(shape=shape, data=np.zeros((100)), channels=channels,\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros(100),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros(100))\n\n # This one should pass.\n sparse = csr_matrix(shape=shape,\n data=data, channels=channels, spikes_ptr=spikes_ptr)\n assert isinstance(sparse, SparseCSR)\n ae(sparse.shape, shape)\n ae(sparse._data, data)\n ae(sparse._channels, channels)\n ae(sparse._spikes_ptr, spikes_ptr)",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def test_to_matrix_2q_sparse(self):\n labels = [\"IX\", \"II\", \"ZY\", \"YZ\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_has_matrix_true(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n @staticmethod\n def compute_matrix():\n return np.eye(2)\n\n assert MyOp.has_matrix\n assert MyOp(wires=0).has_matrix",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def test_csc_matrix_empty():\n printer = AmiciCxxCodePrinter()\n matrix = sp.Matrix()\n (\n symbol_col_ptrs,\n symbol_row_vals,\n sparse_list,\n symbol_list,\n sparse_matrix,\n ) = printer.csc_matrix(matrix, rownames=[], colnames=[])\n\n assert symbol_col_ptrs == []\n assert symbol_row_vals == []\n assert sparse_list == sp.Matrix(0, 0, [])\n assert symbol_list == []\n assert str(sparse_matrix) == \"Matrix(0, 0, [])\"",
"def test_csc_matrix():\n printer = AmiciCxxCodePrinter()\n matrix = sp.Matrix([[1, 0], [2, 3]])\n (\n symbol_col_ptrs,\n symbol_row_vals,\n sparse_list,\n symbol_list,\n sparse_matrix,\n ) = printer.csc_matrix(\n matrix,\n rownames=[sp.Symbol(\"a1\"), sp.Symbol(\"a2\")],\n colnames=[sp.Symbol(\"b1\"), sp.Symbol(\"b2\")],\n )\n\n assert symbol_col_ptrs == [0, 2, 3]\n assert symbol_row_vals == [0, 1, 1]\n assert sparse_list == sp.Matrix([[1], [2], [3]])\n assert symbol_list == [\"da1_db1\", \"da2_db1\", \"da2_db2\"]\n assert str(sparse_matrix) == \"Matrix([[da1_db1, 0], [da2_db1, da2_db2]])\"",
"def test_encodeSynSparse2(self):\n\n syn = lil_matrix([1, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 4])\n synIds = np.array(syn.rows[0])\n synVals = np.array(syn.data[0])\n\n synapseEncoder = SynapseEncoder(self.numWeightBits, 2, 'sparse', True)\n synapseEncoder.encode(synIds, synVals, 0, 0)\n\n synEntries = synapseEncoder.getSynEntries()\n synFmts = synapseEncoder.getSynFmts()\n\n numBits = []\n numPrefixBits = []\n numSynBits = []\n for synEntry in synEntries:\n numBits.append(synEntry.numSynBits + synEntry.numPrefixBits)\n numPrefixBits.append(synEntry.numPrefixBits)\n numSynBits.append(synEntry.numSynBits)\n\n self.assertEqual(len(synEntries), 2)\n self.assertEqual(len(synFmts), 2)\n\n if self.verbose:\n synEntries[0].print()\n\n self.assertEqual(synEntries[0].prefixOffset, 0)\n self.assertEqual(tuple(synEntries[0].idxs), (0, 5))\n self.assertEqual(tuple(synEntries[0].weights), (1, 2))\n self.assertEqual(synEntries[0].synFmtId, 0)\n self.assertEqual(synEntries[0].numSyn, 2)\n self.assertEqual(numPrefixBits[0], 10)\n self.assertEqual(numSynBits[0], 28)\n self.assertEqual(numBits[0], 38)\n\n self.assertEqual(synFmts[0].numIdxBits, 6)\n self.assertEqual(synFmts[0].numSkipBits, 0)\n self.assertEqual(synFmts[0].numWgtBits, 8)\n\n if self.verbose:\n synEntries[1].print()\n\n self.assertEqual(synEntries[1].prefixOffset, 0)\n self.assertEqual(tuple(synEntries[1].idxs), (6, 11))\n self.assertEqual(tuple(synEntries[1].weights), (3, 4))\n self.assertEqual(synEntries[1].synFmtId, 1)\n self.assertEqual(synEntries[1].numSyn, 2)\n self.assertEqual(numPrefixBits[0], 10)\n self.assertEqual(numSynBits[0], 28)\n self.assertEqual(numBits[0], 38)\n\n self.assertEqual(synFmts[1].numIdxBits, 6)\n self.assertEqual(synFmts[1].numSkipBits, 0)\n self.assertEqual(synFmts[1].numWgtBits, 8)",
"def test_read_write_indexed_matrix():\n\n print(\"Simple real sparse matrix\")\n M = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])\n Id = scipy.sparse.eye(3, format='coo')\n M = scipy.sparse.kron(M, Id)\n filename = tempfilename()\n qdyn.io.write_indexed_matrix(M, filename)\n print_file(filename)\n O = qdyn.io.read_indexed_matrix(filename)\n assert identical_matrices(M, O)\n os.unlink(filename)\n print(\"\")\n\n print(\"Complex sparse matrix\")\n filename = tempfilename()\n M2 = make_hermitian((M + 0.5j * M).toarray())\n qdyn.io.write_indexed_matrix(M2, filename)\n print_file(filename)\n O2 = qdyn.io.read_indexed_matrix(filename)\n assert identical_matrices(M2, O2)\n os.unlink(filename)\n print(\"\")\n\n print(\"Complex non-Hermitian sparse matrix\")\n filename = tempfilename()\n M3 = (M + 0.5j * M).toarray()\n qdyn.io.write_indexed_matrix(M3, filename, hermitian=False)\n print_file(filename)\n O3 = qdyn.io.read_indexed_matrix(filename, expand_hermitian=False)\n assert identical_matrices(M3, O3)\n os.unlink(filename)\n print(\"\")",
"def create_sparse_matrix(A, x, s, options=\"non-sparse\"):\n\n if options == \"non-sparse\":\n # print(\"*********create sparse matrix (non-sparse)*********\")\n m, n = np.shape(A)\n i, j, k = sparse.find(A)\n # A transpose and I\n row_index = np.append(j, range(m + n, m + 2 * n))\n col_index = np.append(i + n, range(m + n, m + 2 * n))\n values = np.append(k, np.ones(n))\n # A\n row_index = np.append(row_index, i + n)\n col_index = np.append(col_index, j)\n values = np.append(values, k)\n # S\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(0, n))\n values = np.append(values, s)\n # X\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(m + n, m + 2 * n))\n values = np.append(values, x)\n # check\n # print(\"sparse matrix non-zero element :\")\n # print(\"row :\", len(row_index))\n # print(\"col :\", len(col_index))\n # print(\"values :\", len(values))\n return sparse.coo_matrix(\n (values, (row_index, col_index)), shape=(m + 2 * n, m + 2 * n)\n )\n # return sparse.coo_matrix((values, (row_index, col_index)))\n elif options == \"sparse\":\n # print(\"***create sparse matrix (sparse)***\")\n # try:\n # i, j, k, m, n = A\n # except:\n i, j, k = sparse.find(A)\n m, n = np.shape(A)\n # print(\"row :\", len(i))\n # print(\"col :\", len(j))\n # print(\"values :\", len(k))\n # print(\"variables :\", n)\n # print(\"constraints :\", m)\n # print(\"number of row :\", max(i))\n # print(\"number of column :\", max(j))\n # A transpose and I\n row_index = np.append(j, range(0, n))\n col_index = np.append(i + n, range(m + n, m + 2 * n))\n values = np.append(k, np.ones(n))\n # A\n row_index = np.append(row_index, i + n)\n col_index = np.append(col_index, j)\n values = np.append(values, k)\n # S\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(n))\n values = np.append(values, s)\n # X\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(m + n, m + 2 * n))\n values = np.append(values, x)\n # print(\"****full matrix version****\")\n # print(\"variables :\", m + 2 * n)\n # print(\"constraints :\", m + 2 * n)\n # print(\"min index of row :\", min(row_index))\n # print(\"max index of row :\", max(row_index))\n # print(\"min index of column :\", min(col_index))\n # print(\"max index of column :\", max(col_index))\n return sparse.csc_matrix(\n (values, (row_index, col_index)), shape=(m + 2 * n, m + 2 * n)\n )\n # return sparse.csc_matrix((values, (row_index, col_index)))\n elif options == \"tosparse\":\n row_index, col_index, values, m, n = A\n return sparse.csc_matrix((values, (row_index, col_index)), shape=(m, n))\n else:\n raise Exception(\"options must be specific as sparse or non-sparse\")",
"def test_no_wire_order_returns_base_matrix(self):\n res = qml.operation.expand_matrix(self.base_matrix_2, wires=[0, 2])\n assert np.allclose(self.base_matrix_2, res)",
"def testCalculateInternalSMatrix(self):\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n\n l0 = 2.7;\n k0 = 2.3271;\n kx = 1.00063;\n ky = 0.424741;\n\n er = [2.0, 1.0];\n ur = [1.0, 3.0];\n L = [0.25*l0, 0.5*l0];\n Wg = complexIdentity(2);\n Vg = complexArray([\n [0 - 0.4250j, 0 - 1.1804j],\n [0 + 2.0013j, 0 + 0.4250j]]);\n\n i = 0;\n SiCalculated = calculateInternalSMatrix(kx, ky, er[i], ur[i], k0, L[i], Wg, Vg);\n\n SiActual = complexZeros((2,2,2,2));\n SiActual[0,0] = complexArray([\n [0.0039 - 0.0006j, -0.0398 + 0.0060j],\n [-0.0398 + 0.0060j, 0.0808 - 0.0121j]]);\n SiActual[0,1] = complexArray([\n [0.1490 + 0.9880j, 0.0005 + 0.0017j],\n [0.0005 + 0.0017j, 0.1480 + 0.9848j]]);\n SiActual[1,0] = complexArray([\n [0.1490 + 0.9880j, 0.0005 + 0.0017j],\n [0.0005 + 0.0017j, 0.1480 + 0.9848j]]);\n SiActual[1,1] = complexArray([\n [0.0039 - 0.0006j, -0.0398 + 0.0060j],\n [-0.0398 + 0.0060j, 0.0808 - 0.0121j]]);\n\n assertAlmostEqual(SiActual, SiCalculated, absoluteTolerance, relativeTolerance);\n\n i = 1;\n SiCalculated = calculateInternalSMatrix(kx, ky, er[i], ur[i], k0, L[i], Wg, Vg);\n SiActual[0,0] = complexArray([\n [0.6997 - 0.2262j, 0.0517 - 0.0014j],\n [0.0517-0.0014j, 0.5998 - 0.2235j]]);\n SiActual[0,1] = complexArray([\n [-0.2093 - 0.6406j, 0.0311 + 0.0390j],\n [0.0311 + 0.0390j, -0.2693 - 0.7160j]]);\n SiActual[1,0] = complexArray([\n [-0.2093 - 0.6406j, 0.0311 + 0.0390j],\n [0.0311 + 0.0390j, -0.2693 - 0.7160j]]);\n SiActual[1,1] = complexArray([\n [0.6997 - 0.2262j, 0.0517 - 0.0014j],\n [0.0517-0.0014j, 0.5998 - 0.2235j]]);\n\n assertAlmostEqual(SiActual, SiCalculated, absoluteTolerance, relativeTolerance);",
"def test_encodeSynSparse1(self):\n\n syn = lil_matrix([1, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 4])\n synIds = np.array(syn.rows[0])\n synVals = np.array(syn.data[0])\n\n synapseEncoder = SynapseEncoder(self.numWeightBits,\n self.maxNumSynPerSynEntry, 'sparse',\n True)\n synapseEncoder.encode(synIds, synVals, 0, 0)\n\n synEntries = synapseEncoder.getSynEntries()\n synFmts = synapseEncoder.getSynFmts()\n\n numBits = []\n numPrefixBits = []\n numSynBits = []\n for synEntry in synEntries:\n numBits.append(synEntry.numSynBits + synEntry.numPrefixBits)\n numPrefixBits.append(synEntry.numPrefixBits)\n numSynBits.append(synEntry.numSynBits)\n\n self.assertEqual(len(synEntries), 1)\n self.assertEqual(len(synFmts), 1)\n\n if self.verbose:\n synEntries[0].print()\n\n self.assertEqual(synEntries[0].prefixOffset, 0)\n self.assertEqual(tuple(synEntries[0].idxs), (0, 5, 6, 11))\n self.assertEqual(tuple(synEntries[0].weights), (1, 2, 3, 4))\n self.assertEqual(synEntries[0].synFmtId, 0)\n self.assertEqual(synEntries[0].numSyn, 4)\n self.assertEqual(numPrefixBits[0], 10)\n self.assertEqual(numSynBits[0], 56)\n self.assertEqual(numBits[0], 66)\n\n self.assertEqual(synFmts[0].numIdxBits, 6)\n self.assertEqual(synFmts[0].numSkipBits, 0)\n self.assertEqual(synFmts[0].numWgtBits, 8)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that an error is raised if the sparse matrix is computed for a tensor whose constituent operations are not all single-qubit gates.
|
def test_sparse_matrix_error(self):
t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])
with pytest.raises(ValueError, match="Can only compute"):
t.sparse_matrix()
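
For contrast, a tensor whose factors each act on a single wire does admit a sparse representation. A hypothetical sketch under the same assumed PennyLane API as the surrounding tests; the single-qubit Hermitian replacing the two-qubit one is illustrative, not from the original suite:

import numpy as np
import pennylane as qml

# A single-qubit Hermitian factor keeps every operation on one wire,
# so no ValueError is raised.
t_ok = qml.PauliX(0) @ qml.Hermitian(np.array([[1, 0], [0, -1]]), wires=1)
s = t_ok.sparse_matrix()
assert s.shape == (4, 4)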
|
[
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def _check_csr(self):\n pass",
"def test_similarity_shape(self):\n self._test_similarity_shape()\n try:\n self._test_similarity_shape_sparse()\n except TypeError:\n # computation of kernel is not supported on sparse matrices\n pass",
"def testSparseInputsFromMultivaluedOp(self):\n with tf.Graph().as_default():\n one, _ = tf.sparse_split(\n sp_input=tf.SparseTensor(indices=[[0, 1], [1, 2]], values=[1, 2],\n dense_shape=[2, 3]),\n num_split=2, axis=0, name=\"op1\")\n _, two = tf.sparse_split(\n sp_input=tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[3, 4],\n dense_shape=[2, 3]),\n num_split=2, axis=0, name=\"op2\")\n three = tf.SparseTensor(indices=[[0]], values=[5], dense_shape=[2])\n message = native_module.find_signature_inputs_from_multivalued_ops(\n dict(one=one, two=two, three=three))\n self.assertRegexpMatches(\n message,\n \".*single output.*\\nAffected inputs: \"\n \"one.indices='op1:0', one.values='op1:2', one.dense_shape='op1:4', \"\n \"two.indices='op2:1', two.values='op2:3', two.dense_shape='op2:5'$\")\n # Also test the case of no errors.\n with tf.Graph().as_default():\n one = tf.SparseTensor(indices=[[0]], values=[1], dense_shape=[2])\n two = tf.SparseTensor(indices=[[1]], values=[2], dense_shape=[2])\n message = native_module.find_signature_inputs_from_multivalued_ops(\n dict(one=one, two=two, three=three))\n self.assertIsNone(message)",
"def assert_is_sparse(a):\n if not is_sparse(a):\n raise TypeError('a is not a sparse array/matrix.')",
"def is_sparse(tensor):\n\treturn isinstance(tensor, tf.SparseTensor)",
"def test_sparse_csr_check():\n dense = _dense_matrix_example()\n shape, data, channels, spikes_ptr = _sparse_matrix_example()\n\n # Dense to sparse conversion not implemented yet.\n with raises(NotImplementedError):\n csr_matrix(dense)\n\n # Need the three sparse components and the shape.\n with raises(ValueError):\n csr_matrix(data=data, channels=channels)\n with raises(ValueError):\n csr_matrix(data=data, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(data=data, channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape,\n data=data[:-1], channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, channels=[[0]])\n with raises(ValueError):\n csr_matrix(shape=shape, spikes_ptr=[0])\n with raises(ValueError):\n csr_matrix(shape=(4, 5, 6), data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros((2, 2)))\n with raises(ValueError):\n csr_matrix(shape=shape, data=np.zeros((100)), channels=channels,\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros(100),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros(100))\n\n # This one should pass.\n sparse = csr_matrix(shape=shape,\n data=data, channels=channels, spikes_ptr=spikes_ptr)\n assert isinstance(sparse, SparseCSR)\n ae(sparse.shape, shape)\n ae(sparse._data, data)\n ae(sparse._channels, channels)\n ae(sparse._spikes_ptr, spikes_ptr)",
"def is_matrix_continuous_sparse(obj):\n try:\n sp.rand\n return sp.issparse(obj) and len(obj.shape) == 2\n except:\n return False",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def is_sparse(constant):\r\n return sp.issparse(constant) or isinstance(constant, cvxopt.spmatrix)",
"def assert_is_tf_sparse(a):\n if not is_tf_sparse(a):\n raise TypeError('a is not a tf.SparseTensorValue object.')",
"def _check_if_sparse_super_is_set(self) \\\r\n -> bool:\r\n if (int(self.ext4fs.superblock.data['feature_ro_compat'], 0) & 0x1) == 0x1:\r\n return True\r\n else:\r\n return False",
"def test_matrix_iter_sparse(self):\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli.matrix_iter(sparse=True)):\n self.assertTrue(isinstance(i, csr_matrix))\n self.assertTrue(np.all(i.toarray() == pauli_mat(labels[idx])))",
"def test_to_matrix_1q_sparse(self):\n labels = [\"X\", \"I\", \"Z\", \"Y\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def test_gaussian_euclidean_ndim_invalid(shape):\n x = jnp.ones(shape=shape)\n\n with pytest.raises(ValueError) as e:\n metrics.gaussian_euclidean(x)\n assert \"The mass matrix has the wrong number of dimensions\" in str(e)",
"def test_sparse_format(self):\n\n coeffs = [-0.25, 0.75]\n obs = [\n qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]),\n qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]),\n ]\n H = qml.Hamiltonian(coeffs, obs)\n\n sparse_matrix = H.sparse_matrix()\n\n assert isinstance(sparse_matrix, scipy.sparse.csr_matrix)",
"def _check_square(matrix):\n if matrix.ndim != 2 or (matrix.shape[0] != matrix.shape[-1]):\n raise ValueError('Expected a square matrix, got array of shape'\n ' {0}.'.format(matrix.shape))",
"def test_diagonalizing_gates_overlapping(self):\n diag_op = ValidOp(qml.S(0), qml.PauliX(0))\n diagonalizing_gates = diag_op.diagonalizing_gates()\n\n assert len(diagonalizing_gates) == 1\n diagonalizing_mat = diagonalizing_gates[0].matrix()\n\n true_mat = np.eye(2)\n\n assert np.allclose(diagonalizing_mat, true_mat)",
"def test_small(self):\n self.assert_tensor_equal(\n [[[0, 0], [0, 1]], [[1, 0], [1, 1]]],\n index_tensor([2, 2]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests the _obs_data() method for Tensors and Observables.
|
def test_data(self):
obs = qml.PauliZ(0)
data = obs._obs_data()
assert data == {("PauliZ", Wires(0), ())}
obs = qml.PauliZ(0) @ qml.PauliX(1)
data = obs._obs_data()
assert data == {("PauliZ", Wires(0), ()), ("PauliX", Wires(1), ())}
obs = qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)
data = obs._obs_data()
assert data == {
(
"Hermitian",
Wires(0),
(
b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff",
),
)
}
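
The bytes literal is the row-major buffer of the Hermitian parameter matrix, which makes the otherwise unhashable ndarray usable as a set member. A short sketch, assuming the default int64 dtype and little-endian layout (both platform-dependent):

import numpy as np

A = np.array([[1, 0], [0, -1]])  # dtype int64 on most platforms
buf = A.tobytes()  # row-major: 1, 0, 0, -1 as 8-byte little-endian integers
assert buf == b"\x01" + b"\x00" * 23 + b"\xff" * 8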
|
[
"def test_temperature_data(self):\n self.assertEqual(self.thermodata.Tdata.value_si.shape, self.Tdata.shape)\n for T, T0 in zip(self.thermodata.Tdata.value_si, self.Tdata):\n self.assertAlmostEqual(T, T0, 4)",
"def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(driver, True)\n\n # Start validating data particles\n self.assert_particle_published(driver, self.VALID_SAMPLE_01, self.assert_particle_sample, True)\n self.assert_particle_published(driver, self.VALID_SAMPLE_02, self.assert_particle_sample_2, True)\n\n # validate status particles\n self.assert_particle_published(driver, self.VALID_STATUS_01, self.assert_status_particle, True)",
"def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(driver, True)\n\n # Start validating data particles\n self.assert_particle_published(driver, self.VALID_SAMPLE, self.assert_particle_sample, True)\n self.assert_particle_published(driver, self.VALID_GETHD_RESPONSE, self.assert_particle_hardware, True)\n self.assert_particle_published(driver, self.VALID_GETCC_RESPONSE, self.assert_particle_calibration, True)\n self.assert_particle_published(driver, self.VALID_GETSD_RESPONSE, self.assert_particle_status, True)\n self.assert_particle_published(driver, self.VALID_GETCD_RESPONSE, self.assert_particle_configuration, True)",
"def test_data(self):\n self.assertEqual(self.node.data, 10)\n self.assertNotEqual(self.node.data, 5)",
"def test_DataUser(self):\n do = Plot('', conf=self.config)\n self.assertTrue(isinstance(do, DataUser))",
"def test(self):\n\n reconstructed_data = torch.zeros(size=(1, self.dataset_dims))\n\n for i, data in enumerate(self.test_dataloader, 0):\n real, real_labels = data\n real = real.cuda().squeeze().view(-1, self.dataset_dims)\n\n with torch.no_grad():\n\n mean, logvar = self.model.encode(real)\n latent_vector = self.reparametrize(mean, logvar)\n\n generated = self.model.decode(latent_vector)\n reconstructed_data = torch.cat([reconstructed_data, generated], dim=0)\n\n return reconstructed_data[1:, :].cpu().detach().numpy()",
"def getTestingData(self):",
"def test_ParticleDataset_from_array():\n pass",
"def test_data_timeseries(self):\n data = [0, 1, 2, 3]\n timestamps1 = [0.0, 0.1, 0.2, 0.3]\n timestamps2 = [1.0, 1.1, 1.2, 1.3]\n ts1 = TimeSeries(\n name=\"test_ts1\", data=data, unit=\"grams\", timestamps=timestamps1\n )\n ts2 = TimeSeries(\n name=\"test_ts2\", data=ts1, unit=\"grams\", timestamps=timestamps2\n )\n self.assertEqual(ts2.data, data)\n self.assertEqual(ts1.num_samples, ts2.num_samples)\n self.assertEqual(ts1.data_link, set([ts2]))",
"def test_agentdata_to_datapoints(self):\n # Setup AgentPolledData\n agent_program = 'panda_bear'\n polling_interval = 20\n apd = AgentPolledData(agent_program, polling_interval)\n\n # Initialize TargetDataPoints\n target = 'teddy_bear'\n ddv = TargetDataPoints(target)\n\n # Setup DataPoint\n value = 457\n key = 'gummy_bear'\n data_type = DATA_INT\n variable = DataPoint(key, value, data_type=data_type)\n\n # Add data to TargetDataPoints\n ddv.add(variable)\n\n # Test TargetGateway to AgentPolledData\n apd.add(ddv)\n\n # Test contents\n expected_metadata = {\n 'pattoo_agent_id': apd.agent_id,\n 'pattoo_agent_program': agent_program,\n 'pattoo_agent_hostname': apd.agent_hostname,\n 'pattoo_agent_polled_target': target,\n 'pattoo_agent_polling_interval': apd.agent_polling_interval\n }\n result = converter.agentdata_to_datapoints(apd)\n\n self.assertEqual(len(result), 1)\n item = result[0]\n self.assertTrue(isinstance(item, DataPoint))\n self.assertEqual(item.value, value)\n self.assertEqual(item.data_type, DATA_INT)\n self.assertEqual(item.key, key)\n self.assertTrue(isinstance(item.metadata, dict))\n self.assertEqual(len(item.metadata), len(expected_metadata))\n for key, value in item.metadata.items():\n self.assertTrue(isinstance(value, str))\n self.assertTrue(isinstance(key, str))\n self.assertEqual(value, str(expected_metadata[key]))",
"def test_dataset_get_data(benchmark_dataset):\n _, dataset_class = benchmark_dataset\n\n # skip the test if the dataset is not installed\n if not dataset_class.is_installed():\n pytest.skip(\"Dataset is not installed\")\n\n dataset = dataset_class.get_instance()\n\n if dataset_class.name.lower() == 'finance':\n pytest.skip(\"Do not download finance.\")\n\n res = dataset._get_data()\n assert isinstance(res, tuple), (\n \"Output of get_data should be a 2-tuple\"\n )\n assert len(res) == 2, (\n \"Output of get_data should be a 2-tuple\"\n )\n\n dimension, data = res\n\n assert isinstance(dimension, tuple), (\n \"First output of get_data should be an integer or a tuple of integers.\"\n f\" Got {dimension}.\"\n )\n assert all(isinstance(d, numbers.Integral) for d in dimension), (\n \"First output of get_data should be an integer or a tuple of integers.\"\n f\" Got {dimension}.\"\n )\n assert isinstance(data, dict), (\n f\"Second output of get_data should be a dict. Got {data}.\"\n )",
"def test_move_data_to_device(self) -> None:\n device = init_from_env()\n my_module = torch.nn.Linear(2, 2)\n\n auto_unit = DummyAutoUnit(\n module=my_module,\n device=device,\n )\n\n state = get_dummy_train_state()\n\n dummy_data = (torch.ones(2, 2), torch.ones(2, 2))\n data_iter = iter([dummy_data])\n\n with patch.object(\n DummyAutoUnit, \"move_data_to_device\"\n ) as move_data_to_device_mock:\n dummy_data = copy_data_to_device(dummy_data, device)\n move_data_to_device_mock.return_value = dummy_data\n auto_unit.train_step(state=state, data=data_iter)\n move_data_to_device_mock.assert_called_once()",
"def test_weather_dataset():\n dm = DataManager()\n\n # check weather dataset was loaded\n assert 'weather' in dm.datasources\n\n # check Source type\n weather = dm['weather']\n assert isinstance(weather, HDF5Source)\n\n # load the data\n if NO_LFS:\n print('No LFS, skipping partial test')\n return\n\n data = weather.read()\n assert isinstance(data, xr.Dataset)",
"def testDatasetV1(self):\n p = _TestTFDataInputV1.Params()\n self.assertIn('args', p)\n self.assertIn('begin', p.args)\n self.assertIn('end', p.args)\n self.assertEqual(p.args.begin, 0)\n self.assertEqual(p.args.end, 10)\n\n ig = p.Instantiate()\n self.assertIsInstance(ig, _TestTFDataInputV1)\n\n with self.session() as sess:\n\n @test_utils.DefineAndTrace()\n def data(): # pylint: disable=invalid-name\n res = ig.GetPreprocessedInputBatch()\n self._commonChecks(res)\n return res\n\n # Consumes all data.\n for i in range(p.args.begin, p.args.end):\n self.assertEqual(sess.run(data).value, i)\n\n with self.assertRaises(tf.errors.OutOfRangeError):\n sess.run(data)",
"def mock_data(self):\n return NotImplemented",
"def test_get_data(self):\n loader = Loader('')\n loader.data = [0, 1, 2]\n self.assertEqual(loader.get_data(), [0, 1, 2])",
"def test_data():\n\t\n\t# Create DataFrame.\n\tdf = data.manipulate()\n\n\t# Check type of output.\n\tassert isinstance(df, pd.DataFrame)\n\n\t# Check column count.\n\tassert len(df.columns) == 44",
"def test_forceTempThrshEvent(self):\n\n self.myNode.addSensor(IPTempSensor('TMP-1',[0,1],self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),1)",
"def test_to_device() -> None:\n data_structure = {\n \"input_1\": torch.Tensor(1),\n \"input_3\": \"str\",\n \"input_4\": 1,\n }\n\n assert to_device(data_structure, \"cpu\") == data_structure\n assert np.array_equal(to_device(np.array([0, 1, 2]), \"cpu\"), np.array([0, 1, 2]))",
"def test_kah_DATA_object():\n \n # Test that the object is returned.\n assert DATA\n\n # Test that only one subject remains.\n for dataset in DATASETS:\n assert np.all(getattr(DATA, dataset)['subject'] == TESTSUBJ)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests addition between Tensors and Observables.
|
def test_addition(self, obs1, obs2, obs):
assert obs.compare(obs1 + obs2)
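
The (obs1, obs2, obs) triples come from a parametrized fixture not shown here. A hypothetical concrete instance, assuming that adding two observables yields a Hamiltonian as in the PennyLane version these tests target:

import pennylane as qml

obs1 = qml.PauliZ(0)
obs2 = qml.PauliX(1)
# The sum is expected to compare equal to the corresponding Hamiltonian.
expected = qml.Hamiltonian([1.0, 1.0], [qml.PauliZ(0), qml.PauliX(1)])
assert expected.compare(obs1 + obs2)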
|
[
"def test_subscribe_tensors_on_different_devices(self):\n c1 = constant_op.constant(10)\n c2 = constant_op.constant(20)\n\n with ops.device('cpu:0'):\n add = math_ops.add(c1, c2)\n\n with ops.device('cpu:1'):\n mul = math_ops.multiply(c1, c2)\n\n def sub(t):\n return t\n\n add_sub = subscribe.subscribe(\n add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n mul_sub = subscribe.subscribe(\n mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n # Expect the identity tensors injected by subscribe to have been created\n # on the same device as their original tensors.\n self.assertNotEqual(add_sub.device, mul_sub.device)\n self.assertEqual(add.device, add_sub.device)\n self.assertEqual(mul.device, mul_sub.device)",
"def test_append_tensor_ops(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def test_forceTempThrshEvent(self):\n\n self.myNode.addSensor(IPTempSensor('TMP-1',[0,1],self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),1)",
"def test_append_tensor_ops(self):\n\n with Queue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_add_observer(self):\n o = Observable()\n mock_method = Mock()\n o.add_observer(mock_method)\n self.assertSetEqual({mock_method}, o.observers)\n self.assertEqual(1, len(o.observers))",
"def test_writer(self, mock_is_primary_func: mock.MagicMock) -> None:\n for phase_idx, primary in product([0, 1, 2], [True, False]):\n train, phase_type = (\n (True, \"train\") if phase_idx % 2 == 0 else (False, \"test\")\n )\n mock_is_primary_func.return_value = primary\n\n # set up the task and state\n config = get_test_task_config()\n config[\"dataset\"][\"train\"][\"batchsize_per_replica\"] = 2\n config[\"dataset\"][\"test\"][\"batchsize_per_replica\"] = 5\n task = build_task(config)\n task.prepare()\n task.advance_phase()\n task.phase_idx = phase_idx\n task.train = train\n\n losses = [1.23, 4.45, 12.3, 3.4]\n sample_fetch_times = [1.1, 2.2, 3.3, 2.2]\n\n summary_writer = SummaryWriter(self.base_dir)\n # create a spy on top of summary_writer\n summary_writer = mock.MagicMock(wraps=summary_writer)\n\n # create a loss lr tensorboard hook\n tensorboard_plot_hook = TensorboardPlotHook(summary_writer)\n\n # run the hook in the correct order\n tensorboard_plot_hook.on_phase_start(task)\n\n # test tasks which do not pass the sample_fetch_times as well\n disable_sample_fetch_times = phase_idx == 0\n\n for loss, sample_fetch_time in zip(losses, sample_fetch_times):\n task.losses.append(loss)\n step_data = (\n {}\n if disable_sample_fetch_times\n else {\"sample_fetch_time\": sample_fetch_time}\n )\n task.last_batch = LastBatchInfo(None, None, None, None, step_data)\n tensorboard_plot_hook.on_step(task)\n\n tensorboard_plot_hook.on_phase_end(task)\n\n if primary:\n # add_scalar() should have been called with the right scalars\n if train:\n learning_rate_key = f\"Learning Rate/{phase_type}\"\n summary_writer.add_scalar.assert_any_call(\n learning_rate_key,\n mock.ANY,\n global_step=mock.ANY,\n walltime=mock.ANY,\n )\n avg_loss_key = f\"Losses/{phase_type}\"\n summary_writer.add_scalar.assert_any_call(\n avg_loss_key, mock.ANY, global_step=mock.ANY\n )\n for meter in task.meters:\n for name in meter.value:\n meter_key = f\"Meters/{phase_type}/{meter.name}/{name}\"\n summary_writer.add_scalar.assert_any_call(\n meter_key, mock.ANY, global_step=mock.ANY\n )\n if step_data:\n summary_writer.add_scalar.assert_any_call(\n f\"Speed/{phase_type}/cumulative_sample_fetch_time\",\n mock.ANY,\n global_step=mock.ANY,\n walltime=mock.ANY,\n )\n else:\n # add_scalar() shouldn't be called since is_primary() is False\n summary_writer.add_scalar.assert_not_called()\n summary_writer.add_scalar.reset_mock()",
"def test_sum_with_operator(self):\n sum_op = qml.PauliX(0) + qml.RX(1, 0)\n final_op = qml.op_sum(qml.PauliX(0), qml.RX(1, 0))\n # TODO: Use qml.equal when fixed.\n assert isinstance(sum_op, qml.ops.Sum)\n for s1, s2 in zip(sum_op.summands, final_op.summands):\n assert s1.name == s2.name\n assert s1.wires == s2.wires\n assert s1.data == s2.data\n assert np.allclose(a=sum_op.matrix(), b=final_op.matrix(), rtol=0)",
"def test_on():\n obs = Observable()\n nose.assert_false(obs.events)\n\n def on_test():\n pass\n\n obs.on(\"on_test\", on_test)\n nose.assert_in(on_test, obs.events[\"on_test\"])",
"def test_add_op_jit():\n paddle.set_device(\"cpu\")\n paddle.seed(33)\n custom_ops = load(name=\"add_op_jit\", sources=[current_path + \"/add_op_const.cc\"])\n\n x = paddle.to_tensor(np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n x1 = paddle.to_tensor(np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n x.stop_gradient = False\n x1.stop_gradient = False\n print(x)\n out = custom_ops.add_test(x, x1)\n assert np.allclose(out.numpy(), np.array([[2, 2], [2, 2]]).astype(\"float32\"))\n out.retain_grads()\n out.backward()\n assert np.allclose(out.grad, np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n assert np.allclose(x.grad, np.array([[1, 1], [1, 1]]).astype(\"float32\"))\n assert np.allclose(x1.grad, np.array([[1, 1], [1, 1]]).astype(\"float32\"))",
"def test_operations_after_observables(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n qml.RY(0.5, wires=[0])\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"gates must precede\"):\n node(0.5)",
"async def test_multiple_numeric_observations(hass: HomeAssistant) -> None:\n\n config = {\n \"binary_sensor\": {\n \"platform\": \"bayesian\",\n \"name\": \"Test_Binary\",\n \"observations\": [\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 10,\n \"above\": 0,\n \"prob_given_true\": 0.4,\n \"prob_given_false\": 0.0001,\n },\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 100,\n \"above\": 30,\n \"prob_given_true\": 0.6,\n \"prob_given_false\": 0.0001,\n },\n ],\n \"prior\": 0.1,\n }\n }\n\n assert await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n hass.states.async_set(\"sensor.test_monitored\", STATE_UNKNOWN)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n for _, attrs in state.attributes.items():\n json.dumps(attrs)\n assert state.attributes.get(\"occurred_observation_entities\") == []\n assert state.attributes.get(\"probability\") == 0.1\n # No observations made so probability should be the prior\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 20)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert round(abs(0.026 - state.attributes.get(\"probability\")), 7) < 0.01\n # Step 1 Calculated where P(A) = 0.1, P(~B|A) = 0.6 (negative obs), P(~B|notA) = 0.9999 -> 0.0625\n # Step 2 P(A) = 0.0625, P(B|A) = 0.4 (negative obs), P(B|notA) = 0.9999 -> 0.26\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 35)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(1 - state.attributes.get(\"probability\")) < 0.01\n # Step 1 Calculated where P(A) = 0.1, P(~B|A) = 0.6 (negative obs), P(~B|notA) = 0.9999 -> 0.0625\n # Step 2 P(A) = 0.0625, P(B|A) = 0.6, P(B|notA) = 0.0001 -> 0.9975\n\n assert state.state == \"on\"\n assert state.attributes.get(\"observations\")[0][\"platform\"] == \"numeric_state\"\n assert state.attributes.get(\"observations\")[1][\"platform\"] == \"numeric_state\"",
"def test_append_qubit_observables(self):\n with Queue() as q:\n # wire repetition is deliberate, Queue contains no checks/logic\n # for circuits\n ops = [\n qml.Hadamard(wires=0),\n qml.PauliX(wires=1),\n qml.PauliY(wires=1),\n qml.Hermitian(np.ones([2, 2]), wires=7),\n ]\n assert q.queue == ops",
"def testMultipleOutputs(self):\n sparse_tensor_1 = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])\n sparse_tensor_2 = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])\n\n # This op has three outputs.\n sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)\n\n self.assertEqual(3, len(sparse_add.op.outputs))\n\n c1 = constant_op.constant(1)\n\n with ops.control_dependencies(sparse_add.op.outputs):\n # This op depends on all the three outputs.\n neg = -c1\n\n shared = []\n def sub(t):\n shared.append(t)\n return t\n\n # Subscribe the three outputs at once.\n subscribe.subscribe(sparse_add.op.outputs,\n lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n with self.cached_session() as sess:\n self.evaluate([neg])\n\n # All three ops have been processed.\n self.assertEqual(3, len(shared))",
"def test_operator_update_operator(self):\n pass",
"def test_add_reading(self):\n total_readings = len(self.temperature_reading_manager1.get_all_readings())\n self.temperature_reading_manager1.add_reading(self.reading)\n self.assertEqual(len(self.temperature_reading_manager1.get_all_readings()),total_readings+1)",
"def test_individual(self):\n sensor1 = make_test_output_thing_from_vallist(TEST_SENSOR1, VALUE_STREAM)\n sensor2 = make_test_output_thing_from_vallist(TEST_SENSOR2, VALUE_STREAM)\n writer = PredixWriter(PREDIX_INGEST_URL, PREDIX_ZONE_ID, PREDIX_TOKEN,\n extractor=EventExtractor(attributes={'test':True}),\n batch_size=1)\n sensor1.connect(writer)\n sensor2.connect(writer)\n scheduler = Scheduler(asyncio.get_event_loop())\n scheduler.schedule_periodic(sensor1, 0.5)\n scheduler.schedule_periodic(sensor2, 0.5)\n\n start_time = time.time()\n scheduler.run_forever()\n\n # Now we read the events back\n reader1 = PredixReader(PREDIX_QUERY_URL, PREDIX_ZONE_ID, PREDIX_TOKEN, TEST_SENSOR1,\n start_time=start_time,\n one_shot=False)\n reader2 = PredixReader(PREDIX_QUERY_URL, PREDIX_ZONE_ID, PREDIX_TOKEN, TEST_SENSOR2,\n start_time=start_time,\n one_shot=False)\n ti1 = TestInput(reader1, 'sensor-1')\n ti2 = TestInput(reader2, 'sensor-2')\n scheduler.schedule_periodic(reader1, 2)\n scheduler.schedule_periodic(reader2, 2)\n scheduler.run_forever()\n self.assertListEqual(VALUE_STREAM, ti1.values)\n self.assertListEqual(VALUE_STREAM, ti2.values)",
"def test_reduction_append(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RX(0.4, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RX(0.4, wires=0)\n qml.RX(0.6, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n tape1.trainable_params = {0}\n tape2.trainable_params = {0, 1}\n\n tapes = [tape1, tape2]\n tangents = [np.array([1.0]), np.array([1.0, 1.0])]\n\n v_tapes, fn = qml.gradients.batch_jvp(tapes, tangents, param_shift, reduction=\"append\")\n res = fn(dev.batch_execute(v_tapes))\n\n # Returned JVPs will be appended to a list, one JVP per tape\n\n assert len(res) == 2\n assert all(isinstance(r, np.ndarray) for r in res)\n assert res[0].shape == ()\n assert res[1].shape == ()",
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def test_multiple_observe():\n dt1 = DynamicAtom()\n observer = Observer()\n\n dt1.observe((\"val\", \"val2\"), observer.react)\n dt1.val = 1\n assert observer.count == 1\n dt1.val2 = 1\n assert observer.count == 2"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests adding Tensors and Observables to zero
|
def test_add_zero(self, obs):
    assert obs.compare(obs + 0)
    assert obs.compare(0 + obs)
    assert obs.compare(obs + 0.0)
    assert obs.compare(0.0 + obs)
    assert obs.compare(obs + 0e1)
    assert obs.compare(0e1 + obs)
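    # 0, 0.0, and 0e1 are the same zero written as an int, a float, and in
    # scientific notation; adding any of them on either side should leave the
    # observable unchanged. obs is assumed to come from a parametrized fixture,
    # e.g. qml.PauliZ(0).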
|
[
"def test_sensor_init():\n assert len(app.binary_sensors) > 0",
"def test_observation_zeroing(self):\n obs_shape = (84, 84, 1)\n er = ExperienceReplay(5, obs_shape)\n\n for terminal_idx in range(5):\n obs_ = []\n obs_next_ = []\n for i in range(1, 6):\n partial_obs = np.ones(obs_shape) * i\n terminal = 1 if i == terminal_idx else 0\n er.append(partial_obs, 0, 0, terminal)\n\n if i <= terminal_idx:\n partial_obs *= 0\n if i < 5:\n obs_.append(partial_obs)\n if i > 1:\n obs_next_.append(partial_obs)\n obs_ = np.transpose(np.array(obs_), (3, 1, 2, 0))\n obs_next_ = np.transpose(np.array(obs_next_), (3, 1, 2, 0))\n\n batch = er.sample(1)\n obs, rewards, actions, obs_next, terminals = batch\n assert np.array_equal(obs_, obs)\n assert np.array_equal(obs_next_, obs_next)",
"def test_metrics_are_zero(self):\n verifier = MetricVerifier(self.impalad_test_service)\n verifier.verify_metrics_are_zero()",
"def test_empty(self):\n self.assert_tensor_equal([], index_tensor([]))",
"def test_forceTempThrshEvent(self):\n\n self.myNode.addSensor(IPTempSensor('TMP-1',[0,1],self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),1)",
"def test_add_empty_potentials(self):\n # first we test construction of empty potential\n p = Potential()\n self.assertEqual(p.labels, [])\n self.assertEqual(p.nsites, 0)\n self.assertEqual(p.npols, 0)\n\n p2 = p + p # maybe we should just raise a value error instead?\n self.assertEqual(p2.nsites, 0)\n self.assertEqual(p2.npols, 0)",
"def test_initialize():\n\n\n # Checks to make sure storage is TrajectoryStorage when calculation is non-adaptive\n hops = HOPS(\n sys_param,\n noise_param=noise_param,\n hierarchy_param=hier_param,\n eom_param=eom_param,\n integration_param=integrator_param,\n )\n hops.initialize(psi_0)\n storage = hops.storage\n TS = HopsStorage(True,{})\n assert type(storage) == type(TS)\n\n # Checks to make sure noise was properly initialized\n t_axis = np.array([0, 1, 2, 3, 4], dtype=np.float64)\n noise = np.array([[1, 2, 2, 1, 3], [2, 3, 3, 2, 4]], dtype=np.float64)\n noise = hops.noise1\n sys_param[\"NSITE\"] = len(sys_param[\"HAMILTONIAN\"][0])\n sys_param[\"NMODES\"] = len(sys_param[\"GW_SYSBATH\"][0])\n sys_param[\"N_L2\"] = 2\n sys_param[\"L_IND_BY_NMODE1\"] = [0, 1]\n sys_param[\"LIND_DICT\"] = {0: loperator[0, :, :], 1: loperator[1, :, :]}\n noise_corr = {\n \"CORR_FUNCTION\": sys_param[\"ALPHA_NOISE1\"],\n \"N_L2\": sys_param[\"N_L2\"],\n \"LIND_BY_NMODE\": sys_param[\"L_IND_BY_NMODE1\"],\n \"CORR_PARAM\": sys_param[\"PARAM_NOISE1\"],\n }\n noiseModel = HopsNoise(noise_param, noise_corr)\n assert type(noise) == type(noiseModel)\n\n noise_param_0 = noise_param.copy()\n noise_param_0[\"MODEL\"] = \"ZERO\"\n ZN = HopsNoise(noise_param_0, hops.basis.system.param)\n a = ZN\n b = hops.noise2\n assert type(a) == type(b)\n\n # checks to make sure correct dimensions are stored\n N_dim = hops.storage.n_dim\n known = 2\n assert N_dim == known\n\n # checks to make sure calculation is locked after initializing\n lock = hops.__initialized__\n known_lock = True\n assert lock == known_lock",
"def test_Nothing_observation(model):\n assert make_obs(ecole.observation.Nothing(), model) is None",
"def test_simple(self):\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n FooVelocity = mn.velocity('FooVelocity', 'Foo')\n\n self.assertEqual(FooVelocity[''], 0)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.reset()\n self.assertEqual(FooVelocity[''], 0)",
"def zero_test():\n x, y , theta, t = simulate(Theta=0)\n if abs(x.max()) > 0 or abs(y.max()) > 0:\n\t\t print \"Error in the numerical scheme!\"\n else:\n\t\t print \"Theta = 0 and epsilon = 0 gives x = y = 0 for all times, as intended.\"",
"def test_no_tke_1d():\n observations = 5\n # given all the values are the same, there should not be any tke\n u = np.ones(observations)\n v = np.ones(observations)\n w = np.ones(observations)\n e_zero = 0\n assert_array_equal(e_zero, tke(u, v, w))",
"def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))",
"def test_add_potentials_no_polarization(self):\n p3 = self.p1 + self.p2\n self.assertEqual(p3.nsites, 2)\n self.assertEqual(p3.npols, 0)",
"def test_blank_transaction_initialized(blank_tester):\n assert blank_tester.orig_usd is None",
"def test_not_break_torch(self):\n length = 5\n a = torch.zeros(length)\n b = torch.zeros(length)\n self.assertEqual(len(a == b), length)\n self.assertTrue(torch.all(a == b))\n\n c = Tensor(torch.ones(5))\n # If Tensor is either argument, it uses the equality method that returns bool.\n self.assertNotEqual(c, a)\n self.assertNotEqual(a, c)",
"def test_mmlut_initialization(self):\n mml = self.cal.mmlut\n assert np.all(mml.origin == np.zeros(3))\n assert mml.nr == 0\n assert mml.nz == 0\n assert mml.rw == 0\n assert mml.data is None\n # assert isinstance(mml.data, np.ndarray)\n # assert mml.data.shape == (3,)",
"def test_zeros(self):\n win_s, hop_s = 1024, 256\n f = pvoc (win_s, hop_s)\n t = fvec (hop_s)\n for _ in range( int ( 4 * win_s / hop_s ) ):\n s = f(t)\n r = f.rdo(s)\n assert_equal ( t, 0.)\n assert_equal ( s.norm, 0.)\n assert_equal ( s.phas, 0.)\n assert_equal ( r, 0.)",
"def test_T0(self):\n self.m.T0.value = 50044.3322\n # I don't understand why this is failing... something about float128\n # Does not fail for me (both lines) -- RvH 02/22/2015\n self.assertTrue(numpy.isclose(self.m.T0.value, 50044.3322))\n self.assertEqual(self.m.T0.value, 50044.3322)",
"def test_timeseries_set_zeros(self):\n ts = self.ts.clone()\n shape = ts.shape()\n\n ts.set_zeros()\n\n self.assertTrue(np.array_equal(ts.tseries, np.zeros(shape)))\n\n ts1 = self.ts.clone().set_zeros(new=True)\n self.assertTrue(np.array_equal(ts1.tseries, np.zeros(shape)))",
"def test_set_goals_rt_zero_input(self):\r\n\r\n self.DUT.mission_time = 10.0\r\n self.DUT.reliability_goal = 0.0\r\n self.DUT.goal_measure = 1\r\n\r\n self.assertTrue(self.DUT.calculate_goals())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests subtraction between Tensors and Observables
|
def test_subtraction(self, obs1, obs2, obs):
    assert obs.compare(obs1 - obs2)
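    # Hypothetical fixture values: obs1 = qml.PauliX(0), obs2 = qml.PauliZ(1),
    # obs = qml.Hamiltonian([1, -1], [qml.PauliX(0), qml.PauliZ(1)]), i.e. the
    # difference expressed as a Hamiltonian with a negative coefficient.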
|
[
"def test_subscribe_tensors_on_different_devices(self):\n c1 = constant_op.constant(10)\n c2 = constant_op.constant(20)\n\n with ops.device('cpu:0'):\n add = math_ops.add(c1, c2)\n\n with ops.device('cpu:1'):\n mul = math_ops.multiply(c1, c2)\n\n def sub(t):\n return t\n\n add_sub = subscribe.subscribe(\n add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n mul_sub = subscribe.subscribe(\n mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n # Expect the identity tensors injected by subscribe to have been created\n # on the same device as their original tensors.\n self.assertNotEqual(add_sub.device, mul_sub.device)\n self.assertEqual(add.device, add_sub.device)\n self.assertEqual(mul.device, mul_sub.device)",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def test_add_remove_sensors(self):\n an_int = self.server._sensors[\"an.int\"]\n self.server.remove_sensor(an_int)\n # TODO remove_sensor test that checks that everything is indeed gone\n self.server.add_sensor(an_int)\n self.test_sampling()",
"def __call__(self, t):\n return self.a(t) - self.b(t)",
"def test_unsubscribe_keeps_tango_subscription(self):\n attr = Attribute(\"device\", \"name\")\n with call_via_mocks() as (_, mock_proxy):\n executor = TangoExecutor(proxy_factory=TangoDeviceProxyFactory())\n pid = executor.subscribe_event(attr)\n executor.unsubscribe_event(attr, pid)\n assert not mock_proxy.unsubscribe_event.called",
"async def test_velocity_difference(self):\n self.set_source_parameter(\"velocity_type\", \"completed points minus committed points\")\n response = await self.collect(\n get_request_json_side_effect=[self.boards_json1, self.boards_json2, self.velocity_json]\n )\n self.assert_measurement(\n response,\n value=\"-16\",\n landing_url=self.landing_url,\n entities=[\n self.sprint_entity(key=\"4\", points_completed=42.0, points_committed=40.0),\n self.sprint_entity(key=\"3\", points_completed=48.0, points_committed=62.0),\n self.sprint_entity(key=\"2\", points_completed=30.0, points_committed=65.0, goal=False),\n ],\n )",
"def test_not_break_torch(self):\n length = 5\n a = torch.zeros(length)\n b = torch.zeros(length)\n self.assertEqual(len(a == b), length)\n self.assertTrue(torch.all(a == b))\n\n c = Tensor(torch.ones(5))\n # If Tensor is either argument, it uses the equality method that returns bool.\n self.assertNotEqual(c, a)\n self.assertNotEqual(a, c)",
"def test_unobserving_multiple_members(observed_atom):\n a, _, _ = observed_atom\n assert a.has_observers(\"val\")\n assert a.has_observers(\"val2\")\n a.unobserve((\"val\", \"val2\"))\n assert not a.has_observers(\"val\")\n assert not a.has_observers(\"val2\")\n assert a.has_observers(\"val3\")",
"def test_trend_same(self):\n self.assertEquals(self.data_item.compute_trend(20), 0)",
"def test_removing_specific_observer2(observed_atom):\n a, ob1, ob2 = observed_atom\n for m in (\"val\", \"val2\", \"val3\"):\n assert a.has_observer(m, ob1.react)\n assert a.has_observer(m, ob2.react)\n a.unobserve((\"val\", \"val2\"), ob2.react)\n for m in (\"val\", \"val2\"):\n assert a.has_observer(m, ob1.react)\n assert not a.has_observer(\"val\", ob2.react)\n assert a.has_observer(\"val3\", ob1.react)\n assert a.has_observer(\"val3\", ob2.react)",
"def test_observable_not_returned(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)",
"def test_operations_after_observables(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n qml.RY(0.5, wires=[0])\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"gates must precede\"):\n node(0.5)",
"def test_different_queue_measurements_outside(self, obs):\n\n with qml.tape.QuantumTape() as tape1:\n with qml.tape.QuantumTape() as tape2:\n op1 = qml.expval(obs)\n op2 = qml.apply(op1, tape1)\n\n assert tape1.measurements == [op2]\n assert tape2.measurements == [op1]",
"def test_forceTempThrshEvent(self):\n\n self.myNode.addSensor(IPTempSensor('TMP-1',[0,1],self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),1)",
"async def test_validate_statistics_unit_change_equivalent_units_2(\n recorder_mock: Recorder,\n hass: HomeAssistant,\n hass_ws_client: WebSocketGenerator,\n attributes,\n unit1,\n unit2,\n supported_unit,\n) -> None:\n\n id = 1\n\n def next_id():\n nonlocal id\n id += 1\n return id\n\n async def assert_validation_result(client, expected_result):\n await client.send_json(\n {\"id\": next_id(), \"type\": \"recorder/validate_statistics\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == expected_result\n\n async def assert_statistic_ids(expected_result):\n with session_scope(hass=hass, read_only=True) as session:\n db_states = list(session.query(StatisticsMeta))\n assert len(db_states) == len(expected_result)\n for i in range(len(db_states)):\n assert db_states[i].statistic_id == expected_result[i][\"statistic_id\"]\n assert (\n db_states[i].unit_of_measurement\n == expected_result[i][\"unit_of_measurement\"]\n )\n\n now = dt_util.utcnow()\n\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n client = await hass_ws_client()\n\n # No statistics, no state - empty response\n await assert_validation_result(client, {})\n\n # No statistics, original unit - empty response\n hass.states.async_set(\n \"sensor.test\", 10, attributes={**attributes, **{\"unit_of_measurement\": unit1}}\n )\n await assert_validation_result(client, {})\n\n # Run statistics\n await async_recorder_block_till_done(hass)\n do_adhoc_statistics(hass, start=now)\n await async_recorder_block_till_done(hass)\n await assert_statistic_ids(\n [{\"statistic_id\": \"sensor.test\", \"unit_of_measurement\": unit1}]\n )\n\n # Units changed to an equivalent unit which is not known by the unit converters\n hass.states.async_set(\n \"sensor.test\", 12, attributes={**attributes, **{\"unit_of_measurement\": unit2}}\n )\n expected = {\n \"sensor.test\": [\n {\n \"data\": {\n \"metadata_unit\": unit1,\n \"state_unit\": unit2,\n \"statistic_id\": \"sensor.test\",\n \"supported_unit\": supported_unit,\n },\n \"type\": \"units_changed\",\n }\n ],\n }\n await assert_validation_result(client, expected)\n\n # Run statistics one hour later, metadata will not be updated\n await async_recorder_block_till_done(hass)\n do_adhoc_statistics(hass, start=now + timedelta(hours=1))\n await async_recorder_block_till_done(hass)\n await assert_statistic_ids(\n [{\"statistic_id\": \"sensor.test\", \"unit_of_measurement\": unit1}]\n )\n await assert_validation_result(client, expected)",
"def testUnsubscribeAll(self):\r\n nVis = NAOVision(IP, PORT)\r\n nVis._subscribeToVideoProxy(0)\r\n nVis._unsubscribeAll()\r\n\r\n #Testing for bottom client\r\n boolB0 = \"_clientBottom_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Testing for top client\r\n boolT0 = \"_clientTop_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Making sure that none of the two modules exist\r\n boolAll = boolB0 and boolT0\r\n\r\n #boolAll should return false if both modules\r\n #don't exist in the subscribers list\r\n self.assertEqual(boolAll, False)",
"def test_dem_subtraction():\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n diff = xdem.spatial_tools.subtract_rasters(\n examples.get_path(\"longyearbyen_ref_dem\"),\n examples.get_path(\"longyearbyen_tba_dem\"))\n\n assert np.nanmean(np.abs(diff.data)) < 100",
"def test_one_switch_oversubscribe(self):\n pass",
"def test_simple(self):\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n FooVelocity = mn.velocity('FooVelocity', 'Foo')\n\n self.assertEqual(FooVelocity[''], 0)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.reset()\n self.assertEqual(FooVelocity[''], 0)",
"def test_observable_order_violated(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0)), ex\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"All measured observables\"):\n node(0.5)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests the tensor product between Observables
|
def test_tensor_product(self, obs1, obs2, res):
    assert res.compare(obs1 @ obs2)
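    # In PennyLane, `@` between observables builds a tensor-product observable.
    # A hypothetical fixture: obs1 = qml.PauliX(0), obs2 = qml.PauliZ(1),
    # res = qml.operation.Tensor(qml.PauliX(0), qml.PauliZ(1)).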
|
[
"def test_subscribe_tensors_on_different_devices(self):\n c1 = constant_op.constant(10)\n c2 = constant_op.constant(20)\n\n with ops.device('cpu:0'):\n add = math_ops.add(c1, c2)\n\n with ops.device('cpu:1'):\n mul = math_ops.multiply(c1, c2)\n\n def sub(t):\n return t\n\n add_sub = subscribe.subscribe(\n add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n mul_sub = subscribe.subscribe(\n mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n # Expect the identity tensors injected by subscribe to have been created\n # on the same device as their original tensors.\n self.assertNotEqual(add_sub.device, mul_sub.device)\n self.assertEqual(add.device, add_sub.device)\n self.assertEqual(mul.device, mul_sub.device)",
"def test_operation_multiply_invalid(self):\n X = qml.PauliX(0)\n Y = qml.CNOT(wires=[0, 1])\n Z = qml.PauliZ(0)\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n T @ Y\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n T = X @ Z\n 4 @ T",
"def test_prod_(self):\n self.run_test(\"\"\"\n def np_prod_(a):\n return a.prod()\"\"\",\n numpy.arange(10),\n np_prod_=[NDArray[int,:]])",
"def test_dot_product(self):\n vec_a = Vec3(1, 2, 3)\n vec_b = Vec3(0, 1, 3)\n dot_product = vec_a * vec_b\n\n self.assertEqual(11, dot_product, \"Asserting dot_product\")",
"def test_multiply_array(ctx_factory):\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n\n a_gpu = cl_array.to_device(queue, a)\n b_gpu = cl_array.to_device(queue, a)\n\n a_squared = (b_gpu * a_gpu).get()\n\n assert (a * a == a_squared).all()",
"def test_dot_product(self):\n vector1 = Vector(*self.test_vector)\n vector2 = Vector(*self.test_vector_alternate)\n\n dot_product = sum(\n x * y for x, y in zip(self.test_vector, self.test_vector_alternate)\n )\n\n self.assertEqual(dot_product, vector1.dot(vector2))\n self.assertEqual(dot_product, vector2.dot(vector1))",
"def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))",
"def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )",
"def test_append_tensor_ops(self):\n\n with Queue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_multi_distribution():\n n1 = NormalDistribution(3)\n n2 = NormalDistribution(2)\n n3 = NormalDistribution(1)\n n = NormalDistribution(6)\n compound = ProductDistribution([n1, n2, n3], cat_dim=-1)\n n_samples = 10\n samples = compound.sample(n_samples)\n assert samples.shape == n.sample(n_samples).shape\n assert n.energy(samples).shape == compound.energy(samples).shape\n assert n.energy(samples).numpy() == pytest.approx(compound.energy(samples).numpy())",
"def test_product_node(self):\n # Init product node\n id0 = IdentityLeaf(scope=0)\n id1 = IdentityLeaf(scope=1)\n id2 = IdentityLeaf(scope=2)\n prod = ProductNode(children=[id0, id1, id2])\n\n # Define input: Two samples with three features\n sample1 = np.array([1, 2, 3])\n sample2 = np.array([10, 20, 30])\n x = torch.Tensor([sample1, sample2])\n\n # Get product node result\n result = prod(x)\n\n # Product in logspace is sum\n expected_result = [np.sum(np.log(sample1)), np.sum(np.log(sample2))]\n\n # Assertions\n self.assertEqual(len(result.tolist()), 2)\n self.assertTrue(np.isclose(result.tolist(), expected_result, atol=DELTA).all())",
"def test_queuing_tensor_matmul(self):\n\n with qml.tape.QuantumTape() as tape:\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t = Tensor(op1, op2)\n\n op3 = qml.PauliZ(2)\n t2 = t @ op3\n\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}\n assert tape._queue[op3] == {\"owner\": t2}",
"def test_queuing_tensor_matmul_components_outside(self):\n\n op1 = qml.PauliX(0)\n op2 = qml.PauliY(1)\n t1 = Tensor(op1, op2)\n\n with qml.tape.QuantumTape() as tape:\n op3 = qml.PauliZ(2)\n t2 = t1 @ op3\n\n assert len(tape._queue) == 2\n assert tape._queue[op3] == {\"owner\": t2}\n assert tape._queue[t2] == {\"owns\": (op1, op2, op3)}",
"def testMultipleOutputs(self):\n sparse_tensor_1 = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])\n sparse_tensor_2 = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])\n\n # This op has three outputs.\n sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)\n\n self.assertEqual(3, len(sparse_add.op.outputs))\n\n c1 = constant_op.constant(1)\n\n with ops.control_dependencies(sparse_add.op.outputs):\n # This op depends on all the three outputs.\n neg = -c1\n\n shared = []\n def sub(t):\n shared.append(t)\n return t\n\n # Subscribe the three outputs at once.\n subscribe.subscribe(sparse_add.op.outputs,\n lambda t: script_ops.py_func(sub, [t], [t.dtype]))\n\n with self.cached_session() as sess:\n self.evaluate([neg])\n\n # All three ops have been processed.\n self.assertEqual(3, len(shared))",
"def test_mul(a, b):\n assert Surreal(a) * Surreal(b) == Surreal(a * b)",
"def test_different_queue_measurements_outside(self, obs):\n\n with qml.tape.QuantumTape() as tape1:\n with qml.tape.QuantumTape() as tape2:\n op1 = qml.expval(obs)\n op2 = qml.apply(op1, tape1)\n\n assert tape1.measurements == [op2]\n assert tape2.measurements == [op1]",
"def test_append_tensor_ops(self):\n\n with AnnotatedQueue() as q:\n A = qml.PauliZ(0)\n B = qml.PauliY(1)\n tensor_op = qml.operation.Tensor(A, B)\n assert q.queue == [A, B, tensor_op]\n assert tensor_op.obs == [A, B]",
"def test_inner_product():\n # Scalar-like computation, two constant lines.\n x1 = numpy.array([100, 200, 300])\n f1 = pvfit.measurement.spectral_correction.DataFunction(x=x1, y=numpy.ones_like(x1))\n x2 = numpy.array([50, 150, 250])\n f2 = pvfit.measurement.spectral_correction.DataFunction(\n x=x2, y=numpy.full_like(x2, 2)\n )\n inner_product_expected = 2.0 * (250 - 100)\n inner_product = pvfit.measurement.spectral_correction.inner_product(f1=f1, f2=f2)\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, ())\n numpy.testing.assert_equal(inner_product, inner_product_expected)\n # Commutativity.\n numpy.testing.assert_equal(\n inner_product, pvfit.measurement.spectral_correction.inner_product(f1=f2, f2=f1)\n )\n\n # Scalar-like computation, two non-constant lines.\n x1 = numpy.array([100, 200, 300])\n f1 = pvfit.measurement.spectral_correction.DataFunction(\n x=x1, y=numpy.array([0, 1, 2])\n )\n x2 = numpy.array([50, 150, 250, 350])\n f2 = pvfit.measurement.spectral_correction.DataFunction(\n x=x2, y=numpy.array([3, 2, 1, 0])\n )\n inner_product_expected = (\n -(300**3 - 100**3) / 30000\n + 45 / 2000 * (300**2 - 100**2)\n - 35 / 10 * (300 - 100)\n )\n inner_product = pvfit.measurement.spectral_correction.inner_product(f1=f1, f2=f2)\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, ())\n numpy.testing.assert_almost_equal(inner_product, inner_product_expected)\n # Commutativity.\n numpy.testing.assert_equal(\n inner_product, pvfit.measurement.spectral_correction.inner_product(f1=f2, f2=f1)\n )\n\n # Compatible vectorized computation, time-series like.\n f1 = pvfit.measurement.spectral_correction.DataFunction(\n x=x1, y=numpy.array([[0, 1, 2], [0, 1, 2]])\n )\n f2 = pvfit.measurement.spectral_correction.DataFunction(\n x=x2, y=numpy.array([[3, 2, 1, 0], [3, 2, 1, 0]])\n )\n inner_product = pvfit.measurement.spectral_correction.inner_product(f1=f1, f2=f2)\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, (2,))\n numpy.testing.assert_almost_equal(inner_product, inner_product_expected)\n # Commutativity.\n numpy.testing.assert_equal(\n inner_product, pvfit.measurement.spectral_correction.inner_product(f1=f2, f2=f1)\n )\n\n # Compatible vectorized computation, table like.\n f1 = pvfit.measurement.spectral_correction.DataFunction(\n x=x1, y=numpy.array([[[0, 1, 2], [0, 1, 2]], [[0, 1, 2], [0, 1, 2]]])\n )\n f2 = pvfit.measurement.spectral_correction.DataFunction(\n x=x2,\n y=numpy.array([[[3, 2, 1, 0], [3, 2, 1, 0]], [[3, 2, 1, 0], [3, 2, 1, 0]]]),\n )\n inner_product = pvfit.measurement.spectral_correction.inner_product(f1=f1, f2=f2)\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, (2, 2))\n numpy.testing.assert_almost_equal(inner_product, inner_product_expected)\n # Commutativity.\n numpy.testing.assert_equal(\n inner_product, pvfit.measurement.spectral_correction.inner_product(f1=f2, f2=f1)\n )\n\n # Incompatible vectorized computation because of shape mismatch in multi-curves.\n f1 = pvfit.measurement.spectral_correction.DataFunction(\n x=x1,\n y=numpy.array(\n [[[0, 1, 2], [0, 1, 2]], [[0, 1, 2], [0, 1, 2]], [[0, 1, 2], [0, 1, 2]]]\n ),\n )\n f2 = pvfit.measurement.spectral_correction.DataFunction(\n x=x2,\n y=numpy.array([[[3, 2, 1, 0], [3, 2, 1, 0]], [[3, 2, 1, 0], [3, 2, 1, 0]]]),\n )\n with pytest.raises(ValueError):\n # Cannot broadcast in computation.\n inner_product = 
pvfit.measurement.spectral_correction.inner_product(\n f1=f1, f2=f2\n )\n # Non-overlapping domains.\n # No broadcast case.\n x1 = numpy.array([200, 300])\n f1 = pvfit.measurement.spectral_correction.DataFunction(x=x1, y=numpy.array([1, 2]))\n x2 = numpy.array([50, 150])\n f2 = pvfit.measurement.spectral_correction.DataFunction(x=x2, y=numpy.array([3, 2]))\n inner_product_expected = 0.0\n with pytest.warns(Warning) as record:\n inner_product = pvfit.measurement.spectral_correction.inner_product(\n f1=f1, f2=f2\n )\n assert len(record) == 1\n assert record[0].message.args[0] == \"DataFunction domains do not overlap.\"\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, ())\n numpy.testing.assert_equal(inner_product, inner_product_expected)\n # Broadcast case.\n x1 = numpy.array([200, 300])\n f1 = pvfit.measurement.spectral_correction.DataFunction(x=x1, y=numpy.array([1, 2]))\n x2 = numpy.array([50, 150])\n f2 = pvfit.measurement.spectral_correction.DataFunction(\n x=x2, y=numpy.array([[3, 2], [5, 7]])\n )\n inner_product_expected = numpy.zeros((2,))\n with pytest.warns(Warning) as record:\n inner_product = pvfit.measurement.spectral_correction.inner_product(\n f1=f1, f2=f2\n )\n assert len(record) == 1\n assert record[0].message.args[0] == \"DataFunction domains do not overlap.\"\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, (2,))\n numpy.testing.assert_equal(inner_product, inner_product_expected)\n # Infinite computation.\n f = pvfit.measurement.spectral_correction.DataFunction(\n x=numpy.array([0, 1]), y=numpy.array([sys.float_info.max, sys.float_info.max])\n )\n with pytest.warns(Warning) as record:\n inner_product = pvfit.measurement.spectral_correction.inner_product(f1=f, f2=f)\n assert len(record) == 2\n assert record[0].message.args[0] == \"overflow encountered in multiply\"\n assert record[1].message.args[0] == \"Non-finite inner product detected.\"\n assert isinstance(inner_product, numpy.ndarray)\n numpy.testing.assert_equal(inner_product.shape, ())\n numpy.testing.assert_equal(inner_product, numpy.inf)",
"def test_mul_funcs(self):\r\n n = 10\r\n x = Variable(n)\r\n obj = Minimize(norm(x, 1))\r\n constraints = [x >= 2]\r\n prob = Problem(obj, constraints)\r\n data, dims = prob.get_problem_data(solver=SCS)\r\n A = data[\"A\"]\r\n objective, constr_map = prob.canonicalize()\r\n dims = prob._format_for_solver(constr_map, SCS)\r\n\r\n all_ineq = constr_map[s.EQ] + constr_map[s.LEQ]\r\n var_offsets, var_sizes, x_length = prob._get_var_offsets(objective,\r\n all_ineq)\r\n opts = {}\r\n constraints = constr_map[s.EQ] + constr_map[s.LEQ]\r\n constraints = prune_constants(constraints)\r\n Amul, ATmul = iterative.get_mul_funcs(constraints, dims,\r\n var_offsets, var_sizes,\r\n x_length)\r\n vec = np.array(range(x_length))\r\n # A*vec\r\n result = np.zeros(A.shape[0])\r\n Amul(vec, result)\r\n self.assertItemsAlmostEqual(A*vec, result)\r\n Amul(vec, result)\r\n self.assertItemsAlmostEqual(2*A*vec, result)\r\n # A.T*vec\r\n vec = np.array(range(A.shape[0]))\r\n result = np.zeros(A.shape[1])\r\n ATmul(vec, result)\r\n self.assertItemsAlmostEqual(A.T*vec, result)\r\n ATmul(vec, result)\r\n self.assertItemsAlmostEqual(2*A.T*vec, result)",
"def __call__(self, t):\n return self.a(t) * self.b(t)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that custom error is raised in the default matrix representation.
|
def test_matrix_undefined(self):
    with pytest.raises(qml.operation.MatrixUndefinedError):
        MyOp.compute_matrix()
    with pytest.raises(qml.operation.MatrixUndefinedError):
        op.matrix()
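    # MyOp is assumed to be a minimal qml.operation.Operator subclass that does
    # not override compute_matrix(), and op an instance of it (both defined in
    # the surrounding test module); the static and bound entry points fall back
    # to the default representation, which raises MatrixUndefinedError.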
|
[
"def test_incompatible_dimensions(self, matrices):\n # Instantiate 5x10, 10x5, and 10x10 matrices as Matrix class\n square_mat = chap5.Matrix(matrices.square)\n half_row_mat = chap5.Matrix(matrices.half_row)\n half_col_mat = chap5.Matrix(matrices.half_col)\n # Verify Matrix class raises AssertionError for incompatible dimensions\n with pytest.raises(AssertionError):\n half_row_mat + half_col_mat # (5x10) + (10x5)\n with pytest.raises(AssertionError):\n half_col_mat + half_row_mat # (10x5) + (5x10)\n with pytest.raises(AssertionError):\n half_col_mat @ square_mat # (10x5) @ (10x10)\n with pytest.raises(AssertionError):\n square_mat @ half_row_mat # (10x10) @ (5x10)",
"def test_sparse_matrix_undefined(self):\n with pytest.raises(NotImplementedError):\n MyOp(wires=\"a\").sparse_matrix(wire_order=[\"a\", \"b\"])\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n MyOp.compute_sparse_matrix()\n with pytest.raises(qml.operation.SparseMatrixUndefinedError):\n op.sparse_matrix()",
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def testNonNumerical(self):\n csv = StringIO('Ignored, A, B, C\\n'\n 'name, 2, 3, hello\\n')\n if PY3:\n error = \"^could not convert string to float: ' hello'$\"\n else:\n error = '^could not convert string to float: hello$'\n assertRaisesRegex(self, ValueError, error, Matrix, csv)",
"def test_create_input_matrix(self):\n input_matrix = create_input_matrix(self.log_return_dataframe, 'angular')\n self.check_angular_distance(input_matrix)\n # An incorrect sub type raises Value Error\n self.assertRaises(ValueError, create_input_matrix, self.log_return_dataframe, 'invalid matrix subtype')",
"def test_normalize_bad_axis():\n X = np.array([[1, 2], [0, 1], [1, 1]])\n assert_raises(ValueError, normalize_matrix_on_axis, X, axis=3)",
"def test_reconstruct_not_raised(self, *shapes):\n self.assert_exception_is_not_raised(matting.reconstruct, shapes)",
"def test_normalize_bad_dimensions():\n X = np.arange(6)\n assert_raises(ValueError, normalize_matrix_on_axis, X)\n\n X = np.arange(12).reshape((3, 2, 2))\n assert_raises(ValueError, normalize_matrix_on_axis, X)",
"def test_create_zero_matrix_bad_inputs(self):\n expected = []\n bad_inputs = [[], {}, (), '', 9.22, -1, 0, -6, None, True]\n for bad_input in bad_inputs:\n actual_left = create_zero_matrix(bad_input, 1)\n actual_right = create_zero_matrix(1, bad_input)\n self.assertEqual(expected, actual_left)\n self.assertEqual(expected, actual_right)",
"def testEmpty(self):\n csv = StringIO()\n error = '^No input CSV data found\\.$'\n assertRaisesRegex(self, ValueError, error, Matrix, csv)",
"def test_error_on_multidim_tensors(metric_class=RelativeSquaredError):\n metric = metric_class()\n with pytest.raises(\n ValueError,\n match=r\"Expected both prediction and target to be 1D or 2D tensors, but received tensors with dimension .\",\n ):\n metric(torch.randn(10, 20, 5), torch.randn(10, 20, 5))",
"def test_matmul_with_not_supported_object_raises_error(self):\n with pytest.raises(ValueError, match=\"Can only perform tensor products between operators.\"):\n _ = qml.PauliX(0) @ \"dummy\"",
"def testDistanceMatrixForUnknownFeature(self):\n csv = StringIO('Ignored, A, B, C\\n'\n 'name1, 1, 2, 3\\n'\n 'name2, 4, 5, 6\\n')\n m = Matrix(csv)\n error = \"^'XXX'$\"\n assertRaisesRegex(self, KeyError, error, m.distanceMatrix, 'XXX')",
"def testUpperDiagonalForUnknownFeature(self):\n csv = StringIO('Ignored, A, B, C\\n'\n 'name1, 1, 2, 3\\n'\n 'name2, 4, 5, 6\\n')\n m = Matrix(csv)\n error = \"^'XXX'$\"\n assertRaisesRegex(self, KeyError, error, m.upperDiagonal, 'XXX')",
"def _check_matrix(data_matrix: List[List]):\n if len(data_matrix) < 3:\n raise IndexError(\"Not enough rows to process file\")\n\n if len(data_matrix[0]) != 6:\n raise IndexError(\"Row 1 has not the right amount of columns\")\n if len(data_matrix[1]) != 8:\n raise IndexError(\"Row 1 has not the right amount of columns\")\n if len(data_matrix[2]) != 5:\n raise IndexError(\"Row 1 has not the right amount of columns\")",
"def test_difference_matrix_size_neg():\n with pytest.raises(ValueError):\n _banded_utils.difference_matrix(-1)",
"def test_error_on_different_shape(metric_class=RelativeSquaredError):\n metric = metric_class()\n with pytest.raises(RuntimeError, match=\"Predictions and targets are expected to have the same shape\"):\n metric(torch.randn(100), torch.randn(50))",
"def test_has_matrix_false(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n assert not MyOp.has_matrix\n assert not MyOp(wires=0).has_matrix",
"def test_MissingPointsError_numpy(self):\n with self.assertRaises(MissingPointsError):\n NumLine(points=np.array([]))\n return",
"def test_import_error(self):\n self.assertRaises(rio.errors.RasterioIOError, CraterpyDataset, \"?\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that custom error is raised in the default terms representation.
|
def test_terms_undefined(self):
    with pytest.raises(qml.operation.TermsUndefinedError):
        MyOp.compute_terms(wires=[1])
    with pytest.raises(qml.operation.TermsUndefinedError):
        op.terms()
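    # As above, MyOp is assumed to be a minimal Operator subclass that defines
    # no terms decomposition, so the default compute_terms()/terms() raise
    # TermsUndefinedError.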
|
[
"def test_custom_formatting():\r\n \r\n try: SampleAPI.execute('custom_err.fail')\r\n except Exception, e:\r\n assert e.data['error'] == True\r\n assert 'desc' in e.data\r\n assert e.data['num'] == 99\r\n # hook can modified the error instance directly\r\n assert e.http_status == 555\r\n assert e.custom_arg == True",
"def test_builtin_errors():\r\n try: SampleAPI.execute('in.valid')\r\n except MethodNotFoundError, e:\r\n assert e.method == ['in', 'valid']",
"def test_invalid_type_qr_name(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name=1)",
"def test_accept_custom_exception_text():\n custom_converter = lambda value: value + \" converted\"\n custom_type = hug.types.accept(custom_converter, \"A string Value\", \"Error occurred\")\n assert custom_type(\"bacon\") == \"bacon converted\"\n with pytest.raises(ValueError):\n custom_type(1)",
"def test_base_error_raises():\n with pytest.raises(PypyrSlackError) as err_info:\n raise PypyrSlackError(\"this is error text right here\")\n\n assert str(err_info.value) == \"this is error text right here\"",
"def test_base_multierror():\n\n exc = MultiError([ZeroDivisionError(), KeyboardInterrupt()])\n assert type(exc) is MultiError",
"def test_error_classes(self):\n def assert_contains_error(field):\n classes = wagtail_omni_forms_tags.admin_field_classes_for_object(field).split()\n self.assertIn('error', classes)\n\n def assert_not_contains_error(field):\n classes = wagtail_omni_forms_tags.admin_field_classes_for_object(field).split()\n self.assertNotIn('error', classes)\n\n form = self.DummyForm(data={})\n self.assertFalse(form.is_valid())\n assert_contains_error(form['title'])\n assert_contains_error(form['agree'])\n assert_contains_error(form['age'])\n assert_not_contains_error(form['description'])\n assert_not_contains_error(form['choices'])",
"def test_bad_values(self):\n self.assertOK(['upgrade'])\n self.assertOK(['foo'])\n self.assertRaisesInternalError([1])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])",
"def test_invalid_qasmname_qr(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name='Qr')",
"def test_invalid_type_cr_name(self):\n self.assertRaises(QiskitError, ClassicalRegister, size=3, name=1)",
"def test_expression_error():\n ee = ExpressionError(\"Expression\", \"MSG\")\n assert_true(ee.expression == \"Expression\")\n assert_true(ee.msg == \"MSG\")\n assert_true(ee.__str__() == \"[Expression] MSG\")",
"def test_from_exception_random(self):\r\n exc = errors.LibraryError.from_exception(ValueError(\"visa.dll\"), \"visa.dll\")\r\n assert \"Error while accessing\" in str(exc)",
"def test_glossary_term_create(self):\n pass",
"def test_meta_fail(self):\n with self.assertRaises(ValueError):\n self.resource.meta()",
"def error(self, message):\n pass",
"def invariant_error_views(self):",
"def test_prefix_value_fail(self):\n self.assertRaises(ValueError, self.set_prefix_value_to_unit_fail)",
"def test_display_residue_fail(self):\n\n # Set up some data.\n self.setup_data()\n\n # The following should fail.\n self.assertRaises(RelaxSpinSelectDisallowError, self.residue_fns.display, '@N')",
"def test_missing_symbols(self):\n # pylint: disable=protected-access\n with self.assertRaises(NameError):\n eqn = Equation(self.model, '14*x = 23*ww')\n eqn.parse(self.model._local_context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that custom error is raised in the default sparse matrix representation.
|
def test_sparse_matrix_undefined(self):
    with pytest.raises(NotImplementedError):
        MyOp(wires="a").sparse_matrix(wire_order=["a", "b"])
    with pytest.raises(qml.operation.SparseMatrixUndefinedError):
        MyOp.compute_sparse_matrix()
    with pytest.raises(qml.operation.SparseMatrixUndefinedError):
        op.sparse_matrix()
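    # The first case exercises the wire_order code path, assumed unimplemented
    # for sparse matrices of this operator; the last two hit the default sparse
    # representation, which raises SparseMatrixUndefinedError.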
|
[
"def test_sparse_matrix_error(self):\n\n t = qml.PauliX(0) @ qml.Hermitian(np.eye(4), wires=[1, 2])\n with pytest.raises(ValueError, match=\"Can only compute\"):\n t.sparse_matrix()",
"def test_sparse_csr_check():\n dense = _dense_matrix_example()\n shape, data, channels, spikes_ptr = _sparse_matrix_example()\n\n # Dense to sparse conversion not implemented yet.\n with raises(NotImplementedError):\n csr_matrix(dense)\n\n # Need the three sparse components and the shape.\n with raises(ValueError):\n csr_matrix(data=data, channels=channels)\n with raises(ValueError):\n csr_matrix(data=data, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(data=data, channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape,\n data=data[:-1], channels=channels, spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, channels=[[0]])\n with raises(ValueError):\n csr_matrix(shape=shape, spikes_ptr=[0])\n with raises(ValueError):\n csr_matrix(shape=(4, 5, 6), data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros((2, 2)),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros((2, 2)))\n with raises(ValueError):\n csr_matrix(shape=shape, data=np.zeros((100)), channels=channels,\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=np.zeros(100),\n spikes_ptr=spikes_ptr)\n with raises(ValueError):\n csr_matrix(shape=shape, data=data, channels=channels,\n spikes_ptr=np.zeros(100))\n\n # This one should pass.\n sparse = csr_matrix(shape=shape,\n data=data, channels=channels, spikes_ptr=spikes_ptr)\n assert isinstance(sparse, SparseCSR)\n ae(sparse.shape, shape)\n ae(sparse._data, data)\n ae(sparse._channels, channels)\n ae(sparse._spikes_ptr, spikes_ptr)",
"def test_sparsearray_set_get_value():\n sa = SparseArray(LIST1)\n sa[1] = 2\n sa[2] = 0\n\n with pytest.raises(IndexError):\n print(sa[20])\n with pytest.raises(IndexError):\n sa[20] = 10\n\n sa[0] = 0\n assert sa[0] == 0\n\n assert sa[1] == 2\n assert sa[2] == 0",
"def assert_is_sparse(a):\n if not is_sparse(a):\n raise TypeError('a is not a sparse array/matrix.')",
"def test_matrix_iter_sparse(self):\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli.matrix_iter(sparse=True)):\n self.assertTrue(isinstance(i, csr_matrix))\n self.assertTrue(np.all(i.toarray() == pauli_mat(labels[idx])))",
"def assert_is_scipy_sparse(a):\n if not scipy.sparse.issparse(a):\n raise TypeError('a is not a scipy sparse matrix.')",
"def test_sparse_format(self):\n\n coeffs = [-0.25, 0.75]\n obs = [\n qml.PauliX(wires=[0]) @ qml.PauliZ(wires=[1]),\n qml.PauliY(wires=[0]) @ qml.PauliZ(wires=[1]),\n ]\n H = qml.Hamiltonian(coeffs, obs)\n\n sparse_matrix = H.sparse_matrix()\n\n assert isinstance(sparse_matrix, scipy.sparse.csr_matrix)",
"def test_csc_matrix_empty():\n printer = AmiciCxxCodePrinter()\n matrix = sp.Matrix()\n (\n symbol_col_ptrs,\n symbol_row_vals,\n sparse_list,\n symbol_list,\n sparse_matrix,\n ) = printer.csc_matrix(matrix, rownames=[], colnames=[])\n\n assert symbol_col_ptrs == []\n assert symbol_row_vals == []\n assert sparse_list == sp.Matrix(0, 0, [])\n assert symbol_list == []\n assert str(sparse_matrix) == \"Matrix(0, 0, [])\"",
"def test_sparse_matrix_extra_wire(self):\n\n t = qml.PauliX(0) @ qml.PauliZ(1)\n s = t.sparse_matrix(wires=[0, 1, 2])\n\n assert s.shape == (8, 8)\n assert np.allclose(s.data, [1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0])\n assert np.allclose(s.indices, [4, 5, 6, 7, 0, 1, 2, 3])\n assert np.allclose(s.indptr, [0, 1, 2, 3, 4, 5, 6, 7, 8])",
"def test_to_matrix_5q_sparse(self):\n labels = [\"XXXYY\", \"IXIZY\", \"ZYXIX\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def _check_csr(self):\n pass",
"def test_csc_matrix():\n printer = AmiciCxxCodePrinter()\n matrix = sp.Matrix([[1, 0], [2, 3]])\n (\n symbol_col_ptrs,\n symbol_row_vals,\n sparse_list,\n symbol_list,\n sparse_matrix,\n ) = printer.csc_matrix(\n matrix,\n rownames=[sp.Symbol(\"a1\"), sp.Symbol(\"a2\")],\n colnames=[sp.Symbol(\"b1\"), sp.Symbol(\"b2\")],\n )\n\n assert symbol_col_ptrs == [0, 2, 3]\n assert symbol_row_vals == [0, 1, 1]\n assert sparse_list == sp.Matrix([[1], [2], [3]])\n assert symbol_list == [\"da1_db1\", \"da2_db1\", \"da2_db2\"]\n assert str(sparse_matrix) == \"Matrix([[da1_db1, 0], [da2_db1, da2_db2]])\"",
"def test_to_matrix_1q_sparse(self):\n labels = [\"X\", \"I\", \"Z\", \"Y\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def testDistanceMatrixForUnknownFeature(self):\n csv = StringIO('Ignored, A, B, C\\n'\n 'name1, 1, 2, 3\\n'\n 'name2, 4, 5, 6\\n')\n m = Matrix(csv)\n error = \"^'XXX'$\"\n assertRaisesRegex(self, KeyError, error, m.distanceMatrix, 'XXX')",
"def testInvalidValueInTable(self):\n self.assertRaises(ValueError,\n self.manager.snimpyInvalidDescr.__getitem__,\n 2)",
"def test_to_matrix_2q_sparse(self):\n labels = [\"IX\", \"II\", \"ZY\", \"YZ\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))",
"def testNonNumerical(self):\n csv = StringIO('Ignored, A, B, C\\n'\n 'name, 2, 3, hello\\n')\n if PY3:\n error = \"^could not convert string to float: ' hello'$\"\n else:\n error = '^could not convert string to float: hello$'\n assertRaisesRegex(self, ValueError, error, Matrix, csv)",
"def testEmpty(self):\n csv = StringIO()\n error = '^No input CSV data found\\.$'\n assertRaisesRegex(self, ValueError, error, Matrix, csv)",
"def create_sparse_matrix(A, x, s, options=\"non-sparse\"):\n\n if options == \"non-sparse\":\n # print(\"*********create sparse matrix (non-sparse)*********\")\n m, n = np.shape(A)\n i, j, k = sparse.find(A)\n # A transpose and I\n row_index = np.append(j, range(m + n, m + 2 * n))\n col_index = np.append(i + n, range(m + n, m + 2 * n))\n values = np.append(k, np.ones(n))\n # A\n row_index = np.append(row_index, i + n)\n col_index = np.append(col_index, j)\n values = np.append(values, k)\n # S\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(0, n))\n values = np.append(values, s)\n # X\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(m + n, m + 2 * n))\n values = np.append(values, x)\n # check\n # print(\"sparse matrix non-zero element :\")\n # print(\"row :\", len(row_index))\n # print(\"col :\", len(col_index))\n # print(\"values :\", len(values))\n return sparse.coo_matrix(\n (values, (row_index, col_index)), shape=(m + 2 * n, m + 2 * n)\n )\n # return sparse.coo_matrix((values, (row_index, col_index)))\n elif options == \"sparse\":\n # print(\"***create sparse matrix (sparse)***\")\n # try:\n # i, j, k, m, n = A\n # except:\n i, j, k = sparse.find(A)\n m, n = np.shape(A)\n # print(\"row :\", len(i))\n # print(\"col :\", len(j))\n # print(\"values :\", len(k))\n # print(\"variables :\", n)\n # print(\"constraints :\", m)\n # print(\"number of row :\", max(i))\n # print(\"number of column :\", max(j))\n # A transpose and I\n row_index = np.append(j, range(0, n))\n col_index = np.append(i + n, range(m + n, m + 2 * n))\n values = np.append(k, np.ones(n))\n # A\n row_index = np.append(row_index, i + n)\n col_index = np.append(col_index, j)\n values = np.append(values, k)\n # S\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(n))\n values = np.append(values, s)\n # X\n row_index = np.append(row_index, range(m + n, m + 2 * n))\n col_index = np.append(col_index, range(m + n, m + 2 * n))\n values = np.append(values, x)\n # print(\"****full matrix version****\")\n # print(\"variables :\", m + 2 * n)\n # print(\"constraints :\", m + 2 * n)\n # print(\"min index of row :\", min(row_index))\n # print(\"max index of row :\", max(row_index))\n # print(\"min index of column :\", min(col_index))\n # print(\"max index of column :\", max(col_index))\n return sparse.csc_matrix(\n (values, (row_index, col_index)), shape=(m + 2 * n, m + 2 * n)\n )\n # return sparse.csc_matrix((values, (row_index, col_index)))\n elif options == \"tosparse\":\n row_index, col_index, values, m, n = A\n return sparse.csc_matrix((values, (row_index, col_index)), shape=(m, n))\n else:\n raise Exception(\"options must be specific as sparse or non-sparse\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
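For context, the metadata above declares a single triplet objective over (query, document, negatives). A minimal sketch of how one such row could be expanded into training triplets, here using the sentence-transformers InputExample API; the row_to_triplets helper is hypothetical, not part of this dataset:

from sentence_transformers import InputExample

def row_to_triplets(row):
    # Expand one dataset row into (anchor, positive, negative) triplets:
    # the query is the anchor, the document is the positive, and each
    # entry in the negatives list supplies one hard negative.
    return [
        InputExample(texts=[row["query"], row["document"], negative])
        for negative in row["negatives"]
    ]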