query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Retrieves the project info by name for later extraction by the auto directives. Looks for the 'project' entry in the options dictionary. This is a less-than-ideal API, but it is designed to match the use of 'create_project_info' above, for which it makes much more sense.
def retrieve_project_info_for_auto(self, options) -> AutoProjectInfo:
    name = options.get("project", self.app.config.breathe_default_project)
    if name is None:
        raise NoDefaultProjectError(
            "No breathe_default_project config setting to fall back on "
            "for directive with no 'project' or 'path' specified."
        )
    return self.project_info_for_auto_store[name]
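A hedged usage sketch (the `factory` object is a stand-in for whatever holds this method in Breathe; the fallback behavior is as implemented above):

factory = ...  # whatever object carries retrieve_project_info_for_auto
info = factory.retrieve_project_info_for_auto({"project": "my_project"})
# With no 'project' key and no breathe_default_project configured,
# NoDefaultProjectError is raised instead.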
[ "def get_project(self, name):\n project = self.apiconn.get_object('DescribeProject',\n {'Name': name},\n ProjectInfo)\n\n if project.projectname != None:\n return project", "def get_current_project():\n parser = configparser.ConfigParser()\n parser.read(CONFIG_FILE)\n name = parser.get('lastproject', 'name')\n try:\n return { 'name': name, 'loc': parser.get('projects', name) }\n except configparser.InterpolationError:\n print('No project {0} found. Please select a new project.'.format(name))\n return None", "def getProject():\n sgHandle = getShotgunHandle()\n filters = [[\"name\", \"is\", CONFIG_DATA[\"shotgun\"][\"settings\"][\"project_name\"]]]\n fields = [\"id\", \"name\"]\n sg_project = sgHandle.find_one(\"Project\", filters, fields)\n\n return sg_project", "def get_project(self, name_or_id):\n project = self._get_project(name_or_id)\n if project:\n return meta.obj_to_dict(project)\n return None", "def getProjectInfo(self,project):\n m = re.match(r'(.*)-(.*)',project)\n if m:\n name = m.group(1)\n version = m.group(2)\n else:\n name = project\n version = \"*\"\n return name,version", "def _get_info(self):\n url = f\"{self.auth._endpoint()}/projects/{self.project_id}\"\n response_json = self.auth._request(request_type=\"GET\", url=url)\n self.info = response_json[\"data\"]\n return self.info", "def get_project(self, **kwargs) -> Project:\n if self.project_id and not kwargs.get(\"project_id\"):\n return get_project(name=self.project_id)\n else:\n return get_project(**kwargs)", "def create_project_info(self, project_path):\n\n return ProjectInfo(self.app, self._name, project_path, self._source_path, self._reference)", "def get_project_details(self, transplatform, package_name):\n resp_dict = None\n platform_url = None\n if transplatform.engine_name == TRANSPLATFORM_ENGINES[0]:\n platform_url = transplatform.api_url + \"/module/\" + package_name + \"/\"\n resp_dict = self.api_resources.fetch_project_details(\n transplatform.engine_name, transplatform.api_url, package_name\n )\n elif transplatform.engine_name == TRANSPLATFORM_ENGINES[1]:\n resp_dict = self.api_resources.fetch_project_details(\n transplatform.engine_name, transplatform.api_url, package_name,\n **dict(ext=True, auth_user=transplatform.auth_login_id, auth_token=transplatform.auth_token_key)\n )\n if resp_dict:\n tx_org_slug = resp_dict['organization']['slug']\n platform_url = transplatform.api_url + \"/\" + tx_org_slug + \"/\" + package_name\n else:\n platform_url = transplatform.api_url\n elif transplatform.engine_name == TRANSPLATFORM_ENGINES[2]:\n platform_url = transplatform.api_url + \"/project/view/\" + package_name\n resp_dict = self.api_resources.fetch_project_details(\n transplatform.engine_name, transplatform.api_url, package_name,\n **dict(auth_user=transplatform.auth_login_id, auth_token=transplatform.auth_token_key)\n )\n elif transplatform.engine_name == TRANSPLATFORM_ENGINES[3]:\n resp_dict = self.api_resources.fetch_project_details(\n transplatform.engine_name, transplatform.api_url, package_name,\n **dict(auth_user=transplatform.auth_login_id, auth_token=transplatform.auth_token_key)\n )\n platform_url = transplatform.api_url + \"/projects/\" + package_name\n return platform_url, resp_dict", "def get_project(self, project_name, dataset_name):\n url = self.url() + \"/nd/resource/dataset/{}\".format(dataset_name)\\\n + \"/project/{}/\".format(project_name)\n req = self.remote_utils.get_url(url)\n\n if req.status_code is not 200:\n raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n else:\n 
return req.json()", "def project(self):\n\t\treturn self._client.project", "def Project():\n\treturn _project", "def getProjectName(self):\n mapping = projects.get_project_mapping()\n if self.project in mapping:\n return mapping[self.project]\n else:\n return \"Unknown\"", "def project_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project_name\")", "def with_fields(self, **kwargs) -> \"ProjectInfo\":\n return ProjectInfo(**{**self._asdict(), **kwargs})", "def __getitem__(self, name):\r\n return self.project[name]", "def info(argv):\n usage = \"%(prog)s info\"\n description = \"Print information about the current project.\"\n parser = ArgumentParser(usage=usage,\n description=description)\n args = parser.parse_args(argv)\n try:\n project = load_project()\n except IOError as err:\n print(err)\n sys.exit(1)\n print(project.info())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n add_account_owners_admin_access: Optional[pulumi.Input[bool]] = None,\n available_credits: Optional[pulumi.Input[str]] = None,\n billing_group: Optional[pulumi.Input[str]] = None,\n ca_cert: Optional[pulumi.Input[str]] = None,\n copy_from_project: Optional[pulumi.Input[str]] = None,\n default_cloud: Optional[pulumi.Input[str]] = None,\n estimated_balance: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[str]] = None,\n payment_method: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProjectTagArgs']]]]] = None,\n technical_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n use_source_project_billing_group: Optional[pulumi.Input[bool]] = None) -> 'Project':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProjectState.__new__(_ProjectState)\n\n __props__.__dict__[\"account_id\"] = account_id\n __props__.__dict__[\"add_account_owners_admin_access\"] = add_account_owners_admin_access\n __props__.__dict__[\"available_credits\"] = available_credits\n __props__.__dict__[\"billing_group\"] = billing_group\n __props__.__dict__[\"ca_cert\"] = ca_cert\n __props__.__dict__[\"copy_from_project\"] = copy_from_project\n __props__.__dict__[\"default_cloud\"] = default_cloud\n __props__.__dict__[\"estimated_balance\"] = estimated_balance\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"payment_method\"] = payment_method\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"technical_emails\"] = technical_emails\n __props__.__dict__[\"use_source_project_billing_group\"] = use_source_project_billing_group\n return Project(resource_name, opts=opts, __props__=__props__)", "def _get_project_config(self, name):\n self._load_project_config()\n\n name = to_wiki_format(self.site, name)\n if name not in self._project_config[\"projects\"]:\n return None\n\n config = self._project_config[\"defaults\"].copy()\n config.update(self._project_config[\"projects\"][name])\n return config" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test integration with Faker.
def test_integration(self):
    """Test integration with Faker."""
    result = self.fake.ecommerce_name()
    self.assertIsInstance(result, str)
    self.assertGreater(len(result), 1)

    result = self.fake.ecommerce_category()
    self.assertIsInstance(result, str)
    self.assertGreater(len(result), 1)

    result = self.fake.ecommerce_price(False)
    self.assertIsInstance(result, float)
    self.assertGreaterEqual(result, 1)
    self.assertLessEqual(result, 999999.99)
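A minimal harness sketch for running such a test (the provider registration line is an assumption; any Faker provider exposing the ecommerce_* methods would work):

import unittest
from faker import Faker

class EcommerceTest(unittest.TestCase):
    def setUp(self):
        self.fake = Faker()
        # self.fake.add_provider(EcommerceProvider)  # provider class assumed

if __name__ == '__main__':
    unittest.main()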
[ "def fake():\n yield faker.Faker()", "def get_faker(): # pragma: no cover\n selector = randrange(100)\n if 0 <= selector <= 60:\n return Faker('en_GB')\n if 60 < selector <= 75:\n return Faker('es_ES')\n if 75 < selector <= 77:\n return Faker('fr_FR')\n if 77 < selector <= 79:\n return Faker('it_IT')\n if 79 < selector <= 81:\n return Faker('nl_NL')\n if 81 < selector <= 83:\n return Faker('no_NO')\n if 83 < selector <= 85:\n return Faker('de_DE')\n if 85 < selector <= 87:\n return Faker('dk_DK')\n if 87 < selector <= 89:\n return Faker('en_US')\n if 89 < selector <= 91:\n return Faker('en_CA')\n if 91 < selector <= 93:\n return Faker('ru_RU')\n if 93 < selector <= 95:\n return Faker('pt_PT')\n if 95 < selector <= 97:\n return Faker('sv_SE')\n if 97 < selector <= 99:\n return Faker('fi_FI')", "def test_create_recipe():\n recipe = Recipe(\"Tuna pasta\", ingreds)\n assert recipe.name == \"Tuna pasta\"\n assert recipe.ingreds == ingreds", "def gen_random(self, field_name, random):\r\n ...", "def test_all_betterself_factories(self):\n factories_to_test = [\n ActivityFactory,\n ActivityLogFactory,\n DailyProductivityLogFactory,\n IngredientFactory,\n IngredientCompositionFactory,\n MeasurementFactory,\n SleepLogFactory,\n SupplementFactory,\n SupplementLogFactory,\n SupplementStackFactory,\n SupplementStackCompositionFactory,\n WellBeingLogFactory,\n FoodFactory,\n FoodLogFactory,\n ]\n\n for factory in factories_to_test:\n created_instance = factory()\n self.assertIsNotNone(created_instance)", "def func_fixture():\n yield \"Function\"", "def fake_vacancies_data(faker):\n def gen_vacancies(sources_count=1, vacancies_count=3):\n vacancies_data = []\n for s in range(sources_count):\n source_name = faker.company()\n for v in range(vacancies_count):\n vacancies_data.append({\n 'source': faker.uri(),\n 'source_name': source_name[:16],\n 'name': faker.job()\n })\n return vacancies_data\n return gen_vacancies", "def sample_ingredient(user, name='TestIngredient'):\n return models.Ingredient.objects.create(user=user, name=name)", "def test_make_account(self):\n d = baker.make(\"Department\")\n f = MakeNewAccount({\n 'isaac': False,\n 'department':d.pk,\n 'college':d.college.pk,\n 'email':'kfldsj@klfjc.com',\n 'username':'jliver',\n 'password1':'pwpwpwpw',\n 'password2':'pwpwpwpw',\n 'first_name':\"Janey\",\n \"last_name\":\"Liverman\"\n })\n self.assertTrue(f.is_valid())", "def setUp(self):\n super().setUp()\n self.dept = baker.make_recipe(\"makeReports.department\")", "def test_factory(self):\n self.assertIsInstance(Extrapolator.factory(mode=\"window\"), WindowExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"poly\"), PolynomialExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"diff_model\"), DifferentialExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"pca\"), PCAExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"l1\"), SieveExtrapolator)\n self.assertRaises(QiskitNatureError, Extrapolator.factory, mode=\"unknown\")", "def test_create_new_supplier(self):\n supplier = models.Supplier.objects.create(\n email=\"new_supplier@supplier.test\",\n name=\"Supplier test\",\n address=\"Address new #1\"\n )\n\n self.assertEqual(str(supplier), supplier.email)", "def test_build_creation(self):", "def gen_date():\r\n return random.randint(DAY1, TODAY)", "def test_create_fleet(self):\n expected_name = 'example_fleet'\n expected_desc = 'A newly created fleet used in a test.'\n expected_configs = [self.config1, self.config2]\n created_fleet = 
fleet_model.Fleet.create(loanertest.TECHNICAL_ADMIN_EMAIL,\n expected_name,\n expected_configs,\n expected_desc)\n self.assertEqual(created_fleet.name, expected_name)\n self.assertEqual(created_fleet.config, expected_configs)\n self.assertEqual(created_fleet.description, expected_desc)\n self.assertEqual(created_fleet.display_name, expected_name)", "def sample_ingredient(user, name=\"Batata\"):\n return Ingredient.objects.create(user=user, name=name)", "def test_salutation():\n assert salutation(\"Kinga\") == \"Hello, Kinga!\"", "def gen_fake(self, field_name, fake):\r\n ...", "def test_database_seed(self):\n\n sys.stdout.write('Testing database seed process...')\n user = User.query.filter_by(user_id=1).one()\n house = House.query.filter_by(house_id=2).one()\n assert user.email == \"kae@gmail.com\"\n assert house.address == \"410 Forney Ave Jacksonville, AL 36265\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a 2D array of radius values from a specific center.
def gen_radius_array(shape, center, xy_scale=None, r_scale=None):
    # Figure out all the scaling complexity
    if r_scale is not None:
        rscale = r_scale
        xscale = 1
        yscale = 1
    else:
        if isinstance(xy_scale, (tuple, list, np.ndarray)):
            rscale = 1
            xscale = xy_scale[0]
            yscale = xy_scale[1]
        elif isinstance(xy_scale, (float, int)):
            rscale = 1
            xscale = xy_scale
            yscale = xy_scale
        else:
            rscale = 1
            xscale = 1
            yscale = 1
    x = (np.arange(shape[0]) - center[0]) * xscale
    y = (np.arange(shape[1]) - center[1]) * yscale
    r = np.sqrt(x[:, np.newaxis] ** 2 + y ** 2) * rscale
    return r
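A quick usage sketch (assumes numpy is imported as np; the values are easy to verify by hand):

r = gen_radius_array((5, 5), center=(2, 2))
# r[2, 2] == 0.0, r[2, 4] == 2.0, and the corners are sqrt(8) ≈ 2.83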
[ "def create_2d_circle_kernel(radius):\n return np.array([ np.sqrt( x * x + y * y ) <= float(radius) for y in xrange(-radius, radius+1) for x in xrange(-radius, radius+1)], dtype=np.float32).reshape( radius*2+1, radius*2+1 )", "def radii(data):\n rs = np.empty(len(data))\n for i, (points_sequence, labels_sequence) in enumerate(data):\n rs[i] = radius(points_sequence, labels_sequence, data)\n return rs", "def getCircleCoords(centre, radius):\n centrex, centrey = centre\n coords = []\n for x in range(-radius, radius):\n for y in range(-radius, radius):\n if x * x + y * y <= radius * radius and my.map.inBounds(\n (centrex + x, centrey + y)\n ):\n coords.append((centrex + x, centrey + y))\n return coords", "def get_circle_edgematrix(center_x, center_y, radius, step=30):\n return Generator.get_polygon_edgematrix(\n center_x, center_y, radius, step)", "def define_circle_points(center, radius):\n res = np.pi/radius # set resolution to avoid double counting a pixel\n x = center[0] + np.round(radius * np.cos(np.arange(-np.pi, np.pi, res)))\n y = center[1] + np.round(radius * np.sin(np.arange(-np.pi, np.pi, res)))\n return x, y", "def angle_to_coord(center, angle, radius) -> np.ndarray:\n x = radius * np.sin(angle)\n y = radius * np.cos(angle)\n x += center[0]\n y += center[1]\n return np.array([x, y])", "def computeCartesianCoords(lon, lat, radius):\n npts = lon.shape[0]\n xyz = numpy.empty((npts, 3), numpy.float64)\n cosLat = numpy.cos(lat*DEG2RAD)\n xyz[:, 0] = radius * cosLat * numpy.cos(lon*DEG2RAD)\n xyz[:, 1] = radius * cosLat * numpy.sin(lon*DEG2RAD)\n xyz[:, 2] = radius * numpy.sin(lat*DEG2RAD)\n return xyz", "def make_circle(self):\n\n def circle_equation(center, r, xy):\n \"\"\" circle_equation\n\n Describes circle\n\n Args:\n center (tuple): center coordinates of the ellipse, origin is (0,0).\n r (float): radius\n x (array-like): 2d array of x coordinates\n y (array-like): 2d array of y coordinates\n\n Returns:\n array-like: r_norm, same size as image size\n \n \"\"\"\n x = xy[0] ##breaks pixel array up into x and y\n y = xy[1]\n r_norm = ((x-center[1])**2 + (y-center[0])**2)/(r**2)\n return r_norm\n\n\n r_grid = circle_equation(self.center, self.r, self.pixel_array)\n r_grid[r_grid<1.0] = 1\n r_grid[r_grid>1.0] = 0\n\n return r_grid", "def circular_kernel(radius):\n\n width = 2*radius + 1\n kernel = np.zeros((width, width), np.uint8)\n for i in range(0, width):\n for j in range(0, width):\n if (i - radius) ** 2 + (j - radius) ** 2 <= radius**2:\n kernel[i][j] = int(1)\n return kernel", "def extract_neighborhood(x, y, arr, radius):\n if x < radius or y < radius or x>=480-radius or y>=640-radius:\n return np.ones((radius*2+1,radius*2+1)).ravel()\n return arr[(x - radius) : (x + radius + 1), (y - radius) : (y + radius + 1)].ravel()", "def _get_cells_in_city(self, center: IntVector2D, radius: int, city_orientation: int,\n vector_field: IntVector2DArray) -> IntVector2DArray:\n x_range = np.arange(center[0] - radius, center[0] + radius + 1)\n y_range = np.arange(center[1] - radius, center[1] + radius + 1)\n x_values = np.repeat(x_range, len(y_range))\n y_values = np.tile(y_range, len(x_range))\n city_cells = list(zip(x_values, y_values))\n for cell in city_cells:\n vector_field[cell] = align_cell_to_city(center, city_orientation, cell)\n return city_cells", "def arch_points(radius, start_ang, end_ang, n) -> np.ndarray:\n angles = np.linspace(start_ang, end_ang, n)\n x = radius * np.sin(angles)\n y = radius * np.cos(angles)\n return np.vstack([x, y]).T", "def extract_circle(center, radius, 
coords):\n return np.where(((coords - center) ** 2).sum(axis=-1) < radius**2)[0]", "def BoxfromCenter(center : np.ndarray, width : int, height : int):\r\n return np.array([center[0]-width//2, center[1]-height//2, center[0]+width//2, center[1]+height//2])", "def yank_circle_pixels(img, center, radius):\n x, y = _define_circle_points(center, radius) \n ## Filter out out-of-bounds points\n yx = zip(y, x) # yx b/c row,column\n y_max, x_max = img.shape\n inbounds = lambda yx: 0 <= yx[0] <= y_max and 0 <= yx[1] <= x_max\n yx_inbounds = filter(inbounds, yx)\n if len(yx) != len(yx_inbounds):\n warnings.warn(\"Circle is clipped by image limits.\")\n ## Find pix\n pix = [img[yx] for yx in yx_inbounds]\n return pix", "def get_in_radius_pixels(point: tuple, radius: int) -> np.ndarray:\n # Construct the array of pixels which may be effected\n x_val, y_val = np.mgrid[-radius: radius + 1: 1, -radius: radius + 1: 1]\n # The mask will be used to filter out the pixels further than\n # the radius around the center.\n mask = x_val * x_val + y_val * y_val <= radius * radius\n # Construct an array of DiameterXDiameter pixels\n in_radius_ar = np.vstack((x_val.flatten(), y_val.flatten())).T.reshape(\n (radius * 2 + 1, radius * 2 + 1, 2))\n # Return the pixels within radius distance, plus an offset so we test\n # the relevant location rather than center of the screen\n return in_radius_ar[mask] + np.array(point)", "def circle(center=[0,0], r=1.):\n ang = np.linspace(0,2*np.pi,1000)\n #unit circle * radius\n x = np.cos(ang)*r\n y = np.sin(ang)*r\n #circle transloation\n x = x + center[0]\n y = y + center[0]\n return x,y", "def map_radius(cube):\n R = np.zeros(cube.shape)\n x, y, z = cube.axes()\n X, Y = np.meshgrid(x, y)\n for i, zt in enumerate(z):\n R[..., i] = np.sqrt(X ** 2 + Y ** 2 + zt ** 2)\n return R", "def scan_cluster_locations(self, centers, radius, resolution, error=0.125):\n all_points = [] # all points in all grids\n\n # create grids centered at cluster center\n for center in centers:\n c_x = center[0]\n c_y = center[1]\n dim = radius+error\n mini_grid = np.linspace(-dim,dim, 20)\n\n # create final list, offset by cluster center coordinates\n scan_points = [(x+c_x, y+c_y) for x in mini_grid for y in mini_grid]\n\n for point in scan_points:\n all_points.append(point)\n\n return all_points" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a 2D radial mask array. Pixels within radius=(rmin, rmax) of the specified center will be set to the value given by `mask`.
def gen_radial_mask(shape, center, radius, mask=True, xy_scale=None, r_scale=None):
    r = gen_radius_array(shape, center, xy_scale=xy_scale, r_scale=r_scale)
    out = (r >= radius[0]) & (r <= radius[1])
    return out if mask else np.logical_not(out)
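A usage sketch building an annulus mask with the gen_radius_array helper above (assumes numpy as np):

annulus = gen_radial_mask((5, 5), center=(2, 2), radius=(1.0, 2.0))
# True for pixels between 1 and 2 units from the center (inclusive);
# pass mask=False to select everything outside that band instead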
[ "def circular_mask(radius):\n \n diameter = 2*radius + 1\n \n center_x = center_y = radius\n x, y = np.indices((diameter, diameter))\n \n distances = ((center_x - x) ** 2 + (center_y - y) ** 2) ** 0.5\n return (distances <= radius)", "def _prepared_radial_gradient_mask(size, scale=1):\n\n mask = ImageChops.invert(Image.radial_gradient('L'))\n\n w, h = mask.size\n xoffset = round((w - w / scale) / 2)\n yoffset = round((h - h / scale) / 2)\n box = (xoffset, yoffset, w - xoffset, h - yoffset)\n\n return mask.resize(size, box=box)", "def image_mask(CS_mask,radius = 15):\n from numpy import indices\n w,h = shape(CS_mask)\n x_indices,y_indices = indices((w,h))\n SAXS_mask = sqrt((y_indices-(h-1)/2)**2+(x_indices-(w-1)/2)**2) < radius\n Border_mask = (y_indices<2) | (y_indices>(h-3)) | \\\n (x_indices<2) | (x_indices>(w-3))\n CS_mask *= ~(SAXS_mask | Border_mask)\n return CS_mask", "def make_mask(data, xpix, ypix, rmask=15):\r\n mask = np.zeros_like(halpha).astype(np.int) # variavel booleana do tamanho do halpha\r\n xdim, ydim = data.shape\r\n #define um array de x e y\r\n x = np.arange(xdim)\r\n y = np.arange(ydim)\r\n xx, yy = np.meshgrid(x, y) #faz uma imagem das coordenadas\r\n for x0, y0 in zip(xpix, ypix):#loop para cada objeto dessa lista\r\n #x0 é o centro da estrela\r\n r = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2) # raio em ao x0 e y0\r\n mask[r<=rmask] = 1\r\n return mask", "def sector_mask(shape, centre, min_radius, max_radius, theta_range, phi_range):\n\n x, y, z = np.ogrid[:shape[0], :shape[1], :shape[2]]\n cx, cy, cz = centre\n t_min, t_max = np.deg2rad(theta_range)\n p_min, p_max = np.deg2rad(phi_range)\n\n # ensure stop angle > start angle\n if t_max < t_min:\n t_max += 2 * np.pi\n if p_max < p_min:\n p_max += 2 * np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x - cx) ** 2 + (y - cy) ** 2 + (z - cz) ** 2\n theta = np.arctan2(y - cy, x - cx) - t_min\n phi = np.arctan2(np.sqrt((x - cx) ** 2 + (y - cy) ** 2), (z - cz) ** 2) - p_min\n\n # wrap angles between 0 and 2*pi\n theta %= (2 * np.pi)\n phi %= (2 * np.pi)\n\n # circular mask\n sphere_mask = np.logical_and(r2 >= min_radius ** 2, r2 <= max_radius ** 2)\n\n # angular mask\n anglemask = np.logical_and(theta <= (t_max - t_min), phi <= (p_max - p_min))\n\n return sphere_mask * anglemask", "def circle_mask(shape, sharpness = 40):\n assert shape[1] == shape[2], \"circle_mask received a bad shape: \" + shape\n\n diameter = shape[1]\n x = np.linspace(-1, 1, diameter)\n y = np.linspace(-1, 1, diameter)\n xx, yy = np.meshgrid(x, y, sparse=True)\n z = (xx**2 + yy**2) ** sharpness\n\n mask = 1 - np.clip(z, -1, 1)\n mask = np.expand_dims(mask, axis=0)\n mask = np.broadcast_to(mask, shape)\n mask = torch.tensor(mask, dtype=torch.float)\n return mask", "def mask_eccentric(x, y, center_x, center_y, e1, e2, r):\n x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y)\n r_ab = (x_**2 + y_**2) / r ** 2\n mask = np.empty_like(r_ab, dtype='int')\n mask[r_ab > 1] = 0\n mask[r_ab <= 1] = 1\n return mask", "def find_rotated_mask(shape,Rx,Ry,angle,xc,yc):\n arr = np.empty((shape[0],shape[1]), dtype = bool)\n #now fill the array with distance away from center\n for i in range(arr.shape[0]):\n for j in range(arr.shape[1]):\n xpos, ypos = pos(j,i,xc,yc,angle)\n if (xpos/Rx)**2 + (ypos/Ry)**2 < 1:\n arr[i,j] = True\n else:\n arr[i,j] = False\n\n return arr", "def mask_azimuthal(x, y, center_x, center_y, r):\n x_shift = x - center_x\n y_shift = y - center_y\n R = np.sqrt(x_shift*x_shift + y_shift*y_shift)\n mask = 
np.empty_like(R, dtype='int')\n mask[R > r] = 0\n mask[R <= r] = 1\n return mask", "def fill_small_radius_holes(mask, max_radius):\n mask = mask.astype(bool)\n outside = ndimage.binary_propagation(numpy.zeros_like(mask), mask=~mask, border_value=1)\n holes = ~(mask | outside)\n large_hole_centers = ndimage.binary_erosion(holes, iterations=max_radius+1)\n large_holes = ndimage.binary_propagation(large_hole_centers, mask=holes)\n small_holes = holes ^ large_holes\n return mask | small_holes", "def compute_mask(self, experiment):\n assert(len(self.args) != 0)\n center = experiment.project(self.center)\n center_to_edge = self.radius * perpendicular(\n experiment.camera_to(self.center))\n radius_vector = (experiment.project(self.center + center_to_edge)\n - experiment.project(self.center))\n radius = np.linalg.norm(radius_vector)\n \n rr, cc = draw.circle(center[0], center[1], radius,\n shape=experiment.image_shape[:2])\n \n dd = np.empty(rr.shape[0], dtype=np.float64)\n for i in range(dd.shape[0]):\n dd[i] = self.distance_to_surface([rr[i], cc[i]], experiment)\n \n return rr, cc, dd", "def _generate_mask(self):\r\n\r\n img = np.zeros((self.height, self.width, self.channels), np.uint8)\r\n\r\n # Set size scale\r\n size = int((self.width + self.height) * 0.03)\r\n if self.width < 64 or self.height < 64:\r\n raise Exception(\"Width and Height of mask must be at least 64!\")\r\n \r\n # Draw random lines\r\n for _ in range(randint(1, 20)):\r\n x1, x2 = randint(1, self.width), randint(1, self.width)\r\n y1, y2 = randint(1, self.height), randint(1, self.height)\r\n thickness = randint(3, size)\r\n cv2.line(img,(x1,y1),(x2,y2),(1,1,1),thickness)\r\n \r\n # Draw random circles\r\n for _ in range(randint(1, 20)):\r\n x1, y1 = randint(1, self.width), randint(1, self.height)\r\n radius = randint(3, size)\r\n cv2.circle(img,(x1,y1),radius,(1,1,1), -1)\r\n \r\n # Draw random ellipses\r\n for _ in range(randint(1, 20)):\r\n x1, y1 = randint(1, self.width), randint(1, self.height)\r\n s1, s2 = randint(1, self.width), randint(1, self.height)\r\n a1, a2, a3 = randint(3, 180), randint(3, 180), randint(3, 180)\r\n thickness = randint(3, size)\r\n cv2.ellipse(img, (x1,y1), (s1,s2), a1, a2, a3,(1,1,1), thickness)\r\n \r\n return 1-img", "def gen_radius_array(shape, center, xy_scale=None, r_scale=None):\n # Figure out all the scaling complexity\n if r_scale is not None:\n rscale = r_scale\n xscale = 1\n yscale = 1\n else:\n if isinstance(xy_scale, (tuple, list, np.ndarray)):\n rscale = 1\n xscale = xy_scale[0]\n yscale = xy_scale[1]\n elif isinstance(xy_scale, (float, int)):\n rscale = 1\n xscale = xy_scale\n yscale = xy_scale\n else:\n rscale = 1\n xscale = 1\n yscale = 1\n x = (np.arange(shape[0]) - center[0]) * xscale\n y = (np.arange(shape[1]) - center[1]) * yscale\n r = np.sqrt(x[:, np.newaxis] ** 2 + y ** 2) * rscale\n return r", "def remove_small_radius_objects(mask, max_radius):\n eroded = ndimage.binary_erosion(mask, iterations=max_radius)\n return ndimage.binary_propagation(eroded, mask=mask)", "def create_circle_mask(self, cells, i):\n xx, yy = np.mgrid[:(cells*2)+1, : (cells*2)+1]\n qsr_mask = (xx - float(cells))** 2 + (yy - float(cells))** 2\n #print qsr_mask\n qsr_mask[qsr_mask > cells**2] = (cells**2)+1\n #print \"max: \", qsr_mask.max()\n #print qsr_mask\n\n label = self.sorted_params[i][0]\n binary_circle_mask = (qsr_mask <=cells**2)*100\n #donut = np.logical_and(circle < (6400 + 60), circle > (6400 - 60)) ???\n\n #This removes the center of the mask if i>0\n self.binary_masks[label] = 
self.remove_center_of_mask(i, binary_circle_mask)", "def _generate_mask(self):\n\n img = np.zeros((self.height, self.width, self.channels), np.uint8)\n\n # Set size scale\n size = int((self.width + self.height) * 0.03)\n if self.width < 64 or self.height < 64:\n raise Exception(\"Width and Height of mask must be at least 64!\")\n \n # Draw random lines\n for _ in range(randint(1, 20)):\n x1, x2 = randint(1, self.width), randint(1, self.width)\n y1, y2 = randint(1, self.height), randint(1, self.height)\n thickness = randint(3, size)\n cv2.line(img,(x1,y1),(x2,y2),(1,1,1),thickness)\n \n # Draw random circles\n for _ in range(randint(1, 20)):\n x1, y1 = randint(1, self.width), randint(1, self.height)\n radius = randint(3, size)\n cv2.circle(img,(x1,y1),radius,(1,1,1), -1)\n \n # Draw random ellipses\n for _ in range(randint(1, 20)):\n x1, y1 = randint(1, self.width), randint(1, self.height)\n s1, s2 = randint(1, self.width), randint(1, self.height)\n a1, a2, a3 = randint(3, 180), randint(3, 180), randint(3, 180)\n thickness = randint(3, size)\n cv2.ellipse(img, (x1,y1), (s1,s2), a1, a2, a3,(1,1,1), thickness)\n \n return 1-img", "def filter_density(mask, rad=3, size=5, fn = lambda m,i,j: m[i,j]):\n rows, cols = mask.shape\n X,Y = np.meshgrid(xrange(cols), xrange(rows))\n in_circle = lib.in_circle\n out = np.zeros((rows,cols), np.bool)\n for row,col in locations(mask.shape):\n\tif fn(mask,row,col):\n\t a = in_circle((col,row),rad)\n\t if np.sum(mask*a(X,Y))>size:\n\t\tout[row,col] = True\n return out", "def get_background(mask, offset_radius, background_radius):\n offset = ndimage.binary_dilation(mask, iterations=offset_radius)\n background = ndimage.binary_dilation(offset, iterations=background_radius)\n return background ^ offset", "def _generate_mask(self) -> ndarray:\n # calculate mean 3x3 (square nbhood) orography heights\n radius = number_of_grid_cells_to_distance(self.topography, 1)\n topo_nbhood = NeighbourhoodProcessing(\"square\", radius)(self.topography)\n topo_nbhood.convert_units(\"m\")\n\n # create mask\n mask = np.full(topo_nbhood.shape, False, dtype=bool)\n mask = np.where(topo_nbhood.data < self.orog_thresh_m, True, mask)\n mask = np.where(self.humidity.data < self.rh_thresh_ratio, True, mask)\n mask = np.where(abs(self.vgradz) < self.vgradz_thresh_ms, True, mask)\n return mask" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if all elements in the array a have a lower value than the corresponding elements in the array b.
def is_lower(a, b):
    for idx, a_value in enumerate(a):
        # strict comparison: equal elements do not count as lower
        if a_value >= b[idx]:
            return False
    return True
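Two quick checks of the strict semantics:

assert is_lower([1, 2], [2, 3])
assert not is_lower([1, 3], [2, 3])  # equality does not count as lower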
[ "def is_smaller_or_equal(array1, array2):\n assert(array1.size == array2.size)\n return all(array2-array1 >= 0)", "def is_smaller(array1, array2):\n assert(array1.size == array2.size)\n return all(array2-array1 > 0)", "def agtb(a, b):\n return matrix(list(map(lambda x, y: x > y, a, b)), a.size)", "def altb(a, b):\n return matrix(list(map(lambda x, y: x < y, a, b)), a.size)", "def dominates(a, b):\n return np.all(a <= b)", "def less(x1: ArrayOrScalar, x2: ArrayOrScalar) -> Union[Array, bool]:\n return _compare(x1, x2, \"<\")", "def __lt__(self, arr):\n for i in range(min(len(self), len(arr)): # compare each value in range of shortest length\n if arr[i] > self[i]:\n return True\n\n if len(arr) > len(self): \n return True\n\n return False", "def __le__(self,other):\n if len(self.S) == 0:\n return True\n if len(self.S) > 0 and len(other.S) == 0:\n return False\n return min([max([t <= s for t in other.S]) for s in self.S])", "def checkDifference(self, area, a):\n for i in range(0, len(a)):\n if area >= a[i]:\n return True\n return False", "def array_eq(a, b, tol):\n if a.shape != b.shape: \n return False \n for index, x in np.ndenumerate(a):\n if np.abs(x - b[index]) > tol:\n return False \n return True", "def less_equal(x1: ArrayOrScalar, x2: ArrayOrScalar) -> Union[Array, bool]:\n return _compare(x1, x2, \"<=\")", "def pt_above(p, a, b):\n return ((a[0] - p[0]) * (b[1] - p[1]) -\n (b[0] - p[0]) * (a[1] - p[1]) > 0.0)", "def cell_leq(a, b):\n return ((a[0] == b[0] - 1 and a[1] == b[1])\n or (a[1] == b[1] - 1 and a[0] == b[0]))", "def comparegeneric(a, b,maxfrac=1.e-4,maxdiff=1.e-4):\n xi1 = a.flatten()\n xi2 = b.flatten()\n if len(xi1) != len(xi2):\n return 2\n xx = np.where(np.fabs((xi1-xi2)/xi1) > maxfrac,1,0)\n yy = np.where(np.fabs(xi1-xi2) > maxdiff,1,0)\n zz = (xx & yy)\n\n if (zz != 0).any():\n return 1\n else:\n return 0", "def larger_than(a, b):\n\n lz_a = nlz(a)\n lz_b = nlz(b)\n\n if lz_a > lz_b:\n return 0\n if lz_a < lz_b:\n return 1\n else:\n if a >= b:\n return 1\n return 0", "def epsilon_lte(a, b):\n float_epsilon = numpy.finfo(numpy.float32).eps\n return float_epsilon > a - b", "def hasLowerBound(*args, **kwargs):\n \n pass", "def larger_than_min_t(self):\n total_t = len(self.__detections)\n return total_t >= self.__min_t, total_t", "def is_a_before_b(a, b):\n if not a or not b:\n return False\n year_a, year_b = year(a), year(b)\n if year_a != year_b:\n return year_a < year_b\n month_a, month_b = month(a), month(b)\n if month_a != month_b:\n return month_a < month_b\n return day(a) < day(b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the minimal Euclidean distance between any pair of points in the collection of points provided as argument.
def get_min_euclidian_distance(points):
    min_distance = math.inf
    for point1, point2 in itertools.combinations(points, 2):
        distance = MathUtils.get_distance(point1, point2)
        if distance < min_distance:
            min_distance = distance
    return min_distance
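A usage sketch (MathUtils.get_distance is the project's own helper; it is assumed here to compute a plain 2D Euclidean distance):

points = [(0, 0), (3, 4), (1, 0)]
assert get_min_euclidian_distance(points) == 1.0  # (0, 0) vs (1, 0)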
[ "def _interpoint_distances(points):\n\n xd = np.subtract.outer(points[:,0], points[:,0])\n yd = np.subtract.outer(points[:,1], points[:,1])\n\n return np.sqrt(xd**2 + yd**2)", "def slow_closest_pair(points):\n dist = float('inf')\n closest_pair = None\n for x in points:\n for y in points:\n if x != y:\n d = fake_euclid(x, y)\n if d < dist:\n dist = d\n closest_pair =(x, y)\n return closest_pair", "def closest(reference,points):\n min_dis = float('inf')\n for point in points:\n dis = distance(reference,point)\n if dis < min_dis:\n min_dis = dis\n closest_point = point\n return closest_point, min_dis", "def euclidian_dist(point1, point2):\n m = len(point1)\n if m != len(point2):\n raise Exception(\"Points must have same number of dimensions.\")\n square_difs_sum = 0\n for dimension in range(m):\n dif = (point1[dimension] - point2[dimension]) ** 2\n square_difs_sum += dif\n return np.sqrt(square_difs_sum)", "def compute_distance(cls, point_1, point_2):\n return abs(point_1 - point_2)", "def euclidean(self, pairs):\n return math.sqrt(sum([math.pow(x[0]-x[1],2) for x in pairs]))", "def efficient_closest_pair(points):\n\n points = sort_points_by_X(points)\n return efficient_closest_pair_routine(points)", "def distance(point1: list | np.ndarray,\n point2: list | np.ndarray) -> float:\n v1, v2 = pad_with_zeros(point1, point2)\n return np.linalg.norm(v2 - v1)", "def get_point_distance(points, target):\n if len(points.shape) == 1:\n return la.norm(points - target)\n return la.norm(points - target, axis=1)", "def distance(point1, point2):\n return point1.dist_to(point2)", "def find_square_euc(point1, point2):\n\tdist = 0\n\tfor a in range(len(point1)):\n\t\tdiff = point1[a] - point2[a]\n\t\tdist += pow(diff, 2)\n\treturn dist", "def closest_pair_in_strip(points, d):\n\n points = sort_points_by_Y(points)\n min_found = False\n\n for i in range(len(points)):\n point1 = points[i]\n\n for point2 in points[i + 1 : i + 6]:\n current_dist = dist(point1, point2)\n\n if (current_dist < d):\n d = current_dist\n closest_points = [point1, point2]\n min_found = True\n\n return [d, *closest_points] if min_found else -1", "def euclidean_distance(x,y):\n return sqrt(sum(pow(a-b,2) for a, b in zip(x, y)))", "def closest_points(start_point, points, dist):\n pt = []\n for point in points:\n if distance(start_point, point) <= dist:\n pt.append(point)\n return pt", "def distance_point_point(point1, point2=(0., 0.)):\n dx = point1[0] - point2[0]\n dy = point1[1] - point2[1]\n return sqrt(dx*dx + dy*dy)", "def cal_euclidean_distance(x1, y1, x2, y2):\n return np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))", "def distance_point_point_fast(point1, point2):\n dx = point1[0] - point2[0]\n dy = point1[1] - point2[1]\n return abs(dx) + abs(dy)", "def point_distance():\n\n\n #create point1 and point2 instances\n point1 = Point()\n point2 = Point()\n\n # assign values to point objects\n point1.x = 15\n point1.y = 15\n\n point2.x = 5\n point2.y = 5\n\n #compute distance\n distance = sqrt((point2.y - point1.y)**2 + (point2.x - point1.x)**2)\n return distance", "def brute_force_closest(point, pointlist):\n import sys\n pid, d = -1, sys.maxint\n for i, p in enumerate(pointlist):\n nd = norm(point-p) \n if nd < d:\n d = nd\n pid = i\n return pointlist[pid]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the log-gamma value using the Lanczos approximation formula.
def log_gamma(x):
    return math.lgamma(x)
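A sanity check via the identity Γ(n) = (n-1)!:

import math
assert math.isclose(log_gamma(6), math.log(math.factorial(5)))  # Γ(6) = 5! = 120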
[ "def lgamma(x):\n cof = [ 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 ]\n y = x\n tmp = x + 5.5\n tmp -= ((x + 0.5) * math.log(tmp))\n ser = 1.000000000190015\n for j in range(len(cof)):\n y += 1\n ser += (cof[j] / y)\n return (-tmp + math.log(2.5066282746310005 * ser / x))", "def L(self, x, c, gamma):\n return gamma / (np.pi * ((x - c) ** 2 + gamma ** 2))", "def gamma(x):\n return 1.0", "def log_compression(chromagram,gamma = 1):\n\n if type(chromagram) != np.ndarray:\n raise TypeError(\"Chromagram must be 2D numpy ndarray.\")\n\n if chromagram.shape[0] != 12:\n raise ValueError(\"Invalid shape of chromagram.\")\n\n if not isinstance(gamma,int) and not isinstance(gamma,float):\n raise TypeError(\"Gamma must be integer or float.\")\n\n smooth = np.log(1+gamma*chromagram)\n\n return smooth/np.linalg.norm(smooth, ord=2, axis=0, keepdims=True)", "def gamma(_df):\n return 1.e-6*CP*_df['p_a']/(0.622*(2.501-0.00236*_df['t_a']))", "def logigrcdf(norm, dim):\n\n\treturn (log(gammaincinv(dim / 2., norm)) + log(2)) / 2.", "def gamma_gb(rho):\r\n return (1.47270851e-02 / (1.0 + np.exp(-4.22865620e+01 * rho) ) - 1.47270851e-02/2)", "def _digamma(x):\n if x <= 0.0:\n return np.nan\n if x <= 1e-5:\n return -np.euler_gamma - (1 / x)\n if x < 8.5:\n return _digamma(1 + x) - 1 / x\n xpm2 = 1 / x**2\n return (\n np.log(x)\n - 0.5 / x\n - 0.083333333333333333 * xpm2\n + 0.008333333333333333 * xpm2**2\n - 0.003968253968253968 * xpm2**3\n + 0.004166666666666667 * xpm2**4\n - 0.007575757575757576 * xpm2**5\n + 0.021092796092796094 * xpm2**6\n )", "def gaussian_kl_loss(mu, logvar):\n kl_loss = 1.0 + logvar - K.square(mu) - K.exp(logvar)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n return kl_loss", "def getLogFactorial(k):\n return np.sum([log(i) for i in range(1, k+1)])", "def ComputeGammaRatio(lognH):\n # value taken from table 2 Rahmati+ 2013\n nH_ssh = 5.1*1.0e-4\n nH = 10**lognH\n ratio = 0.98*(1+(nH/nH_ssh)**1.64)**-2.28 + 0.02*(1+nH/nH_ssh)**-0.84\n return ratio", "def gamma(X):\n X = np.array(X,dtype=np.int64)\n v = remove_pics(X,3)\n \n dist_max = float(max(v))\n if dist_max == 0:\n return np.array([1.00]*len(X))\n \n actual = np.bincount(v)\n fit_alpha, fit_loc, fit_beta = stats.gamma.fit(v)\n expected=stats.gamma.pdf(np.arange(0,dist_max+1,1),fit_alpha,loc=fit_loc,scale=fit_beta)*sum(actual)\n\n return stats.gamma.pdf(X,fit_alpha,loc=fit_loc,scale=fit_beta)", "def gamma(self) -> float:\n return self.angles[2]", "def gamma(c, g):\n\n pass", "def randomLogGamma(beta,seed=None):\n if seed!=None:\n random.seed(seed)\n assert beta > 0, \"beta=%s must be greater than 0\" % beta\n beta0 = beta\n if beta0 < 1:\n beta = beta+1\n d = beta-1.0/3.0\n cinv = 3.0*(d**0.5)\n \n while True:\n Z = random.normalvariate(0,1)\n if Z > -cinv:\n logU = math.log(random.uniform(0,1))\n val = 1+Z/cinv\n V = val**3.0\n logV = 3*math.log(val)\n if logU < 0.5*(Z**2.0)+d-d*V+d*logV:\n # 1.5*math.log(9) = 3.2958368660043\n logX = -0.5*math.log(d) + 3.0*math.log(cinv+Z)-3.2958368660043\n break\n if beta0 < 1:\n logU = math.log(random.uniform(0,1))\n logX = logX + logU/beta0\n return logX", "def kl_divergence_loss(mu, logvar):\n # Increase precision (numerical underflow caused negative KLD).\n return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())", "def _gamma_ratio( xs, match_param, nonmatch_param, exp_param ):\n num = sp.stats.gamma.pdf( xs, *match_param )\n denom = sp.stats.gamma.pdf( xs, *nonmatch_param)\n val = num / (num+denom) * 
( 1-exp_param[0] * np.exp(-xs/exp_param[1]))\n return np.log2(val) - np.log2(1.0-val)", "def statePosteriors(log_alpha, log_beta):\n log_alpha = np.where(np.isinf(log_alpha), 0, log_alpha)\n sum_alphas = np.sum(np.exp(log_alpha), axis=1)\n sum_alphas = np.reshape(sum_alphas, (sum_alphas.size, 1))\n log_gamma = log_alpha + log_beta - sum_alphas\n\n # test state probabilities in linear domain\n # a = np.abs(log_gamma).astype(np.float128) # convert to float128 to avoid overflow in exp\n # linear_gamma = np.exp(a)\n # sum_prob = np.sum(linear_gamma, axis=1)\n # if (sum_prob.all() == 1):\n # print('gammas sum to 1!')\n # else:\n # print('gammas do not sum to 1!')\n\n return log_gamma", "def logg(self): \n return math.log10(self.gravity)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the holiday falls on a Saturday, Sunday or Monday then the date is unchanged (Sat/Sun observances are not made up), otherwise use the closest Monday to the date.
def nearest_monday(dt):
    day = dt.weekday()
    if day in (TUESDAY, WEDNESDAY, THURSDAY):
        return dt - timedelta(day - MONDAY)
    elif day == FRIDAY:
        return dt + timedelta(3)
    return dt
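A worked example (assuming the weekday constants follow datetime's Monday=0 convention, which the arithmetic above relies on):

from datetime import date, timedelta
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY = range(5)
assert nearest_monday(date(2024, 1, 3)) == date(2024, 1, 1)  # Wednesday -> preceding Monday
assert nearest_monday(date(2024, 1, 5)) == date(2024, 1, 8)  # Friday -> following Monday
assert nearest_monday(date(2024, 1, 6)) == date(2024, 1, 6)  # Saturday is unchanged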
[ "def closest_biz_day(self, day, forward=True):\n\n if forward:\n delta = timedelta(days=1)\n else:\n delta = timedelta(days=-1)\n while day.weekday() in self.weekends or day in self.holidays:\n day = day + delta\n return day", "def _FirstSunday(self, dt):\r\n return dt + datetime.timedelta(days=(6-dt.weekday()))", "def _advance_to_weekday(day: date, weekday: int) -> date:\n while day.weekday() != weekday:\n day += timedelta(days=1)\n return day", "def business_date_operation(date, days):\n ret_date = date + relativedelta(days=days)\n # If weekend (saturday/sunday), add another day so that\n # \"days\" number of business days are incremented\n if ret_date.weekday() in [5,6]:\n if days > 0:\n ret_date = ret_date + relativedelta(days=1)\n elif days < 0:\n ret_date = ret_date + relativedelta(days=-1)\n return ret_date", "def next_winter_solstice(date):\n return holiday(date, twopi, halfpi)", "def previous_winter_solstice(date):\n return holiday(date, -twopi, halfpi)", "def first_sunday_on_or_after(d):\n if d <= the_beginning:\n return 0\n else:\n return __weeks_to(d) + (0 if (d - the_beginning).days % 7 == 0 else 1)", "def first_sunday_on_or_before(d):\n if d < the_beginning:\n return 0\n return __weeks_to(d)", "def get_this_sunday(cur_date):\n cur_date = dt.strptime(cur_date, \"%Y-%m-%d\") if type(cur_date) is str else cur_date\n return cur_date - timedelta(cur_date.weekday() - 6)", "def next_exchange_day(dt):\n if dt.weekday() == 5 or dt.weekday() == 6 or dt in exchange_holidays:\n return dt + EDay(1)\n return dt", "def previous_exchange_day(dt):\n if dt.weekday() == 5 or dt.weekday() == 6 or dt in exchange_holidays:\n return dt + EDay(-1)\n return dt", "def business_day(adate, n, holidays=[]):\r\n\tif type(adate) == date:\r\n\t\tadate = datetime(adate.year, adate.month, adate.day) \r\n\r\n\tif n > 0:\r\n\t\tinc = 1\r\n\telif n < 0:\r\n\t\tinc = -1\r\n\telse:\r\n\t\tinc = 1\r\n\t\tn = 1\r\n\t\tadate += timedelta(days=-1)\r\n\r\n\twhile n != 0:\r\n\t\tadate += timedelta(days=inc)\r\n\t\tif (adate.weekday() < 5) and (not adate in holidays):\r\n\t\t\tn -= inc\r\n\r\n\treturn adate", "def week_ending_date(date):\n if not is_week_ending_date(date):\n return date + datetime.timedelta(days=7 - date.isoweekday())\n return date", "def make_tuesday(date):\n offset = (date.weekday() - 1) % 7\n return date - datetime.timedelta(days=offset)", "def next_monday(date):\n if date.weekday():\n one_day = datetime.timedelta(days=1)\n return date + ((7 - date.weekday()) * one_day)\n else:\n return date", "def _business_date(reference_date: date) -> date: \n if reference_date.weekday() > 4:\n return FixedRateACGB._business_date(\n reference_date + timedelta(days = 1))\n return reference_date", "def is_normal_uk_working_day(date: datetime.date) -> bool:\n return not (is_weekend(date) or is_uk_bank_holiday(date))", "def getMondayBeforeDt(inpDt: dt.datetime) -> dt.datetime:\n # get first Monday before inpDt\n inpMonday = inpDt\n while not dt.datetime.strftime(inpMonday, '%w') == '1':\n inpMonday = inpMonday - dt.timedelta(days=1)\n return inpMonday", "def week_commencing_date(date):\n if not is_week_commencing_date(date):\n return date - datetime.timedelta(days=date.isoweekday() - 1)\n return date" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends messages via a Telegram bot with the specified job data. Converts each job's data to a readable string for messaging. One message is sent per job.
def send_message(jobs, bot_api_key, bot_chat_id):
    bot = telepot.Bot(bot_api_key)
    if jobs:
        for job in jobs:
            # job_dict = make_job_dict(job)
            # job_string = '***New Job Alert***! \n'
            # for key, value in job_dict.items():
            #     job_string += f'{key}: {value}\n'
            job_string = make_job_message(job)
            bot.sendMessage(bot_chat_id, job_string, parse_mode='Markdown')
    # else:
    #     bot.sendMessage(bot_chat_id, 'No new jobs!', parse_mode='Markdown')
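A hedged call sketch (the token, chat id, and make_job_message formatter are placeholders, not part of the original code; telepot's Bot.sendMessage is used with the same signature as above):

import telepot

def make_job_message(job):
    # hypothetical formatter: one Markdown line per field
    return '\n'.join(f'*{key}*: {value}' for key, value in job.items())

jobs = [{'title': 'Data Engineer', 'company': 'Acme'}]
send_message(jobs, bot_api_key='<BOT_TOKEN>', bot_chat_id='<CHAT_ID>')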
[ "def multiple_send_command(self, job):\n obj = job[1]\n command_list = job[3]\n if obj.device == \" \":\n device = 0\n else:\n device = obj.device\n if obj.system == \" \":\n system = 0\n else:\n system = obj.system\n \n self.set_status(obj, \"Connecting\")\n self.notify_send_command_window(obj)\n try:\n telnet_session = self.establish_telnet(obj.ip_address)\n telnet_session.read_until('>', int(job[2]))\n total = len(command_list)\n count = 0\n error = 0\n for command in command_list:\n count += 1\n output = (\"send_command \" + \n str(device) + \n \":\" + \n str(command[1]) + \n \":\" + \n str(system) + \n \", \" + \n \"\\\"\\'\" + \n str(command[0]) + \n \"\\'\\\"\") \n telnet_session.write(str(output + \" \\r\"))\n result_raw = telnet_session.read_until('>', int(job[2]))\n if result_raw.split()[0] != 'command:':\n dispatcher.send(\n signal=\"send_command result\", \n sender=((True, 'Sending ' + str(result_raw)[:-1])))\n self.set_status(\n obj, ('Sent ' + str(count) + ' of ' + str(total)))\n self.notify_send_command_window(obj) \n else:\n error += 1\n dispatcher.send(signal=\"send_command result\",\n sender=((False, 'Failed to send command')))\n\n telnet_session.close()\n if not error: \n self.set_status(obj, 'Success')\n self.notify_send_command_window(obj)\n else:\n self.set_status(obj, 'Failed')\n self.notify_send_command_window(obj) \n except Exception as error:\n self.error_processing(obj, error)\n self.notify_send_command_window(obj)", "def send(nbr):\n\tjobs = job.objects.all()[:nbr]\n\tcontacts = contact.objects.all()\n\tfor j in jobs:\n\t\tfor c in contacts:\n\t\t\tprint(\"x\")\n\t\t\tmessage = \"Hey {}, {} has published a new job {} {}\".format(c.first_name , j.recruiter , j.title.encode(\"utf-8\") , j.link)\n\n\t\t\tclient = TwilioRestClient(settings.ACCOUNT_ID , settings.AUTH_TOKEN)\n\t\t\tclient.messages.create(body=message , from_=\"+12055022576\" , to = c.phone_number)", "def _handle_comm_message(self, msg):\n\n if 'request_type' in msg['content']['data']:\n r_type = msg['content']['data']['request_type']\n job_id = msg['content']['data'].get('job_id', None)\n parent_job_id = msg['content']['data'].get('parent_job_id', None)\n if job_id is not None and job_id not in self._running_jobs and not parent_job_id:\n # If it's not a real job, just silently ignore the request.\n # Unless it has a parent job id, then its a child job, so things get muddled. If there's 100+ child jobs,\n # then this might get tricky to look up all of them. Let it pass through and fail if it's not real.\n #\n # TODO: perhaps we should implement request/response here. 
All we really need is to thread a message\n # id through\n self._send_comm_message('job_does_not_exist', {'job_id': job_id, 'request_type': r_type})\n return\n elif parent_job_id is not None:\n try:\n self._verify_job_parentage(parent_job_id, job_id)\n except ValueError as e:\n self._send_comm_message('job_does_not_exist', {'job_id': job_id, 'parent_job_id': parent_job_id, 'request_type': r_type})\n\n if r_type == 'all_status':\n self._lookup_all_job_status(ignore_refresh_flag=True)\n\n elif r_type == 'job_status':\n if job_id is not None:\n self._lookup_job_status(job_id, parent_job_id=parent_job_id)\n\n elif r_type == 'job_info':\n if job_id is not None:\n self._lookup_job_info(job_id, parent_job_id=parent_job_id)\n\n elif r_type == 'stop_update_loop':\n self.cancel_job_lookup_loop()\n\n elif r_type == 'start_update_loop':\n self._start_job_status_loop()\n\n elif r_type == 'stop_job_update':\n if job_id is not None:\n if self._running_jobs[job_id]['refresh'] > 0:\n self._running_jobs[job_id]['refresh'] -= 1\n\n elif r_type == 'start_job_update':\n if job_id is not None:\n self._running_jobs[job_id]['refresh'] += 1\n self._start_job_status_loop()\n\n elif r_type == 'delete_job':\n if job_id is not None:\n try:\n self.delete_job(job_id, parent_job_id=parent_job_id)\n except Exception as e:\n self._send_comm_message('job_comm_error', {'message': str(e), 'request_type': r_type, 'job_id': job_id})\n\n elif r_type == 'cancel_job':\n if job_id is not None:\n try:\n self.cancel_job(job_id, parent_job_id=parent_job_id)\n except Exception as e:\n self._send_comm_message('job_comm_error', {'message': str(e), 'request_type': r_type, 'job_id': job_id})\n\n elif r_type == 'job_logs':\n if job_id is not None:\n first_line = msg['content']['data'].get('first_line', 0)\n num_lines = msg['content']['data'].get('num_lines', None)\n self._get_job_logs(job_id, parent_job_id=parent_job_id, first_line=first_line, num_lines=num_lines)\n else:\n raise ValueError('Need a job id to fetch jobs!')\n\n elif r_type == 'job_logs_latest':\n if job_id is not None:\n num_lines = msg['content']['data'].get('num_lines', None)\n try:\n self._get_latest_job_logs(job_id, parent_job_id=parent_job_id, num_lines=num_lines)\n except Exception as e:\n self._send_comm_message('job_comm_error', {\n 'job_id': job_id,\n 'message': str(e),\n 'request_type': r_type})\n else:\n raise ValueError('Need a job id to fetch jobs!')\n\n else:\n self._send_comm_message('job_comm_error', {'message': 'Unknown message', 'request_type': r_type})\n raise ValueError('Unknown KBaseJobs message \"{}\"'.format(r_type))", "def _send_comm_message(self, msg_type, content):\n msg = {\n 'msg_type': msg_type,\n 'content': content\n }\n if self._comm is None:\n self._comm = Comm(target_name='KBaseJobs', data={})\n self._comm.on_msg(self._handle_comm_message)\n self._comm.send(msg)", "async def send_job(job_no: int, algorithm: str, path: str, k: int):\n logging.info(f\"Job #{job_no} {algorithm} {path} {k}\")\n\n await topic_info.send(key=str(job_no), value=JobInfo(algorithm, path, k))\n\n await send_edges(path)\n\n job_no += 1", "def job(self, msg, *args, **kwargs):\n self.print(50, msg, *args, **kwargs)", "def send_mass_sms(datatuple, fail_silently=False,\r\n auth_user=None, auth_password=None, connection=None):\r\n\r\n from sendsms.message import SmsMessage\r\n connection = connection or get_connection(\r\n username = auth_user, \r\n password = auth_password,\r\n fail_silently = fail_silently\r\n )\r\n messages = [SmsMessage(message=message, 
from_phone=from_phone, to=to, flash=flash)\r\n for message, from_phone, to, flash in datatuple]\r\n connection.send_messages(messages)", "async def _process_chat_command(self, data):\n message = data['message']\n user = self.user.name\n\n # Send message to match group\n await self.channel_layer.group_send(\n self.party.host_code,\n {\n 'type': 'chat_message',\n 'message': message,\n 'user': user\n }\n )\n await self._send_response(status=status.HTTP_204_NO_CONTENT, id=data['id'])", "def send_text_data(data_list,worker_list):\n\n assert len(data_list) == len(worker_list) , \"The splits of data you are trying to send is not equal to the no. of workers\"\n\n main_list = []\n\n for i,data in enumerate(data_list):\n \n one_list = []\n \n for ind in data.index:\n text = String(data['text'][ind])\n text_ptr = text.send(worker_list[i])\n one_list.append(text_ptr)\n \n main_list.append(one_list)\n \n return main_list", "def build_and_send_message(conn, code, data):\n msg = chatlib.build_message(code, data)\n print(\"[SERVER]\", conn.getpeername(),\" msg: \", msg)\n conn.send(msg.encode())", "def send_email(self):\n from django.core.mail import send_mail\n\n email = self.instance.user.email\n toaddr = [\"spashby@ymac.org.au\", \"cjpoole@ymac.org.au\",\"cforsey@ymac.org.au\"]\n msg_from = email if email else \"spatialjobs@ymac.org.au\"\n if 'cc_recipients' in self.cleaned_data and self.cleaned_data['cc_recipients']:\n cc_emails = [u.email for u in self.cleaned_data['cc_recipients']]\n toaddr += cc_emails\n msg_subject =\"{map_type} {job_id} request\".format(map_type=self.instance.request_type,\n job_id=self.instance.job_control)\n msg_body = u\"\"\"\n Name: {user}\\n\n Email: {user.email}\\n\n Department: {user.department}\\n\n Request Type: {request_type}\\n\n Office: {user.office}\\n\n Region: {region}\\n\n Job Description: {job_desc}\\n\n Supplementary Data: {sup_data_text}\\n\n Map Size: {map_size}\\n\n Map Title: {map_title}\\n\n Required by: {required_by} \\n\n Related Job: {related_jobs} \\n\n Delivery and/or Product Instructions: {product_type} {other_instructions}\\n\n Cost Centre: {cost_centre}\\n\n Priority and urgency: {priority}\\n\"\"\".format(\n **self.cleaned_data\n )\n send_mail(msg_subject, msg_body, msg_from, toaddr)", "def send_text_message(offline_workers: list) -> None:\n txt_body: str = f\"\"\"Panic! At the Hashrate! 
\\n{', '.join(offline_workers)} rigs are reporting 0 hashrate\"\"\"\n\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n for phone_number in PHONE_NUMBERS:\n client.api.account.messages.create(\n to=phone_number,\n from_=os.environ.get('twilio_number'),\n body=txt_body)", "def send_msg(self, msg):\n if isinstance(msg, list):\n work = [m.jsonify() for m in msg]\n if len(work) == 0:\n # rpush requires at least one event, if msg is empty list then just return\n return\n self.redis.rpush(MANAGER_WORK_QUEUE, *work)\n self.increment_stats(MANAGER_WORK_QUEUE, tx=True, count=len(work))\n else:\n self.redis.rpush(MANAGER_WORK_QUEUE, msg.jsonify())\n self.increment_stats(MANAGER_WORK_QUEUE, tx=True)", "def send_scheduled_msg(context: CallbackContext):\n # Time format is 21:54\n db.execute(\"SELECT * FROM schedules WHERE time=%s\", (str(datetime.utcnow() + timedelta(hours=8)).split(' ')[1].\n rsplit(':', 1)[0],))\n users = db.fetchall()\n\n for user in users:\n buses_selected_list = list(filter(lambda x: type(x) == str and x != 'None', user[5:10]))\n bus_message = scheduled_bus_timing_format(user[1], buses_selected_list)\n context.bot.send_message(chat_id=user[0], text=bus_message[0], reply_markup=bus_message[1],\n parse_mode=ParseMode.HTML)", "def at_msg_send(self, text=None, to_obj=None, **kwargs):\r\n pass", "def post(chat, message, args):\n if message.sender.id != 26170256: #Only admin command\n message.reply(\"This command it's only for the admin of the bot\")\n return\n\n c.execute('SELECT * FROM users')\n users_list = c.fetchall()\n\n message = \" \".join(message.text.split(\" \", 1)[1:])\n\n n = 0\n\n for res in users_list:\n n += 1\n\n if n < 50:\n continue\n\n try:\n bot.chat(res[0]).send(message)\n chat.send(\"Post sent to \"+str(res[0]))\n except botogram.api.ChatUnavailableError:\n c.execute('DELETE FROM users WHERE user_id={}'.format(res[0]))\n chat.send(\"The user \"+str(res[0])+\" has blocked your bot, so I removed him from the database\")\n conn.commit()\n except Exception as e:\n chat.send(\"*Unknow error :(*\\n\"+str(e))\n\n chat.send(\"<b>Done!</b>\\nThe message has been delivered to all users\") #Yeah\n conn.commit()", "def send_goal_ros_iot(self, sheet, data=None):\n # Create a Goal Message object\n goal = msgRosIotGoal()\n goal.sheet = sheet\n\n goal.entries = []\n\n if sheet == \"Inventory\":\n rospy.loginfo(\"received goal.\")\n for inv in self.inventory:\n parameters = msgMqttSub()\n parameters.SKU = inv[\"SKU\"]\n parameters.item = self.items[inv[\"color\"]][0]\n parameters.priority = self.items[inv[\"color\"]][2]\n parameters.storage = \"R\"+inv[\"storagenum\"][0]+\" C\"+inv[\"storagenum\"][1]\n parameters.cost = self.items[inv[\"color\"]][1]\n parameters.qty = \"1\"\n goal.entries.append(parameters)\n rospy.loginfo(\"received goal.\")\n rospy.loginfo(goal.entries)\n elif sheet == \"IncomingOrders\":\n parameters = msgMqttSub()\n parameters.item = data[\"item\"]\n parameters.priority = self.items[data[\"color\"]][2]\n parameters.cost = self.items[data[\"color\"]][1]\n parameters.qty = data[\"qty\"]\n parameters.order_id = data[\"order_id\"]\n parameters.date_time1 = data[\"order_time\"]\n parameters.city = data[\"city\"]\n parameters.lon = data[\"lon\"]\n parameters.lat = data[\"lat\"]\n goal.entries.append(parameters)\n elif sheet == \"OrdersDispatched\":\n parameters = msgMqttSub()\n parameters.item = data[\"item\"]\n parameters.priority = self.items[data[\"color\"]][2]\n parameters.cost = self.items[data[\"color\"]][1]\n parameters.qty = data[\"qty\"]\n parameters.order_id 
= data[\"order_id\"]\n parameters.date_time1 = data[\"order_time\"]\n parameters.city = data[\"city\"]\n parameters.status = \"YES\"\n goal.entries.append(parameters)\n elif sheet == \"OrdersDispatched\":\n parameters = msgMqttSub()\n parameters.item = data[\"item\"]\n parameters.priority = self.items[data[\"color\"]][2]\n parameters.cost = self.items[data[\"color\"]][1]\n parameters.qty = data[\"qty\"]\n parameters.order_id = data[\"order_id\"]\n parameters.date_time1 = data[\"order_time\"]\n parameters.city = data[\"city\"]\n parameters.status = \"YES\"\n goal.entries.append(parameters)\n rospy.loginfo(\" order dispatched received\")\n elif sheet == \"OrdersShipped\":\n parameters = msgMqttSub()\n parameters.item = data[\"item\"]\n parameters.priority = self.items[data[\"color\"]][2]\n parameters.cost = self.items[data[\"color\"]][1]\n parameters.qty = data[\"qty\"]\n parameters.order_id = data[\"order_id\"]\n parameters.date_time1 = data[\"order_time\"]\n parameters.city = data[\"city\"]\n parameters.status = \"YES\"\n dates12 = datetime.datetime.now() + datetime.timedelta(days=data[\"priority\"])\n parameters.date_time2 = dates12.strftime(\"%Y-%m-%d\")\n goal.entries.append(parameters)\n\n rospy.loginfo(\"Sending goal.\")\n\n # self.on_transition - It is a function pointer to a function which will be called when\n # there is a change of state in the Action Client State Machine\n goal_handle = self._ac3.send_goal(goal, self.on_transition, None)\n\n return goal_handle", "def _send_data(self, data):\n command_data = CommandDataMessage(command_type=self.command_type, command_data=data)\n self.networking.send_message(command_data)", "def test_execute(self):\n\n data = JobData()\n job_type = job_test_utils.create_seed_job_type(max_tries=3)\n job_1 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', input=data.get_dict())\n job_2 = job_test_utils.create_job(job_type=job_type, num_exes=3, status='FAILED', input=data.get_dict())\n job_3 = job_test_utils.create_job(job_type=job_type, num_exes=1, status='COMPLETED', input=data.get_dict())\n job_4 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING', input=data.get_dict())\n job_5 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='CANCELED')\n job_ids = [job_1.id, job_2.id, job_3.id, job_4.id, job_5.id]\n\n # Add jobs to message\n message = RequeueJobs()\n message.priority = 101\n if message.can_fit_more():\n message.add_job(job_1.id, job_1.num_exes)\n if message.can_fit_more():\n message.add_job(job_2.id, job_2.num_exes - 1) # Mismatched exe_num\n if message.can_fit_more():\n message.add_job(job_3.id, job_3.num_exes)\n if message.can_fit_more():\n message.add_job(job_4.id, job_4.num_exes)\n if message.can_fit_more():\n message.add_job(job_5.id, job_5.num_exes)\n\n # Execute message\n result = message.execute()\n self.assertTrue(result)\n\n jobs = Job.objects.filter(id__in=job_ids).order_by('id')\n # Job 1 should have been good (max_tries increased)\n self.assertEqual(jobs[0].max_tries, 6)\n # Job 2 had mismatched exe_num\n self.assertEqual(jobs[1].max_tries, 3)\n # Job 3 was already COMPLETED\n self.assertEqual(jobs[2].max_tries, 3)\n # Job 4 can't be re-queued since it had never been queued yet\n self.assertEqual(jobs[3].max_tries, 3)\n # Job 5 can't be re-queued since it had never been queued yet\n self.assertEqual(jobs[4].max_tries, 3)\n # Job 1 is only job that should be included in message to be queued\n self.assertEqual(len(message.new_messages), 2)\n queued_jobs_msg = 
message.new_messages[0]\n self.assertEqual(queued_jobs_msg.type, 'queued_jobs')\n self.assertListEqual(queued_jobs_msg._queued_jobs, [QueuedJob(job_1.id, job_1.num_exes)])\n self.assertEqual(queued_jobs_msg.priority, 101)\n self.assertTrue(queued_jobs_msg.requeue)\n # Job 5 is only job that should be included in message to uncancel\n uncancel_jobs_msg = message.new_messages[1]\n self.assertEqual(uncancel_jobs_msg.type, 'uncancel_jobs')\n self.assertListEqual(uncancel_jobs_msg._job_ids, [job_5.id])\n\n # Test executing message again\n message.new_messages = []\n result = message.execute()\n self.assertTrue(result)\n\n # All results should be the same\n jobs = Job.objects.filter(id__in=job_ids).order_by('id')\n # Job 1 should have been good (max_tries increased)\n self.assertEqual(jobs[0].max_tries, 6)\n # Job 2 had mismatched exe_num\n self.assertEqual(jobs[1].max_tries, 3)\n # Job 3 was already COMPLETED\n self.assertEqual(jobs[2].max_tries, 3)\n # Job 4 can't be re-queued since it had never been queued yet\n self.assertEqual(jobs[3].max_tries, 3)\n # Job 5 can't be re-queued since it had never been queued yet\n self.assertEqual(jobs[4].max_tries, 3)\n # Job 1 is only job that should be included in message to be queued\n self.assertEqual(len(message.new_messages), 2)\n queued_jobs_msg = message.new_messages[0]\n self.assertEqual(queued_jobs_msg.type, 'queued_jobs')\n self.assertListEqual(queued_jobs_msg._queued_jobs, [QueuedJob(job_1.id, job_1.num_exes)])\n self.assertEqual(queued_jobs_msg.priority, 101)\n self.assertTrue(queued_jobs_msg.requeue)\n # Job 5 is only job that should be included in message to uncancel\n uncancel_jobs_msg = message.new_messages[1]\n self.assertEqual(uncancel_jobs_msg.type, 'uncancel_jobs')\n self.assertListEqual(uncancel_jobs_msg._job_ids, [job_5.id])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accepts the Convertible Reserved Instance exchange quote described in the GetReservedInstancesExchangeQuote call.
def accept_reserved_instances_exchange_quote(DryRun=None, ReservedInstanceIds=None, TargetConfigurations=None): pass
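The document stub above has an empty body; the sketch below shows how the exchange might be driven end to end with the live boto3 EC2 client. The region, Reserved Instance ID, and target offering ID are placeholder assumptions rather than values taken from this record, and checking the quote before accepting is an optional safeguard, not a required step.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# Placeholder identifiers: substitute real Convertible RI and offering IDs.
ri_ids = ['ri-1234567890abcdef0']
targets = [{'OfferingId': 'offering-0123456789abcdef0', 'InstanceCount': 1}]

# Inspect the quote first, then accept it only if the exchange is valid.
quote = ec2.get_reserved_instances_exchange_quote(
    ReservedInstanceIds=ri_ids,
    TargetConfigurations=targets,
)
if quote.get('IsValidExchange'):
    result = ec2.accept_reserved_instances_exchange_quote(
        ReservedInstanceIds=ri_ids,
        TargetConfigurations=targets,
    )
    print(result['ExchangeId'])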
[ "def describe_reserved_instances(DryRun=None, ReservedInstancesIds=None, Filters=None, OfferingType=None, OfferingClass=None):\n pass", "def purchase_reserved_instances_offering(DryRun=None, ReservedInstancesOfferingId=None, InstanceCount=None, LimitPrice=None):\n pass", "def getReservedInstances(verbose):\n lres = {}\n jResp = EC2C.describe_reserved_instances()\n for reserved in jResp['ReservedInstances']:\n if reserved['State'] == 'active':\n if verbose:\n lres[reserved['InstanceType']] = str(reserved['Start'])+\";\"+\\\n str(reserved['End'])+\";\"+\\\n str(reserved['InstanceCount'])+\";\"+\\\n reserved['ProductDescription']+\";\"+\\\n str(reserved['UsagePrice'])\n else:\n if re.search(\"win\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"windows\"\n elif re.search(\"red hat\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"redhat\"\n elif re.search(\"suse\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"suse\"\n else:\n os = \"linux\"\n lres[reserved['InstanceType']+\";\"+os] = str(reserved['InstanceCount'])\n return lres", "def describe_reserved_instances_offerings(DryRun=None, ReservedInstancesOfferingIds=None, InstanceType=None, AvailabilityZone=None, ProductDescription=None, Filters=None, InstanceTenancy=None, OfferingType=None, NextToken=None, MaxResults=None, IncludeMarketplace=None, MinDuration=None, MaxDuration=None, MaxInstanceCount=None, OfferingClass=None):\n pass", "def create_reserved_instances_listing(ReservedInstancesId=None, InstanceCount=None, PriceSchedules=None, ClientToken=None):\n pass", "def _to_reserved_node(self, element):\r\n\r\n # Get our extra dictionary\r\n extra = self._get_extra_dict(\r\n element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node'])\r\n\r\n try:\r\n size = [size for size in self.list_sizes() if\r\n size.id == extra['instance_type']][0]\r\n except IndexError:\r\n size = None\r\n\r\n return EC2ReservedNode(id=findtext(element=element,\r\n xpath='reservedInstancesId',\r\n namespace=NAMESPACE),\r\n state=findattr(element=element,\r\n xpath='state',\r\n namespace=NAMESPACE),\r\n driver=self,\r\n size=size,\r\n extra=extra)", "def attemptPurchases(order):\n print(\"\\n\")\n # here we sort out the availability zones\n hasOrdersAssigned = True\n\n for az in order.AvailabilityZones:\n if az.ordered is None:\n az.ordered = 0\n if az.Number is None:\n hasOrdersAssigned = False\n\n if hasOrdersAssigned == False:\n remainder = int(order.Number) % len(order.AvailabilityZones)\n eachOrderGets = int((int(order.Number) - remainder) /\n len(order.AvailabilityZones))\n # here we assign all the orders\n for az in order.AvailabilityZones:\n az.Number = eachOrderGets\n if remainder != 0:\n az.Number += 1\n remainder -= 1\n\n # this client can be used for all the az's\n print(order.Region)\n client = boto3.client('ec2', region_name=order.Region,aws_access_key_id=order.aws_access_key_id,aws_secret_access_key=order.aws_secret_access_key)\n for az in order.AvailabilityZones:\n\n # for each AZ we're buying from\n kwargs = order.getKwargs(az.Name)\n response = client.describe_reserved_instances_offerings(**kwargs)\n ReservedInstancesOfferings = response[\"ReservedInstancesOfferings\"]\n\n # we search for all instance types, not just fixed or hourly, then sort when we recieve results\n # do the sorting of the reserved instances by price, cheapest first\n allOfferings = []\n\n # get all the offerings objects\n for instanceOffering in ReservedInstancesOfferings:\n # isFixed and isHourly completely filter out or in whether or not those 
instance types get included\n # if both are true, then all types of instances get included regardless of payment type\n\n # for limits, 0 means no limit, everything else abides by the limit\n\n iOffering = getInstanceOffering(instanceOffering)\n fixedPrice = iOffering.FixedPrice\n recurringAmount = iOffering.RecurringAmount\n fixedPriceExists = False\n recurringAmountExists = False\n\n if fixedPrice is not None and fixedPrice != 0:\n fixedPriceExists = True\n if recurringAmount is not None and recurringAmount != 0:\n recurringAmountExists = True\n\n MaxFixedPrice = 0\n if order.MaxFixedPrice is not None:\n MaxFixedPrice = order.MaxFixedPrice\n\n MaxRecurringPrice = 0\n if order.MaxHourlyPrice is not None:\n MaxRecurringPrice = order.MaxHourlyPrice\n\n if order.isFixedPrice == True and order.isHourlyPrice == True:\n # either hourly or fixed or both\n if fixedPriceExists and recurringAmountExists:\n if (MaxFixedPrice == 0 or iOffering.FixedPrice <= MaxFixedPrice) and (MaxRecurringPrice == 0 or iOffering.RecurringAmount <= MaxRecurringPrice):\n allOfferings.append(iOffering)\n elif fixedPriceExists:\n if MaxFixedPrice == 0 or iOffering.FixedPrice <= MaxFixedPrice:\n allOfferings.append(iOffering)\n elif recurringAmountExists:\n if MaxRecurringPrice == 0 or iOffering.RecurringAmount <= MaxRecurringPrice:\n allOfferings.append(iOffering)\n\n elif order.isFixedPrice == True:\n # only fixed price servers\n if fixedPriceExists and recurringAmountExists == False:\n if MaxFixedPrice == 0 or iOffering.FixedPrice <= MaxFixedPrice:\n allOfferings.append(iOffering)\n\n elif order.isHourlyPrice == True:\n # only hourly servers\n if recurringAmountExists and fixedPriceExists == False:\n if MaxRecurringPrice == 0 or iOffering.RecurringAmount <= MaxRecurringPrice:\n allOfferings.append(iOffering)\n\n # sort into cost effectiveness, and these all have the correct AZ\n allOfferings.sort(key=lambda x: x.EffectiveHourlyRate)\n\n # print(order.Number)\n if order.Number is not None and order.Number > 0:\n if order.ordered is None:\n # brand new order bring it up to speed\n order.ordered = 0\n\n if az.ordered >= az.Number:\n print(\"AZ\", az.Name, \"has already been fulfilled with\",\n az.ordered, \"instances\")\n # buy until finished\n purchasedJustNow = 0\n previouslyPurchased = az.ordered\n for instanceOffering in allOfferings:\n # instanceOffering.print()\n # also we might want to write to the file, like keep it open, and update it for each order bought\n # something might go wrong\n # print(instanceOffering, \"\\n\")\n if order.ordered < order.Number and az.ordered < az.Number:\n # do purchase\n order.ordered += 1\n az.ordered += 1\n purchasedJustNow += 1\n instance = allOfferings.pop(0)\n kwargs = instance.getKwargs(order.DryRun)\n response = None\n try:\n response = client.purchase_reserved_instances_offering(\n **kwargs)\n print(response)\n except:\n pass\n print(\"Just Purchased:\")\n instanceOffering.print()\n order.PurchasedInstances.append(instanceOffering)\n\n if order.ordered >= order.Number or az.ordered >= az.Number:\n break\n\n print(purchasedJustNow,\n \"Reserved Instances were just purchased for:\", az.Name)\n print(previouslyPurchased, \"instances had been purchased previously\")\n if az.ordered >= az.Number:\n print(\"Purchased all\", az.ordered,\n \"Reserved Instances for:\", az.Name, \"\\n\")\n else:\n print(\"Still need\", int(az.Number - az.ordered), \"instances for availability zone:\",\n az.Name, \", will attempt to purchase the rest during the next run\", \"\\n\")\n\n if order.ordered 
>= order.Number:\n print(\"Purchased all\", order.ordered,\n \"Reserved Instances for this order\\n\\n\")\n else:\n print(\"Could only purchase\", order.ordered,\n \"Reserved Instances for this order, will attempt to purchase the rest at a later date.\\n\\n\")\n return", "def modify_reserved_instances(ClientToken=None, ReservedInstancesIds=None, TargetConfigurations=None):\n pass", "def reservation_conversion(self):\n \n if(self.order_type == OrderType.PURCHASE_ORDER):\n # this is already a purchase, nothing else to do\n return\n \n if(self.order_type == OrderType.RESERVATION_ORDER and self.reservation):\n self.order_type = OrderType.PURCHASE_ORDER\n self.converted_from_reservation = True\n self.save()\n # TODO: create purchase from reservation", "def _ValidateInstanceName(self, args):\n instance_name_pattern = re.compile('^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$')\n if not instance_name_pattern.match(args.instance_name):\n raise exceptions.InvalidArgumentException(\n 'INSTANCE_NAME',\n 'Name must start with a lowercase letter followed by up to '\n '63 lowercase letters, numbers, or hyphens, and cannot end '\n 'with a hyphen.')", "def ex_list_reserved_nodes(self):\r\n params = {'Action': 'DescribeReservedInstances'}\r\n\r\n response = self.connection.request(self.path, params=params).object\r\n\r\n return self._to_reserved_nodes(response, 'reservedInstancesSet/item')", "def get_instance_type_offerings(region: str) -> List[str]:\n boto_cfg = create_aws_config(region)\n ec2 = boto3.client('ec2', config=boto_cfg)\n try:\n current = ec2.describe_instance_type_offerings(LocationType='region', Filters=[{'Name': 'location', 'Values': [region]}])\n instance_types = current['InstanceTypeOfferings']\n while 'NextToken' in current:\n current = ec2.describe_instance_type_offerings(LocationType='region', Filters=[{'Name': 'location', 'Values': [region]}], NextToken=current['NextToken'])\n instance_types += current['InstanceTypeOfferings']\n except ClientError as err:\n logging.debug(err)\n raise UserReportError(returncode=INPUT_ERROR, message=f'Invalid AWS region \"{region}\"')\n except NoCredentialsError as err:\n logging.debug(err)\n raise UserReportError(returncode=PERMISSIONS_ERROR, message=str(err))\n\n if not instance_types:\n raise UserReportError(returncode=DEPENDENCY_ERROR,\n message=f'Could not get instance types available in region: {region}')\n return [it['InstanceType'] for it in instance_types]", "def instance_from_response(response: Dict) -> List[EC2Instance]:\n ec2_instances = []\n for reservation in response.get(\"Reservations\"):\n for instance in reservation.get(\"Instances\"):\n if dns := instance.get(\"PublicDnsName\"):\n public_dns_name = dns\n else:\n public_dns_name = \"NONE\"\n if ip := instance.get(\"PublicIpAddress\"):\n public_ip_address = ip\n else:\n public_ip_address = \"NONE\"\n ec2_instance = EC2Instance(\n image_id=instance.get(\"ImageId\"),\n instance_id=instance.get(\"InstanceId\"),\n instance_type=instance.get(\"InstanceType\"),\n launch_time=instance.get(\"LaunchTime\"),\n availability_zone=instance.get(\"Placement\").get(\"AvailabilityZone\"),\n private_dns_name=instance.get(\"PrivateDnsName\"),\n private_ip_address=instance.get(\"PrivateIpAddress\"),\n public_dns_name=public_dns_name,\n public_ip_address=public_ip_address,\n state=instance.get(\"State\").get(\"Name\"),\n subnet_id=instance.get(\"SubnetId\"),\n vpc_id=instance.get(\"VpcId\"),\n tags=instance.get(\"Tags\"),\n )\n ec2_instances.append(ec2_instance)\n\n return ec2_instances", "def handle_accepted(cls, 
agreement): # pragma: no cover", "def confirm_product_instance(DryRun=None, ProductCode=None, InstanceId=None):\n pass", "def optimizeReservation(verbose,region):\n print(\"WARNING: As it's not possible to get OS through AWS API, All \"\\\n \"Linux are reported as Linux (no RedHat, Suse, etc)\\n\"\\\n \"This issue will be address in a future update\\n\\n\")\n shouldReserved = {}\n dreserved = getReservedInstances(False)\n dinstances = listInstances(False)\n dflavors = getInstanceTypes(region)\n count_by_type_os = countInstanceByTypeByOS(False, dinstances)\n resp = \"\"\n for typos, nb in count_by_type_os.items():\n if typos in dreserved:\n if int(count_by_type_os[typos]) - int(dreserved[typos]) >= 0:\n count_by_type_os[typos] = int(count_by_type_os[typos]) - int(dreserved[typos])\n resp += \"Reservation fully used for \"+typos+\"\\n\"\n else:\n print(\"Reservation not fully used for \"+typos+\": \"+dreserved[typos]+\"reserved but only \"+count_by_type_os[typos]+\" instances\")\n for typos, nb in dreserved.items():\n if typos not in count_by_type_os:\n resp += \"Reservation is not used for \"+typos+\"\\n\"\n #Provide tips for better reservations\n #Begin by removing instances that have reservation\n for instanceId in list(dinstances):\n if dinstances[instanceId]['flavor'] in dreserved:\n if int(dreserved[dinstances[instanceId]['flavor']]) > 0:\n dreserved[dinstances[instanceId]['flavor']] -= 1\n del dinstances[instanceId]\n today = datetime.datetime.now(datetime.timezone.utc)\n months6 = today-datetime.timedelta(days=180)\n for k, v in dinstances.items():\n if v['LaunchTime'] < months6:\n try:\n shouldReserved[v['flavor']+\";\"+v['platform']] += 1\n except:\n shouldReserved[v['flavor']+\";\"+v['platform']] = 1\n resp += \"\\nBased on instances older than 6 months, you should buy following reservations:\\n\"\n saveno, savepa = 0, 0\n for k, v in shouldReserved.items():\n resp += k+\":\"+str(v)+\"\\n\"\n saveno += (float(dflavors[k]['ondemand']) - float(dflavors[k]['reserved1yno'])) * v\n savepa += (float(dflavors[k]['ondemand']) - float(dflavors[k]['reserved1ypa'])) * v\n resp += \"You can save up to \"+str(saveno)+\"$/hour with no upfront reservation\\n\"\n resp += \"You can save up to \"+str(savepa)+\"$/hour with partial upfront reservation\\n\"\n if verbose:\n resp += \"\\nInstances below doesn't have reservation:\\n\"\n for k, v in count_by_type_os.items():\n resp += k+\":\"+str(v)+\"\\n\"\n return saveno, resp", "def validate(currency_name, address):\n\n tickers = [currency.Currencies.instances[curr].ticker for curr in currency.Currencies.instances]\n currencies = [currency.Currencies.instances[curr].name for curr in currency.Currencies.instances]\n\n if currency_name in tickers or currency_name in currencies:\n request = ValidationRequest(currency_name, address)\n return request.execute()\n else:\n return ValidationResult(\n name='',\n ticker=currency_name,\n address=bytes(address, 'utf-8'),\n valid=True,\n network='',\n address_type='address',\n is_extended=False\n )", "def _process_rds():\n print \"Relational Database Service menu:\"\n print \"\\t1. List DB instances\"\n print \"\\t2. Show DB info\"\n print \"\\t3. Create DB instance\"\n print \"\\t4. 
Delete DB instance\"\n print \"Enter \\'\\\\q\\' to go back\"\n while True:\n op = raw_input(\"Enter option: \")\n # Validating entered option\n op_vendor = __op_validation(r'^([1-4]|\\\\q)$', op)\n if op_vendor == \"\\\\q\":\n welcome_menu()\n break\n else:\n if op == '1':\n # List DB instances\n rdsi.list_dbs(rdsconn)\n elif op == '2':\n # Ask user for dbinstance name\n dbinstance_name = raw_input(\"Enter DB instance name: \")\n # Show info\n rdsi.show_dbinstance_info(rdsconn, dbinstance_name)\n elif op == '3':\n # Ask user for dbinstance name\n dbinstance_name = raw_input(\"Enter DB instance name: \")\n # Ask user for db size\n size = raw_input(\"Enter DB instance size (in GB): \")\n # Ask user for dbinstance engine\n engine = raw_input(\"Enter DB instance engine (MySQL/oracle-ee/postgres): \")\n # Ask user for dbinstance user name\n username = raw_input(\"Enter DB user name: \")\n # Ask user for dbinstance password\n password = raw_input(\"Enter DB instance password: \")\n rdsi.create_dbinstance(rdsconn, dbinstance_name, size, engine, username, password)\n elif op == '4':\n # Ask user for dbinstance name\n dbinstance_name = raw_input(\"Enter DB instance name: \")\n rdsi.delete_dbinstance(rdsconn, dbinstance_name)", "def process_instances(region_name, zone_name, instances):\n for instance in instances: \n\n if (zone_name == instance.placement['AvailabilityZone']):\n\n spinner.clear()\n\n if region_name not in region_headers:\n print(\"Region: \"+region_name)\n region_headers.append(region_name)\n\n if zone_name not in zone_headers:\n print(\"\\tZone: \"+zone_name)\n zone_headers.append(zone_name)\n\n print(\"\\t\\t\" + instance.id + \"\\t\" + tag_value(instance.tags,'Name'))\n print(\"\\t\\tIP Address:\" + instance.public_ip_address);\n\n scan_instance(instance)", "def acceptedTypeIds(*args, **kwargs):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accept a VPC peering connection request. To accept a request, the VPC peering connection must be in the pending-acceptance state, and you must be the owner of the peer VPC. Use DescribeVpcPeeringConnections to view your outstanding VPC peering connection requests.
def accept_vpc_peering_connection(DryRun=None, VpcPeeringConnectionId=None): pass
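A minimal sketch of the accepter side, assuming the boto3 EC2 client and a status-code filter for connections still in the pending-acceptance state; no real connection IDs are implied.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# List peering requests that are waiting on this account to approve.
pending = ec2.describe_vpc_peering_connections(
    Filters=[{'Name': 'status-code', 'Values': ['pending-acceptance']}]
)
for conn in pending['VpcPeeringConnections']:
    resp = ec2.accept_vpc_peering_connection(
        VpcPeeringConnectionId=conn['VpcPeeringConnectionId']
    )
    print(resp['VpcPeeringConnection']['Status']['Code'])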
[ "def accept_vpc_peering_connection( # pylint: disable=too-many-arguments\n conn_id=\"\", name=\"\", region=None, key=None, keyid=None, profile=None, dry_run=False\n):\n if not _exactly_one((conn_id, name)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_peering_connection_id or name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if name:\n conn_id = _vpc_peering_conn_id_for_name(name, conn)\n if not conn_id:\n raise SaltInvocationError(\n \"No ID found for this \"\n \"VPC peering connection! ({}) \"\n \"Please make sure this VPC peering \"\n \"connection exists \"\n \"or invoke this function with \"\n \"a VPC peering connection \"\n \"ID\".format(name)\n )\n try:\n log.debug(\"Trying to accept vpc peering connection\")\n conn.accept_vpc_peering_connection(\n DryRun=dry_run, VpcPeeringConnectionId=conn_id\n )\n return {\"msg\": \"VPC peering connection accepted.\"}\n except botocore.exceptions.ClientError as err:\n log.error(\"Got an error while trying to accept vpc peering\")\n return {\"error\": __utils__[\"boto.get_error\"](err)}", "def request_vpc_peering_connection(\n requester_vpc_id=None,\n requester_vpc_name=None,\n peer_vpc_id=None,\n peer_vpc_name=None,\n name=None,\n peer_owner_id=None,\n peer_region=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n dry_run=False,\n):\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if name and _vpc_peering_conn_id_for_name(name, conn):\n raise SaltInvocationError(\n \"A VPC peering connection with this name already \"\n \"exists! Please specify a different name.\"\n )\n\n if not _exactly_one((requester_vpc_id, requester_vpc_name)):\n raise SaltInvocationError(\n \"Exactly one of requester_vpc_id or requester_vpc_name is required\"\n )\n if not _exactly_one((peer_vpc_id, peer_vpc_name)):\n raise SaltInvocationError(\n \"Exactly one of peer_vpc_id or peer_vpc_name is required.\"\n )\n\n if requester_vpc_name:\n requester_vpc_id = _get_id(\n vpc_name=requester_vpc_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not requester_vpc_id:\n return {\n \"error\": \"Could not resolve VPC name {} to an ID\".format(\n requester_vpc_name\n )\n }\n if peer_vpc_name:\n peer_vpc_id = _get_id(\n vpc_name=peer_vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not peer_vpc_id:\n return {\n \"error\": \"Could not resolve VPC name {} to an ID\".format(peer_vpc_name)\n }\n\n peering_params = {\n \"VpcId\": requester_vpc_id,\n \"PeerVpcId\": peer_vpc_id,\n \"DryRun\": dry_run,\n }\n\n if peer_owner_id:\n peering_params.update({\"PeerOwnerId\": peer_owner_id})\n if peer_region:\n peering_params.update({\"PeerRegion\": peer_region})\n\n try:\n log.debug(\"Trying to request vpc peering connection\")\n if not peer_owner_id:\n vpc_peering = conn.create_vpc_peering_connection(**peering_params)\n else:\n vpc_peering = conn.create_vpc_peering_connection(**peering_params)\n peering = vpc_peering.get(\"VpcPeeringConnection\", {})\n peering_conn_id = peering.get(\"VpcPeeringConnectionId\", \"ERROR\")\n msg = \"VPC peering {} requested.\".format(peering_conn_id)\n log.debug(msg)\n\n if name:\n log.debug(\"Adding name tag to vpc peering connection\")\n conn.create_tags(\n Resources=[peering_conn_id], Tags=[{\"Key\": \"Name\", \"Value\": name}]\n )\n log.debug(\"Applied name tag to vpc peering connection\")\n msg += \" With name {}.\".format(name)\n\n return {\"msg\": msg}\n except botocore.exceptions.ClientError as 
err:\n log.error(\"Got an error while trying to request vpc peering\")\n return {\"error\": __utils__[\"boto.get_error\"](err)}", "def peering_connection_pending_from_vpc(\n conn_id=None,\n conn_name=None,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n if not _exactly_one((vpc_id, vpc_name)):\n raise SaltInvocationError(\"Exactly one of vpc_id or vpc_name must be provided.\")\n\n if vpc_name:\n vpc_id = check_vpc(\n vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not vpc_id:\n log.warning(\"Could not resolve VPC name %s to an ID\", vpc_name)\n return False\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n filters = [\n {\"Name\": \"requester-vpc-info.vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"status-code\", \"Values\": [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]},\n ]\n if conn_id:\n filters += [{\"Name\": \"vpc-peering-connection-id\", \"Values\": [conn_id]}]\n else:\n filters += [{\"Name\": \"tag:Name\", \"Values\": [conn_name]}]\n\n vpcs = conn.describe_vpc_peering_connections(Filters=filters).get(\n \"VpcPeeringConnections\", []\n )\n\n if not vpcs:\n return False\n elif len(vpcs) > 1:\n raise SaltInvocationError(\n \"Found more than one ID for the VPC peering \"\n \"connection ({}). Please call this function \"\n \"with an ID instead.\".format(conn_id or conn_name)\n )\n else:\n status = vpcs[0][\"Status\"][\"Code\"]\n\n return bool(status == PENDING_ACCEPTANCE)", "def describe_vpc_peering_connections(DryRun=None, VpcPeeringConnectionIds=None, Filters=None):\n pass", "def is_peering_connection_pending(\n conn_id=None, conn_name=None, region=None, key=None, keyid=None, profile=None\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if conn_id:\n vpcs = conn.describe_vpc_peering_connections(\n VpcPeeringConnectionIds=[conn_id]\n ).get(\"VpcPeeringConnections\", [])\n else:\n filters = [\n {\"Name\": \"tag:Name\", \"Values\": [conn_name]},\n {\n \"Name\": \"status-code\",\n \"Values\": [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING],\n },\n ]\n vpcs = conn.describe_vpc_peering_connections(Filters=filters).get(\n \"VpcPeeringConnections\", []\n )\n\n if not vpcs:\n return False\n elif len(vpcs) > 1:\n raise SaltInvocationError(\n \"Found more than one ID for the VPC peering \"\n \"connection ({}). 
Please call this function \"\n \"with an ID instead.\".format(conn_id or conn_name)\n )\n else:\n status = vpcs[0][\"Status\"][\"Code\"]\n\n return status == PENDING_ACCEPTANCE", "async def accept_friend_request(self, request: FriendRequest) -> None:\r\n await self.session.accept_friend_request(\r\n request.type, request.id, request.author.account_id, client=self\r\n )", "def describe_vpc_peering_connection(\n name, region=None, key=None, keyid=None, profile=None\n):\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n return {\"VPC-Peerings\": _get_peering_connection_ids(name, conn)}", "def requestActiveScans(self, peers=None):\n if not peers:\n peers = self.getPeers().except_(self.alreadySent()).all()\n\n retries = int(self.config.scanning.max_active_retries)\n while peers and retries > 0:\n failures = []\n for peer in peers:\n self.logger.log(\"Sending active scan request to peer %r\" % peer)\n request = SentScanRequest(self.config.owner, self.url, peer)\n self.session.commit() # release the database lock for following long operation\n try:\n peer.transport.scanRequest(self.config.owner.name, self.url, request.key)\n except:\n self.logger.exception()\n failures.append(peer)\n else:\n self.session.add(request)\n peers[:] = failures\n retries = 0 #-= 1 # Disable retries for now.\n self.session.commit()", "def onRequestPeers(self):\n selection = self.peerList.curselection()\n if len(selection) == 1:\n peerid = self.peerList.get(selection[0])\n self.btpeer.sendtopeer( peerid, GETPEERS, \"%s\" % ( self.btpeer.myid) )", "def accept_portability_request(portability_request):\n if portability_request.state != PortabilityRequestState.PENDING.value:\n raise PortabilityTransitionException()\n portability_request.state = PortabilityRequestState.ACCEPTED.value\n portability_request.save(update_fields=(\"state\",))\n\n PlaylistPortability.objects.get_or_create(\n source_playlist=portability_request.for_playlist,\n target_playlist=portability_request.from_playlist,\n )", "def connect_to(self, peer):\n self.connection = Connection(peer)\n return self.connection", "def delete_vpc_peering_connection(\n conn_id=None,\n conn_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n dry_run=False,\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n if conn_name:\n conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)\n if not conn_id:\n raise SaltInvocationError(\n \"Couldn't resolve VPC peering connection {} to an ID\".format(conn_name)\n )\n try:\n log.debug(\"Trying to delete vpc peering connection\")\n conn.delete_vpc_peering_connection(\n DryRun=dry_run, VpcPeeringConnectionId=conn_id\n )\n return {\"msg\": \"VPC peering connection deleted.\"}\n except botocore.exceptions.ClientError as err:\n e = __utils__[\"boto.get_error\"](err)\n log.error(\"Failed to delete VPC peering %s: %s\", conn_name or conn_id, e)\n return {\"error\": e}", "def peerings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]:\n return pulumi.get(self, \"peerings\")", "def collaboration_request_accept(self, sso_session_id, request_key):\n return self.patch(\n url=url_collaboration_request_detail.format(request_key=request_key),\n data={'accepted': True},\n authenticator=self.authenticator(sso_session_id),\n )", "def test_peer_connectivity(self, name, peer_type=\"REPLICATION\"):\n params = 
self._get_peer_type_param(peer_type)\n return self._post(\"peers/%s/commands/test\" % name, ApiCommand, params=params,\n api_version=3)", "def handle_connect(self, req):\r\n \r\n # Create a socket to connect to the remote server\r\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # From now on, we must not forget to close this socket before leaving.\r\n try:\r\n try:\r\n # Connection to the remote server\r\n print thread.get_ident(), 'Connecting to', req['address']\r\n\r\n\r\n # Possible way to handle the timeout defined in the protocol!\r\n # Make the connect non-blocking, then do a select and keep\r\n # an eye on the writable socket, just as I did with the\r\n # accept() from BIND requests.\r\n # Do this tomorrow... Geez... 00:47... Do this this evening.\r\n \r\n remote.connect(req['address'])\r\n \r\n # The only connection that can be reset here is the one of the\r\n # client, so we don't need to answer. Any other socket\r\n # exception forces us to try to answer to the client.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Client_Connection_Closed((ERR_CONNECTION_RESET_BY_PEER, socket.errorTab[ERR_CONNECTION_RESET_BY_PEER]))\r\n else:\r\n raise Remote_Connection_Failed\r\n except:\r\n raise Remote_Connection_Failed\r\n \r\n # From now on we will already have answered to the client.\r\n # Any exception occuring now must make us exit silently.\r\n try:\r\n # Telling the client that the connection it asked for is\r\n # granted.\r\n self.answer_granted()\r\n # Starting to relay information between the two peers.\r\n self.forward(self.request, remote)\r\n # We don't have the right to \"speak\" to the client anymore.\r\n # So any socket failure means a \"connection closed\" and silent\r\n # exit.\r\n except socket.error:\r\n raise Connection_Closed\r\n # Mandatory closing of the remote socket.\r\n finally:\r\n remote.close()", "def onMeetupRequest(self):\n sels = self.peerList.curselection()\n if len(sels)==1:\n # Send request to target node\n peerid = self.peerList.get(sels[0])\n meetup_data = self.meetupRequestEntry.get().lstrip().rstrip()\n # Check if there's a pending request\n found = False\n for id, data in self.btpeer.meetups.iteritems():\n if id == self.btpeer.myid:\n if data['to'] == peerid and data['accepted'] == None:\n found = True\n if not found: # Can only send one meetup request to a node at a time\n self.btpeer.sendtopeer( peerid, MEET,\n \"%s %s\" % (self.btpeer.myid, meetup_data))\n # Add request to my list\n location, date, time = meetup_data.split()\n self.btpeer.meetups[self.btpeer.myid] = {'to': peerid, 'location': location, 'date': date, 'time': time, 'accepted': None}\n self.updateMeetupList()", "def accept_invitation(GraphArn=None):\n pass", "def test_sending_and_accepting_request(self):\n\n self.send_request()\n\n request_response_id = RequestResponse.list(\n self._API_CONTEXT,\n self._USER_ID,\n self._MONETARY_ACCOUNT_ID2\n ).value[self._FIRST_INDEX].id_\n\n self.accept_request(request_response_id)", "def accept(self):\n # TODO: Create a connection.\n self.status = self.InviteStatus.ACCEPTED\n self.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allocates a Dedicated Host to your account. At a minimum, you need to specify the instance size type, Availability Zone, and quantity of hosts you want to allocate.
def allocate_hosts(AutoPlacement=None, ClientToken=None, InstanceType=None, Quantity=None, AvailabilityZone=None): pass
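A short sketch of an allocation call against the boto3 EC2 client, assuming illustrative values for the zone, instance size type, and quantity.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# Allocate two Dedicated Hosts sized for m5.large instances; with
# AutoPlacement set to 'on', untargeted launches may land on these hosts.
resp = ec2.allocate_hosts(
    AutoPlacement='on',
    AvailabilityZone='us-east-1a',
    InstanceType='m5.large',
    Quantity=2,
)
print(resp['HostIds'])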
[ "def create_host(self, host: dict) -> PrivXAPIResponse:\n response_status, data = self._http_post(UrlEnum.HOST_STORE.HOSTS, body=host)\n return PrivXAPIResponse(response_status, HTTPStatus.CREATED, data)", "def _allocate_addresses_for_host(self, context, host):\n mgmt_ip = host.mgmt_ip\n mgmt_interfaces = self.iinterfaces_get_by_ihost_nettype(\n context, host.uuid, constants.NETWORK_TYPE_MGMT\n )\n mgmt_interface_id = None\n if mgmt_interfaces:\n mgmt_interface_id = mgmt_interfaces[0]['id']\n hostname = host.hostname\n address_name = cutils.format_address_name(hostname,\n constants.NETWORK_TYPE_MGMT)\n # if ihost has mgmt_ip, make sure address in address table\n if mgmt_ip:\n self._create_or_update_address(context, hostname, mgmt_ip,\n constants.NETWORK_TYPE_MGMT,\n mgmt_interface_id)\n # if ihost has no management IP, check for static mgmt IP\n if not mgmt_ip:\n mgmt_ip = self._lookup_static_ip_address(\n hostname, constants.NETWORK_TYPE_MGMT\n )\n if mgmt_ip:\n host.mgmt_ip = mgmt_ip\n self.update_ihost(context, host)\n # if no static address, then allocate one\n if not mgmt_ip:\n mgmt_pool = self.dbapi.network_get_by_type(\n constants.NETWORK_TYPE_MGMT\n ).pool_uuid\n\n mgmt_ip = self._allocate_pool_address(mgmt_interface_id, mgmt_pool,\n address_name).address\n if mgmt_ip:\n host.mgmt_ip = mgmt_ip\n self.update_ihost(context, host)\n\n self._generate_dnsmasq_hosts_file(existing_host=host)\n self._allocate_cluster_host_address_for_host(host)", "def create_host_vapi(context, host_name, datacenter_name):\n user = context.testbed.config['ESX_USER']\n pwd = context.testbed.config['ESX_PASS']\n\n # Get the host folder for the Datacenter1 using the folder query\n datacenter = context.testbed.entities['DATACENTER_IDS'][datacenter_name]\n folder_summaries = context.client.vcenter.Folder.list(\n Folder.FilterSpec(type=Folder.Type.HOST, datacenters=set([datacenter])))\n folder = folder_summaries[0].folder\n\n create_spec = Host.CreateSpec(\n hostname=host_name,\n user_name=user,\n password=pwd,\n folder=folder,\n thumbprint_verification=Host.CreateSpec.ThumbprintVerification.NONE)\n host = context.client.vcenter.Host.create(create_spec)\n print(\"Created Host '{}' ({})\".format(host, host_name))\n\n return host", "def _allocate_cluster_host_address_for_host(self, host):\n\n # controller must have cluster-host address already allocated\n if (host.personality != constants.CONTROLLER):\n\n cluster_host_address = self._lookup_static_ip_address(\n host.hostname, constants.NETWORK_TYPE_CLUSTER_HOST)\n\n if cluster_host_address is None:\n address_name = cutils.format_address_name(\n host.hostname, constants.NETWORK_TYPE_CLUSTER_HOST)\n LOG.info(\"{} address not found. 
Allocating address for {}.\".format(\n address_name, host.hostname))\n host_network = self.dbapi.network_get_by_type(\n constants.NETWORK_TYPE_CLUSTER_HOST)\n self._allocate_pool_address(None, host_network.pool_uuid,\n address_name)", "def from_dict(cls, _dict: Dict) -> 'DedicatedHost':\n args = {}\n if 'available_memory' in _dict:\n args['available_memory'] = _dict.get('available_memory')\n else:\n raise ValueError('Required property \\'available_memory\\' not present in DedicatedHost JSON')\n if 'available_vcpu' in _dict:\n args['available_vcpu'] = VCPU.from_dict(_dict.get('available_vcpu'))\n else:\n raise ValueError('Required property \\'available_vcpu\\' not present in DedicatedHost JSON')\n if 'created_at' in _dict:\n args['created_at'] = string_to_datetime(_dict.get('created_at'))\n else:\n raise ValueError('Required property \\'created_at\\' not present in DedicatedHost JSON')\n if 'crn' in _dict:\n args['crn'] = _dict.get('crn')\n else:\n raise ValueError('Required property \\'crn\\' not present in DedicatedHost JSON')\n if 'disks' in _dict:\n args['disks'] = [DedicatedHostDisk.from_dict(x) for x in _dict.get('disks')]\n else:\n raise ValueError('Required property \\'disks\\' not present in DedicatedHost JSON')\n if 'group' in _dict:\n args['group'] = DedicatedHostGroupReference.from_dict(_dict.get('group'))\n else:\n raise ValueError('Required property \\'group\\' not present in DedicatedHost JSON')\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n else:\n raise ValueError('Required property \\'href\\' not present in DedicatedHost JSON')\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in DedicatedHost JSON')\n if 'instance_placement_enabled' in _dict:\n args['instance_placement_enabled'] = _dict.get('instance_placement_enabled')\n else:\n raise ValueError('Required property \\'instance_placement_enabled\\' not present in DedicatedHost JSON')\n if 'instances' in _dict:\n args['instances'] = [InstanceReference.from_dict(x) for x in _dict.get('instances')]\n else:\n raise ValueError('Required property \\'instances\\' not present in DedicatedHost JSON')\n if 'lifecycle_state' in _dict:\n args['lifecycle_state'] = _dict.get('lifecycle_state')\n else:\n raise ValueError('Required property \\'lifecycle_state\\' not present in DedicatedHost JSON')\n if 'memory' in _dict:\n args['memory'] = _dict.get('memory')\n else:\n raise ValueError('Required property \\'memory\\' not present in DedicatedHost JSON')\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n else:\n raise ValueError('Required property \\'name\\' not present in DedicatedHost JSON')\n if 'profile' in _dict:\n args['profile'] = DedicatedHostProfileReference.from_dict(_dict.get('profile'))\n else:\n raise ValueError('Required property \\'profile\\' not present in DedicatedHost JSON')\n if 'provisionable' in _dict:\n args['provisionable'] = _dict.get('provisionable')\n else:\n raise ValueError('Required property \\'provisionable\\' not present in DedicatedHost JSON')\n if 'resource_group' in _dict:\n args['resource_group'] = ResourceGroupReference.from_dict(_dict.get('resource_group'))\n else:\n raise ValueError('Required property \\'resource_group\\' not present in DedicatedHost JSON')\n if 'resource_type' in _dict:\n args['resource_type'] = _dict.get('resource_type')\n else:\n raise ValueError('Required property \\'resource_type\\' not present in DedicatedHost JSON')\n if 'socket_count' in _dict:\n args['socket_count'] = 
_dict.get('socket_count')\n else:\n raise ValueError('Required property \\'socket_count\\' not present in DedicatedHost JSON')\n if 'state' in _dict:\n args['state'] = _dict.get('state')\n else:\n raise ValueError('Required property \\'state\\' not present in DedicatedHost JSON')\n if 'supported_instance_profiles' in _dict:\n args['supported_instance_profiles'] = [InstanceProfileReference.from_dict(x) for x in _dict.get('supported_instance_profiles')]\n else:\n raise ValueError('Required property \\'supported_instance_profiles\\' not present in DedicatedHost JSON')\n if 'vcpu' in _dict:\n args['vcpu'] = VCPU.from_dict(_dict.get('vcpu'))\n else:\n raise ValueError('Required property \\'vcpu\\' not present in DedicatedHost JSON')\n if 'zone' in _dict:\n args['zone'] = ZoneReference.from_dict(_dict.get('zone'))\n else:\n raise ValueError('Required property \\'zone\\' not present in DedicatedHost JSON')\n return cls(**args)", "def create_host_vim(context, host_name, datacenter_name):\n user = context.testbed.config['ESX_USER']\n pwd = context.testbed.config['ESX_PASS']\n\n # Get the host folder for the Datacenter1 using the folder query\n datacenter = context.testbed.entities['DATACENTER_IDS'][datacenter_name]\n\n for entity in context.service_instance.content.rootFolder.childEntity:\n if isinstance(entity, vim.Datacenter) and\\\n entity.name == datacenter_name:\n datacenter_mo = entity\n\n folder_mo = datacenter_mo.hostFolder\n connect_spec = vim.host.ConnectSpec(hostName=host_name,\n userName=user,\n password=pwd,\n force=False)\n print(\"Creating Host ({})\".format(host_name))\n task = folder_mo.AddStandaloneHost(connect_spec,\n vim.ComputeResource.ConfigSpec(),\n True)\n pyVim.task.WaitForTask(task)\n\n # Get host from task result\n host_mo = task.info.result.host[0]\n print(\"Created Host '{}' ({})\".format(host_mo._moId, host_name))\n\n return host_mo._moId", "def create_host(self, project_id: UUID, host: HiveLibrary.Host) -> Optional[UUID]:\n return self.create_hosts(project_id=project_id, hosts=[host])", "def tenant_floating_ip_allocate(request):\n return engineclient(request).floating_ips.create()", "def create_next_vm(self, host_entry):\n host_entry.ip = self.os.get_new_floating_ip().ip\n response = self.ipa.host_add(host_entry)\n host_entry.otp = response['randompassword']\n self.create_userdata(host_entry)\n result = self.os.boot_vm(host_entry)", "def _ensure_sufficient_hosts(\n self, context, hosts, required_count, claimed_uuids=None,\n ):\n if len(hosts) == required_count:\n # We have enough hosts.\n return\n\n if claimed_uuids:\n self._cleanup_allocations(context, claimed_uuids)\n\n # NOTE(Rui Chen): If multiple creates failed, set the updated time\n # of selected HostState to None so that these HostStates are\n # refreshed according to database in next schedule, and release\n # the resource consumed by instance in the process of selecting\n # host.\n for host in hosts:\n host.updated = None\n\n # Log the details but don't put those into the reason since\n # we don't want to give away too much information about our\n # actual environment.\n LOG.debug(\n 'There are %(hosts)d hosts available but '\n '%(required_count)d instances requested to build.',\n {'hosts': len(hosts), 'required_count': required_count})\n reason = _('There are not enough hosts available.')\n raise exception.NoValidHost(reason=reason)", "def reserve_ip_for_third_monitor_node(self, context, hostname):\n chost = self.dbapi.ihost_get_by_hostname(hostname)\n\n # check if hostname is storage-0 or any worker\n if 
(chost['personality'] == constants.STORAGE and hostname != constants.STORAGE_0_HOSTNAME) \\\n or chost['personality'] == constants.CONTROLLER:\n raise exception.SysinvException(_(\n \"Ceph monitor can only be added to storage-0 or any worker.\"))\n\n network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)\n address_name = cutils.format_address_name(\n hostname, constants.NETWORK_TYPE_MGMT)\n\n try:\n self.dbapi.address_get_by_name(address_name)\n LOG.debug(\"Address %s already reserved, continuing.\" % address_name)\n except exception.AddressNotFoundByName:\n LOG.debug(\"Reserving address for %s.\" % address_name)\n self._allocate_pool_address(None, network.pool_uuid,\n address_name)\n self._generate_dnsmasq_hosts_file()", "def purchase_host_reservation(OfferingId=None, HostIdSet=None, LimitPrice=None, CurrencyCode=None, ClientToken=None):\n pass", "def addHost(self, name):\n host = (name, )\n self.cursor.execute(\"INSERT INTO hosts VALUES (?, 0) \", host)\n self.database.commit()", "def insert(self, host):\r\n\r\n try:\r\n host.get_addr()\r\n except AttributeError:\r\n raise ValueError('Not a Host')\r\n \r\n self.__hosts.append(host)", "def _configure_edgeworker_host(self, context, host):\n self._allocate_addresses_for_host(context, host)", "def allocate(self, worker_address, n_gpu):\n gpu = \",\".join(self.worker_vacant_gpus[worker_address][0:n_gpu])\n self.worker_used_gpus[worker_address].extend(self.worker_vacant_gpus[worker_address][0:n_gpu])\n self.worker_vacant_gpus[worker_address] = self.worker_vacant_gpus[worker_address][n_gpu:]\n assert len(self.worker_vacant_gpus[worker_address]) >= 0\n return AllocatedGpu(worker_address, gpu)", "def __init__(__self__,\n resource_name: str,\n args: DedicatedHostGroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create_host(self, **kwargs):\n opts = ['domain_id', 'name', 'ip_address', 'domain_type',\n 'name_servers', 'primary_dns', 'hostmaster', 'master',\n 'ttl', 'refresh', 'expire', 'minimum']\n required = ['domain_id', 'name', 'ip_address']\n kwargs = self._parse_kwargs(kwargs, opts, required)\n\n reverse_zone = self._reverse_zone(kwargs['ip_address'])\n if not self.has_domain(reverse_zone):\n required = ['domain_type', 'name_servers', 'primary_dns',\n 'hostmaster']\n for opt in required:\n if not kwargs[opt]:\n print('create_domain() needs %s' % (opt))\n return\n\n self.create_domain(name=reverse_zone,\n domain_type=kwargs['domain_type'],\n name_servers=kwargs['name_servers'],\n mail_exchangers=None,\n primary_dns=kwargs['primary_dns'],\n hostmaster=kwargs['hostmaster'],\n ttl=kwargs['ttl'],\n refresh=kwargs['refresh'],\n expire=kwargs['expire'],\n minimum=kwargs['minimum'])\n\n reverse_domain_id = self.get_domain_id(reverse_zone)\n host_octet = kwargs['ip_address'].split(\".\")[3:][0]\n if not self.has_record(kwargs['domain_id'], kwargs['name'], 'A',\n kwargs['ip_address']):\n self.add_record(domain_id=kwargs['domain_id'], name=kwargs['name'],\n record_type='A', content=kwargs['ip_address'])\n if not self.has_record(reverse_domain_id, host_octet, 'PTR',\n kwargs['name']):\n self.add_record(domain_id=reverse_domain_id, name=host_octet,\n record_type='PTR', content=kwargs['name'])", "def test_create_host(self):\n h = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n self._plugin_controller.setLastCommandInformation(\"mock\")\n self._plugin_controller.onCommandFinished()\n self._model_controller.processAllPendingActions()\n \n self.assertTrue(h is not None, \"host should have an ID\")\n 
self.assertTrue(len(self._model_controller.getAllHosts()) == 1, \"The controller should have one host\")\n self.assertTrue(self._model_controller.getHost(h) is not None, \"The host should be in the controller\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide.
def assign_ipv6_addresses(NetworkInterfaceId=None, Ipv6Addresses=None, Ipv6AddressCount=None): pass
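A sketch of the auto-assignment path, assuming a placeholder ENI ID; passing Ipv6AddressCount lets EC2 choose free addresses from the subnet's IPv6 CIDR block instead of naming them explicitly via Ipv6Addresses.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# Placeholder network interface ID; request two auto-assigned addresses.
resp = ec2.assign_ipv6_addresses(
    NetworkInterfaceId='eni-0123456789abcdef0',
    Ipv6AddressCount=2,
)
print(resp['AssignedIpv6Addresses'])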
[ "def add_ip6_addr(self, prefix, subnet, mac, interface, interface_label):\n new_ip = silk_ip.assemble(prefix, subnet, mac)\n command = \"ip addr add %s/64 dev %s\" % (new_ip, interface)\n self.store_data(new_ip, interface_label)\n self.make_netns_call_async(command, \"\", 1)\n self.make_netns_call_async(\"ifconfig\", \"\", 1)", "def create_ipv6(self):\n int1 = Interface('eth1/1')\n int2 = Interface('eth1/2')\n pc1 = PortChannel('211')\n ipv6 = IPV6()\n ipv6.add_interface_address(int1, '2004:0DB8::1/10', link_local='FE83::1')\n ipv6.add_interface_address(int2, '2104:0DB8::1/11')\n ipv6.add_interface_address(int2, '2002:0DB8::1/12')\n ipv6.add_interface_address(pc1, '2022:0DB8::1/13')\n return ipv6", "def unassign_ipv6_addresses(NetworkInterfaceId=None, Ipv6Addresses=None):\n pass", "def assign_private_ip_addresses(NetworkInterfaceId=None, PrivateIpAddresses=None, SecondaryPrivateIpAddressCount=None, AllowReassignment=None):\n pass", "def ipv6(self, ipv6: SubUnnumberedTop):\n\n self._ipv6 = ipv6", "def manual_ipv6_infrastructure_allocation(anm):\n\n import netaddr\n g_ipv6 = anm['ipv6']\n log.info('Using specified IPv6 infrastructure allocation')\n\n for node in g_ipv6.l3devices():\n for interface in node.physical_interfaces:\n if not interface['input'].is_bound:\n continue # unbound interface\n ip_address = netaddr.IPAddress(interface['input'\n ].ipv6_address)\n prefixlen = interface['input'].ipv6_prefixlen\n interface.ip_address = ip_address\n interface.prefixlen = prefixlen\n cidr_string = '%s/%s' % (ip_address, prefixlen)\n interface.subnet = netaddr.IPNetwork(cidr_string)\n\n broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]\n\n # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen\n\n from netaddr import IPNetwork\n for coll_dom in broadcast_domains:\n connected_interfaces = [edge.dst_int for edge in\n coll_dom.edges()]\n cd_subnets = [IPNetwork('%s/%s' % (i.subnet.network,\n i.prefixlen)) for i in connected_interfaces]\n\n\n if len(cd_subnets) == 0:\n log.warning(\"Collision domain %s is not connected to any nodes\" % coll_dom)\n continue\n\n try:\n assert len(set(cd_subnets)) == 1\n except AssertionError:\n mismatch_subnets = '; '.join('%s: %s/%s' % (i,\n i.subnet.network, i.prefixlen) for i in\n connected_interfaces)\n log.warning('Non matching subnets from collision domain %s: %s'\n % (coll_dom, mismatch_subnets))\n else:\n coll_dom.subnet = cd_subnets[0] # take first entry\n\n # apply to remote interfaces\n\n for edge in coll_dom.edges():\n edge.dst_int.subnet = coll_dom.subnet\n\n # also need to form aggregated IP blocks (used for e.g. 
routing prefix\n # advertisement)\n # import autonetkit\n # autonetkit.update_http(anm)\n\n infra_blocks = {}\n for (asn, devices) in g_ipv6.groupby('asn').items():\n broadcast_domains = [d for d in devices if d.broadcast_domain]\n subnets = [cd.subnet for cd in broadcast_domains\n if cd.subnet is not None] # only if subnet is set\n infra_blocks[asn] = netaddr.cidr_merge(subnets)\n\n g_ipv6.data.infra_blocks = infra_blocks", "def set_interface_ip(self, interface, ip, mask):\n\n cmds = ['interface %s' %(interface), 'ip address %s %s' %(ip, mask)]\n\n output = self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmds)\n self.iosapi.bcp_log(\"info\", \"(%s) set_interface_ip() : Attempting to set interface %s IP\" %(__name__, interface))\n return(output)", "def update(self, networkipv6s):\n\n data = {'networks': networkipv6s}\n networkipv6s_ids = [str(networkipv6.get('id'))\n for networkipv6 in networkipv6s]\n\n return super(ApiNetworkIPv6, self).put('api/v3/networkv6/%s/' %\n ';'.join(networkipv6s_ids), data)", "def create(self, networkipv6s):\n\n data = {'networks': networkipv6s}\n return super(ApiNetworkIPv6, self).post('api/v3/networkv6/', data)", "def set_interface_mtu(node, pf_pcis, mtu=9200):\n for pf_pci in pf_pcis:\n pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)\n cmd = f\"ip link set {pf_eth} mtu {mtu}\"\n exec_cmd_no_error(node, cmd, sudo=True)", "def test_ipv6interface_ip(self):\n n = 10**5\n a = '2000:3456::/40'\n addr = ip.IPv6Interface(a)\n time1, result1 = timefn(n, lambda: addr.ip)\n eaddr = eip.IPv6Interface(a)\n time2, result2 = timefn(n, lambda: eaddr.ip)\n results = (time1, result1), (time2, result2)\n self.report_6i.report(fn_name(), n, results, a)", "def calculate_link_ip_addresses_ipv6(address_str, cidr_str, supernet_str, ip_count):\n if '::' in address_str:\n add_str = ''\n count = (address_str.count(':'))\n if address_str[-1] == ':':\n count -= 2\n while count < 7:\n add_str += ':0'\n count += 1\n else:\n while count < 8:\n add_str += ':0'\n count += 1\n add_str += ':'\n\n address_str = address_str.replace('::', add_str)\n\n address = address_str.split(':')\n cidr = int(cidr_str)\n supernet = int(supernet_str)\n\n mask_cidr = find_mask_v6(cidr)\n network = find_network_v6(address, mask_cidr)\n broadcast = find_broadcast_v6(network, cidr)\n\n mask_supernet = find_mask_v6(supernet)\n network_hex = []\n for i in range(8):\n network_hex.append(hex(network[i])[2:])\n network_supernet = find_network_v6(address, mask_supernet)\n broadcast_supernet = find_broadcast_v6(network_supernet, supernet)\n\n initial_ip = network_supernet[7]\n ip_checking = list(network_supernet)\n while not(initial_ip >= broadcast[7] and ip_checking[:7] == broadcast[:7]):\n initial_ip = network_supernet[7]\n ips_list = []\n no_of_ip = 0\n while initial_ip <= broadcast_supernet[7] and no_of_ip < ip_count:\n ip = list(network_supernet)\n ip[7] = initial_ip\n\n for i in range(0, 8):\n ip[i] = hex(ip[i])[2:]\n\n ip = ':'.join(ip)\n ip += '/' + str(supernet)\n ips_list.append(ip)\n initial_ip += 1\n no_of_ip += 1\n ip_checking = list(broadcast_supernet)\n initial_ip = broadcast_supernet[7]\n network_supernet = find_network_supernet_v6(broadcast_supernet, cidr, supernet)\n broadcast_supernet = find_broadcast_v6(network_supernet, supernet)\n\n yield ips_list", "def test_ipv6interface_init(self):\n n = 10**4\n data = [\n '1:2:3:4:5:6::',\n 16384,\n (int(64).to_bytes(16, 'big'), 124),\n ]\n fns = ip.IPv6Interface, eip.IPv6Interface\n for args in data:\n generic_test(self.report_6i, fn_name(), n, 
fns, args)", "def give_me_an_interface_ipv6():\n import netifaces\n for interface in netifaces.interfaces():\n if netifaces.AF_INET6 in netifaces.ifaddresses(interface):\n return interface\n return None", "def test_ipv6interface_format(self):\n n = 10**5\n data = ['s', 'b', 'x', 'n', '#b', '_b', '#_x']\n a1 = '1:2:3::6'\n addr = ip.IPv6Interface(a1)\n eaddr = eip.IPv6Interface(a1)\n fns = addr.__format__, eaddr.__format__\n for args in data:\n generic_test(self.report_6i, fn_name(), n, fns, args)", "def add_ipv6(self):\n for host in self.hosts_matrix:\n h = self.net.getNodeByName(host[0])\n h.cmd(f'ip -6 addr flush dev {host[0]}-eth1')\n h.cmd(f'ip -6 addr add dev {host[0]}-eth1 {host[2]}')", "def host_ip_v6(self, host_ip_v6):\n\n self._host_ip_v6 = host_ip_v6", "def modify_subnet_attribute(SubnetId=None, MapPublicIpOnLaunch=None, AssignIpv6AddressOnCreation=None):\n pass", "def build_ipv6(anm):\n import netaddr\n import autonetkit.plugins.ipv6 as ipv6\n\n # uses the nodes and edges from ipv4\n\n g_ipv6 = anm.add_overlay('ipv6')\n g_ip = anm['ip']\n g_in = anm['input']\n g_ipv6.add_nodes_from(g_ip, retain=['label', 'asn', 'broadcast_domain']) # retain if collision domain or not\n g_ipv6.add_edges_from(g_ip.edges())\n\n #TODO: tidy up naming consitency of secondary_loopback_block and vrf_loopback_block\n (infra_block, loopback_block, secondary_loopback_block) = \\\n extract_ipv6_blocks(anm)\n\n block_message = \"IPv6 allocations: Infrastructure: %s, Loopback: %s\" % (infra_block, loopback_block)\n if any(i for n in g_ip.nodes() for i in\n n.loopback_interfaces if not i.is_loopback_zero):\n block_message += \" Secondary Loopbacks: %s\" % secondary_loopback_block\n log.info(block_message)\n\n # TODO: replace this with direct allocation to interfaces in ip alloc plugin\n allocated = sorted([n for n in g_ip if n['input'].loopback_v6])\n if len(allocated) == len(g_ip.l3devices()):\n # all allocated\n #TODO: need to infer subnetomanual_ipv6_loopback_allocation\n log.info(\"Using user-specified IPv6 loopback addresses\")\n manual_ipv6_loopback_allocation(anm)\n else:\n if len(allocated):\n log.warning(\"Using automatic IPv6 loopback allocation. IPv6 loopback addresses specified on nodes %s will be ignored.\" % allocated)\n else:\n log.info(\"Automatically assigning IPv6 loopback addresses\")\n\n ipv6.allocate_loopbacks(g_ipv6, loopback_block)\n\n l3_devices = [d for d in g_in if d.device_type in ('router', 'server')]\n\n manual_alloc_devices = set()\n for device in l3_devices:\n physical_interfaces = list(device.physical_interfaces)\n allocated = list(interface.ipv6_address for interface in physical_interfaces if interface.is_bound)\n if all(interface.ipv6_address for interface in\n physical_interfaces if interface.is_bound):\n manual_alloc_devices.add(device) # add as a manual allocated device\n\n if manual_alloc_devices == set(l3_devices):\n log.info(\"Using user-specified IPv6 infrastructure addresses\")\n manual_alloc_ipv6_infrastructure = True\n else:\n manual_alloc_ipv6_infrastructure = False\n # warn if any set\n allocated = []\n unallocated = []\n for node in l3_devices:\n allocated += sorted([i for i in node.physical_interfaces if i.is_bound and i.ipv6_address])\n unallocated += sorted([i for i in node.physical_interfaces if i.is_bound and not i.ipv6_address])\n\n #TODO: what if IP is set but not a prefix?\n if len(allocated):\n #TODO: if set is > 50% of nodes then list those that are NOT set\n log.warning(\"Using automatic IPv6 interface allocation. 
IPv6 interface addresses specified on interfaces %s will be ignored.\" % allocated)\n else:\n log.info(\"Automatically assigning IPv6 infrastructure addresses\")\n\n if manual_alloc_ipv6_infrastructure:\n manual_ipv6_infrastructure_allocation(anm)\n else:\n ipv6.allocate_infra(g_ipv6, infra_block)\n #TODO: see if this is still needed or if can allocate direct from the ipv6 allocation plugin\n for node in g_ipv6.l3devices():\n for interface in node:\n edges = list(interface.edges())\n if len(edges):\n edge = edges[0] # first (only) edge\n interface.ip_address = edge.ip # TODO: make this consistent\n interface.subnet = edge.dst.subnet # from collision domain\n\n ipv6.allocate_vrf_loopbacks(g_ipv6, secondary_loopback_block)\n\n for node in g_ipv6.routers():\n #TODO: test this code\n node.loopback_zero.ip_address = node.loopback\n node.loopback_zero.subnet = netaddr.IPNetwork(\"%s/32\" % node.loopback)\n for interface in node.loopback_interfaces:\n if not interface.is_loopback_zero:\n interface.ip_address = interface.loopback #TODO: fix this inconsistency elsewhere" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide. AssignPrivateIpAddresses is available only in EC2-VPC.
def assign_private_ip_addresses(NetworkInterfaceId=None, PrivateIpAddresses=None, SecondaryPrivateIpAddressCount=None, AllowReassignment=None): pass
[ "def _assign_secondary_ip_():\n interface_idx = 0\n node = env.nodes[0]\n cidr='%s/%s' % (env.secondary_ip,env.secondary_ip_cidr_prefix_size)\n\n if (_get_secondary_ip_node_().id == node.id):\n debug(\"VPC Secondary IP %s already assigned to %s\" % (cidr, pretty_instance(node)))\n else:\n info(\"Assigning VPC Secondary IP %s to %s\" % (cidr, pretty_instance(node)))\n connect().assign_private_ip_addresses(node.interfaces[interface_idx].id, env.secondary_ip, allow_reassignment=True)\n # Notify opsys that it has a new address (This seems to only happen automatically with Elastic IPs). Write to /etc to make persistent.\n has_address = run('ip addr | grep %s' % cidr, quiet=True)\n if not has_address:\n sudo('ip addr add %s dev eth0' % cidr)\n append('/etc/network/interfaces','up ip addr add %s dev eth%d' % (cidr,interface_idx),use_sudo=True)", "def assign_ipv6_addresses(NetworkInterfaceId=None, Ipv6Addresses=None, Ipv6AddressCount=None):\n pass", "def do_add_private_ip(vnic_utils, add_options):\n # needs the OCI SDK installed and configured\n sess = get_oci_api_session()\n if sess is None:\n raise Exception(\"Failed to get API session.\")\n\n if add_options.ocid:\n vnic = sess.get_vnic(add_options.ocid)\n if vnic is None:\n raise Exception(\"VNIC not found: %s\" % add_options.ocid)\n else:\n vnics = sess.this_instance().all_vnics()\n if len(vnics) > 1:\n _logger.error(\"More than one VNIC found.\"\n \"Use the --vnic option to select the one to add \"\n \"a secondary IP for:\")\n for vnic in vnics:\n _logger.error(\" %s: %s\" % (vnic.get_private_ip(),\n vnic.get_ocid()))\n raise Exception(\"Too many VNICs found\")\n vnic = vnics[0]\n try:\n priv_ip = vnic.add_private_ip(private_ip=add_options.private_ip)\n except Exception as e:\n raise Exception('Failed to provision private IP') from e\n\n _logger.info(\n 'provisioning secondary private IP: %s' % priv_ip.get_address())\n vnic_utils.add_private_ip(priv_ip.get_address(), vnic.get_ocid())\n return priv_ip.get_address(), vnic.get_ocid()", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None):\n pass", "def modify_subnet_attribute(SubnetId=None, MapPublicIpOnLaunch=None, AssignIpv6AddressOnCreation=None):\n pass", "def associate_subnet_cidr_block(SubnetId=None, Ipv6CidrBlock=None):\n pass", "def PrivateIPAddressing(self, zone = None):\n self.private_addressing = True\n if zone is None:\n zone = self.zone\n self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, private_addressing=self.private_addressing, zone=zone)\n self.tester.sleep(10)\n for instance in self.reservation.instances:\n address = self.tester.allocate_address()\n self.assertTrue(address,'Unable to allocate address')\n self.assertTrue(self.tester.associate_address(instance, address))\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.disassociate()\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.release()\n if (instance.public_dns_name != instance.private_dns_name):\n self.tester.critical(\"Instance received a new public IP: \" + instance.public_dns_name)\n return self.reservation", "def set_interface_ip(self, interface, ip, mask):\n\n cmds = 
['interface %s' %(interface), 'ip address %s %s' %(ip, mask)]\n\n output = self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmds)\n self.iosapi.bcp_log(\"info\", \"(%s) set_interface_ip() : Attempting to set interface %s IP\" %(__name__, interface))\n return(output)", "def get_private_ip_address(instance_info):\n private_ip = instance_info[\"PrivateIpAddress\"]\n for network_interface in instance_info[\"NetworkInterfaces\"]:\n attachment = network_interface[\"Attachment\"]\n if attachment[\"DeviceIndex\"] == 0 and attachment[\"NetworkCardIndex\"] == 0:\n private_ip = network_interface[\"PrivateIpAddress\"]\n break\n return private_ip", "def add_ip6_addr(self, prefix, subnet, mac, interface, interface_label):\n new_ip = silk_ip.assemble(prefix, subnet, mac)\n command = \"ip addr add %s/64 dev %s\" % (new_ip, interface)\n self.store_data(new_ip, interface_label)\n self.make_netns_call_async(command, \"\", 1)\n self.make_netns_call_async(\"ifconfig\", \"\", 1)", "def addIp(cls, api_client, id, ipaddress=None):\n cmd = {'nicid': id}\n if ipaddress:\n cmd['ipaddress'] = ipaddress\n return api_client.addIpToNic(**cmd)", "def associate_ip(request):\n cloud_id = request.matchdict['cloud']\n network_id = request.matchdict['network']\n params = params_from_request(request)\n ip = params.get('ip')\n machine_id = params.get('machine')\n assign = params.get('assign', True)\n auth_context = auth_context_from_request(request)\n auth_context.check_perm(\"cloud\", \"read\", cloud_id)\n try:\n machine = Machine.objects.get(cloud=cloud_id, machine_id=machine_id)\n machine_uuid = machine.id\n except me.DoesNotExist:\n machine_uuid = \"\"\n auth_context.check_perm(\"machine\", \"edit\", machine_uuid)\n\n ret = methods.associate_ip(auth_context.owner, cloud_id, network_id,\n ip, machine_id, assign)\n if ret:\n return OK\n else:\n return Response(\"Bad Request\", 400)", "def set_cidr_ip(value, yaml_file):\n\n new_ports = [22, {4506: {\"cidr-ip\": value}}, {4505: {\"cidr-ip\": value}} ]\n\n print \"setting cidr_ip \" + value\n yaml_content = get_yaml(yaml_file)\n yaml_content[\"master-server\"][\"aws\"][\"ports\"] = new_ports\n write_yaml(yaml_content, yaml_file)", "def _set_vm_ip(self, instance, key_pair, mac, ip=None, net_mask=None):\n instance_ip = instance['floating_ip']\n linux_client = self.get_remote_client(\n ip_address=instance_ip,\n username=self.image_ssh_user,\n private_key=key_pair['private_key']\n )\n nic_name = linux_client.get_nic_name_by_mac(mac)\n if ip and net_mask:\n script_name = 'SetStaticIp.sh'\n script_path = '/scripts/' + script_name\n destination = '/tmp/'\n my_path = os.path.abspath(\n os.path.normpath(os.path.dirname(__file__)))\n full_script_path = my_path + script_path\n cmd_params = [ip, net_mask, nic_name]\n linux_client.execute_script(script_name, cmd_params,\n full_script_path, destination)\n else:\n # assuming IP can be assigned by DHCP\n linux_client.exec_command('sudo dhclient {}'.format(nic_name))\n return linux_client, nic_name", "def assign_underlay_addresses(topo: Topology) -> None:\n link_subnets = None\n\n if topo.default_link_subnet:\n def_subnet = topo.default_link_subnet\n prefixlen_diff = def_subnet.max_prefixlen - def_subnet.prefixlen - LINK_SUBNET_HOST_LEN\n if prefixlen_diff >= 0:\n link_subnets = topo.default_link_subnet.subnets(prefixlen_diff)\n\n # Wrapper around IP network host iterator.\n class HostAddrGenerator:\n def __init__(self, bridge: Bridge):\n self._iter = bridge.valid_ip_iter()\n self.current = next(self._iter)\n\n def next(self):\n 
self.current = next(self._iter)\n\n # Mapping from IP subnet to generator producing addresses from said subnet.\n addr_gens: Dict[IpNetwork, HostAddrGenerator] = {}\n\n for link in topo.links:\n if link.bridge is None: # assign a subnet of the default link network\n # DockerBridge cannot span multiple hosts.\n assert topo.ases[link.ep_a].host == topo.ases[link.ep_b].host\n\n if not link_subnets:\n log.error(\"No default link network specified.\")\n raise errors.OutOfResources()\n try:\n ip_net = next(link_subnets)\n link.bridge = DockerBridge(\n topo.gen_bridge_name(), topo.ases[link.ep_a].host, ip_net)\n topo.bridges.append(link.bridge)\n except StopIteration:\n log.error(\"Not enough IP addresses for all links.\")\n raise errors.OutOfResources()\n\n # Assign IP addresses to link endpoints\n addr_gen = _lazy_setdefault(addr_gens, link.bridge.ip_network,\n lambda: HostAddrGenerator(unwrap(link.bridge)))\n\n try:\n if not link.ep_a.is_zero():\n link.ep_a_underlay = link.bridge.assign_br_address(\n link.ep_a, topo.ases[link.ep_a], link.ep_a.ifid,\n pref_ip=None if isinstance(link.bridge, HostNetwork) else addr_gen.current)\n if link.ep_a_underlay.ip == addr_gen.current:\n addr_gen.next()\n\n if not link.ep_b.is_zero():\n link.ep_b_underlay = link.bridge.assign_br_address(\n link.ep_b, topo.ases[link.ep_b], link.ep_b.ifid,\n pref_ip=None if isinstance(link.bridge, HostNetwork) else addr_gen.current)\n if link.ep_b_underlay.ip == addr_gen.current:\n addr_gen.next()\n\n except (errors.OutOfResources, StopIteration):\n log.error(\"Not enough IP addresses in subnet '%s'.\", link.bridge.ip_network)\n raise errors.OutOfResources()", "def addServerToIpGroup():\r\n serverId = getServerId()\r\n server = serverManager.find(serverId)\r\n print \"server: \", server\r\n sharedIpGroupId = getSharedIpGroupId()\r\n sharedIpGroup = sharedIpGroupManager.find(sharedIpGroupId)\r\n print \"shared ip group: \", sharedIpGroup\r\n ipAddress = getIpAddress()\r\n serverManager.shareIp(server, ipAddress, sharedIpGroupId, True)\r\n pass", "def do_del_private_ip(vnic_utils, delete_options):\n # needs the OCI SDK installed and configured\n sess = get_oci_api_session()\n if sess is None:\n raise Exception(\"Failed to get API session.\")\n # find the private IP\n priv_ip = sess.this_instance().find_private_ip(\n delete_options.ip_address)\n if priv_ip is None:\n raise Exception(\n \"Secondary private IP not found: %s\" %\n delete_options.ip_address)\n\n if priv_ip.is_primary():\n raise Exception(\"Cannot delete IP %s, it is the primary private \"\n \"address of the VNIC.\" % delete_options.ip_address)\n\n vnic_id = priv_ip.get_vnic_ocid()\n\n if not priv_ip.delete():\n raise Exception('failed to delete secondary private IP %s' %\n delete_options.ip_address)\n\n _logger.info('deconfigure secondary private IP %s' %\n delete_options.ip_address)\n # delete from vnic_info and de-configure the interface\n return vnic_utils.del_private_ip(delete_options.ip_address, vnic_id)", "def unassign_ipv6_addresses(NetworkInterfaceId=None, Ipv6Addresses=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates an Elastic IP address with an instance or a network interface. An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide. [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account. [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.
def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None): pass
[ "def associate_address(self, instance_id, address):\n query = self.query_factory(\n action=\"AssociateAddress\", creds=self.creds,\n endpoint=self.endpoint,\n other_params={\"InstanceId\": instance_id, \"PublicIp\": address})\n d = query.submit()\n return d.addCallback(self.parser.truth_return)", "def ex_associate_address_with_node(self, node, elastic_ip, domain=None):\r\n params = {'Action': 'AssociateAddress', 'InstanceId': node.id}\r\n\r\n if domain is not None and domain != 'vpc':\r\n raise AttributeError('Domain can only be set to vpc')\r\n\r\n if domain is None:\r\n params.update({'PublicIp': elastic_ip.ip})\r\n else:\r\n params.update({'AllocationId': elastic_ip.extra['allocation_id']})\r\n\r\n response = self.connection.request(self.path, params=params).object\r\n association_id = findtext(element=response,\r\n xpath='associationId',\r\n namespace=NAMESPACE)\r\n return association_id", "def assign_elastic_ip(node = None, elastic_ip=None):\n node = node or env.nodes[0]\n elastic_ip = elastic_ip or env.elastic_ip\n if elastic_ip == ip_address(node):\n debug(\"ElasticIP %s already assigned to %s\" % (elastic_ip, pretty_instance(node)))\n else:\n info(\"Assigning ElasticIP %s to %s\" % (elastic_ip, pretty_instance(node)))\n connect().associate_address(node.id, elastic_ip)", "def HaVipAssociateAddressIp(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"HaVipAssociateAddressIp\", params, headers=headers)\n response = json.loads(body)\n model = models.HaVipAssociateAddressIpResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def ElasticIps(self, zone = None):\n if zone is None:\n zone = self.zone\n self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name)\n self.tester.sleep(10)\n for instance in self.reservation.instances:\n address = self.tester.allocate_address()\n self.assertTrue(address,'Unable to allocate address')\n self.assertTrue(self.tester.associate_address(instance, address))\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.disassociate()\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.release()\n return self.reservation", "def _assign_secondary_ip_():\n interface_idx = 0\n node = env.nodes[0]\n cidr='%s/%s' % (env.secondary_ip,env.secondary_ip_cidr_prefix_size)\n\n if (_get_secondary_ip_node_().id == node.id):\n debug(\"VPC Secondary IP %s already assigned to %s\" % (cidr, pretty_instance(node)))\n else:\n info(\"Assigning VPC Secondary IP %s to %s\" % (cidr, pretty_instance(node)))\n connect().assign_private_ip_addresses(node.interfaces[interface_idx].id, env.secondary_ip, allow_reassignment=True)\n # Notify opsys that it has a new address (This seems to only happen automatically with Elastic IPs). 
Write to /etc to make persistent.\n has_address = run('ip addr | grep %s' % cidr, quiet=True)\n if not has_address:\n sudo('ip addr add %s dev eth0' % cidr)\n append('/etc/network/interfaces','up ip addr add %s dev eth%d' % (cidr,interface_idx),use_sudo=True)", "def associate_ip(request):\n cloud_id = request.matchdict['cloud']\n network_id = request.matchdict['network']\n params = params_from_request(request)\n ip = params.get('ip')\n machine_id = params.get('machine')\n assign = params.get('assign', True)\n auth_context = auth_context_from_request(request)\n auth_context.check_perm(\"cloud\", \"read\", cloud_id)\n try:\n machine = Machine.objects.get(cloud=cloud_id, machine_id=machine_id)\n machine_uuid = machine.id\n except me.DoesNotExist:\n machine_uuid = \"\"\n auth_context.check_perm(\"machine\", \"edit\", machine_uuid)\n\n ret = methods.associate_ip(auth_context.owner, cloud_id, network_id,\n ip, machine_id, assign)\n if ret:\n return OK\n else:\n return Response(\"Bad Request\", 400)", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def check_ipaddress_unique(instance, address):\n try:\n ip = IPAddress.objects.get(address=address)\n except IPAddress.DoesNotExist:\n pass\n else:\n if ip.ethernet and ip.ethernet.base_object_id != instance.pk:\n raise ValidationError(\n 'IP {} is already assigned to other object!'.format(address)\n )", "def test_ip_addresses_update(self):\n pass", "def reserve_ip(self, region='us-west-1', ip=None):\n \n conn = self._ec2_connection(region)\n if not ip:\n address = conn.allocate_address()\n ip = IPy.IP(address.public_ip)\n else:\n ip = IPy.IP(ip)\n\n a = self.add_attr(key='reserved_ip', subkey=region,\n value=self._ipy_to_int(ip))\n return a", "def test_ip_addresses_create(self):\n pass", "def ex_release_address(self, elastic_ip, domain=None):\r\n params = {'Action': 'ReleaseAddress'}\r\n\r\n if domain is not None and domain != 'vpc':\r\n raise AttributeError('Domain can only be set to vpc')\r\n\r\n if domain is None:\r\n params['PublicIp'] = elastic_ip.ip\r\n else:\r\n params['AllocationId'] = elastic_ip.extra['allocation_id']\r\n\r\n response = self.connection.request(self.path, params=params).object\r\n return self._get_boolean(response)", "def disassociate_address(DryRun=None, PublicIp=None, AssociationId=None):\n pass", "def assign_private_ip_addresses(NetworkInterfaceId=None, PrivateIpAddresses=None, SecondaryPrivateIpAddressCount=None, AllowReassignment=None):\n pass", "def delete_elastic_ips():\n client = boto3.client('ec2')\n print('Deleting Elastic IPs')\n for eip in client.describe_addresses()['Addresses']:\n allocation_id = eip['AllocationId']\n print('Releasing EIP {}'.format(allocation_id))\n client.release_address(\n AllocationId=allocation_id\n )\n\n print('Elastic IPs deleted')", "def ReserveIp(self, net_uuid, address, ec_id, check=True):\n if net_uuid:\n return self._UnlockedReserveIp(net_uuid, address, ec_id, check)", "def add_address(self, ip_address, mask='255.255.255.0'):\n str_command = 'netsh interface ipv4 add address \"{}\" {} {}'.format(self.name, ip_address, mask)\n command = Popen(str_command) \n stdout, stderr = command.communicate()\n if stdout is None and stderr is None:\n print('Success - {} added to interface {}'.format(ip_address, self.name))\n else:\n print('Failure - {} added to interface {}'.format(ip_address, self.name))\n print('\\t' + str(stdout))\n print('\\t' + str(stderr))\n self = self.__init__(self.interface)", "def PrivateIPAddressing(self, zone = None):\n 
self.private_addressing = True\n if zone is None:\n zone = self.zone\n self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, private_addressing=self.private_addressing, zone=zone)\n self.tester.sleep(10)\n for instance in self.reservation.instances:\n address = self.tester.allocate_address()\n self.assertTrue(address,'Unable to allocate address')\n self.assertTrue(self.tester.associate_address(instance, address))\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.disassociate()\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.release()\n if (instance.public_dns_name != instance.private_dns_name):\n self.tester.critical(\"Instance received a new public IP: \" + instance.public_dns_name)\n return self.reservation" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC. After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
def associate_dhcp_options(DryRun=None, DhcpOptionsId=None, VpcId=None): pass
[ "def associate_dhcp_options_to_vpc(\n dhcp_options_id,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if conn.associate_dhcp_options(dhcp_options_id, vpc_id):\n log.info(\n \"DHCP options with id %s were associated with VPC %s\",\n dhcp_options_id,\n vpc_id,\n )\n return {\"associated\": True}\n else:\n log.warning(\n \"DHCP options with id %s were not associated with VPC %s\",\n dhcp_options_id,\n vpc_id,\n )\n return {\n \"associated\": False,\n \"error\": {\"message\": \"DHCP options could not be associated.\"},\n }\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_dhcp_options(\n domain_name=None,\n domain_name_servers=None,\n ntp_servers=None,\n netbios_name_servers=None,\n netbios_node_type=None,\n dhcp_options_name=None,\n tags=None,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"dhcp_options\",\n name=dhcp_options_name,\n domain_name=domain_name,\n domain_name_servers=domain_name_servers,\n ntp_servers=ntp_servers,\n netbios_name_servers=netbios_name_servers,\n netbios_node_type=netbios_node_type,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.associate_dhcp_options(r[\"id\"], vpc_id)\n log.info(\"Associated options %s to VPC %s\", r[\"id\"], vpc_name or vpc_id)\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_dhcp_options(DryRun=None, DhcpConfigurations=None):\n pass", "def modify_vpc_attribute(VpcId=None, EnableDnsSupport=None, EnableDnsHostnames=None):\n pass", "def configure_dhcp():\n dhcp_config = {}\n dhcp_config_content = \"\"\"\nddns-update-style none;\ndefault-lease-time 600;\nmax-lease-time 7200;\noption domain-name-servers 84.200.69.80, 84.200.70.40;\noption domain-name \"pikube.local\";\nauthorative;\nlog-facility local7;\n\nsubnet 10.12.29.0 netmask 255.255.255.0 {\n range 10.12.29.10 10.12.29.100;\n}\n\"\"\"\n\n dhcp_config['path'] = r'/etc/dhcp/dhcpd.conf'\n dhcp_config['encoding'] = \"b64\"\n dhcp_config['content'] = base64.b64encode(\n bytes(dhcp_config_content, \"utf-8\"))\n return dhcp_config", "def dhcp_options_exists(\n dhcp_options_id=None,\n name=None,\n dhcp_options_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.dhcp_options_exists: name parameter is deprecated \"\n \"use dhcp_options_name instead.\"\n )\n dhcp_options_name = name\n\n return resource_exists(\n \"dhcp_options\",\n name=dhcp_options_name,\n resource_id=dhcp_options_id,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_dhcp_options(DryRun=None, DhcpOptionsId=None):\n pass", "def create_vpc(DryRun=None, CidrBlock=None, 
InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None):\n pass", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def create(\n cidr_block,\n instance_tenancy=None,\n vpc_name=None,\n enable_dns_support=None,\n enable_dns_hostnames=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc = conn.create_vpc(cidr_block, instance_tenancy=instance_tenancy)\n if vpc:\n log.info(\"The newly created VPC id is %s\", vpc.id)\n\n _maybe_set_name_tag(vpc_name, vpc)\n _maybe_set_tags(tags, vpc)\n _maybe_set_dns(conn, vpc.id, enable_dns_support, enable_dns_hostnames)\n _maybe_name_route_table(conn, vpc.id, vpc_name)\n if vpc_name:\n _cache_id(\n vpc_name,\n vpc.id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"created\": True, \"id\": vpc.id}\n else:\n log.warning(\"VPC was not created\")\n return {\"created\": False}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "async def begin_update_dhcp(\n self,\n resource_group_name: str,\n private_cloud_name: str,\n dhcp_id: str,\n workload_network_dhcp: _models.WorkloadNetworkDhcp,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> AsyncLROPoller[_models.WorkloadNetworkDhcp]:", "def test_08_VPC_Network_Restarts_With_InternalDns(self):\n\n # Validate the following\n # 1. Create a VPC and Tier network by using DNS network offering.\n # 2. Deploy vm1 in Tier network network1.\n # 3. Verify dhcp option 06 and 0f for subnet\n # 4. Verify dhcp option 06,15 and 0f for vm Interface.\n # 5. Deploy Vm2.\n # 6. 
Verify end to end by pinging with hostname while restarting\n # VPC and Tier without and with cleanup.\n\n cmd = updateZone.updateZoneCmd()\n cmd.id = self.zone.id\n cmd.domain = VPC_DOMAIN_NAME\n self.apiclient.updateZone(cmd)\n\n vpc_off = self.create_VpcOffering(self.dnsdata[\"vpc_offering\"])\n self.validate_VpcOffering(vpc_off, state=\"Enabled\")\n vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16', cleanup=False)\n\n self.debug(\"Creating Nuage Vsp VPC Network offering...\")\n network_offering = self.create_NetworkOffering(\n self.dnsdata[\"vpc_network_offering\"])\n self.validate_NetworkOffering(network_offering, state=\"Enabled\")\n network_1 = self.create_Network(\n network_offering, gateway='10.1.1.1', vpc=vpc)\n\n vm_1 = self.create_VM(network_1)\n\n # VSD verification\n self.verify_vsd_network(self.domain.id, network_1, vpc)\n self.verify_vsd_vm(vm_1)\n # Internal DNS check point on VSD\n self.verify_vsd_dhcp_option(self.DNS, \"10.1.1.2\", network_1)\n self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, network_1)\n for nic in vm_1.nic:\n self.verify_vsd_dhcp_option(self.DNS, \"10.1.1.2\", nic, True)\n self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)\n self.verify_vsd_dhcp_option(self.HOSTNAME, \"vm1\", nic, True)\n\n self.test_data[\"virtual_machine\"][\"displayname\"] = \"vm2\"\n self.test_data[\"virtual_machine\"][\"name\"] = \"vm2\"\n vm_2 = self.create_VM(network_1)\n self.test_data[\"virtual_machine\"][\"displayname\"] = \"vm1\"\n self.test_data[\"virtual_machine\"][\"name\"] = \"vm1\"\n self.verify_vsd_vm(vm_2)\n for nic in vm_2.nic:\n self.verify_vsd_dhcp_option(self.DNS, \"10.1.1.2\", nic, True)\n self.verify_vsd_dhcp_option(self.DOMAINNAME, VPC_DOMAIN_NAME, nic, True)\n self.verify_vsd_dhcp_option(self.HOSTNAME, \"vm2\", nic, True)\n\n public_ip_1 = self.acquire_PublicIPAddress(network_1, vpc)\n self.create_StaticNatRule_For_VM(vm_1, public_ip_1, network_1)\n # Adding Network ACL rule in the Public tier\n self.debug(\"Adding Network ACL rule to make the created NAT rule \"\n \"(SSH) accessible...\")\n public_ssh_rule = self.create_NetworkAclRule(\n self.test_data[\"ingress_rule\"], network=network_1)\n\n # VSD verification\n self.verify_vsd_firewall_rule(public_ssh_rule)\n\n self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)\n\n # Restarting VPC network (cleanup = false)\n self.debug(\"Restarting the created VPC network without cleanup...\")\n Network.restart(network_1, self.api_client, cleanup=False)\n self.validate_Network(network_1, state=\"Implemented\")\n vr = self.get_Router(network_1)\n self.check_Router_state(vr, state=\"Running\")\n self.check_VM_state(vm_1, state=\"Running\")\n self.check_VM_state(vm_2, state=\"Running\")\n\n # VSD verification\n self.verify_vsd_network(self.domain.id, network_1, vpc)\n self.verify_vsd_router(vr)\n self.verify_vsd_vm(vm_1)\n self.verify_vsd_vm(vm_2)\n\n self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)\n\n # Restarting VPC network (cleanup = true)\n self.debug(\"Restarting the created VPC network with cleanup...\")\n Network.restart(network_1, self.api_client, cleanup=True)\n self.validate_Network(network_1, state=\"Implemented\")\n vr = self.get_Router(network_1)\n self.check_Router_state(vr, state=\"Running\")\n self.check_VM_state(vm_1, state=\"Running\")\n self.check_VM_state(vm_2, state=\"Running\")\n\n # VSD verification\n self.verify_vsd_network(self.domain.id, network_1, vpc)\n self.verify_vsd_router(vr)\n self.verify_vsd_vm(vm_1)\n self.verify_vsd_vm(vm_2)\n\n 
self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)\n\n # Restarting VPC (cleanup = false)\n self.debug(\"Restarting the VPC without cleanup...\")\n self.restart_Vpc(vpc, cleanup=False)\n self.validate_Network(network_1, state=\"Implemented\")\n vr = self.get_Router(network_1)\n self.check_Router_state(vr, state=\"Running\")\n self.check_VM_state(vm_1, state=\"Running\")\n self.check_VM_state(vm_2, state=\"Running\")\n\n # VSD verification\n self.verify_vsd_network(self.domain.id, network_1, vpc)\n self.verify_vsd_router(vr)\n self.verify_vsd_vm(vm_1)\n\n self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)\n\n # Restarting VPC (cleanup = true)\n self.debug(\"Restarting the VPC with cleanup...\")\n self.restart_Vpc(vpc, cleanup=True)\n self.validate_Network(network_1, state=\"Implemented\")\n vr = self.get_Router(network_1)\n self.check_Router_state(vr, state=\"Running\")\n self.check_VM_state(vm_1, state=\"Running\")\n self.check_VM_state(vm_2, state=\"Running\")\n\n # VSD verification\n self.verify_vsd_network(self.domain.id, network_1, vpc)\n self.verify_vsd_router(vr)\n self.verify_vsd_vm(vm_1)\n\n self.vm_verify_ping(vm_1, public_ip_1, vm_2, VPC_DOMAIN_NAME)", "async def begin_create_dhcp(\n self,\n resource_group_name: str,\n private_cloud_name: str,\n dhcp_id: str,\n workload_network_dhcp: _models.WorkloadNetworkDhcp,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> AsyncLROPoller[_models.WorkloadNetworkDhcp]:", "def modify_instance_placement(InstanceId=None, Tenancy=None, Affinity=None, HostId=None):\n pass", "def _create_dhcp_entries_for_single_instance(\n instance, ip_or_network, ethernet_id\n):\n if ip_or_network['value'] == OTHER:\n ip_address = ip_or_network[OTHER]\n ip = IPAddress.objects.create(address=ip_address)\n else:\n network = Network.objects.get(\n pk=ip_or_network['value']\n )\n ip = network.issue_next_free_ip()\n logger.info('Assigning {} to {}'.format(ip, instance))\n # pass base_object as param to make sure that this ethernet is assigned\n # to currently transitioned instance\n ethernet = Ethernet.objects.get(pk=ethernet_id, base_object=instance)\n ip.hostname = instance.hostname\n logger.info('Bounding {} to {} ethernet'.format(ip, ethernet))\n ip.ethernet = ethernet\n ip.dhcp_expose = True\n ip.save()\n return ip, ethernet", "def update(\n hostname,\n refresh_key,\n authorization_host,\n org_id,\n sddc_id,\n type,\n dhcp_profile_id,\n verify_ssl=True,\n cert=None,\n server_addresses=None,\n tags=vmc_constants.VMC_NONE,\n lease_time=None,\n display_name=None,\n):\n\n log.info(\"Updating DHCP profile %s for SDDC %s\", dhcp_profile_id, sddc_id)\n profile_type = vmc_constants.DHCP_CONFIGS.format(type)\n api_url_base = vmc_request.set_base_url(hostname)\n api_url = (\n \"{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/\"\n \"policy/api/v1/infra/{profile_type}/{profile_id}\"\n )\n api_url = api_url.format(\n base_url=api_url_base,\n org_id=org_id,\n sddc_id=sddc_id,\n profile_type=profile_type,\n profile_id=dhcp_profile_id,\n )\n\n existing_data = get_by_id(\n hostname,\n refresh_key,\n authorization_host,\n org_id,\n sddc_id,\n type,\n dhcp_profile_id,\n verify_ssl,\n cert,\n )\n\n # fetch the dhcp profile for the given dhcp_profile_id\n if vmc_constants.ERROR in existing_data:\n return existing_data\n\n allowed_dict = {\n \"server_addresses\": server_addresses,\n \"tags\": tags,\n \"lease_time\": lease_time,\n \"display_name\": display_name,\n }\n\n req_data = vmc_request._filter_kwargs(\n 
allowed_kwargs=allowed_dict.keys(), allow_none=[\"tags\"], **allowed_dict\n )\n\n payload = _update_payload_for_dhcp_profile(type, existing_data, req_data)\n return vmc_request.call_api(\n method=vmc_constants.PATCH_REQUEST_METHOD,\n url=api_url,\n refresh_key=refresh_key,\n authorization_host=authorization_host,\n description=\"vmc_dhcp_profiles.update\",\n responsebody_applicable=False,\n data=payload,\n verify_ssl=verify_ssl,\n cert=cert,\n )", "def create(\n hostname,\n refresh_key,\n authorization_host,\n org_id,\n sddc_id,\n type,\n dhcp_profile_id,\n verify_ssl=True,\n cert=None,\n server_addresses=None,\n tags=vmc_constants.VMC_NONE,\n lease_time=None,\n):\n\n log.info(\"Creating DHCP profile %s for SDDC %s\", dhcp_profile_id, sddc_id)\n\n profile_type = vmc_constants.DHCP_CONFIGS.format(type)\n api_url_base = vmc_request.set_base_url(hostname)\n api_url = (\n \"{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/\"\n \"policy/api/v1/infra/{profile_type}/{profile_id}\"\n )\n api_url = api_url.format(\n base_url=api_url_base,\n org_id=org_id,\n sddc_id=sddc_id,\n profile_type=profile_type,\n profile_id=dhcp_profile_id,\n )\n\n allowed_dict = {\"server_addresses\": server_addresses, \"tags\": tags, \"lease_time\": lease_time}\n\n req_data = vmc_request._filter_kwargs(\n allowed_kwargs=allowed_dict.keys(), allow_none=[\"tags\"], **allowed_dict\n )\n\n payload = _create_payload_for_dhcp_profile(dhcp_profile_id, type, req_data)\n return vmc_request.call_api(\n method=vmc_constants.PUT_REQUEST_METHOD,\n url=api_url,\n refresh_key=refresh_key,\n authorization_host=authorization_host,\n description=\"vmc_dhcp_profiles.create\",\n data=payload,\n verify_ssl=verify_ssl,\n cert=cert,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates an IAM instance profile with a running or stopped instance. You cannot associate more than one IAM instance profile with an instance.
def associate_iam_instance_profile(IamInstanceProfile=None, InstanceId=None): pass
[ "def _init_instance_profile(self):\n iam_client = self._session.client('iam')\n\n # Create instance profile\n instance_profile_name = 'AccelizeLoadFPGA'\n with _ExceptionHandler.catch(filter_error_codes='EntityAlreadyExists'):\n iam_client.create_instance_profile(\n InstanceProfileName=instance_profile_name)\n\n _get_logger().info(\n _utl.gen_msg('created_object', 'instance profile',\n instance_profile_name))\n\n _time.sleep(5)\n\n # Attach role to instance profile\n with _ExceptionHandler.catch(filter_error_codes='LimitExceeded'):\n iam_client.add_role_to_instance_profile(\n InstanceProfileName=instance_profile_name, RoleName=self._role)\n\n _get_logger().info(\n _utl.gen_msg('attached_to', 'role', self._role,\n 'instance profile', instance_profile_name))", "def create_iam_instance_profile(iam):\n try:\n inst_profile = iam.create_instance_profile(InstanceProfileName=instance_data['role'])\n usr_log(f'IAM: Created Instance Profile: {inst_profile}', 'info')\n return inst_profile\n except Exception as error:\n usr_log(f'IAM: Error creating instance profile: {error}', 'error')\n return None", "def add_instance_profile_to_template(template):\n template.add_resource(Role(\n \"ECSInstanceRole\",\n AssumeRolePolicyDocument={\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\n }]\n },\n Policies=[Policy(\n PolicyName=\"ssm-agent\",\n PolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ssm:DescribeAssociation\",\n \"ssm:GetDeployablePatchSnapshotForInstance\",\n \"ssm:GetDocument\",\n \"ssm:DescribeDocument\",\n \"ssm:GetManifest\",\n \"ssm:GetParameter\",\n \"ssm:GetParameters\",\n \"ssm:ListAssociations\",\n \"ssm:ListInstanceAssociations\",\n \"ssm:PutInventory\",\n \"ssm:PutComplianceItems\",\n \"ssm:PutConfigurePackageResult\",\n \"ssm:UpdateAssociationStatus\",\n \"ssm:UpdateInstanceAssociationStatus\",\n \"ssm:UpdateInstanceInformation\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ssmmessages:CreateControlChannel\",\n \"ssmmessages:CreateDataChannel\",\n \"ssmmessages:OpenControlChannel\",\n \"ssmmessages:OpenDataChannel\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ec2messages:AcknowledgeMessage\",\n \"ec2messages:DeleteMessage\",\n \"ec2messages:FailMessage\",\n \"ec2messages:GetEndpoint\",\n \"ec2messages:GetMessages\",\n \"ec2messages:SendReply\"\n ],\n \"Resource\": \"*\"\n }\n ]\n }\n ), Policy(\n PolicyName=\"ecs-policy\",\n PolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ec2:DescribeTags\",\n \"ecs:CreateCluster\",\n \"ecs:DeregisterContainerInstance\",\n \"ecs:DiscoverPollEndpoint\",\n \"ecs:Poll\",\n \"ecs:RegisterContainerInstance\",\n \"ecs:StartTelemetrySession\",\n \"ecs:UpdateContainerInstancesState\",\n \"ecs:Submit*\",\n \"ecr:GetAuthorizationToken\",\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:BatchGetImage\",\n \"logs:CreateLogStream\",\n \"logs:PutLogEvents\"\n ],\n \"Resource\": \"*\"\n }\n ]\n }\n )]\n ))\n template.add_resource(InstanceProfile(\n \"ECSInstanceProfile\",\n Roles=[Ref(\"ECSInstanceRole\")]\n ))\n return GetAtt(\"ECSInstanceProfile\", \"Arn\")", "def create_tps_profile(iam, profile_name, locations):\n\n instance_profile = iam.create_instance_profile(\n InstanceProfileName=profile_name,\n Path='/',\n )\n\n iam.create_role(\n 
RoleName=profile_name,\n Path='/',\n AssumeRolePolicyDocument=json.dumps(\n assume_role_policy_document('ec2.amazonaws.com')),\n )\n\n iam.add_role_to_instance_profile(\n InstanceProfileName=profile_name,\n RoleName=profile_name,\n )\n\n for policy in (\n 'AmazonRDSFullAccess',\n 'AmazonEC2ContainerRegistryFullAccess',\n 'AWSBatchFullAccess',\n ):\n arn = 'arn:aws:iam::aws:policy/' + policy\n iam.attach_role_policy(\n RoleName=profile_name,\n PolicyArn=arn,\n )\n\n ec2_policy = dict(\n Version='2012-10-17',\n Statement=dict(\n Effect='Allow',\n Action=[\n 'ec2:AuthorizeSecurityGroupIngress',\n 'ec2:DescribeInstances',\n 'ec2:TerminateInstances',\n 'ec2:CreateKeyPair',\n 'ec2:CreateTags',\n 'ec2:RunInstances',\n 'ec2:DescribeSecurityGroups',\n 'ec2:DescribeImages',\n 'ec2:CreateSecurityGroup',\n 'ec2:DeleteSecurityGroup',\n 'ec2:DescribeSubnets',\n 'ec2:DeleteKeyPair',\n 'ec2:DescribeInstanceStatus',\n ],\n Resource='*',\n ),\n )\n\n iam_policy = dict(\n Version='2012-10-17',\n Statement=dict(\n Effect='Allow',\n Action=[\n 'iam:ListPolicies',\n 'iam:CreatePolicy',\n 'iam:GetRole',\n 'iam:CreateRole',\n 'iam:AttachRolePolicy',\n 'iam:PassRole',\n 'iam:GetInstanceProfile',\n 'iam:AddRoleToInstanceProfile',\n ],\n Resource='*',\n ),\n )\n\n assets_path = '/' + locations.assets.prefix + '/*'\n s3_policy = dict(\n Version='2012-10-17',\n Statement=[\n dict(\n Effect='Allow',\n Action=[\n 's3:DeleteObject',\n ],\n Resource=[\n 'arn:aws:s3:::' + locations.missing.name + '/*',\n ],\n ),\n dict(\n Effect='Allow',\n Action=[\n 's3:ListBucket',\n ],\n Resource=[\n 'arn:aws:s3:::' + locations.missing.name,\n 'arn:aws:s3:::' + locations.assets.name,\n 'arn:aws:s3:::' + locations.rawr.name,\n 'arn:aws:s3:::' + locations.meta.name,\n ],\n ),\n dict(\n Effect='Allow',\n Action=[\n 's3:GetObject',\n ],\n Resource=[\n 'arn:aws:s3:::' + locations.assets.name + '/tileops/*',\n 'arn:aws:s3:::' + locations.assets.name + assets_path,\n 'arn:aws:s3:::' + locations.missing.name + '/*',\n 'arn:aws:s3:::' + locations.rawr.name + '/*',\n 'arn:aws:s3:::' + locations.meta.name + '/*',\n ],\n ),\n ],\n )\n\n cw_policy = dict(\n Version='2012-10-17',\n Statement=[\n dict(\n Effect='Allow',\n Action=[\n 'logs:CreateLogGroup',\n 'logs:CreateLogStream',\n 'logs:PutLogEvents',\n 'logs:DescribeLogStreams',\n ],\n Resource=[\n 'arn:aws:logs:*:*:*',\n ]\n ),\n ],\n )\n\n for name, policy in [\n ('AllowEC2', ec2_policy),\n ('AllowIAM', iam_policy),\n ('AllowS3', s3_policy),\n ('WriteLogs', cw_policy),\n ]:\n iam.put_role_policy(\n RoleName=profile_name,\n PolicyName=name,\n PolicyDocument=json.dumps(policy),\n )\n\n wait_for_profile(iam, profile_name)\n return instance_profile['InstanceProfile']", "def iam_instance_profile(self) -> Optional[pulumi.Input['LaunchTemplateIamInstanceProfileArgs']]:\n return pulumi.get(self, \"iam_instance_profile\")", "def iam_instance_profile(self) -> pulumi.Output[Optional['outputs.LaunchTemplateIamInstanceProfile']]:\n return pulumi.get(self, \"iam_instance_profile\")", "def test_jenkins_instance_profile_exists(self) -> None:\n self.assertTrue(EC2.instance_profile_valid(\n instance_profile_name='global-jenkis-server-instance-profile',\n asg_name='global-jenkins-server-asg',\n iam_role_name='jenkins-role'\n ))", "def wait_for_profile(iam, profile_name):\n\n import time\n\n waiter = iam.get_waiter('instance_profile_exists')\n waiter.wait(InstanceProfileName=profile_name)\n # not sure why, but even after waiting, the ec2 instance creation\n # fails with an invalid profile arn error\n # sleeping a 
little bit of time fixes it :(\n time.sleep(10)", "def control_instance(stackName, action, instanceName=None):\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n stackName = stackName.lower()\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n stackId = [stack['StackId'] for stack in stacks['Stacks'] if stack['Name'] == stackName]\n if stackId == []:\n print(_red(\"stack %s not found\" % stackName))\n return 1\n instances = opsworks.describe_instances(stack_id=stackId[0])['Instances']\n if instanceName is not None:\n instances = [instance for instance in instances if instance['Hostname'] == instanceName]\n\n ec2 = connect_to_ec2()\n for instance in instances:\n if action == 'start':\n print(_green(\"starting instance: %s\" % instance['Hostname']))\n try:\n opsworks.start_instance(instance_id=instance['InstanceId'])\n except ValidationException:\n pass\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n spinner = Spinner(_yellow(\"[%s]Waiting for reservation \" % myinstance['Hostname']), hide_cursor=False)\n while myinstance['Status'] == 'requested':\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks instance status: %s\" % (myinstance['Hostname'], myinstance['Status'])))\n ec2Instance = ec2.get_only_instances(instance_ids=[myinstance['Ec2InstanceId']])[0]\n spinner = Spinner(_yellow(\"[%s]Booting ec2 instance \" % myinstance['Hostname']), hide_cursor=False)\n while ec2Instance.state != u'running':\n spinner.next()\n time.sleep(1)\n ec2Instance.update()\n print(_green(\"\\n[%s]ec2 Instance state: %s\" % (myinstance['Hostname'], ec2Instance.state)))\n spinner = Spinner(_yellow(\"[%s]Running OpsWorks setup \" % myinstance['Hostname']), hide_cursor=False)\n while myinstance['Status'] != 'online':\n if myinstance['Status'] == 'setup_failed':\n print(_red(\"\\n[%s]OpsWorks instance failed\" % myinstance['Hostname']))\n return 1\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks Instance state: %s\" % (myinstance['Hostname'], myinstance['Status'])))\n getec2instances()\n elif action == 'stop':\n if 'Ec2InstanceId' in instance.keys():\n print(_green(\"Stopping instance %s\" % instance['Hostname']))\n opsworks.stop_instance(instance_id=instance['InstanceId'])\n ec2Instance = ec2.get_only_instances(instance_ids=[instance['Ec2InstanceId']])[0]\n spinner = Spinner(_yellow(\"[%s]Waiting for ec2 instance to stop \" % instance['Hostname']), hide_cursor=False)\n while ec2Instance.state != u'stopped':\n spinner.next()\n time.sleep(1)\n ec2Instance.update()\n print(_green(\"\\n[%s]ec2 Instance state: %s\" % (instance['Hostname'], ec2Instance.state)))\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n spinner = Spinner(_yellow(\"[%s]Stopping OpsWorks Instance \" % instance['Hostname']), hide_cursor=False)\n while myinstance['Status'] != 'stopped':\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks Instance state: %s\" % (instance['Hostname'], myinstance['Status'])))\n else:\n 
print(_green(\"%s in %s already stopped\" % (instance['Hostname'], stackName)))\n try:\n print(_green(\"removing %s from ssh config...\" % instance['PublicDns']))\n removefromsshconfig(dns=instance['PublicDns'])\n except Exception:\n pass", "def tag_instance(request):\n log('Tagging instance with: {}', request.instance_tags)\n _azure('vm', 'update',\n '--name', request.vm_name,\n '--resource-group', request.resource_group,\n '--set', *['tags.{}={}'.format(tag, value)\n for tag, value in request.instance_tags.items()])", "def __init__(self,\n profiles: List['InstanceProfile']) -> None:\n self.profiles = profiles", "def start():\n local('aws ec2 start-instances --instance-ids %s'%(AWS_INSTANCE_ID))", "def StartInstance(*, session, imageid, keyname=\"\", instancetype=\"t2.micro\", userdata=\"\"):\n ec2conn = session.connect_to(\"ec2\")\n ret = ec2conn.run_instances(image_id=imageid, min_count=1, max_count=1, key_name=keyname, instance_type=instancetype, user_data=userdata)\n return ret[\"Instances\"][0]", "def modify_instance_placement(InstanceId=None, Tenancy=None, Affinity=None, HostId=None):\n pass", "def add_profile(self, namespace, key, value):\n try:\n entry = dax.Profile(namespace, key, value)\n self._dax_executable.addProfile(entry) \n except dax.DuplicateError:\n pass", "def launch_instance(self, image_id, key_pair):\n return self.ec2_service.create_instances(\n ImageId=image_id,\n MinCount=1,\n MaxCount=1,\n InstanceType='t2.micro',\n KeyName=key_pair\n )", "def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(account=instance)\n new_profile_created.send(sender=instance.__class__, account=instance)", "def startinstance(imagename, instance_type='m1.large'):\n if not settings.get_image(imagename):\n raise SystemExit(\"Invalid imagename '%s'\" % imagename)\n\n username, conn = _getbotoconn(auth_user)\n\n print \"starting an instance from the %s image under the %s account of \" \\\n \"type %s\" % \\\n (imagename, username, instance_type)\n\n username, accesskey, secretkey, pkname = settings.get_user(username)\n imagename, imageid = settings.get_image(imagename)\n\n image = conn.get_image(imageid)\n reservation = None\n if pkname:\n reservation = image.run(instance_type=instance_type, key_name=pkname)\n else:\n reservation = image.run(instance_type=instance_type)\n\n instance = reservation.instances[0]\n\n # The image has been started in the pending state, wait for it to transition\n # into the running state\n while True:\n if instance.update() == u'running':\n # [AN] it would be nice if the user knew it was still working\n break\n time.sleep(1)\n\n print \"\"\n print \"Instance started\"\n print \"DNS name: %s\" % instance.dns_name", "def add_profile(self, namespace, key, value):\n try:\n entry = dax.Profile(namespace, key, value)\n self._dax_node.addProfile(entry)\n except dax.DuplicateError:\n pass", "def associate_address(self, instance_id, address):\n query = self.query_factory(\n action=\"AssociateAddress\", creds=self.creds,\n endpoint=self.endpoint,\n other_params={\"InstanceId\": instance_id, \"PublicIp\": address})\n d = query.submit()\n return d.addCallback(self.parser.truth_return)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.
def associate_route_table(DryRun=None, SubnetId=None, RouteTableId=None): pass
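A minimal boto3 usage sketch for associate_route_table, placed here ahead of this record's negatives; the subnet and route table IDs are hypothetical placeholders.

import boto3

ec2 = boto3.client("ec2")

# Associate a subnet with a route table in the same VPC; keep the returned
# association ID, which is required to disassociate the table later.
response = ec2.associate_route_table(
    SubnetId="subnet-0123456789abcdef0",   # hypothetical subnet ID
    RouteTableId="rtb-0123456789abcdef0",  # hypothetical route table ID
)
print(response["AssociationId"])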
[ "def associate_route_table(\n route_table_id=None,\n subnet_id=None,\n route_table_name=None,\n subnet_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"associated\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n\n if all((route_table_id, route_table_name)):\n raise SaltInvocationError(\n \"Only one of route_table_name or route_table_id may be provided.\"\n )\n if route_table_name:\n route_table_id = _get_resource_id(\n \"route_table\",\n route_table_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not route_table_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"Route table {} does not exist.\".format(route_table_name)\n },\n }\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_route_table(route_table_id, subnet_id)\n log.info(\n \"Route table %s was associated with subnet %s\", route_table_id, subnet_id\n )\n return {\"association_id\": association_id}\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def add_subnet(tag_name, ip_part, route_table, az, realm):\n template_name = tag_name.title().replace('-', '')\n subnet = ec2.Subnet(\n template_name,\n VpcId=Ref(self.vpc),\n CidrBlock=_(Ref(self.vpc_base_net), \".{}.0/24\".format(ip_part)),\n AvailabilityZone=Select(az, GetAZs()),\n Tags=self.get_tags(tag_name, realm=realm)\n )\n subnet = self.t.add_resource(subnet)\n\n self.t.add_resource(ec2.SubnetRouteTableAssociation(\n \"{}RouteTableAssociation\".format(template_name),\n SubnetId=Ref(subnet),\n RouteTableId=Ref(route_table)\n ))\n\n return subnet", "def associate_network_acl_to_subnet(\n network_acl_id=None,\n subnet_id=None,\n network_acl_name=None,\n subnet_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if network_acl_name:\n network_acl_id = _get_resource_id(\n \"network_acl\",\n network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not network_acl_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"Network ACL {} does not exist.\".format(network_acl_name)\n },\n }\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"associated\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(network_acl_id, subnet_id)\n if association_id:\n log.info(\n \"Network ACL with id %s was associated with subnet %s\",\n network_acl_id,\n subnet_id,\n )\n\n return {\"associated\": True, \"id\": association_id}\n else:\n log.warning(\n \"Network ACL with id %s was not associated with subnet %s\",\n network_acl_id,\n subnet_id,\n )\n return {\n \"associated\": False,\n \"error\": {\"message\": \"ACL could not be assocaited.\"},\n }\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def configure_routing(vpc):\n internet_gateways = 
list(vpc.internet_gateways.all())\n if len(internet_gateways) == 1:\n internet_gateway = internet_gateways[0]\n elif len(internet_gateways) == 0:\n raise CraftingTableError(\"No internet gateway found\")\n else:\n raise CraftingTableError(f\"Multiple internet gateways found: {id_list(internet_gateways)}\")\n\n route_tables = list(vpc.route_tables.filter(Filters=[{\"Name\": \"association.main\", \"Values\": [\"true\"]}]))\n if len(route_tables) == 1:\n route_table = route_tables[0]\n elif len(route_tables) == 0:\n raise CraftingTableError(\"No route table found\")\n if len(route_tables) != 1:\n raise CraftingTableError(f\"Multiple route tables found: {id_list(route_tables)}\")\n\n for route in route_table.routes:\n if route.gateway_id == internet_gateway.id:\n break\n else:\n route_table.create_route(DestinationCidrBlock=\"0.0.0.0/0\", GatewayId=internet_gateway.id)\n click.echo(f\"Created default route to {internet_gateway.id}\")", "def associate_subnet_cidr_block(SubnetId=None, Ipv6CidrBlock=None):\n pass", "def _assert_nat_in_subnet_route(ec2_client, subnet_id):\n response = ec2_client.describe_route_tables(Filters=[{\"Name\": \"association.subnet-id\", \"Values\": [subnet_id]}])\n routes = response[\"RouteTables\"][0][\"Routes\"]\n assert_that(next(route for route in routes if route[\"DestinationCidrBlock\"] == \"0.0.0.0/0\")).contains(\n \"NatGatewayId\"\n )", "def _get_subnet_explicit_route_table(\n subnet_id, vpc_id, conn=None, region=None, key=None, keyid=None, profile=None\n):\n if not conn:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if conn:\n vpc_route_tables = conn.get_all_route_tables(filters={\"vpc_id\": vpc_id})\n for vpc_route_table in vpc_route_tables:\n for rt_association in vpc_route_table.associations:\n if rt_association.subnet_id == subnet_id and not rt_association.main:\n return rt_association.id\n return None", "def route(self, subnet_id, type_serv, target):\n return self.client.call('SoftLayer_Network_Subnet', 'route',\n type_serv, target, id=subnet_id, )", "def replace_route_table_association(\n association_id, route_table_id, region=None, key=None, keyid=None, profile=None\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.replace_route_table_association_with_assoc(\n association_id, route_table_id\n )\n log.info(\n \"Route table %s was reassociated with association id %s\",\n route_table_id,\n association_id,\n )\n return {\"replaced\": True, \"association_id\": association_id}\n except BotoServerError as e:\n return {\"replaced\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_route(\n route_table_id=None,\n destination_cidr_block=None,\n route_table_name=None,\n gateway_id=None,\n internet_gateway_name=None,\n instance_id=None,\n interface_id=None,\n vpc_peering_connection_id=None,\n vpc_peering_connection_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n nat_gateway_id=None,\n nat_gateway_subnet_name=None,\n nat_gateway_subnet_id=None,\n):\n\n if not _exactly_one((route_table_name, route_table_id)):\n raise SaltInvocationError(\n \"One (but not both) of route_table_id or route_table_name must be provided.\"\n )\n\n if not _exactly_one(\n (\n gateway_id,\n internet_gateway_name,\n instance_id,\n interface_id,\n vpc_peering_connection_id,\n nat_gateway_id,\n nat_gateway_subnet_id,\n nat_gateway_subnet_name,\n vpc_peering_connection_name,\n )\n ):\n raise SaltInvocationError(\n \"Only one of gateway_id, internet_gateway_name, 
instance_id, interface_id,\"\n \" vpc_peering_connection_id, nat_gateway_id, nat_gateway_subnet_id,\"\n \" nat_gateway_subnet_name or vpc_peering_connection_name may be provided.\"\n )\n\n if destination_cidr_block is None:\n raise SaltInvocationError(\"destination_cidr_block is required.\")\n\n try:\n if route_table_name:\n route_table_id = _get_resource_id(\n \"route_table\",\n route_table_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not route_table_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"route table {} does not exist.\".format(\n route_table_name\n )\n },\n }\n\n if internet_gateway_name:\n gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gateway_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if vpc_peering_connection_name:\n vpc_peering_connection_id = _get_resource_id(\n \"vpc_peering_connection\",\n vpc_peering_connection_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_peering_connection_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC peering connection {} does not exist.\".format(\n vpc_peering_connection_name\n )\n },\n }\n\n if nat_gateway_subnet_name:\n gws = describe_nat_gateways(\n subnet_name=nat_gateway_subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gws:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"nat gateway for {} does not exist.\".format(\n nat_gateway_subnet_name\n )\n },\n }\n nat_gateway_id = gws[0][\"NatGatewayId\"]\n\n if nat_gateway_subnet_id:\n gws = describe_nat_gateways(\n subnet_id=nat_gateway_subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gws:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"nat gateway for {} does not exist.\".format(\n nat_gateway_subnet_id\n )\n },\n }\n nat_gateway_id = gws[0][\"NatGatewayId\"]\n\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not nat_gateway_id:\n return _create_resource(\n \"route\",\n route_table_id=route_table_id,\n destination_cidr_block=destination_cidr_block,\n gateway_id=gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=vpc_peering_connection_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # for nat gateway, boto3 is required\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n ret = conn3.create_route(\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n NatGatewayId=nat_gateway_id,\n )\n return {\"created\": True, \"id\": ret.get(\"NatGatewayId\")}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def _assert_internet_gateway_in_subnet_route(ec2_client, subnet_id, expected_internet_gateway_id):\n response = ec2_client.describe_route_tables(Filters=[{\"Name\": \"association.subnet-id\", \"Values\": [subnet_id]}])\n routes = response[\"RouteTables\"][0][\"Routes\"]\n internet_gateway_route = next(route for route in routes if route[\"DestinationCidrBlock\"] == \"0.0.0.0/0\")\n assert_that(internet_gateway_route).contains(\"GatewayId\")\n 
assert_that(internet_gateway_route[\"GatewayId\"]).is_equal_to(expected_internet_gateway_id)", "def create_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, GatewayId=None, DestinationIpv6CidrBlock=None, EgressOnlyInternetGatewayId=None, InstanceId=None, NetworkInterfaceId=None, VpcPeeringConnectionId=None, NatGatewayId=None):\n pass", "def disassociate_route_table(DryRun=None, AssociationId=None):\n pass", "def add_route_tgw_nh(route_table_id, destination_cidr_block, transit_gateway_id):\n ec2 = boto3.client('ec2')\n\n resp = ec2.create_route(\n DryRun=False,\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n TransitGatewayId=transit_gateway_id,\n )\n logger.info(\"Got response to add_route_tgw_nh {} \".format(resp))\n return resp", "def ReplaceRouteTableAssociation(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ReplaceRouteTableAssociation\", params, headers=headers)\n response = json.loads(body)\n model = models.ReplaceRouteTableAssociationResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def allocate_subnet(self, request):", "def create_route_entry(self, route_tables, vpc_id):\n params = {}\n results = []\n changed = False \n vrouter_table_id = None\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n\n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n for vroute in route_tables:\n self.build_list_params(params, vrouter_table_id , 'RouteTableId') \n if \"next_hop_id\" in vroute:\n if (\"dest\" in vroute) or (\"destination_cidrblock\" in vroute):\n fixed_dest_cidr_block = None\n if 'dest' in vroute:\n fixed_dest_cidr_block = vroute[\"dest\"]\n if 'destination_cidrblock' in vroute:\n fixed_dest_cidr_block = vroute[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n\n if 'next_hop_type' in vroute:\n self.build_list_params(params, vroute[\"next_hop_type\"], 'NextHopType')\n\n if 'next_hop_id' in vroute:\n self.build_list_params(params, vroute[\"next_hop_id\"], 'NextHopId')\n \n try:\n instance_result = self.get_instance_info()\n flag = False\n if instance_result:\n for instances in instance_result[0][u'Instances'][u'Instance']:\n if vroute[\"next_hop_id\"] == instances['InstanceId']:\n flag = True\n break\n if flag: \n response = self.get_status('CreateRouteEntry', params)\n results.append(response)\n changed = True\n time.sleep(10)\n else:\n results.append({\"Error Message\": str(vroute[\"next_hop_id\"])+\" Instance not found\"})\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n results.append({\"Error Message\": 
\"destination_cidrblock is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"vpc_id is not valid\"})\n \n return changed, results", "def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None):\n pass", "def routing_table(ip, community, ci):\n ipRouteType = \"1.3.6.1.2.1.4.21.1.8\"\n ret = get_bulk(ip, ipRouteType, community)\n if ret != None:\n for r in ret:\n for name, val in r:\n ip = name.prettyPrint()[len(\"SNMPv2-SMI::mib-2.4.21.1.8.\"):]\n route_type = int(val.prettyPrint())\n\n # indirect(4)\n if route_type == 4:\n discovery_info.add_ip(ip)\n\n new_ci = ConfigurationItem.ConfigurationItem()\n new_ci.add_ipv4_address(ip)\n mac = discovery_info.get_mac_from_ip(ip)\n if mac != None:\n ci.set_mac_address(mac)\n\n rel_type = methods.add_rel_type(\n RelationshipType.RelationshipType(\"route to\"))\n rel_obj_1 = methods.create_relation(ci, new_ci, rel_type)\n rel_obj_1.set_title(str(ci.get_title()) +\n \" route to \" + str(new_ci.get_title()))\n\n rel_obj_2 = methods.create_relation(new_ci, ci, rel_type)\n rel_obj_2.set_title(str(new_ci.get_title()) + \" route to \" +\n str(ci.get_title()))\n\n methods.add_ci(new_ci)\n methods.add_rel(rel_obj_1)\n methods.add_rel(rel_obj_2)\n\n # direct(3)\n elif route_type == 3:\n ci.add_ipv4_address(ip)\n # discovery_info.add_ip(ip)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates a CIDR block with your subnet. You can only associate a single IPv6 CIDR block with your subnet. An IPv6 CIDR block must have a prefix length of /64.
def associate_subnet_cidr_block(SubnetId=None, Ipv6CidrBlock=None): pass
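A minimal usage sketch for the operation this stub describes, assuming the boto3 EC2 client (which exposes a call with the same name and parameters); the subnet ID and the /64 block below are hypothetical placeholders, not values taken from this record.

import boto3

ec2 = boto3.client("ec2")

# Associate a /64 IPv6 CIDR block with an existing subnet. The block must
# come from the IPv6 range already associated with the subnet's VPC, and
# only one IPv6 block may be associated with a subnet at a time.
response = ec2.associate_subnet_cidr_block(
    SubnetId="subnet-0123456789abcdef0",      # placeholder subnet ID
    Ipv6CidrBlock="2001:db8:1234:1a00::/64",  # must be a /64 prefix
)
print(response["Ipv6CidrBlockAssociation"]["AssociationId"])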
[ "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def set_subnet_cidr(value, yaml_file):\n yaml_content = get_yaml(elife_global_yaml)\n yaml_content[\"defaults\"][\"aws\"][\"subnet-cidr\"] = value\n write_yaml(yaml_content, yaml_file)", "def add_ip6_addr(self, prefix, subnet, mac, interface, interface_label):\n new_ip = silk_ip.assemble(prefix, subnet, mac)\n command = \"ip addr add %s/64 dev %s\" % (new_ip, interface)\n self.store_data(new_ip, interface_label)\n self.make_netns_call_async(command, \"\", 1)\n self.make_netns_call_async(\"ifconfig\", \"\", 1)", "def allocate_subnet(self, request):", "def ipv6_cidr_block_association_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_cidr_block_association_id\")", "def cidr(self, value: str):\n self._properties[\"cidr\"] = value", "def _generate_subnet_cidr(cls, network_id):\n if network_id in cls._subnet_cidrs:\n cidr_no = cls._subnet_cidrs[network_id]\n if cidr_no > 255:\n # NOTE(amaretskiy): consider whether max number of\n # 255 subnets per network is enough.\n raise ValueError(\n \"can not generate more than 255 subnets CIDRs \"\n \"per one network due to IP pattern limitation\")\n else:\n cidr_no = 0\n\n cls._subnet_cidrs[network_id] = cidr_no + 1\n return cls.SUBNET_CIDR_PATTERN % cidr_no", "def ipv6_cidr_block_association_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ipv6_cidr_block_association_id\")", "def calculate_link_ip_addresses_ipv4(address_str, cidr_str, supernet_str):\n # Split address into octets and turn CIDR, supernet mask into int\n address = address_str.split('.')\n cidr = int(cidr_str)\n supernet = int(supernet_str)\n supernet_range = (1 << (32 - supernet)) - 2\n base_addr = int(address[3])\n\n # Initialize the netmask and calculate based on CIDR mask\n mask = [0, 0, 0, 0]\n for i in range(cidr):\n mask[i // 8] += (1 << (7 - i % 8))\n\n # Initialize net and binary and netmask with addr to get network\n network = []\n for i in range(4):\n network.append(int(address[i]) & mask[i])\n\n # Duplicate net into broad array, gather host bits, and generate broadcast\n broadcast = list(network)\n broadcast_range = 32 - cidr\n for i in range(broadcast_range):\n broadcast[3 - i // 8] += (1 << (i % 8))\n\n last_ip = list(broadcast)\n diff = base_addr % (supernet_range + 2)\n host = base_addr - diff\n count, hostmin, hostmax = 0, 0, 0\n third_octet = network[2]\n available_ips = []\n while third_octet <= last_ip[2]:\n ips_list = []\n while count < last_ip[3]:\n hostmin = host + 1\n hostmax = hostmin + supernet_range - 1\n if supernet == 31:\n while hostmax <= hostmin:\n ips_list.append(hostmax)\n hostmax += 1\n host = hostmin + 1\n else:\n while hostmin <= hostmax:\n ips_list.append(hostmin)\n hostmin += 1\n host = hostmax + 2\n\n count = host\n\n list_index = 0\n ip_address = str(last_ip[0]) + '.' + str(last_ip[1]) + '.'\n ip_address += str(third_octet)\n while list_index < len(ips_list):\n ip = ip_address + '.' 
+ str(ips_list[list_index]) + \"/\"\n ip += supernet_str\n available_ips.append(ip)\n list_index += 1\n\n host, count, hostmax, hostmin = 0, 0, 0, 0\n third_octet += 1\n\n return available_ips", "def calculate_link_ip_addresses_ipv6(address_str, cidr_str, supernet_str, ip_count):\n if '::' in address_str:\n add_str = ''\n count = (address_str.count(':'))\n if address_str[-1] == ':':\n count -= 2\n while count < 7:\n add_str += ':0'\n count += 1\n else:\n while count < 8:\n add_str += ':0'\n count += 1\n add_str += ':'\n\n address_str = address_str.replace('::', add_str)\n\n address = address_str.split(':')\n cidr = int(cidr_str)\n supernet = int(supernet_str)\n\n mask_cidr = find_mask_v6(cidr)\n network = find_network_v6(address, mask_cidr)\n broadcast = find_broadcast_v6(network, cidr)\n\n mask_supernet = find_mask_v6(supernet)\n network_hex = []\n for i in range(8):\n network_hex.append(hex(network[i])[2:])\n network_supernet = find_network_v6(address, mask_supernet)\n broadcast_supernet = find_broadcast_v6(network_supernet, supernet)\n\n initial_ip = network_supernet[7]\n ip_checking = list(network_supernet)\n while not(initial_ip >= broadcast[7] and ip_checking[:7] == broadcast[:7]):\n initial_ip = network_supernet[7]\n ips_list = []\n no_of_ip = 0\n while initial_ip <= broadcast_supernet[7] and no_of_ip < ip_count:\n ip = list(network_supernet)\n ip[7] = initial_ip\n\n for i in range(0, 8):\n ip[i] = hex(ip[i])[2:]\n\n ip = ':'.join(ip)\n ip += '/' + str(supernet)\n ips_list.append(ip)\n initial_ip += 1\n no_of_ip += 1\n ip_checking = list(broadcast_supernet)\n initial_ip = broadcast_supernet[7]\n network_supernet = find_network_supernet_v6(broadcast_supernet, cidr, supernet)\n broadcast_supernet = find_broadcast_v6(network_supernet, supernet)\n\n yield ips_list", "def modify_subnet_attribute(SubnetId=None, MapPublicIpOnLaunch=None, AssignIpv6AddressOnCreation=None):\n pass", "def test_create_subnet_with_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'cidr': '10.0.0.0/24',\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n subnet = self.deserialize(self.fmt, res)['subnet']\n self.assertIsNone(subnet['subnetpool_id'])", "def associate_network_acl_to_subnet(\n network_acl_id=None,\n subnet_id=None,\n network_acl_name=None,\n subnet_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if network_acl_name:\n network_acl_id = _get_resource_id(\n \"network_acl\",\n network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not network_acl_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"Network ACL {} does not exist.\".format(network_acl_name)\n },\n }\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"associated\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(network_acl_id, subnet_id)\n if 
association_id:\n log.info(\n \"Network ACL with id %s was associated with subnet %s\",\n network_acl_id,\n subnet_id,\n )\n\n return {\"associated\": True, \"id\": association_id}\n else:\n log.warning(\n \"Network ACL with id %s was not associated with subnet %s\",\n network_acl_id,\n subnet_id,\n )\n return {\n \"associated\": False,\n \"error\": {\"message\": \"ACL could not be assocaited.\"},\n }\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def update_subnet(self, request):", "def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None):\n pass", "def subnet_put_api(subnetid=None):\r\n try:\r\n if not subnetid:\r\n return err_return('subnetId is required', \"ParameterInvalid\",\r\n \"\", HTTP_BAD_REQUEST)\r\n db_subnet = subnet_db_get_one('*', id=subnetid)\r\n if not db_subnet:\r\n return err_return('subnetId does not exist', \"ParameterInvalid\",\r\n \"\", HTTP_NOT_FOUND)\r\n cidr = db_subnet['cidr']\r\n try:\r\n req = models.Subnet(request.json)\r\n req.validate()\r\n except Exception as e:\r\n log.error(e)\r\n return err_return('Parameter Invalid', \"ParameterInvalid\",\r\n \"\", HTTP_BAD_REQUEST)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n if req.subnet_name is not None:\r\n if len(req.subnet_name) > NAME_MAX_LEN:\r\n return err_return('Length of name must be less than 255',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n sql = \"UPDATE neutron_subnets SET name=%s WHERE id=%s\"\r\n cursor.execute(sql, (req.subnet_name, subnetid))\r\n if req.dns_nameservers is not None:\r\n sql = (\"UPDATE neutron_subnets SET \"\r\n \"dns_nameservers=%s WHERE id=%s\")\r\n cursor.execute(sql,\r\n (json.dumps(req.dns_nameservers), subnetid))\r\n if req.allocation_pools is not None:\r\n allocation_pools = []\r\n for all_pool in req.allocation_pools:\r\n allocation_pools.append(all_pool.to_primitive())\r\n req.allocation_pools = allocation_pools\r\n for pool in req.allocation_pools:\r\n if ip_to_bin(pool['start']) > ip_to_bin(pool['end']):\r\n return err_return(\"end_ip must be more than start_ip\",\r\n \"IPRangeError\", \"\", HTTP_BAD_REQUEST)\r\n networkid = subnetid_to_networkid(subnetid)\r\n db_network = network_db_get_one('*', id=networkid)\r\n external = db_network['external']\r\n log.debug('external=%s' % external)\r\n if external:\r\n if req.allocation_pools is not None:\r\n old_alloc_pools = json.loads(db_subnet['allocation_pools'])\r\n old_alloc_ips = alloc_pools_to_ip_list(old_alloc_pools)\r\n new_alloc_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n tmp_nips = copy.deepcopy(new_alloc_ips)\r\n for new_ip in tmp_nips:\r\n if new_ip in old_alloc_ips:\r\n new_alloc_ips.remove(new_ip)\r\n old_alloc_ips.remove(new_ip)\r\n isp = lc_vl2_db_get_one('isp', lcuuid=db_network['lcuuid'])\r\n items = lc_ip_res_db_get_all(req='ip, userid, vifid',\r\n isp=isp)\r\n isp_all_ips = []\r\n ip_to_userid = {}\r\n ip_to_vifid = {}\r\n for it in items:\r\n isp_all_ips.append(it['ip'])\r\n ip_to_userid[it['ip']] = it['userid']\r\n ip_to_vifid[it['ip']] = it['vifid']\r\n for new_alloc_ip in new_alloc_ips:\r\n if new_alloc_ip not in isp_all_ips:\r\n return err_return(\"%s invalid\" % new_alloc_ip,\r\n \"IPInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if ip_to_userid[new_alloc_ip] != 0:\r\n return err_return(\"%s in use\" % new_alloc_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n for old_alloc_ip in old_alloc_ips:\r\n if ip_to_vifid[old_alloc_ip] != 0:\r\n return err_return(\"%s in use\" % 
old_alloc_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n sql = (\"UPDATE neutron_subnets SET allocation_pools='%s' \"\r\n \"WHERE id='%s'\" % (json.dumps(req.allocation_pools),\r\n subnetid))\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n sql = (\"UPDATE ip_resource_v2_2 SET userid=0 \"\r\n \"WHERE ip in ('-1',\")\r\n for ip in old_alloc_ips:\r\n sql += \"'%s',\" % ip\r\n sql = sql[:-1]\r\n sql += \")\"\r\n sql2 = (\"UPDATE ip_resource_v2_2 SET userid=%s \"\r\n \"WHERE ip in ('-1',\")\r\n for ip in new_alloc_ips:\r\n sql2 += \"'%s',\" % ip\r\n sql2 = sql2[:-1]\r\n sql2 += \")\"\r\n with MySQLdb.connect(**LCDB_INFO) as cursor:\r\n cursor.execute(sql)\r\n cursor.execute(sql2, conf.livecloud_userid)\r\n return subnet_get(subnetid=subnetid)\r\n\r\n if req.gateway_ip is not None:\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n sql = \"UPDATE neutron_subnets SET gateway_ip=%s WHERE id=%s\"\r\n cursor.execute(sql, (req.gateway_ip, subnetid))\r\n log.debug('old_cidr=%s, new_cidr=%s' % (cidr, req.cidr))\r\n if req.cidr and cidr != req.cidr:\r\n vl2lcid = yynetworkid_to_lcvl2id(networkid)\r\n nets = [{\"prefix\": VFW_TOR_LINK_NET_PRE,\r\n \"netmask\": VFW_TOR_LINK_NET_MASK}]\r\n subnets = get_subnets_by_network(networkid)\r\n for subnet in subnets:\r\n if str(subnet['id']) == subnetid:\r\n continue\r\n cidr = subnet['cidr'].split('/')\r\n nets.append({\"prefix\": cidr[0], \"netmask\": int(cidr[1])})\r\n cidr = str(req.cidr).split('/')\r\n log.debug('netmask=%s' % cidr[1])\r\n nets.append({\"prefix\": cidr[0], \"netmask\": int(cidr[1])})\r\n nw_name = network_db_get_one('name', id=networkid)\r\n payload = json.dumps({\"name\": nw_name, \"nets\": nets})\r\n log.debug('patch vl2 data=%s' % payload)\r\n r = lcapi.patch(conf.livecloud_url + '/v1/vl2s/%s' % vl2lcid,\r\n data=payload)\r\n if r.status_code != HTTP_OK:\r\n err = r.json()['DESCRIPTION']\r\n log.error(err)\r\n return err_return(err, 'Fail', '', HTTP_BAD_REQUEST)\r\n nets = r.json()['DATA']['NETS']\r\n for net in nets:\r\n if subnet_equ(net['PREFIX'], cidr[0],\r\n net['NETMASK'], int(cidr[1])):\r\n sb_lcuuid = net['LCUUID']\r\n sb_idx = net['NET_INDEX']\r\n break\r\n else:\r\n log.error('sb_lcuuid no found')\r\n return Response(json.dumps(NEUTRON_500)), \\\r\n HTTP_INTERNAL_SERVER_ERROR\r\n if req.allocation_pools is None:\r\n req.allocation_pools = []\r\n else:\r\n req.cidr = db_subnet['cidr']\r\n sb_lcuuid = db_subnet['lcuuid']\r\n sb_idx = db_subnet['net_idx']\r\n if req.allocation_pools is None:\r\n return subnet_get(subnetid=subnetid)\r\n new_alloc_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n vl2id = lc_vl2_db_get_one('id', lcuuid=sb_lcuuid)\r\n used_ips = lc_vif_ip_db_get_all('ip', vl2id=vl2id,\r\n net_index=sb_idx)\r\n for used_ip in used_ips:\r\n ip = used_ip['ip']\r\n if ip not in new_alloc_ips:\r\n return err_return('used ip(%s) not in alloc pool' % ip,\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n\r\n sql = (\"UPDATE neutron_subnets SET cidr='%s', \"\r\n \"allocation_pools='%s', lcuuid='%s', net_idx=%s \"\r\n \"WHERE id='%s'\" %\r\n (req.cidr, json.dumps(req.allocation_pools),\r\n sb_lcuuid, sb_idx, subnetid))\r\n log.debug('sql=%s' % sql)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n return subnet_get(subnetid=subnetid)\r\n except Exception as e:\r\n log.error(e)\r\n return Response(json.dumps(NEUTRON_500)), HTTP_INTERNAL_SERVER_ERROR", "def get_new_cidr(uuid):\n global lock\n lock.acquire()\n\n global __current_ip\n while __first_ip <= __current_ip < 
__last_ip and __current_ip in __issued_ips:\n __current_ip += __default_subnet_size\n\n if __current_ip >= __last_ip or __current_ip < __first_ip or __current_ip in __issued_ips:\n return None\n\n __issued_ips[__current_ip] = uuid\n lock.release()\n\n return Net.int_2_ip(__current_ip) + '/' + str(__default_subnet_bitmask)", "def manual_ipv6_infrastructure_allocation(anm):\n\n import netaddr\n g_ipv6 = anm['ipv6']\n log.info('Using specified IPv6 infrastructure allocation')\n\n for node in g_ipv6.l3devices():\n for interface in node.physical_interfaces:\n if not interface['input'].is_bound:\n continue # unbound interface\n ip_address = netaddr.IPAddress(interface['input'\n ].ipv6_address)\n prefixlen = interface['input'].ipv6_prefixlen\n interface.ip_address = ip_address\n interface.prefixlen = prefixlen\n cidr_string = '%s/%s' % (ip_address, prefixlen)\n interface.subnet = netaddr.IPNetwork(cidr_string)\n\n broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]\n\n # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen\n\n from netaddr import IPNetwork\n for coll_dom in broadcast_domains:\n connected_interfaces = [edge.dst_int for edge in\n coll_dom.edges()]\n cd_subnets = [IPNetwork('%s/%s' % (i.subnet.network,\n i.prefixlen)) for i in connected_interfaces]\n\n\n if len(cd_subnets) == 0:\n log.warning(\"Collision domain %s is not connected to any nodes\" % coll_dom)\n continue\n\n try:\n assert len(set(cd_subnets)) == 1\n except AssertionError:\n mismatch_subnets = '; '.join('%s: %s/%s' % (i,\n i.subnet.network, i.prefixlen) for i in\n connected_interfaces)\n log.warning('Non matching subnets from collision domain %s: %s'\n % (coll_dom, mismatch_subnets))\n else:\n coll_dom.subnet = cd_subnets[0] # take first entry\n\n # apply to remote interfaces\n\n for edge in coll_dom.edges():\n edge.dst_int.subnet = coll_dom.subnet\n\n # also need to form aggregated IP blocks (used for e.g. routing prefix\n # advertisement)\n # import autonetkit\n # autonetkit.update_http(anm)\n\n infra_blocks = {}\n for (asn, devices) in g_ipv6.groupby('asn').items():\n broadcast_domains = [d for d in devices if d.broadcast_domain]\n subnets = [cd.subnet for cd in broadcast_domains\n if cd.subnet is not None] # only if subnet is set\n infra_blocks[asn] = netaddr.cidr_merge(subnets)\n\n g_ipv6.data.infra_blocks = infra_blocks", "def allocate_subnet(self, subnet_lst, net_id=None):\n\n session = db.get_session()\n query_str = None\n for sub in subnet_lst:\n sub_que = (self.model.subnet_address != sub)\n if query_str is not None:\n query_str = query_str & sub_que\n else:\n query_str = sub_que\n with session.begin(subtransactions=True):\n select = (session.query(self.model).filter(\n (self.model.allocated == 0) & query_str))\n\n # Selected segment can be allocated before update by someone else,\n # We retry until update success or DB_MAX_RETRIES retries\n for attempt in range(DB_MAX_RETRIES + 1):\n alloc = select.first()\n if not alloc:\n LOG.info(_LI(\"No subnet resource available\"))\n return\n count = (session.query(self.model).\n filter_by(subnet_address=alloc.subnet_address,\n allocated=False).update({\"allocated\": True,\n \"network_id\": net_id}))\n if count:\n return alloc.subnet_address\n\n LOG.error(_LE(\"ERROR: Failed to allocate subnet for net %(net)s\"),\n {'net': net_id})\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates a CIDR block with your VPC. You can only associate a single Amazon-provided IPv6 CIDR block with your VPC. The IPv6 CIDR block size is fixed at /56.
def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None): pass
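As a hedged illustration, the same operation through the boto3 EC2 client might look like the sketch below; the VPC ID is a hypothetical placeholder, and because the Amazon-provided block is fixed at /56, only a boolean flag is passed rather than an explicit CIDR.

import boto3

ec2 = boto3.client("ec2")

# Request an Amazon-provided IPv6 CIDR block for the VPC. The size is
# fixed at /56, and only one such block can be associated with a VPC.
response = ec2.associate_vpc_cidr_block(
    VpcId="vpc-0123456789abcdef0",     # placeholder VPC ID
    AmazonProvidedIpv6CidrBlock=True,  # let AWS pick the /56 block
)
assoc = response["Ipv6CidrBlockAssociation"]
print(assoc["AssociationId"], assoc["Ipv6CidrBlockState"]["State"])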
[ "def associate_subnet_cidr_block(SubnetId=None, Ipv6CidrBlock=None):\n pass", "def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None):\n pass", "def set_cidr_ip(value, yaml_file):\n\n new_ports = [22, {4506: {\"cidr-ip\": value}}, {4505: {\"cidr-ip\": value}} ]\n\n print \"setting cidr_ip \" + value\n yaml_content = get_yaml(yaml_file)\n yaml_content[\"master-server\"][\"aws\"][\"ports\"] = new_ports\n write_yaml(yaml_content, yaml_file)", "def set_subnet_cidr(value, yaml_file):\n yaml_content = get_yaml(elife_global_yaml)\n yaml_content[\"defaults\"][\"aws\"][\"subnet-cidr\"] = value\n write_yaml(yaml_content, yaml_file)", "def allocate_subnet(self, request):", "def manual_ipv6_infrastructure_allocation(anm):\n\n import netaddr\n g_ipv6 = anm['ipv6']\n log.info('Using specified IPv6 infrastructure allocation')\n\n for node in g_ipv6.l3devices():\n for interface in node.physical_interfaces:\n if not interface['input'].is_bound:\n continue # unbound interface\n ip_address = netaddr.IPAddress(interface['input'\n ].ipv6_address)\n prefixlen = interface['input'].ipv6_prefixlen\n interface.ip_address = ip_address\n interface.prefixlen = prefixlen\n cidr_string = '%s/%s' % (ip_address, prefixlen)\n interface.subnet = netaddr.IPNetwork(cidr_string)\n\n broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]\n\n # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen\n\n from netaddr import IPNetwork\n for coll_dom in broadcast_domains:\n connected_interfaces = [edge.dst_int for edge in\n coll_dom.edges()]\n cd_subnets = [IPNetwork('%s/%s' % (i.subnet.network,\n i.prefixlen)) for i in connected_interfaces]\n\n\n if len(cd_subnets) == 0:\n log.warning(\"Collision domain %s is not connected to any nodes\" % coll_dom)\n continue\n\n try:\n assert len(set(cd_subnets)) == 1\n except AssertionError:\n mismatch_subnets = '; '.join('%s: %s/%s' % (i,\n i.subnet.network, i.prefixlen) for i in\n connected_interfaces)\n log.warning('Non matching subnets from collision domain %s: %s'\n % (coll_dom, mismatch_subnets))\n else:\n coll_dom.subnet = cd_subnets[0] # take first entry\n\n # apply to remote interfaces\n\n for edge in coll_dom.edges():\n edge.dst_int.subnet = coll_dom.subnet\n\n # also need to form aggregated IP blocks (used for e.g. 
routing prefix\n # advertisement)\n # import autonetkit\n # autonetkit.update_http(anm)\n\n infra_blocks = {}\n for (asn, devices) in g_ipv6.groupby('asn').items():\n broadcast_domains = [d for d in devices if d.broadcast_domain]\n subnets = [cd.subnet for cd in broadcast_domains\n if cd.subnet is not None] # only if subnet is set\n infra_blocks[asn] = netaddr.cidr_merge(subnets)\n\n g_ipv6.data.infra_blocks = infra_blocks", "def create_subnets(ec2, vpc, subnets):\n # Generate candidate subnet CIDRs by shifting the VPC's prefix by 4 bits, yielding 16 possible subnet\n # CIDRs.\n vpc_cidr = ipaddress.ip_network(vpc.cidr_block)\n subnet_cidrs = list(vpc_cidr.subnets(prefixlen_diff=4))\n\n # The set difference between the availability zones that already have subnets and the availability zones\n # available in the region yields the set of availability zones where subnets must be created.\n subnet_azs = frozenset(map(lambda subnet: subnet.availability_zone, subnets))\n available_azs = frozenset(map(\n lambda az: az[\"ZoneName\"], ec2.meta.client.describe_availability_zones()[\"AvailabilityZones\"]))\n\n for az in (available_azs - subnet_azs):\n # If subnets already exist, their CIDRs may conflict with the candidate CIDRs that were generated.\n # Loop through the candidate list until subnet creation does not fail with a CIDR conflict error, or\n # until no candidates remain.\n while len(subnet_cidrs) > 0:\n try:\n cidr = subnet_cidrs.pop(0)\n subnet = vpc.create_subnet(AvailabilityZone=az, CidrBlock=cidr.with_prefixlen)\n # Ensure that the new subnet has the MapPublicIpOnLaunch attribute set\n ec2.meta.client.modify_subnet_attribute(SubnetId=subnet.id,\n MapPublicIpOnLaunch={\"Value\": True})\n click.echo(f\"Created new subnet: {subnet.id}\")\n break\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"InvalidSubnet.Conflict\":\n continue\n raise\n else:\n raise CraftingTableError(f\"Could not find valid CIDR to create subnet in {az}\")", "def ipv6_cidr_block_association_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_cidr_block_association_id\")", "def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None):\n pass", "def modify_subnet_attribute(SubnetId=None, MapPublicIpOnLaunch=None, AssignIpv6AddressOnCreation=None):\n pass", "def ipv6_cidr_block_association_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ipv6_cidr_block_association_id\")", "def cidr(self, value: str):\n self._properties[\"cidr\"] = value", "def calculate_link_ip_addresses_ipv6(address_str, cidr_str, supernet_str, ip_count):\n if '::' in address_str:\n add_str = ''\n count = (address_str.count(':'))\n if address_str[-1] == ':':\n count -= 2\n while count < 7:\n add_str += ':0'\n count += 1\n else:\n while count < 8:\n add_str += ':0'\n count += 1\n add_str += ':'\n\n address_str = address_str.replace('::', add_str)\n\n address = address_str.split(':')\n cidr = int(cidr_str)\n supernet = int(supernet_str)\n\n mask_cidr = find_mask_v6(cidr)\n network = find_network_v6(address, mask_cidr)\n broadcast = find_broadcast_v6(network, cidr)\n\n mask_supernet = find_mask_v6(supernet)\n network_hex = []\n for i in range(8):\n network_hex.append(hex(network[i])[2:])\n network_supernet = find_network_v6(address, mask_supernet)\n broadcast_supernet = find_broadcast_v6(network_supernet, supernet)\n\n initial_ip = network_supernet[7]\n ip_checking = list(network_supernet)\n while not(initial_ip >= broadcast[7] and 
ip_checking[:7] == broadcast[:7]):\n initial_ip = network_supernet[7]\n ips_list = []\n no_of_ip = 0\n while initial_ip <= broadcast_supernet[7] and no_of_ip < ip_count:\n ip = list(network_supernet)\n ip[7] = initial_ip\n\n for i in range(0, 8):\n ip[i] = hex(ip[i])[2:]\n\n ip = ':'.join(ip)\n ip += '/' + str(supernet)\n ips_list.append(ip)\n initial_ip += 1\n no_of_ip += 1\n ip_checking = list(broadcast_supernet)\n initial_ip = broadcast_supernet[7]\n network_supernet = find_network_supernet_v6(broadcast_supernet, cidr, supernet)\n broadcast_supernet = find_broadcast_v6(network_supernet, supernet)\n\n yield ips_list", "def get_new_cidr(uuid):\n global lock\n lock.acquire()\n\n global __current_ip\n while __first_ip <= __current_ip < __last_ip and __current_ip in __issued_ips:\n __current_ip += __default_subnet_size\n\n if __current_ip >= __last_ip or __current_ip < __first_ip or __current_ip in __issued_ips:\n return None\n\n __issued_ips[__current_ip] = uuid\n lock.release()\n\n return Net.int_2_ip(__current_ip) + '/' + str(__default_subnet_bitmask)", "def manual_ipv4_infrastructure_allocation(anm):\n\n import netaddr\n g_ipv4 = anm['ipv4']\n log.info('Using specified IPv4 infrastructure allocation')\n\n for node in g_ipv4.l3devices():\n for interface in node.physical_interfaces:\n if not interface['input'].is_bound:\n continue # unbound interface\n ip_address = netaddr.IPAddress(interface['input'\n ].ipv4_address)\n prefixlen = interface['input'].ipv4_prefixlen\n interface.ip_address = ip_address\n interface.prefixlen = prefixlen\n cidr_string = '%s/%s' % (ip_address, prefixlen)\n interface.subnet = netaddr.IPNetwork(cidr_string)\n\n broadcast_domains = [d for d in g_ipv4 if d.broadcast_domain]\n\n # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen\n\n from netaddr import IPNetwork\n for coll_dom in broadcast_domains:\n connected_interfaces = [edge.dst_int for edge in\n coll_dom.edges()]\n cd_subnets = [IPNetwork('%s/%s' % (i.subnet.network,\n i.prefixlen)) for i in connected_interfaces]\n\n\n if len(cd_subnets) == 0:\n log.warning(\"Collision domain %s is not connected to any nodes\" % coll_dom)\n continue\n\n try:\n assert len(set(cd_subnets)) == 1\n except AssertionError:\n mismatch_subnets = '; '.join('%s: %s/%s' % (i,\n i.subnet.network, i.prefixlen) for i in\n connected_interfaces)\n log.warning('Non matching subnets from collision domain %s: %s'\n % (coll_dom, mismatch_subnets))\n else:\n coll_dom.subnet = cd_subnets[0] # take first entry\n\n # apply to remote interfaces\n\n for edge in coll_dom.edges():\n edge.dst_int.subnet = coll_dom.subnet\n\n # also need to form aggregated IP blocks (used for e.g. 
routing prefix\n # advertisement)\n # import autonetkit\n # autonetkit.update_http(anm)\n\n infra_blocks = {}\n for (asn, devices) in g_ipv4.groupby('asn').items():\n broadcast_domains = [d for d in devices if d.broadcast_domain]\n subnets = [cd.subnet for cd in broadcast_domains\n if cd.subnet is not None] # only if subnet is set\n infra_blocks[asn] = netaddr.cidr_merge(subnets)\n\n g_ipv4.data.infra_blocks = infra_blocks", "def calculate_link_ip_addresses_ipv4(address_str, cidr_str, supernet_str):\n # Split address into octets and turn CIDR, supernet mask into int\n address = address_str.split('.')\n cidr = int(cidr_str)\n supernet = int(supernet_str)\n supernet_range = (1 << (32 - supernet)) - 2\n base_addr = int(address[3])\n\n # Initialize the netmask and calculate based on CIDR mask\n mask = [0, 0, 0, 0]\n for i in range(cidr):\n mask[i // 8] += (1 << (7 - i % 8))\n\n # Initialize net and binary and netmask with addr to get network\n network = []\n for i in range(4):\n network.append(int(address[i]) & mask[i])\n\n # Duplicate net into broad array, gather host bits, and generate broadcast\n broadcast = list(network)\n broadcast_range = 32 - cidr\n for i in range(broadcast_range):\n broadcast[3 - i // 8] += (1 << (i % 8))\n\n last_ip = list(broadcast)\n diff = base_addr % (supernet_range + 2)\n host = base_addr - diff\n count, hostmin, hostmax = 0, 0, 0\n third_octet = network[2]\n available_ips = []\n while third_octet <= last_ip[2]:\n ips_list = []\n while count < last_ip[3]:\n hostmin = host + 1\n hostmax = hostmin + supernet_range - 1\n if supernet == 31:\n while hostmax <= hostmin:\n ips_list.append(hostmax)\n hostmax += 1\n host = hostmin + 1\n else:\n while hostmin <= hostmax:\n ips_list.append(hostmin)\n hostmin += 1\n host = hostmax + 2\n\n count = host\n\n list_index = 0\n ip_address = str(last_ip[0]) + '.' + str(last_ip[1]) + '.'\n ip_address += str(third_octet)\n while list_index < len(ips_list):\n ip = ip_address + '.' 
+ str(ips_list[list_index]) + \"/\"\n ip += supernet_str\n available_ips.append(ip)\n list_index += 1\n\n host, count, hostmax, hostmin = 0, 0, 0, 0\n third_octet += 1\n\n return available_ips", "def configPublicNet(self):\n networks = self.handler.getNetworks(self.osid)\n for net in networks['networks']:\n if net['name'] == \"public\":\n net[\"ip_ranges\"] = [[\"10.20.1.10\", \"10.20.1.126\"]]\n net['cidr'] = \"10.20.1.0/24\"\n net['gateway'] = \"10.20.1.1\"\n\n # updates the floating ranges\n rng = [[\"10.20.1.130\", \"10.20.1.254\"]]\n networks['networking_parameters']['floating_ranges'] = rng\n self.handler.uploadNetworks(networks, self.osid)", "def docker_network_three_available_addresses() -> Iterator[Network]:\n # We want to return a Docker network with only three assignable IP\n # addresses.\n # To do this, we create a network with 8 IP addresses, where 5 are\n # reserved.\n #\n # Why we have 8 IP addresses available:\n # * The IP range is \"172.28.0.0/29\"\n # * We get 2 ^ (32 - 29) = 8 IP addresses\n #\n # The 8 IP addresses in the IPAM Pool are:\n # * 172.28.0.0 (reserved because this is the subnet identifier)\n # * 172.28.0.1 (reserved because this is the gateway address)\n # * 172.28.0.2 (available)\n # * 172.28.0.3 (available)\n # * 172.28.0.4 (available)\n # * 172.28.0.5 (reserved because we reserve this with `aux_addresses`)\n # * 172.28.0.6 (reserved because we reserve this with `aux_addresses`)\n # * 172.28.0.7 (reserved because this is the broadcast address)\n client = docker.from_env(version='auto')\n aux_addresses = {\n 'reserved_address_0': '172.28.0.5',\n 'reserved_address_1': '172.28.0.6',\n }\n ipam_pool = docker.types.IPAMPool(\n subnet='172.28.0.0/29',\n iprange='172.28.0.0/29',\n gateway='172.28.0.1',\n aux_addresses=aux_addresses,\n )\n network = client.networks.create(\n name='dcos-e2e-network-{random}'.format(random=uuid.uuid4()),\n driver='bridge',\n ipam=docker.types.IPAMConfig(pool_configs=[ipam_pool]),\n attachable=False,\n )\n try:\n yield network\n finally:\n network.remove()", "def add_ip6_addr(self, prefix, subnet, mac, interface, interface_label):\n new_ip = silk_ip.assemble(prefix, subnet, mac)\n command = \"ip addr add %s/64 dev %s\" % (new_ip, interface)\n self.store_data(new_ip, interface_label)\n self.make_netns_call_async(command, \"\", 1)\n self.make_netns_call_async(\"ifconfig\", \"\", 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC at a time. You can only link an instance that's in the running state. An instance is automatically unlinked from a VPC when it's stopped; you can link it to the VPC again when you restart it. After you've linked an instance, you cannot change the VPC security groups that are associated with it. To change the security groups, you must first unlink the instance, and then link it again. Linking your instance to a VPC is sometimes referred to as attaching your instance.
def attach_classic_link_vpc(DryRun=None, InstanceId=None, VpcId=None, Groups=None): pass
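ClassicLink is a legacy feature that has since been retired, so treat the following only as a historical sketch of how the call was issued through the boto3 EC2 client; the instance, VPC, and security group IDs are hypothetical placeholders, and the operation may be rejected or absent in current AWS accounts.

import boto3

ec2 = boto3.client("ec2")

# Link a running EC2-Classic instance to a ClassicLink-enabled VPC through
# one or more of the VPC's security groups. The groups cannot be changed
# after linking; the instance must be unlinked and then linked again.
response = ec2.attach_classic_link_vpc(
    InstanceId="i-0123456789abcdef0",  # placeholder ID; instance must be running
    VpcId="vpc-0123456789abcdef0",     # placeholder ClassicLink-enabled VPC
    Groups=["sg-0123456789abcdef0"],   # placeholder security group IDs
)
print(response["Return"])  # True on success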
[ "def vpc_classic_link_security_groups(self) -> Sequence[str]:\n warnings.warn(\"\"\"With the retirement of EC2-Classic the vpc_classic_link_security_groups attribute has been deprecated and will be removed in a future version.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"vpc_classic_link_security_groups is deprecated: With the retirement of EC2-Classic the vpc_classic_link_security_groups attribute has been deprecated and will be removed in a future version.\"\"\")\n\n return pulumi.get(self, \"vpc_classic_link_security_groups\")", "def describe_vpc_classic_link_dns_support(VpcIds=None, MaxResults=None, NextToken=None):\n pass", "def describe_vpc_classic_link(DryRun=None, VpcIds=None, Filters=None):\n pass", "def describe_classic_link_instances(DryRun=None, InstanceIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass", "def vpc_classic_link_id(self) -> str:\n warnings.warn(\"\"\"With the retirement of EC2-Classic the vpc_classic_link_id attribute has been deprecated and will be removed in a future version.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"vpc_classic_link_id is deprecated: With the retirement of EC2-Classic the vpc_classic_link_id attribute has been deprecated and will be removed in a future version.\"\"\")\n\n return pulumi.get(self, \"vpc_classic_link_id\")", "def launch_instance(key_name, security_group):\n # Create Key Pair if it does not already exist\n key_names = get_key_pairs()\n if key_name not in key_names:\n create_key_pair(key_name, True)\n print()\n elif not os.path.isfile(key_name):\n delete_key_pair(key_name, True)\n print()\n create_key_pair(key_name, True)\n print()\n\n # Create Security Group if it does not already exist\n names = get_security_group_names()\n if security_group not in names:\n group_id = create_security_group(security_group)\n\n # Create EC2 Instance\n ec2 = boto3.client('ec2', AVAILABILITY_ZONE)\n response = ec2.run_instances(\n ImageId=AMI_IMAGE_ID,\n InstanceType=AMI_INSTANCE_TYPE,\n KeyName=key_name,\n MinCount=1,\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='terminate',\n SecurityGroups=[\n security_group\n ],\n )\n instance = response['Instances'][0]\n instance_id = instance['InstanceId']\n print(f\"Launched EC2 Instance with: ID={instance_id}\")\n print(f\"Terminate this instance with the script: terminate_ec2_{instance_id[-4:]}.sh\")\n with open(f\"terminate_ec2_{instance_id[-4:]}.sh\", \"w\") as f:\n f.write(f\"python {sys.argv[0]} --terminate_id {instance_id}\")\n\n print(\"Waiting for public dns\", end='')\n while True:\n instance_info = describe_instances([instance_id])\n public_dns = instance_info['Reservations'][0]['Instances'][0]['PublicDnsName']\n if public_dns != '':\n print(f\"\\nPublic DNS: {public_dns}\")\n break\n print('.', end='')\n sys.stdout.flush()\n time.sleep(1)\n\n ssh_command = f'ssh -i {key_name} ec2-user@{public_dns}'\n with open('ssh_to_ec2.sh', 'w') as f:\n f.write(ssh_command)\n\n print('Access the EC2 instance with ssh_to_ec2.sh, or run following command directly:')\n print(ssh_command)\n return response", "def DescribeClassicLinkInstances(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeClassicLinkInstances\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeClassicLinkInstancesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, 
str(e))", "def control_instance(stackName, action, instanceName=None):\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n stackName = stackName.lower()\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n stackId = [stack['StackId'] for stack in stacks['Stacks'] if stack['Name'] == stackName]\n if stackId == []:\n print(_red(\"stack %s not found\" % stackName))\n return 1\n instances = opsworks.describe_instances(stack_id=stackId[0])['Instances']\n if instanceName is not None:\n instances = [instance for instance in instances if instance['Hostname'] == instanceName]\n\n ec2 = connect_to_ec2()\n for instance in instances:\n if action == 'start':\n print(_green(\"starting instance: %s\" % instance['Hostname']))\n try:\n opsworks.start_instance(instance_id=instance['InstanceId'])\n except ValidationException:\n pass\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n spinner = Spinner(_yellow(\"[%s]Waiting for reservation \" % myinstance['Hostname']), hide_cursor=False)\n while myinstance['Status'] == 'requested':\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks instance status: %s\" % (myinstance['Hostname'], myinstance['Status'])))\n ec2Instance = ec2.get_only_instances(instance_ids=[myinstance['Ec2InstanceId']])[0]\n spinner = Spinner(_yellow(\"[%s]Booting ec2 instance \" % myinstance['Hostname']), hide_cursor=False)\n while ec2Instance.state != u'running':\n spinner.next()\n time.sleep(1)\n ec2Instance.update()\n print(_green(\"\\n[%s]ec2 Instance state: %s\" % (myinstance['Hostname'], ec2Instance.state)))\n spinner = Spinner(_yellow(\"[%s]Running OpsWorks setup \" % myinstance['Hostname']), hide_cursor=False)\n while myinstance['Status'] != 'online':\n if myinstance['Status'] == 'setup_failed':\n print(_red(\"\\n[%s]OpsWorks instance failed\" % myinstance['Hostname']))\n return 1\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks Instance state: %s\" % (myinstance['Hostname'], myinstance['Status'])))\n getec2instances()\n elif action == 'stop':\n if 'Ec2InstanceId' in instance.keys():\n print(_green(\"Stopping instance %s\" % instance['Hostname']))\n opsworks.stop_instance(instance_id=instance['InstanceId'])\n ec2Instance = ec2.get_only_instances(instance_ids=[instance['Ec2InstanceId']])[0]\n spinner = Spinner(_yellow(\"[%s]Waiting for ec2 instance to stop \" % instance['Hostname']), hide_cursor=False)\n while ec2Instance.state != u'stopped':\n spinner.next()\n time.sleep(1)\n ec2Instance.update()\n print(_green(\"\\n[%s]ec2 Instance state: %s\" % (instance['Hostname'], ec2Instance.state)))\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n spinner = Spinner(_yellow(\"[%s]Stopping OpsWorks Instance \" % instance['Hostname']), hide_cursor=False)\n while myinstance['Status'] != 'stopped':\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks Instance state: %s\" % (instance['Hostname'], myinstance['Status'])))\n else:\n print(_green(\"%s in %s already stopped\" 
% (instance['Hostname'], stackName)))\n try:\n print(_green(\"removing %s from ssh config...\" % instance['PublicDns']))\n removefromsshconfig(dns=instance['PublicDns'])\n except Exception:\n pass", "def test_050_create_security_groups(self):\n sg = self.vpc_client.create_security_group(\n data_utils.rand_name(\"WebServerSG-\"),\n data_utils.rand_name(\"description \"),\n self.ctx.vpc.id)\n self.assertIsNotNone(sg)\n self.assertTrue(sg.id)\n self.addResourceCleanUp(self._destroy_security_group_wait, sg)\n self.ctx.web_security_group = sg\n sg = self.vpc_client.create_security_group(\n data_utils.rand_name(\"NATSG-\"),\n data_utils.rand_name(\"description \"),\n self.ctx.vpc.id)\n self.assertIsNotNone(sg)\n self.assertTrue(sg.id)\n self.addResourceCleanUp(self._destroy_security_group_wait, sg)\n self.ctx.nat_security_group = sg\n sg = self.vpc_client.create_security_group(\n data_utils.rand_name(\"DBServerSG-\"),\n data_utils.rand_name(\"description \"),\n self.ctx.vpc.id)\n self.assertIsNotNone(sg)\n self.assertTrue(sg.id)\n self.addResourceCleanUp(self._destroy_security_group_wait, sg)\n self.ctx.db_security_group = sg\n\n sg = self.ctx.web_security_group\n status = self.vpc_client.revoke_security_group_egress(\n sg.id, \"-1\", cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 1433, 1433,\n src_group_id=self.ctx.db_security_group.id)\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 3306, 3306,\n src_group_id=self.ctx.db_security_group.id)\n self.assertTrue(status)\n # NOTE(ft): especially for connectivity test\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 80, 80, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n # NOTE(ft): especially for connectivity test\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 22, 22,\n src_group_id=self.ctx.db_security_group.id)\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=80, to_port=80,\n cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=443, to_port=443,\n cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=22, to_port=22,\n cidr_ip=str(self.test_client_cidr))\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=3389,\n to_port=3389, cidr_ip=str(self.test_client_cidr))\n self.assertTrue(status)\n\n sg = self.ctx.nat_security_group\n status = self.vpc_client.revoke_security_group_egress(\n sg.id, \"-1\", cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 80, 80, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 443, 443, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"udp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=53,\n to_port=53, 
cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"udp\", from_port=53,\n to_port=53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=80, to_port=80,\n cidr_ip=str(self.db_subnet))\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=443, to_port=443,\n cidr_ip=str(self.db_subnet))\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=22, to_port=22,\n cidr_ip=str(self.test_client_cidr))\n self.assertTrue(status)\n\n sg = self.ctx.db_security_group\n status = self.vpc_client.revoke_security_group_egress(\n sg.id, \"-1\", cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 80, 80, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 443, 443, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"udp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\",\n from_port=1433,\n to_port=1433,\n src_security_group_group_id=self.ctx.web_security_group.id)\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\",\n from_port=3306,\n to_port=3306,\n src_security_group_group_id=self.ctx.web_security_group.id)\n self.assertTrue(status)\n # NOTE(ft): especially for connectivity test\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\",\n from_port=22,\n to_port=22,\n src_security_group_group_id=self.ctx.web_security_group.id)\n self.assertTrue(status)", "def configure_instance_security(self, parameters):\n keyname = parameters[self.PARAM_KEYNAME]\n group = parameters[self.PARAM_GROUP]\n\n key_path = '{}/{}.key'.format(utils.KEY_DIRECTORY, keyname)\n ssh_key = os.path.abspath(key_path)\n utils.log('About to spawn OpenStack instances - ' \\\n 'Expecting to find a key at {0}'.format(ssh_key))\n if os.path.exists(ssh_key):\n utils.log('SSH keys found in the local system - '\n 'Not initializing OpenStack security')\n return False\n\n try:\n conn = self.open_connection(parameters)\n key_pair = conn.get_key_pair(keyname)\n if key_pair is None:\n utils.log('Creating key pair: {0}'.format(keyname))\n key_pair = conn.create_key_pair(keyname)\n utils.write_key_file(ssh_key, key_pair.material)\n\n security_groups = conn.get_all_security_groups()\n group_exists = False\n for security_group in security_groups:\n if security_group.name == group:\n group_exists = True\n break\n\n if not group_exists:\n utils.log('Creating security group: {0}'.format(group))\n conn.create_security_group(group, 'AppScale security group')\n conn.authorize_security_group(group, from_port=1,\\\n to_port=65535, ip_protocol='udp')\n conn.authorize_security_group(group, from_port=1,\\\n to_port=65535, ip_protocol='tcp')\n #TODO: Check if ec2_agent can be change to include the from_port\n # and the to_port. 
If yes, remove this method.\n conn.authorize_security_group(group, from_port=-1, to_port=-1, \\\n ip_protocol='icmp', cidr_ip='0.0.0.0/0')\n return True\n\n except EC2ResponseError as exception:\n self.handle_failure('OpenStack response error while initializing '\n 'security: {0}'.format(exception.error_message))\n except Exception as exception:\n self.handle_failure('Error while initializing OpenStack '\n 'security: {0}'.format(exception.message))", "def create_security_group():\n conn = boto.connect_ec2()\n sec_group = conn.create_security_group(\"shopply\", \"Shopply servers security group\")\n sec_group.authorize('tcp', 80, 80, '0.0.0.0/0')\n sec_group.authorize('tcp', 22, 22, '0.0.0.0/0')\n sec_group.authorize('tcp', 8080, 8080, '0.0.0.0/0')\n sec_group.authorize('tcp', 9001, 9001, '0.0.0.0/0')", "def assign_private_ip_addresses(NetworkInterfaceId=None, PrivateIpAddresses=None, SecondaryPrivateIpAddressCount=None, AllowReassignment=None):\n pass", "def recycle_elastigroup(module):\n ssi = module.params.get('stateful_instance_id')\n wait_timeout = int(module.params.get('wait_timeout'))\n endpoint = \"aws/ec2/group/{}/statefulInstance/{}/recycle?accountId={}\".format(module.params.get('esg_id'),\n ssi,\n module.params.get('account_id'))\n\n # Safety check as Stateful operations can only be performed when instance is in ACTIVE state\n _wait_for_stateful_instance(module, wait_timeout=wait_timeout, pending_state='ACTIVE')\n\n _call_spotinst_api(module, endpoint=endpoint, method='PUT')\n recycled_instance = _wait_for_stateful_instance(module, wait_timeout=wait_timeout, pending_state='RECYCLING')\n\n # If a Stateful instance does no have privateIp persistance gather new privateIp\n if 'privateIp' not in recycled_instance:\n endpoint = \"aws/ec2/group/{}?accountId={}\".format(module.params.get('esg_id'),\n module.params.get('account_id'))\n # Gather information about the instance's ESG group to know in which region it is running\n esg_info = _call_spotinst_api(module, endpoint=endpoint)\n\n # Get the first instance found\n ec2 = _get_instances_by_region(module, region=[esg_info['response']['items'][0]['region']], ids=[recycled_instance['instanceId']])[0]\n\n # Append privateIp to the Spotinst instance object\n recycled_instance.update(\n {'privateIp': ec2['PrivateIpAddress']}\n )\n\n _return_result(module=module, changed=True, failed=False, message=recycled_instance)", "def modify_reserved_instances(ClientToken=None, ReservedInstancesIds=None, TargetConfigurations=None):\n pass", "def delete_security_groups():\n print('Deleting Security Groups')\n client = boto3.resource('ec2')\n for security_group in client.security_groups.all():\n print('Deleting Security Group rules for security group {}'.format(security_group.id))\n for perm in security_group.ip_permissions:\n security_group.revoke_ingress(\n IpPermissions=[perm]\n )\n for perm in security_group.ip_permissions_egress:\n security_group.revoke_egress(\n IpPermissions=[perm]\n )\n for security_group in client.security_groups.all():\n if security_group.group_name != 'default':\n print('Deleting Security Group {}'.format(security_group.id))\n security_group.delete()\n print('Security Groups deleted')", "def ec2_start_instances(tag_key, tag_value):\n ec2 = boto3.client(\"ec2\")\n\n for ec2_instance in ec2_list_instances(tag_key, tag_value):\n try:\n ec2.start_instances(InstanceIds=[ec2_instance])\n print(\"Start instances {0}\".format(ec2_instance))\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n if error_code == 
\"UnsupportedOperation\":\n logging.warning(\"%s\", e)\n else:\n logging.error(\"Unexpected error: %s\", e)", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def _create_security_group(client, vpc_id):\n\n res = client.create_security_group(\n Description=\"Allow ssh from user public IP address\",\n GroupName=f'ssh-from-public-ip-{_rand_chars(10)}',\n VpcId=vpc_id,\n )\n\n group_id = res['GroupId']\n\n try:\n public_ip = f'{requests.get(\"https://checkip.amazonaws.com/\").text.strip()}/32'\n except Exception:\n print('encountered error getting public ip; using 0.0.0.0/0 instead')\n public_ip = '0.0.0.0/0'\n\n res = client.authorize_security_group_ingress(\n CidrIp=public_ip,\n FromPort=22,\n GroupId=group_id,\n IpProtocol='tcp',\n ToPort=22,\n )\n\n return group_id", "def enforce_security_groups_rules(self) -> None:\n sagsnl_sg = self.get_security_group(SwiftComponents.SAGSNL + \"SG\")\n rds_sg = self.get_security_group(\"RDSSG\")\n mq_sg = self.get_security_group(\"MQSG\")\n amh_sg = self.get_security_group(SwiftComponents.AMH + \"SG\")\n\n boto = boto3.client(\"ec2\")\n prefix_lists = boto.describe_prefix_lists(\n Filters=[{\"Name\": \"prefix-list-name\", \"Values\": [\"com.amazonaws.*.s3\"]}])\n s3_prefix_list = prefix_lists[\"PrefixLists\"][0][\"PrefixListId\"]\n\n sagsnl_sg.connections.allow_from(other=amh_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"SAGSNL- AMH (48002,48003)\",\n from_port=48002,\n to_port=48003\n ),\n description=\"Incoming connection from AMH\")\n\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._workstation_ip_range,\n from_port=2443, to_port=2443, is_ingress=True,\n description=\"SWP Web GUI Interface Ingress from workstation\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n prefix_list=s3_prefix_list,\n from_port=443, to_port=443, is_ingress=False,\n description=\"Egress to S3 VPC Gateway Endpoint\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.ALL,\n cidr_range=self._swift_ip_range,\n from_port=0, to_port=65535, is_ingress=False,\n description=\"To SWIFT via VGW and VPN\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._hsm_ip,\n from_port=1792, to_port=1792, is_ingress=False,\n description=\"To HSM via VGW\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._hsm_ip,\n from_port=22, to_port=22, is_ingress=False,\n description=\"To HSM (SSH) via VGW\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._hsm_ip,\n from_port=48321, to_port=48321, is_ingress=False,\n description=\"TO HSM (Remote PED) via VGW \"\n )\n\n amh_sg.connections.allow_to(other=sagsnl_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"AMH - SAGSNL (48002, 48003)\",\n from_port=48002,\n to_port=48003\n ),\n description=\"AMH to SAGSNL connection\")\n\n amh_sg.connections.allow_to(other=rds_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"RDS (1521)\",\n from_port=1521,\n to_port=1521\n ),\n description=\"AMH - RDS (1521)\")\n amh_sg.connections.allow_to(other=mq_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"MQ (61617)\",\n from_port=61617,\n to_port=61617\n ),\n 
description=\"AMH - MQ (61617)\")\n self.add_security_group_rule(SwiftComponents.AMH + \"SG\", protocol=_ec2.Protocol.TCP,\n prefix_list=s3_prefix_list,\n from_port=443, to_port=443,\n is_ingress=False,\n description=\"AMH Egress to S3\"\n )\n self.add_security_group_rule(SwiftComponents.AMH + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._workstation_ip_range,\n from_port=8443, to_port=8443, is_ingress=True\n )\n rds_sg.connections.allow_from(other=amh_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"RDS (1521)\",\n from_port=1521,\n to_port=1521\n ),\n description=\"AMH - RDS (1521)\")\n\n mq_sg.connections.allow_from(other=amh_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"MQ (61617)\",\n from_port=61617,\n to_port=61617\n ),\n description=\"AMH - MQ (61617)\")\n self.add_security_group_rule(\"MQSG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._workstation_ip_range,\n from_port=8162, to_port=8162, is_ingress=True\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide .
def attach_internet_gateway(DryRun=None, InternetGatewayId=None, VpcId=None): pass
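Illustrative usage sketch (not part of the dataset entry): calling attach_internet_gateway through a boto3 EC2 client; the client setup and both resource IDs below are placeholder assumptions.

import boto3

ec2 = boto3.client('ec2')
# Attach an existing internet gateway to a VPC (both IDs are placeholders).
ec2.attach_internet_gateway(
    InternetGatewayId='igw-0123456789abcdef0',
    VpcId='vpc-0123456789abcdef0',
)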
[ "def igw_attach(session, igw_id=None, vpc_id=None):\n client = session.client('ec2')\n\n with open('outputs/igw.json', 'r') as fo:\n data = json.load(fo)\n igw_id = data.get('InternetGateway', {}).get('InternetGatewayId', None)\n vpc_id = get_vpc_id()\n\n try:\n response = client.attach_internet_gateway(\n InternetGatewayId=igw_id,\n VpcId=vpc_id\n )\n except botocore.exceptions.ClientError as e:\n print(f'WARNING: Internet Gateway already attached: {e}')\n response = None\n\n print(f'Internet Gateway: {igw_id} attached to VPC: {vpc_id}')\n\n if response:\n write_output_json('igw_attach.json', response)", "def create_internet_gateway(\n internet_gateway_name=None,\n vpc_id=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"internet_gateway\",\n name=internet_gateway_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.attach_internet_gateway(r[\"id\"], vpc_id)\n log.info(\n \"Attached internet gateway %s to VPC %s\", r[\"id\"], vpc_name or vpc_id\n )\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_igw(conn, name, region, vpc_id):\n\n try:\n igw = conn.create_internet_gateway()\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n else:\n conn.attach_internet_gateway(igw.id, vpc_id)\n t = Tag(name, 'igw', region); t.tag_resource(conn, igw.id)\n\n return igw.id", "def addGw(ip):\n logging.debugv(\"functions/linux.py->addGw(ip)\", [ip])\n logging.info(\"setting default gateway to %s\" % (ip) )\n cmd = [\"ip\", \"route\", \"add\", \"default\", \"via\", ip]\n runWrapper(cmd)", "def attach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass", "def configure_routing(vpc):\n internet_gateways = list(vpc.internet_gateways.all())\n if len(internet_gateways) == 1:\n internet_gateway = internet_gateways[0]\n elif len(internet_gateways) == 0:\n raise CraftingTableError(\"No internet gateway found\")\n else:\n raise CraftingTableError(f\"Multiple internet gateways found: {id_list(internet_gateways)}\")\n\n route_tables = list(vpc.route_tables.filter(Filters=[{\"Name\": \"association.main\", \"Values\": [\"true\"]}]))\n if len(route_tables) == 1:\n route_table = route_tables[0]\n elif len(route_tables) == 0:\n raise CraftingTableError(\"No route table found\")\n if len(route_tables) != 1:\n raise CraftingTableError(f\"Multiple route tables found: {id_list(route_tables)}\")\n\n for route in route_table.routes:\n if route.gateway_id == internet_gateway.id:\n break\n else:\n route_table.create_route(DestinationCidrBlock=\"0.0.0.0/0\", GatewayId=internet_gateway.id)\n click.echo(f\"Created default route to {internet_gateway.id}\")", "def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None):\n pass", "def modify_vpc_attribute(VpcId=None, EnableDnsSupport=None, EnableDnsHostnames=None):\n pass", "def attach_classic_link_vpc(DryRun=None, InstanceId=None, VpcId=None, Groups=None):\n pass", "def allowInternetConnection(network, 
bridge):\n\n cmds = []\n cmds.append('ip -4 route add dev {} {} proto static'.format(bridge, network))\n cmds.append(\n 'iptables -A FORWARD -o {} -t filter -m comment --comment \"generated for Distrinet Admin Network\" -j ACCEPT'.format(\n bridge))\n cmds.append(\n 'iptables -A FORWARD -i {} -t filter -m comment --comment \"generated for Distrinet Admin Network\" -j ACCEPT'.format(\n bridge))\n cmds.append(\n 'iptables -A POSTROUTING -t nat -m comment --comment \"generated for Distrinet Admin Network\" -s {} ! -d {} -j MASQUERADE'.format(\n network, network))\n cmds.append('sysctl -w net.ipv4.ip_forward=1')\n return cmds", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def test_01_vpc_remote_access_vpn(self):\n # 1) Create VPC\n vpc = VPC.create(\n api_client=self.apiclient,\n services=self.services[\"vpc\"],\n networkDomain=\"vpc.vpn\",\n vpcofferingid=self.vpc_offering.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.domain.id\n )\n\n self.assertIsNotNone(vpc, \"VPC creation failed\")\n self.logger.debug(\"VPC %s created\" % (vpc.id))\n\n self.cleanup.append(vpc)\n\n # 2) Create network in VPC\n ntwk = Network.create(\n api_client=self.apiclient,\n services=self.services[\"network_1\"],\n accountid=self.account.name,\n domainid=self.domain.id,\n networkofferingid=self.network_offering.id,\n zoneid=self.zone.id,\n vpcid=vpc.id\n )\n\n self.assertIsNotNone(ntwk, \"Network failed to create\")\n self.logger.debug(\"Network %s created in VPC %s\" % (ntwk.id, vpc.id))\n\n self.cleanup.append(ntwk)\n\n # 3) Deploy a vm\n vm = VirtualMachine.create(self.apiclient, services=self.services[\"virtual_machine\"],\n templateid=self.template.id,\n zoneid=self.zone.id,\n accountid=self.account.name,\n domainid=self.domain.id,\n serviceofferingid=self.virtual_machine_offering.id,\n networkids=ntwk.id,\n hypervisor=self.hypervisor\n )\n self.assertIsNotNone(vm, \"VM failed to deploy\")\n self.assertEquals(vm.state, 'Running', \"VM is not running\")\n self.debug(\"VM %s deployed in VPC %s\" % (vm.id, vpc.id))\n\n self.logger.debug(\"Deployed virtual machine: OK\")\n self.cleanup.append(vm)\n\n # 4) Enable VPN for VPC\n src_nat_list = PublicIPAddress.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n listall=True,\n issourcenat=True,\n vpcid=vpc.id\n )\n ip = src_nat_list[0]\n\n self.logger.debug(\"Acquired public ip address: OK\")\n\n vpn = Vpn.create(self.apiclient,\n publicipid=ip.id,\n account=self.account.name,\n domainid=self.account.domainid,\n iprange=self.services[\"vpn\"][\"iprange\"],\n fordisplay=self.services[\"vpn\"][\"fordisplay\"]\n )\n\n self.assertIsNotNone(vpn, \"Failed to create Remote Access VPN\")\n self.logger.debug(\"Created Remote Access VPN: OK\")\n\n vpn_user = None\n # 5) Add VPN user for VPC\n vpn_user = VpnUser.create(self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n username=self.services[\"vpn\"][\"vpn_user\"],\n password=self.services[\"vpn\"][\"vpn_pass\"]\n )\n\n 
self.assertIsNotNone(vpn_user, \"Failed to create Remote Access VPN User\")\n self.logger.debug(\"Created VPN User: OK\")\n\n # TODO: Add an actual remote vpn connection test from a remote vpc\n\n # 9) Disable VPN for VPC\n vpn.delete(self.apiclient)\n\n self.logger.debug(\"Deleted the Remote Access VPN: OK\")", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def create_egress_only_internet_gateway(DryRun=None, VpcId=None, ClientToken=None):\n pass", "def add_natgw(self, idx: int, nat_eips: Ref = None):\n if nat_eips:\n eip = Select(idx, nat_eips)\n else:\n self.nat_eip = self.t.add_resource(EIP(\n f'NatEip{self.idx}',\n Domain='vpc',\n ))\n eip = GetAtt(self.nat_eip, 'AllocationId')\n\n self.natgw = self.t.add_resource(NatGateway(\n f'NatGw{self.idx}',\n AllocationId=eip,\n SubnetId=Ref(self.subnet),\n ))\n\n self.t.add_output(Output(\n f'NatEip{self.idx}',\n Value=eip,\n Description=f'Nat Gateway Elastic IP for {self.az}',\n ))", "def create_nat_gw(dmz_id) :\n\t\n\text_ip = client.allocate_address(\n \t#Domain='vpc'|'standard',\n\t #Address='string',\n \t#DryRun=True|False\n\t )\n\text_ip = client.describe_addresses(\n\t\tFilters=[\n \t{\n \t'Name': 'public-ip',\n 'Values': [ext_ip['PublicIp']]\n \t}\n ]\n \t\t)['Addresses'][0] # good part\n\n\tnat_gw = client.create_nat_gateway(\n \tAllocationId=ext_ip['AllocationId'],\n\t SubnetId=dmz_id\n \t)['NatGateway']\n\t\n\treturn ext_ip, nat_gw", "def associate_ip(request):\n cloud_id = request.matchdict['cloud']\n network_id = request.matchdict['network']\n params = params_from_request(request)\n ip = params.get('ip')\n machine_id = params.get('machine')\n assign = params.get('assign', True)\n auth_context = auth_context_from_request(request)\n auth_context.check_perm(\"cloud\", \"read\", cloud_id)\n try:\n machine = Machine.objects.get(cloud=cloud_id, machine_id=machine_id)\n machine_uuid = machine.id\n except me.DoesNotExist:\n machine_uuid = \"\"\n auth_context.check_perm(\"machine\", \"edit\", machine_uuid)\n\n ret = methods.associate_ip(auth_context.owner, cloud_id, network_id,\n ip, machine_id, assign)\n if ret:\n return OK\n else:\n return Response(\"Bad Request\", 400)", "def HaVipAssociateAddressIp(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"HaVipAssociateAddressIp\", params, headers=headers)\n response = json.loads(body)\n model = models.HaVipAssociateAddressIpResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attaches a network interface to an instance.
def attach_network_interface(DryRun=None, NetworkInterfaceId=None, InstanceId=None, DeviceIndex=None): pass
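Illustrative usage sketch (not part of the dataset entry): attaching an existing ENI to a running instance with boto3. The IDs are placeholders, and DeviceIndex=1 assumes device 0 is already taken by the primary interface.

import boto3

ec2 = boto3.client('ec2')
# Attach the network interface as the instance's second device (eth1).
response = ec2.attach_network_interface(
    NetworkInterfaceId='eni-0123456789abcdef0',
    InstanceId='i-0123456789abcdef0',
    DeviceIndex=1,
)
print(response['AttachmentId'])  # keep this ID if you later need to detach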
[ "def attach_port(self, instance_obj, network_obj):\n raise NotImplementedError()", "def attach_interface(self, instance, image_meta, vif):\n self.vif_driver.plug(instance, vif)\n container_id = self._find_container_by_instance(instance).get('id')\n self.vif_driver.attach(instance, vif, container_id)", "def encapsulate_interface(self, iface_name: str):\n iface_ip = self.ipdb_get_ip(True, iface_name)\n try:\n with self.ipdb.interfaces[iface_name] as iface:\n iface.net_ns_fd = self.nsp_name\n # the interface automatically switched the database and is now inside ipdb_netns_dictionary[vlan_iface_name]\n with self.ipdb_netns.interfaces[iface_name] as iface:\n iface.add_ip(iface_ip) # '192.168.1.11/24'\n iface.up()\n logging.debug(\"%s[+] Encapsulate Interface(\" + iface_name + \")\", LoggerSetup.get_log_deep(3))\n except Exception as e:\n logging.error(\"%s[-] Couldn't encapsulate the Interface(\" + iface_name + \")\", LoggerSetup.get_log_deep(3))\n logging.error(\"%s\" + str(e), LoggerSetup.get_log_deep(3))", "def addNetworkInterface(self, name, link=None, ip=None, up=None, down=None):\n if up:\n if not os.path.isabs(up):\n raise ValueError('Path to up script has to be absolute.')\n\n if not os.path.isfile(up):\n raise ValueError('Path to up script is not a file.')\n\n if not os.access(up, os.X_OK):\n raise ValueError('Up script is not executable.')\n\n if down:\n if not os.path.isabs(down):\n raise ValueError('Path to down script has to be absolute.')\n\n if not os.path.isfile(down):\n raise ValueError('Path to down script is not a file.')\n\n if not os.access(down, os.X_OK):\n raise ValueError('Down script is not executable.')\n\n self._ifs.append((name, link, ip, up, down))", "def add_interface(session, attribs):\n network = attribs.get(\"network\")\n ipaddr = attribs[\"ipaddr\"]\n attribs[\"interface_type\"] = get_interface_type(session)\n q = session.query(models.Interface).filter(models.and_(\n models.Interface.network==network,\n models.Interface.ipaddr==ipaddr)\n )\n # try to find equipment by matching name.\n hostname = attribs.get(\"description\")\n if hostname:\n eq = get_equipment(session, hostname)\n del attribs[\"description\"]\n else:\n eq = None\n attribs[\"equipment\"] = eq\n\n try:\n intf = q.one()\n except models.NoResultFound:\n intf = models.create(models.Interface, **attribs)\n session.add(intf)\n session.commit()\n else:\n models.update(intf, **attribs)\n session.commit()", "def ex_node_attach_interface(self, node, iface):\r\n op = self.connection.request('hosting.vm.iface_attach',\r\n int(node.id), int(iface.id))\r\n if self._wait_operation(op.object['id']):\r\n return True\r\n return False", "def addInterface(interface): #@NoSelf", "def add_ip6_addr(self, prefix, subnet, mac, interface, interface_label):\n new_ip = silk_ip.assemble(prefix, subnet, mac)\n command = \"ip addr add %s/64 dev %s\" % (new_ip, interface)\n self.store_data(new_ip, interface_label)\n self.make_netns_call_async(command, \"\", 1)\n self.make_netns_call_async(\"ifconfig\", \"\", 1)", "def l3interface(name, ipaddress, ip_network, interfaceid):\n engine = Engine(name).load()\n result = engine.physical_interface.add_single_node_interface(\n interface_id=interfaceid,\n address=ipaddress,\n network_value=ip_network)\n return result", "def modify_network_interface_attribute(DryRun=None, NetworkInterfaceId=None, Description=None, SourceDestCheck=None, Groups=None, Attachment=None):\n pass", "def add_eth_interface(\n node, ifc_name=None, sw_if_index=None, ifc_pfx=None,\n host_if_key=None):\n if_key = 
Topology.add_new_port(node, ifc_pfx)\n\n if ifc_name and sw_if_index is None:\n sw_if_index = InterfaceUtil.vpp_get_interface_sw_index(\n node, ifc_name)\n Topology.update_interface_sw_if_index(node, if_key, sw_if_index)\n if sw_if_index and ifc_name is None:\n ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)\n Topology.update_interface_name(node, if_key, ifc_name)\n ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_index)\n Topology.update_interface_mac_address(node, if_key, ifc_mac)\n if host_if_key is not None:\n Topology.set_interface_numa_node(\n node, if_key, Topology.get_interface_numa_node(\n node, host_if_key\n )\n )\n Topology.update_interface_pci_address(\n node, if_key, Topology.get_interface_pci_addr(node, host_if_key)\n )", "def add(self, ifname=None, address=None,\n mask=None, broadcast=None, net_ns_fd=None):\n ipdb = self.ipdb_manager.open_ipdb(net_ns_fd=net_ns_fd)\n\n if address:\n address = (\"%s/%d\" % (address, mask))\n\n with ipdb.interfaces[ifname] as interface:\n if address:\n interface.add_ip(address)\n\n self.ipdb_manager.close_ipdb(ipdb)", "def EnableNetworkInterfaces(self, interfaces, logger, dhclient_script=None):\n helpers.CallDhclient(interfaces, logger)", "def _setup_interface(self):\n\n # Create and set the interface up.\n self._ip.link(\"add\", ifname=self.interface, kind=\"dummy\")\n dev = self._ip.link_lookup(ifname=self.interface)[0]\n self._ip.link(\"set\", index=dev, state=\"up\")\n\n # Set up default route for both IPv6 and IPv4\n self._ip.neigh(\"add\", dst='169.254.1.1', lladdr='21:21:21:21:21:21',\n state=ndmsg.states['permanent'], ifindex=dev)\n self._ip.neigh(\"add\", family=AF_INET6, dst='fe80::1', lladdr='21:21:21:21:21:21',\n state=ndmsg.states['permanent'], ifindex=dev)\n self._ip.addr(\"add\", index=dev, address=\"169.254.1.2\", mask=24)\n self._ip.route(\"add\", gateway=\"169.254.1.1\", oif=dev)\n self._ip.route(\"add\", family=AF_INET6, gateway='fe80::1', oif=dev)\n\n # Set the loopback up as well since some of the packets go through there.\n lo = self._ip.link_lookup(ifname=\"lo\")[0]\n self._ip.link(\"set\", index=lo, state=\"up\")\n\n # Return internal interface ID for later use\n return dev", "def add_address(self, ip_address, mask='255.255.255.0'):\n str_command = 'netsh interface ipv4 add address \"{}\" {} {}'.format(self.name, ip_address, mask)\n command = Popen(str_command) \n stdout, stderr = command.communicate()\n if stdout is None and stderr is None:\n print('Success - {} added to interface {}'.format(ip_address, self.name))\n else:\n print('Failure - {} added to interface {}'.format(ip_address, self.name))\n print('\\t' + str(stdout))\n print('\\t' + str(stderr))\n self = self.__init__(self.interface)", "def link_set(self, interface_name, virtual_eth_peer):\n command = \"ip link set %s netns %s\" % (interface_name, self.netns)\n self._make_system_call(\"link-set\", command, 1)\n\n command = \"ifconfig %s up\" % interface_name\n self.make_netns_call(command, 1)\n\n command = \"ip link set %s up\" % virtual_eth_peer\n self._make_system_call(\"link-set\", command, 1)", "def vpp_add_bond_member(node, interface, bond_if):\n cmd = u\"bond_add_member\"\n args = dict(\n sw_if_index=Topology.get_interface_sw_index(node, interface),\n bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),\n is_passive=False,\n is_long_timeout=False\n )\n err_msg = f\"Failed to add member {interface} to bond interface \" \\\n f\"{bond_if} on host {node[u'host']}\"\n with PapiSocketExecutor(node) as papi_exec:\n 
papi_exec.add(cmd, **args).get_reply(err_msg)", "def AddVlanInterfaceIdempotent(self, name, interface, vlanid, ip_mask, vdom, mode, allowaccess):\n name = str(name)\n interface = str(interface)\n vlanid = str(vlanid)\n ip_mask = str(ip_mask)\n vdom = str(vdom)\n mode = str(mode)\n allowaccess = str(allowaccess)\n objects = [['name', name], ['interface', interface], ['vlanid', int(vlanid)], ['ip', ip_mask]]\n if not (self.Exists('cmdb/system/interface/', objects)):\n # object does not exist, create it\n return self.AddVlanInterface(name, interface, vlanid, ip_mask, vdom, mode, allowaccess)\n else:\n # object already Exist\n return 200", "def add_interface(self, interface_definition: dict) -> None:\n interface = Interface(interface_definition)\n self.__interfaces_list[interface.name] = interface" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attaches a virtual private gateway to a VPC. You can attach one virtual private gateway to one VPC at a time. For more information, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide .
def attach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None): pass
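Illustrative usage sketch (not part of the dataset entry): attaching a virtual private gateway with boto3; both IDs are placeholder assumptions.

import boto3

ec2 = boto3.client('ec2')
# A VPC can have at most one attached virtual private gateway at a time.
ec2.attach_vpn_gateway(
    VpnGatewayId='vgw-0123456789abcdef0',
    VpcId='vpc-0123456789abcdef0',
)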
[ "def attach_classic_link_vpc(DryRun=None, InstanceId=None, VpcId=None, Groups=None):\n pass", "def attach_internet_gateway(DryRun=None, InternetGatewayId=None, VpcId=None):\n pass", "def addGw(ip):\n logging.debugv(\"functions/linux.py->addGw(ip)\", [ip])\n logging.info(\"setting default gateway to %s\" % (ip) )\n cmd = [\"ip\", \"route\", \"add\", \"default\", \"via\", ip]\n runWrapper(cmd)", "def test_01_vpc_remote_access_vpn(self):\n # 1) Create VPC\n vpc = VPC.create(\n api_client=self.apiclient,\n services=self.services[\"vpc\"],\n networkDomain=\"vpc.vpn\",\n vpcofferingid=self.vpc_offering.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.domain.id\n )\n\n self.assertIsNotNone(vpc, \"VPC creation failed\")\n self.logger.debug(\"VPC %s created\" % (vpc.id))\n\n self.cleanup.append(vpc)\n\n # 2) Create network in VPC\n ntwk = Network.create(\n api_client=self.apiclient,\n services=self.services[\"network_1\"],\n accountid=self.account.name,\n domainid=self.domain.id,\n networkofferingid=self.network_offering.id,\n zoneid=self.zone.id,\n vpcid=vpc.id\n )\n\n self.assertIsNotNone(ntwk, \"Network failed to create\")\n self.logger.debug(\"Network %s created in VPC %s\" % (ntwk.id, vpc.id))\n\n self.cleanup.append(ntwk)\n\n # 3) Deploy a vm\n vm = VirtualMachine.create(self.apiclient, services=self.services[\"virtual_machine\"],\n templateid=self.template.id,\n zoneid=self.zone.id,\n accountid=self.account.name,\n domainid=self.domain.id,\n serviceofferingid=self.virtual_machine_offering.id,\n networkids=ntwk.id,\n hypervisor=self.hypervisor\n )\n self.assertIsNotNone(vm, \"VM failed to deploy\")\n self.assertEquals(vm.state, 'Running', \"VM is not running\")\n self.debug(\"VM %s deployed in VPC %s\" % (vm.id, vpc.id))\n\n self.logger.debug(\"Deployed virtual machine: OK\")\n self.cleanup.append(vm)\n\n # 4) Enable VPN for VPC\n src_nat_list = PublicIPAddress.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n listall=True,\n issourcenat=True,\n vpcid=vpc.id\n )\n ip = src_nat_list[0]\n\n self.logger.debug(\"Acquired public ip address: OK\")\n\n vpn = Vpn.create(self.apiclient,\n publicipid=ip.id,\n account=self.account.name,\n domainid=self.account.domainid,\n iprange=self.services[\"vpn\"][\"iprange\"],\n fordisplay=self.services[\"vpn\"][\"fordisplay\"]\n )\n\n self.assertIsNotNone(vpn, \"Failed to create Remote Access VPN\")\n self.logger.debug(\"Created Remote Access VPN: OK\")\n\n vpn_user = None\n # 5) Add VPN user for VPC\n vpn_user = VpnUser.create(self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n username=self.services[\"vpn\"][\"vpn_user\"],\n password=self.services[\"vpn\"][\"vpn_pass\"]\n )\n\n self.assertIsNotNone(vpn_user, \"Failed to create Remote Access VPN User\")\n self.logger.debug(\"Created VPN User: OK\")\n\n # TODO: Add an actual remote vpn connection test from a remote vpc\n\n # 9) Disable VPN for VPC\n vpn.delete(self.apiclient)\n\n self.logger.debug(\"Deleted the Remote Access VPN: OK\")", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def 
igw_attach(session, igw_id=None, vpc_id=None):\n client = session.client('ec2')\n\n with open('outputs/igw.json', 'r') as fo:\n data = json.load(fo)\n igw_id = data.get('InternetGateway', {}).get('InternetGatewayId', None)\n vpc_id = get_vpc_id()\n\n try:\n response = client.attach_internet_gateway(\n InternetGatewayId=igw_id,\n VpcId=vpc_id\n )\n except botocore.exceptions.ClientError as e:\n print(f'WARNING: Internet Gateway already attached: {e}')\n response = None\n\n print(f'Internet Gateway: {igw_id} attached to VPC: {vpc_id}')\n\n if response:\n write_output_json('igw_attach.json', response)", "def modify_vpc_attribute(VpcId=None, EnableDnsSupport=None, EnableDnsHostnames=None):\n pass", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def create_igw(conn, name, region, vpc_id):\n\n try:\n igw = conn.create_internet_gateway()\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n else:\n conn.attach_internet_gateway(igw.id, vpc_id)\n t = Tag(name, 'igw', region); t.tag_resource(conn, igw.id)\n\n return igw.id", "def configure_routing(vpc):\n internet_gateways = list(vpc.internet_gateways.all())\n if len(internet_gateways) == 1:\n internet_gateway = internet_gateways[0]\n elif len(internet_gateways) == 0:\n raise CraftingTableError(\"No internet gateway found\")\n else:\n raise CraftingTableError(f\"Multiple internet gateways found: {id_list(internet_gateways)}\")\n\n route_tables = list(vpc.route_tables.filter(Filters=[{\"Name\": \"association.main\", \"Values\": [\"true\"]}]))\n if len(route_tables) == 1:\n route_table = route_tables[0]\n elif len(route_tables) == 0:\n raise CraftingTableError(\"No route table found\")\n if len(route_tables) != 1:\n raise CraftingTableError(f\"Multiple route tables found: {id_list(route_tables)}\")\n\n for route in route_table.routes:\n if route.gateway_id == internet_gateway.id:\n break\n else:\n route_table.create_route(DestinationCidrBlock=\"0.0.0.0/0\", GatewayId=internet_gateway.id)\n click.echo(f\"Created default route to {internet_gateway.id}\")", "def create_nat_gw(dmz_id) :\n\t\n\text_ip = client.allocate_address(\n \t#Domain='vpc'|'standard',\n\t #Address='string',\n \t#DryRun=True|False\n\t )\n\text_ip = client.describe_addresses(\n\t\tFilters=[\n \t{\n \t'Name': 'public-ip',\n 'Values': [ext_ip['PublicIp']]\n \t}\n ]\n \t\t)['Addresses'][0] # good part\n\n\tnat_gw = client.create_nat_gateway(\n \tAllocationId=ext_ip['AllocationId'],\n\t SubnetId=dmz_id\n \t)['NatGateway']\n\t\n\treturn ext_ip, nat_gw", "def create_internet_gateway(\n internet_gateway_name=None,\n vpc_id=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"internet_gateway\",\n name=internet_gateway_name,\n tags=tags,\n region=region,\n key=key,\n 
keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.attach_internet_gateway(r[\"id\"], vpc_id)\n log.info(\n \"Attached internet gateway %s to VPC %s\", r[\"id\"], vpc_name or vpc_id\n )\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def add_virtualip(self, vip):\r\n return self.manager.add_virtualip(self, vip)", "def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def add_virtualip(self, loadbalancer, vip):\r\n return loadbalancer.add_virtualip(vip)", "def create_vpn_gateway(DryRun=None, Type=None, AvailabilityZone=None):\n pass", "def associate_address(DryRun=None, InstanceId=None, PublicIp=None, AllocationId=None, NetworkInterfaceId=None, PrivateIpAddress=None, AllowReassociation=None):\n pass", "def create_vpc_endpoint(DryRun=None, VpcId=None, ServiceName=None, PolicyDocument=None, RouteTableIds=None, ClientToken=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[EC2-VPC only] Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination IPv4 or IPv6 CIDR address ranges, or to one or more destination security groups for the same VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide . For more information about security group limits, see Amazon VPC Limits . Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes. Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.
def authorize_security_group_egress(DryRun=None, GroupId=None, SourceSecurityGroupName=None, SourceSecurityGroupOwnerId=None, IpProtocol=None, FromPort=None, ToPort=None, CidrIp=None, IpPermissions=None): pass
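Illustrative usage sketch (not part of the dataset entry): adding a TCP egress rule via the IpPermissions form, which supports port ranges and multiple CIDR blocks; the group ID and CIDR are placeholder assumptions.

import boto3

ec2 = boto3.client('ec2')
# Allow instances in the group to reach HTTPS endpoints inside 10.0.0.0/16.
ec2.authorize_security_group_egress(
    GroupId='sg-0123456789abcdef0',
    IpPermissions=[{
        'IpProtocol': 'tcp',
        'FromPort': 443,
        'ToPort': 443,
        'IpRanges': [{'CidrIp': '10.0.0.0/16'}],
    }],
)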
[ "def AddVpcNetworkGroupFlags(parser, resource_kind='service', is_update=False):\n group = parser.add_argument_group('Direct VPC egress setting flags group.')\n AddVpcNetworkFlags(group, resource_kind)\n AddVpcSubnetFlags(group, resource_kind)\n if not is_update:\n AddVpcNetworkTagsFlags(group, resource_kind)\n return\n tags_group = group.add_mutually_exclusive_group()\n AddVpcNetworkTagsFlags(tags_group, resource_kind)\n AddClearVpcNetworkTagsFlags(tags_group, resource_kind)", "def create_egress_only_internet_gateway(DryRun=None, VpcId=None, ClientToken=None):\n pass", "def revoke_security_group_egress(DryRun=None, GroupId=None, SourceSecurityGroupName=None, SourceSecurityGroupOwnerId=None, IpProtocol=None, FromPort=None, ToPort=None, CidrIp=None, IpPermissions=None):\n pass", "def remove_sg_inbound_rule(self):\n try:\n vpc = self.ec2_client.Vpc(id=self.cluster_props['VpcId'])\n sg_list = list(vpc.security_groups.all())\n for sg in sg_list:\n if sg.group_id == self.security_group_id:\n sg.authorize_ingress(\n GroupName=sg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(self.dwh_port),\n ToPort=int(self.dwh_port))\n continue\n except Exception as e:\n print(e)", "def AddEgressSettingsFlag(parser):\n parser.add_argument(\n '--vpc-egress',\n help=(\n 'The outbound traffic to send through the VPC connector'\n ' for this resource. This resource must have a VPC connector to set'\n ' VPC egress.'\n ),\n choices={\n container_resource.EGRESS_SETTINGS_PRIVATE_RANGES_ONLY: (\n 'Default option. Sends outbound traffic to private IP addresses '\n 'defined by RFC1918 through the VPC connector.'\n ),\n container_resource.EGRESS_SETTINGS_ALL_TRAFFIC: (\n 'Sends all outbound traffic through the VPC connector.'\n ),\n container_resource.EGRESS_SETTINGS_ALL: (\n '(DEPRECATED) Sends all outbound traffic through the VPC '\n \"connector. 
Provides the same functionality as '{all_traffic}'.\"\n \" Prefer to use '{all_traffic}' instead.\".format(\n all_traffic=container_resource.EGRESS_SETTINGS_ALL_TRAFFIC\n )\n ),\n },\n )", "def test_050_create_security_groups(self):\n sg = self.vpc_client.create_security_group(\n data_utils.rand_name(\"WebServerSG-\"),\n data_utils.rand_name(\"description \"),\n self.ctx.vpc.id)\n self.assertIsNotNone(sg)\n self.assertTrue(sg.id)\n self.addResourceCleanUp(self._destroy_security_group_wait, sg)\n self.ctx.web_security_group = sg\n sg = self.vpc_client.create_security_group(\n data_utils.rand_name(\"NATSG-\"),\n data_utils.rand_name(\"description \"),\n self.ctx.vpc.id)\n self.assertIsNotNone(sg)\n self.assertTrue(sg.id)\n self.addResourceCleanUp(self._destroy_security_group_wait, sg)\n self.ctx.nat_security_group = sg\n sg = self.vpc_client.create_security_group(\n data_utils.rand_name(\"DBServerSG-\"),\n data_utils.rand_name(\"description \"),\n self.ctx.vpc.id)\n self.assertIsNotNone(sg)\n self.assertTrue(sg.id)\n self.addResourceCleanUp(self._destroy_security_group_wait, sg)\n self.ctx.db_security_group = sg\n\n sg = self.ctx.web_security_group\n status = self.vpc_client.revoke_security_group_egress(\n sg.id, \"-1\", cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 1433, 1433,\n src_group_id=self.ctx.db_security_group.id)\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 3306, 3306,\n src_group_id=self.ctx.db_security_group.id)\n self.assertTrue(status)\n # NOTE(ft): especially for connectivity test\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 80, 80, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n # NOTE(ft): especially for connectivity test\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 22, 22,\n src_group_id=self.ctx.db_security_group.id)\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=80, to_port=80,\n cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=443, to_port=443,\n cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=22, to_port=22,\n cidr_ip=str(self.test_client_cidr))\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=3389,\n to_port=3389, cidr_ip=str(self.test_client_cidr))\n self.assertTrue(status)\n\n sg = self.ctx.nat_security_group\n status = self.vpc_client.revoke_security_group_egress(\n sg.id, \"-1\", cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 80, 80, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 443, 443, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"udp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=53,\n to_port=53, cidr_ip=\"0.0.0.0/0\")\n 
self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"udp\", from_port=53,\n to_port=53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=80, to_port=80,\n cidr_ip=str(self.db_subnet))\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=443, to_port=443,\n cidr_ip=str(self.db_subnet))\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\", from_port=22, to_port=22,\n cidr_ip=str(self.test_client_cidr))\n self.assertTrue(status)\n\n sg = self.ctx.db_security_group\n status = self.vpc_client.revoke_security_group_egress(\n sg.id, \"-1\", cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 80, 80, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 443, 443, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"tcp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group_egress(\n sg.id, \"udp\", 53, 53, cidr_ip=\"0.0.0.0/0\")\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\",\n from_port=1433,\n to_port=1433,\n src_security_group_group_id=self.ctx.web_security_group.id)\n self.assertTrue(status)\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\",\n from_port=3306,\n to_port=3306,\n src_security_group_group_id=self.ctx.web_security_group.id)\n self.assertTrue(status)\n # NOTE(ft): especially for connectivity test\n status = self.vpc_client.authorize_security_group(\n group_id=sg.id, ip_protocol=\"tcp\",\n from_port=22,\n to_port=22,\n src_security_group_group_id=self.ctx.web_security_group.id)\n self.assertTrue(status)", "def _create_security_group(client, vpc_id):\n\n res = client.create_security_group(\n Description=\"Allow ssh from user public IP address\",\n GroupName=f'ssh-from-public-ip-{_rand_chars(10)}',\n VpcId=vpc_id,\n )\n\n group_id = res['GroupId']\n\n try:\n public_ip = f'{requests.get(\"https://checkip.amazonaws.com/\").text.strip()}/32'\n except Exception:\n print('encountered error getting public ip; using 0.0.0.0/0 instead')\n public_ip = '0.0.0.0/0'\n\n res = client.authorize_security_group_ingress(\n CidrIp=public_ip,\n FromPort=22,\n GroupId=group_id,\n IpProtocol='tcp',\n ToPort=22,\n )\n\n return group_id", "def add_egress_rule(self, rule):\n self.egress_rules.append(rule)", "def add_secgroup_rule(self,\n name=None, # group name\n port=None,\n protocol=None,\n ip_range=None):\n\n try:\n portmin, portmax = port.split(\":\")\n except ValueError:\n portmin = -1\n portmax = -1\n\n try:\n data = self.ec2_client.authorize_security_group_ingress(\n GroupName=name,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': int(portmin),\n 'ToPort': int(portmax),\n 'IpRanges': [{'CidrIp': ip_range}]},\n ])\n Console.ok(f'Ingress Successfully Set as {data}')\n except ClientError as e:\n Console.info(\"Rule couldn't be added to security group\")", "def create_spot_security_group(sg_name):\n\n sg_desc = \"Security group to be applied to any spot instance running our schedule jobs\"\n\n client = boto3.client('ec2')\n\n # 
First verify if such a SG already exists. If so, just return its id\n try:\n response = client.describe_security_groups(GroupNames=[sg_name])\n return response[\"SecurityGroups\"][0][\"GroupId\"]\n\n except: # If there's no sg with such name\n\n # Create a new group and save its id\n response = client.create_security_group(\n GroupName=sg_name, Description=sg_desc)\n sg_id = response[\"GroupId\"]\n\n # Add the rules\n response = client.authorize_security_group_egress(GroupId=sg_id, IpPermissions=[\n {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80, 'IpRanges': [\n {'CidrIp': '0.0.0.0/0'}]}])\n\n # Return the SG id\n return sg_id", "def enforce_security_groups_rules(self) -> None:\n sagsnl_sg = self.get_security_group(SwiftComponents.SAGSNL + \"SG\")\n rds_sg = self.get_security_group(\"RDSSG\")\n mq_sg = self.get_security_group(\"MQSG\")\n amh_sg = self.get_security_group(SwiftComponents.AMH + \"SG\")\n\n boto = boto3.client(\"ec2\")\n prefix_lists = boto.describe_prefix_lists(\n Filters=[{\"Name\": \"prefix-list-name\", \"Values\": [\"com.amazonaws.*.s3\"]}])\n s3_prefix_list = prefix_lists[\"PrefixLists\"][0][\"PrefixListId\"]\n\n sagsnl_sg.connections.allow_from(other=amh_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"SAGSNL- AMH (48002,48003)\",\n from_port=48002,\n to_port=48003\n ),\n description=\"Incoming connection from AMH\")\n\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._workstation_ip_range,\n from_port=2443, to_port=2443, is_ingress=True,\n description=\"SWP Web GUI Interface Ingress from workstation\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n prefix_list=s3_prefix_list,\n from_port=443, to_port=443, is_ingress=False,\n description=\"Egress to S3 VPC Gateway Endpoint\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.ALL,\n cidr_range=self._swift_ip_range,\n from_port=0, to_port=65535, is_ingress=False,\n description=\"To SWIFT via VGW and VPN\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._hsm_ip,\n from_port=1792, to_port=1792, is_ingress=False,\n description=\"To HSM via VGW\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._hsm_ip,\n from_port=22, to_port=22, is_ingress=False,\n description=\"To HSM (SSH) via VGW\"\n )\n self.add_security_group_rule(SwiftComponents.SAGSNL + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._hsm_ip,\n from_port=48321, to_port=48321, is_ingress=False,\n description=\"TO HSM (Remote PED) via VGW \"\n )\n\n amh_sg.connections.allow_to(other=sagsnl_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"AMH - SAGSNL (48002, 48003)\",\n from_port=48002,\n to_port=48003\n ),\n description=\"AMH to SAGSNL connection\")\n\n amh_sg.connections.allow_to(other=rds_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"RDS (1521)\",\n from_port=1521,\n to_port=1521\n ),\n description=\"AMH - RDS (1521)\")\n amh_sg.connections.allow_to(other=mq_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"MQ (61617)\",\n from_port=61617,\n to_port=61617\n ),\n description=\"AMH - MQ (61617)\")\n self.add_security_group_rule(SwiftComponents.AMH + \"SG\", protocol=_ec2.Protocol.TCP,\n prefix_list=s3_prefix_list,\n from_port=443, 
to_port=443,\n is_ingress=False,\n description=\"AMH Egress to S3\"\n )\n self.add_security_group_rule(SwiftComponents.AMH + \"SG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._workstation_ip_range,\n from_port=8443, to_port=8443, is_ingress=True\n )\n rds_sg.connections.allow_from(other=amh_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"RDS (1521)\",\n from_port=1521,\n to_port=1521\n ),\n description=\"AMH - RDS (1521)\")\n\n mq_sg.connections.allow_from(other=amh_sg,\n port_range=_ec2.Port(\n protocol=_ec2.Protocol.TCP,\n string_representation=\"MQ (61617)\",\n from_port=61617,\n to_port=61617\n ),\n description=\"AMH - MQ (61617)\")\n self.add_security_group_rule(\"MQSG\", protocol=_ec2.Protocol.TCP,\n cidr_range=self._workstation_ip_range,\n from_port=8162, to_port=8162, is_ingress=True\n )", "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def test_create_group_rules_no_src_group_id_or_cidr(self):\n\n ec2_client = connection.EC2ConnectionClient().client()\n test_properties = self.get_mock_properties()\n ctx = self.security_group_mock(\n 'test_create_group_rules_no_src_group_id_or_cidr',\n test_properties)\n current_ctx.set(ctx=ctx)\n test_securitygroup = self.create_sg_for_checking()\n\n del ctx.node.properties['rules'][0]['cidr_ip']\n group = ec2_client.create_security_group(\n 'test_create_group_rules_no_src_group_id_or_cidr',\n 'this is test')\n ex = self.assertRaises(\n NonRecoverableError,\n test_securitygroup._create_group_rules,\n group)\n self.assertIn(\n 'is not a valid rule target cidr_ip or src_group_ip',\n ex.message)", "def create_security_group():\n conn = boto.connect_ec2()\n sec_group = conn.create_security_group(\"shopply\", \"Shopply servers security group\")\n sec_group.authorize('tcp', 80, 80, '0.0.0.0/0')\n sec_group.authorize('tcp', 22, 22, '0.0.0.0/0')\n sec_group.authorize('tcp', 8080, 8080, '0.0.0.0/0')\n sec_group.authorize('tcp', 9001, 9001, '0.0.0.0/0')", "def enhanced_vpc_routing(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enhanced_vpc_routing\")", "def AddVpcSubnetFlags(parser, resource_kind='service'):\n parser.add_argument(\n '--subnet',\n metavar='SUBNET',\n help=(\n 'The VPC subnetwork that the Cloud Run {kind} will get IPs from. The'\n ' subnetwork must be `/26` or larger. If --network is also specified,'\n ' subnet must be a subnetwork of the network specified by the'\n ' --network flag. If --network is not specified, network will be'\n ' looked up from this subnetwork. 
To clear existing VPC network'\n ' settings, use --clear-network.'.format(kind=resource_kind)\n ),\n )", "def _AddVpcRestrictionArgs(parser):\n _AddServiceRestrictionArgs(\n parser=parser,\n restriction_type='vpc',\n list_help='Services allowed to be called within the Perimeter when '\n 'VPC Service Restriction is enabled',\n enable_help=('When specified restrict API calls within the Service '\n 'Perimeter to the set of vpc allowed services. To disable '\n 'use \\'--no-enable-vpc-service-restriction\\'.'))", "def attach_classic_link_vpc(DryRun=None, InstanceId=None, VpcId=None, Groups=None):\n pass", "def AddVpcServiceRestriction(args, req, version=None):\n return _AddServiceFilterRestriction(args, req, version, 'vpc')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bundles an Amazon instance store-backed Windows instance.
def bundle_instance(DryRun=None, InstanceId=None, Storage=None): pass
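Illustrative usage sketch (not part of the dataset entry): starting a bundle task for an instance store-backed Windows instance with boto3. Every value below — instance ID, bucket, access key, and the signed upload policy — is a placeholder assumption.

import boto3

ec2 = boto3.client('ec2')
# The Storage.S3 block tells EC2 where to upload the bundle artifacts.
task = ec2.bundle_instance(
    InstanceId='i-0123456789abcdef0',
    Storage={'S3': {
        'Bucket': 'my-bundle-bucket',            # placeholder bucket
        'Prefix': 'win-bundle',                  # key prefix for the parts
        'AWSAccessKeyId': 'AKIA...',             # placeholder credentials
        'UploadPolicy': b'...base64 policy...',  # placeholder signed policy
        'UploadPolicySignature': '...signature...',
    }},
)
print(task['BundleTask']['BundleId'])  # needed to cancel or poll the task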
[ "def backup_instance(self, instance):\n image_id = self._connection.create_image(\n instance.id,\n self._create_AMI_name(instance)\n )\n self._connection.create_tags([image_id],\n {'instance': instance.id,\n 'created_at': datetime.date.today(),\n 'created_by': 'cloudsnap',\n })\n return image_id", "def deploy_system_instance(id=None):\n pass", "def AMI_builder(\n AWS_access_key_id,\n AWS_secret_access_key,\n region_name,\n base_image_id,\n os,\n security_group_id,\n AMI_name,\n RPM_package_version,\n APT_OSS_version,\n):\n try:\n instance = Instance(\n AWS_access_key_id=AWS_access_key_id,\n AWS_secret_access_key=AWS_secret_access_key,\n region_name=region_name,\n base_image_id=base_image_id,\n os=os, # ubuntu, amazonLinux\n security_group_id=security_group_id,\n AMI_name=AMI_name,\n RPM_package_version=RPM_package_version,\n APT_OSS_version=APT_OSS_version,\n )\n except Exception as err:\n logging.error(\"Could not bring up the instance. \" + str(err))\n sys.exit(-1)\n AMI_id = \"\"\n installation_failed = False\n try:\n instance.wait_until_ready()\n except Exception as err:\n logging.error(\n \"Could not bring the instance to ready state. \" + str(err))\n installation_failed = True\n else:\n try:\n instance.install_ODFE()\n AMI_id = instance.create_AMI()\n except Exception as err:\n installation_failed = True\n logging.error(\n \"AMI creation failed there was an error see the logs. \" + str(err))\n finally:\n try:\n instance.cleanup_instance()\n except Exception as err:\n logging.error(\n \"Could not cleanup the instance. There could be an instance currently running, terminate it. \" + str(err))\n installation_failed = True\n if installation_failed:\n sys.exit(-1)\n # copy the AMI to the required regions\n ec2_client = boto3.client(\n \"ec2\",\n aws_access_key_id=AWS_access_key_id,\n aws_secret_access_key=AWS_secret_access_key,\n region_name=region_name,\n )\n AMI_copy_regions = [region[\"RegionName\"]\n for region in ec2_client.describe_regions()[\"Regions\"]]\n AMI_copy_regions.remove(region_name) # since AMI is created here\n copy_AMI_to_regions(\n AWS_access_key_id=AWS_access_key_id,\n AWS_secret_access_key=AWS_secret_access_key,\n AMI_id=AMI_id,\n AMI_name=AMI_name,\n AMI_source_region=region_name,\n AMI_copy_regions=AMI_copy_regions,\n )", "def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e", "def bundle():\n pass", "def _create_boot_volume(self, context, instance):\n LOG.debug('Creating boot volume')\n boot_vol_az = CONF.solariszones.boot_volume_az\n boot_vol_type = CONF.solariszones.boot_volume_type\n try:\n vol = self._volume_api.create(\n context, instance['root_gb'],\n instance['hostname'] + \"-\" + self._rootzpool_suffix,\n \"Boot volume for instance '%s' (%s)\"\n % (instance['name'], instance['uuid']),\n volume_type=boot_vol_type, availability_zone=boot_vol_az)\n # TODO(npower): Polling is what nova/compute/manager also does when\n # creating a new volume, so we do likewise here.\n while True:\n volume = self._volume_api.get(context, vol['id'])\n if volume['status'] != 'creating':\n return volume\n greenthread.sleep(1)\n\n except Exception as reason:\n LOG.exception(_(\"Unable to create root zpool volume for instance \"\n \"'%s': %s\") % (instance['name'], reason))\n raise", "def as_bundle(self):\n 
self._unsupported(\"as_bundle()\")", "def start(self):\n logger.info(\"Starting backup run for %s backups\", self.backup_type)\n instance_list = self.instances_for_backup()\n\n for instance in instance_list:\n instance_id = unicodedata.normalize('NFKD', instance.id).encode('ascii','ignore')\n\n try:\n instance_name = instance.tags['Name']\n except:\n instance_name=None\n\n logger.info(\"Instance-ID [%s] - Instance Name [%s]\" % (instance_id, instance_name))\n\n self.create_ami(instance_id, instance_name) # we create the ami for each instance", "def create_application_version():\n beanstalk = boto3.client('elasticbeanstalk', region_name=os.environ['TF_VAR_aws_region'])\n application_not_found_re = r'^No Application named .*? found.$'\n\n try:\n beanstalk.create_application_version(\n ApplicationName=os.environ['TF_VAR_elastic_beanstalk_application_name'],\n VersionLabel=os.environ['TF_VAR_elastic_beanstalk_application_version'],\n SourceBundle={\n 'S3Bucket': os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n 'S3Key': os.environ['TF_VAR_elastic_beanstalk_s3_key']\n }\n )\n except botocore.exceptions.ClientError as e:\n if re.match(application_not_found_re, e.response['Error']['Message']):\n pass\n else:\n raise e", "def get_instance():\n logging.debug(\"Querying cloud-init for instance-id\")\n instance_id = requests.get(\"http://169.254.169.254/latest/meta-data/instance-id\").text\n client = boto3.client('ec2')\n ec2_resource = boto3.resource('ec2')\n aws_instance = client.describe_instances(InstanceIds=[instance_id])\n instance = aws_instance['Reservations'][0]['Instances'][0]\n ebs_volumes = []\n for device in instance['BlockDeviceMappings']:\n volume_info = ec2_resource.Volume(device['Ebs']['VolumeId'])\n ebs_volume = {u\"VolumeId\": device['Ebs']['VolumeId'],\n u\"DeviceName\": device['DeviceName'],\n u\"volume_type\": volume_info.volume_type,\n u\"size\": volume_info.size,\n u\"snapshot_id\": volume_info.snapshot_id,\n u\"iops\": volume_info.iops,\n u\"availability_zone\": volume_info.availability_zone,\n u\"encrypted\": volume_info.encrypted,\n u\"volume_tags\": volume_info.tags }\n ebs_volumes.append(ebs_volume)\n instance[u'volumes'] = ebs_volumes\n return instance", "def launch_new_instance():\n binifier = Binifier()\n binifier.main()", "def dumpinstance():\n\n parent = localAvatar.getParent()\n parent = parent.getParent()\n parent.writeBamFile('dump.bam')\n\n return 'Instance Dumped'", "def make_snapshots(client, tag_key, tag_value):\n tag_filter = {'Name': 'tag:%s' % tag_key, 'Values': [tag_value]}\n try:\n reservations = client.describe_instances(Filters=[tag_filter])['Reservations']\n except BotoCoreError as exc:\n logging.error(\"Failed to get list of instances to back up:\\n%s\", exc)\n instances = list(chain.from_iterable([x['Instances'] for x in reservations]))\n if not instances:\n logging.warning(\"Couldn't find any instances whose volumes need snapshotting. Aborting …\")\n sys.exit(0)\n\n # Create snapshots for all volumes per instance.\n for instance in instances:\n # Get the volumes attached to the instance.\n try:\n volumes_for_instance = client.describe_volumes(Filters=[\n {'Name': 'attachment.instance-id',\n 'Values': [instance['InstanceId']]},\n {'Name': 'status',\n 'Values': ['in-use']}])['Volumes']\n except BotoCoreError as exc:\n logging.error(\"Failed to get the list of volumes attached to instance %s:\\n%s\",\n instance['InstanceId'], exc)\n if not volumes_for_instance:\n logging.warning(\"Found instance %s to backup, but no attached \"\n \"volumes. 
Something is fishy here. Aborting …\",\n instance['InstanceId'])\n sys.exit(1)\n\n # Get the instance name from the tags if it exists.\n instance_name = None\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n instance_name = tag['Value']\n continue\n\n for volume in volumes_for_instance:\n attachments = volume['Attachments']\n volume_attach_devices = ', '.join([att['Device'] for att in attachments])\n volume_attach_instances = ', '.join([att['InstanceId'] for att in attachments])\n\n if instance_name:\n description = ('automated snapshot of volume '\n '%s attached as %s to %s (%s)' %\n (volume['VolumeId'],\n volume_attach_devices,\n instance_name,\n volume_attach_instances))\n else:\n description = ('automated snapshot of volume '\n '%s attached as %s to %s' %\n (volume['VolumeId'],\n volume_attach_devices,\n volume_attach_instances))\n try:\n snapshot = client.create_snapshot(VolumeId=volume['VolumeId'],\n Description=description)\n except BotoCoreError as exc:\n logging.error(\"Creating a snapshot of volume %s failed:\\n%s\",\n volume['VolumeId'],\n exc)\n else:\n logging.info(\"Creating snapshot %s of volume %s\",\n snapshot['SnapshotId'],\n volume['VolumeId'])\n snapshot_date = datetime.now().strftime('%Y-%m-%d %H:%M')\n if instance_name:\n name_tag_value = '%s %s %s' % (instance_name,\n volume_attach_devices,\n snapshot_date)\n else:\n name_tag_value = '%s %s %s' % (volume_attach_instances,\n volume_attach_devices,\n snapshot_date)\n try:\n client.create_tags(Resources=[snapshot['SnapshotId']],\n Tags=[{'Key': 'Name',\n 'Value': name_tag_value},\n {'Key': 'Creator',\n 'Value': 'ebs_snapshot_automation'},\n {'Key': 'Origin-Instance',\n 'Value': instance['InstanceId']},\n {'Key': 'Origin-%s' % tag_key,\n 'Value': tag_value}])\n except BotoCoreError as exc:\n logging.error(\"Tagging the snapshot %s of volume %s failed:\\n%s\",\n snapshot['SnapshotId'],\n volume['VolumeId'],\n exc)", "def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):\n self.msg_runner.backup_instance(ctxt, instance, image_id,\n backup_type, rotation)", "def create_server():\n conn = boto.connect_ec2(ec2_key, ec2_secret)\n image = conn.get_all_images(ec2_amis)\n\n reservation = image[0].run(1, 1, ec2_keypair, ec2_secgroups,\n instance_type=ec2_instancetype)\n\n instance = reservation.instances[0]\n\n while instance.state == u'pending':\n print \"Instance state: %s\" % instance.state\n time.sleep(10)\n instance.update()\n\n print \"Instance state: %s\" % instance.state\n local(\"echo %s | pbcopy\" % instance.public_dns_name)\n print \"Public dns: %s\" % instance.public_dns_name\n \n print \"*** Edit env.hosts to include hostname, then run 'setup_instance' ***\"", "def add_rds_mysql_instance(self, resource_name, db_name=False, instance_id=False,\n mysql_version='5.6', security_groups=[], parameter_group=False, subnet_group='',\n master_username='', master_password='', multi_az='false', instance_type='db.t2.small',\n allocated_storage='10', storage_type='standard', snapshot=False,\n maintenance_window=False, backup_window=False):\n instance = DBInstance(resource_name,\n DBName=db_name,\n VPCSecurityGroups=security_groups,\n DBSubnetGroupName=subnet_group,\n Engine='MySQL',\n EngineVersion=mysql_version,\n MasterUsername=master_username,\n MasterUserPassword=master_password,\n MultiAZ=multi_az,\n DBInstanceClass=instance_type,\n StorageType=storage_type,\n AllocatedStorage=allocated_storage,\n Tags=self.resource_tags(resource_name))\n if snapshot:\n has_snapshot = 
'{}HasSnapshot'.format(resource_name)\n self.template.add_condition(has_snapshot, Not(Equals(snapshot, '')))\n\n instance.DBSnapshotIdentifier = snapshot\n instance.DBName = If(has_snapshot, Ref('AWS::NoValue'), db_name)\n instance.MasterUsername = If(has_snapshot, Ref('AWS::NoValue'), master_username)\n instance.MasterUserPassword = If(has_snapshot, Ref('AWS::NoValue'), master_password)\n\n if instance_id:\n instance.DBInstanceIdentifier = instance_id\n if parameter_group:\n instance.DBParameterGroupName = parameter_group\n if maintenance_window:\n instance.PreferredMaintenanceWindow = maintenance_window\n if backup_window:\n instance.PreferredBackupWindow = backup_window\n\n self.template.add_resource(instance)\n self.template.add_output(Output(\n resource_name,\n Value=Ref(resource_name),\n Description='{} DB Instance'.format(resource_name)\n ))\n self.template.add_output(Output(\n '{}Host'.format(resource_name),\n Value=GetAtt(resource_name, 'Endpoint.Address'),\n Description='{} DB Address'.format(resource_name)\n ))\n self.template.add_output(Output(\n '{}Port'.format(resource_name),\n Value=GetAtt(resource_name, 'Endpoint.Port'),\n Description='{} DB Port'.format(resource_name)\n ))\n self.template.add_output(Output(\n '{}DBName'.format(resource_name),\n Value=db_name,\n Description='{} DB Name'.format(resource_name)\n ))", "def launch_new_instance():\n app = IPClusterApp()\n app.start()", "def __startRestoreInstance(self, dbInst):\n pgCmd = \"gs_ctl start -Z restoremode -D %s\" % dbInst.datadir\n self.logger.debug(\"start local instance in restore mode cmd is %s\" % pgCmd)\n (status, output) = commands.getstatusoutput(pgCmd)\n if (status != 0):\n self.logger.debug(output)\n raise Exception(\"Start instance in restore mode failed!Datadir: %s\" % dbInst.datadir)", "def package_windows(game, lovefile, prefix, name, slug, version):\n _, output_name = tempfile.mkstemp(\"love\")\n\n zip_name = u\"{}-win.zip\".format(slug)\n\n archive = zipfile.ZipFile(output_name, \"w\")\n\n for filename in os.listdir(p(prefix + \"/windows\")):\n if not filename.endswith(\".dll\"):\n continue\n archive.write(p(os.path.join(prefix + \"/windows\", filename)),\n os.path.join(name, filename), zipfile.ZIP_DEFLATED)\n\n love_exe = open(p(prefix + \"/windows/love.exe\"), \"rb\").read()\n love_archive = open(lovefile, \"rb\").read()\n\n archive.writestr(os.path.join(name, slug + \".exe\"),\n love_exe + love_archive, zipfile.ZIP_DEFLATED)\n\n archive.close()\n\n return output_name, zip_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels a bundling operation for an instance store-backed Windows instance.
def cancel_bundle_task(DryRun=None, BundleId=None): pass
[ "def cancel(self):\n self.sa_session.rollback()", "def cancel(self,widget,data=None):\n\n self.box.import_box.destroy()", "def cancel(self):\n try:\n if(self.status & (self.STAT_START)): #the event didn't finish copying\n self.kill = True\n #end if bkp in tmr\n except Exception as e:\n dbg.prn(dbg.BKP|dbg.ERR,\"[---]bkp.start:\",e,sys.exc_info()[-1].tb_lineno)\n #end ifself.done", "def cancel(self,\n headers=None,\n **query_parameters):\n return self.cancel_backup(\n headers=headers,\n **query_parameters\n )", "def cancel(self, rm=None):\n\n for r in self.resources:\n r.cancel()\n\n print 'Cancel the BigJob'\n self.engine.should_stop = True\n self.engine.join()", "def cancel(self):\n logging.warning(f\"called cancel on hunt {self} but {self.type} does not support cancel\")", "def cancel_upgrade(self, upgrade_task, service):\n pass", "def cancel(self):\n url = self._path.format(self.custom_model_id, self.custom_model_version_id)\n self._client.delete(url)", "def cancel():\n\t\traise NotImplementedError()", "async def cancel(ctx: commands.Context):\n actives = state[\"active-pickbans-by-user\"]\n process = actives.get(ctx.author)\n if not process:\n await ctx.send(\n \"You do not have an active pick/ban process. Start one with the `pickban` command.\"\n )\n return\n captain1, captain2 = process[\"captains\"]\n actives.pop(captain1, None)\n actives.pop(captain2, None)\n await ctx.send(\n \"Cancelled pick/ban process for {} and {}.\".format(\n captain1.mention, captain2.mention\n )\n )", "def cancel_job(self, command):\n pass", "def cancel(self):\n raise TypeError('Cannot cancel %s suspend' % self.__class__.__name__)", "async def cancel(self):\r\n\t\tif self.state != TradeState.TRADING:\r\n\t\t\traise TradeOnWrongState('cancel', self.state)\r\n\r\n\t\tawait self.client.main.send(Packet.new(31, 6).writeString(self.trader).write8(2))", "def cancel(self):\n payload = {\n \"command\": \"cancel\",\n \"options\": {}\n }\n self.log('INFO', 'Cancel workflow (instance ID: {})...'.format(self.instanceId))\n rsp = self.rest_put(self.uri+\"/action\", payload)\n\n if rsp.get('status', None) != 202:\n raise Exception('Cancel workflow (instance ID: {}) fail, http status: {}, response: {}'.\n format(self.instanceId, rsp.get('status', None), rsp.get('text', '')))\n else:\n self.log('INFO', 'Cancel workflow (instance ID: {}) done'.\n format(self.instanceId))", "def do_cancel(self):\n return self.case_cancel()", "def SystemAbort(self):\n debug('GCSCommands.SystemAbort()')\n self.__msgs.send(chr(27))", "def cancel_job(self, batch_request: BatchStatisticalRequestType) -> Json:\n return self._call_job(batch_request, \"cancel\")", "def abort():\n from concert.devices.base import Device\n\n tuples = _current_instances(Device)\n return device_abort(zip(*tuples)[1])", "def cancel(self):\n if self._jobid == -1:\n return\n\n os_ext.run_command('scancel %s' % self._jobid,\n check=True, timeout=settings.job_submit_timeout)\n self._is_cancelling = True\n self.wait()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.
def cancel_conversion_task(DryRun=None, ConversionTaskId=None, ReasonMessage=None): pass
[ "def cancel_task(api, task_id):\n logger.info(\"Canceling transfer\")\n try:\n api.task_cancel(task_id)\n except:\n pass", "def cancel_import_task(DryRun=None, ImportTaskId=None, CancelReason=None):\n pass", "def cancel_upgrade(self, upgrade_task, service):\n pass", "def delete(task):\n if os.path.isfile(task.absolute_source_path):\n try:\n # a simple file removing utility\n os.remove(task.absolute_source_path)\n except FileNotFoundError as file_err:\n task.add_exception(CopyProcessFailedException(Copytool.NO_SRC_FILE_ERR\n % (task.get_id(), file_err.output)))\n task.status = TaskStatus.EXCEPTION\n return task\n except PermissionError:\n task.add_exception(CopyProcessFailedException(Copytool.NO_PERM % task.get_id()))\n task.status = TaskStatus.EXCEPTION\n return task\n elif os.path.isdir(task.absolute_source_path):\n try:\n # a recursive file removing utility\n shutil.rmtree(task.absolute_source_path)\n except FileNotFoundError as file_err:\n task.add_exception(CopyProcessFailedException(Copytool.NO_SRC_FILE_ERR\n % (task.get_id(), file_err.output)))\n task.status = TaskStatus.EXCEPTION\n return task\n except PermissionError:\n task.add_exception(CopyProcessFailedException(Copytool.NO_PERM % task.get_id()))\n task.status = TaskStatus.EXCEPTION\n return task\n else:\n if not os.path.exists(task.absolute_source_path):\n task.add_exception(CopyProcessFailedException(Copytool.NO_SRC_FILE_CK % task.get_id()))\n else:\n task.add_exception(CopyProcessFailedException(Copytool.INVALID_SOURCE\n % (task.get_id(), task.get_absolute_source_path())))\n task.status = TaskStatus.EXCEPTION\n return task\n # if we get here no exception was triggered\n task.status = TaskStatus.DELETED\n return task", "def delTask(self, hostNum):\n try:\n url = self.baseURL+'fog/host/'+str(hostNum)+'/cancel'\n req = requests.delete(url, headers=self.header)\n if req.status_code == 200:\n self.log.info(\"%s\", \"successfully deleted image task\")\n except Exception:\n self.log.exception(\"Failed to delete the imaging task!\")", "def cancel(self):\n payload = {\n \"command\": \"cancel\",\n \"options\": {}\n }\n self.log('INFO', 'Cancel workflow (instance ID: {})...'.format(self.instanceId))\n rsp = self.rest_put(self.uri+\"/action\", payload)\n\n if rsp.get('status', None) != 202:\n raise Exception('Cancel workflow (instance ID: {}) fail, http status: {}, response: {}'.\n format(self.instanceId, rsp.get('status', None), rsp.get('text', '')))\n else:\n self.log('INFO', 'Cancel workflow (instance ID: {}) done'.\n format(self.instanceId))", "def cancel_transfer(transfertool_obj, transfer_id):\n\n try:\n transfertool_obj.cancel(transfer_ids=[transfer_id])\n except Exception:\n raise RucioException('Could not cancel FTS3 transfer %s on %s: %s' % (transfer_id, transfertool_obj, traceback.format_exc()))", "def cancel_job(self, command):\n pass", "def cancel_task(hostname, task_id):\n return cancel_task_async(hostname, task_id).get_result()", "def test_terminal_v1_tasks_delete(self):\n pass", "async def jsk_cancel(self, ctx: commands.Context, *, index: int):\n\n if not self.tasks:\n return await ctx.send(\"No tasks to cancel.\")\n\n if index == -1:\n task = self.tasks.pop()\n else:\n task = discord.utils.get(self.tasks, index=index)\n if task:\n self.tasks.remove(task)\n else:\n return await ctx.send(\"Unknown task.\")\n\n task.task.cancel()\n return await ctx.send(f\"Cancelled task {task.index}: `{task.ctx.command.qualified_name}`,\"\n f\" invoked at {task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC\")", "def 
cancel(self):\n self.log.info(\"Starting the cancel of transfer_wrapper %s\" % self)\n if self.dest.startswith('file:///'):\n dest = self.dest[7:]\n else:\n dest = self.dest\n if os.path.exists(dest):\n self.log.info(\"Unlinking partially complete dest file %s.\" % dest)\n try:\n os.unlink(dest)\n except Exception as exc:\n print_exc(exc)\n else:\n self.log.info(\"Destination path %s doesn't exist; not deleting.\" % \\\n dest)\n self._killflag = True\n if self.pid:\n self.log.info(\"Killing transfer process at PID %s.\" % str(self.pid))\n try:\n os.killpg(self.pid, signal.SIGTERM)\n self.log.info(\"Process return status: %s.\" % \\\n str(os.waitpid(self.pid, os.P_WAIT)))\n except:\n pass\n self.pid = None\n else:\n self.log.warning(\"I don't know what PID to kill! Doing nothing.\")\n self.log.info(\"Setting the kill flag, which should cause the \" \\\n \"transfer_wrapper to exit soon.\")", "def execute(self, env, args):\n\n # extract args\n task_name = args.task_name\n force = args.force\n\n if env.task.active and env.task.name == task_name:\n raise errors.ActiveTask\n\n if not env.task.exists(task_name):\n raise errors.TaskNotFound(task_name)\n\n if force:\n env.task.remove(task_name)\n\n else:\n try:\n while True:\n prompt = ('Are you sure you want to delete \"{0}\" (y/n)? '\n .format(task_name))\n resp = env.io.prompt(prompt, newline=False).lower()\n\n if resp in ('y', 'n'):\n if resp == 'y':\n env.task.remove(task_name)\n break\n\n except KeyboardInterrupt:\n pass", "def cancel(self):\n assert self.running\n\n self._cancelled = True\n\n # in this section we callback on processes's deferreds, it's\n # callbacks need to know that conversion is cancelled\n self.stop_running_processes()\n self.reset_tasks_queue()\n\n self.stop_scheduler()", "def delete(self, cm_name, task_id):\n res = CM.task_by_id(task_id, cm_name=cm_name)\n res.revoke(terminate=True)\n return redirect(url_for(\".cm_cm_task\", cm_name=cm_name, task_id=task_id))", "def clear_tasks(except_task_id=None):\n from contentcuration.celery import app\n\n # remove any other tasks\n qs = TaskResult.objects.all()\n if except_task_id:\n qs = qs.exclude(task_id=except_task_id)\n for task_id in qs.values_list(\"task_id\", flat=True):\n app.control.revoke(task_id, terminate=True)\n qs.update(status=states.REVOKED)", "def discard_task(self, task):\n self.tasks.discard(task)", "def delete(task_file):\n\t\n\tos.remove(task_file)", "def abort(t):\n return Action(ABORT, t)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels an active export task. The request removes all artifacts of the export, including any partially created Amazon S3 objects. If the export task is complete or is in the process of transferring the final disk image, the command fails and returns an error.
def cancel_export_task(ExportTaskId=None): pass
[ "def delete(self):\n try:\n self.source._api.delete_export(self.source.id, self.id)\n except:\n # Export probably no longer exists\n pass", "def cancel_task(api, task_id):\n logger.info(\"Canceling transfer\")\n try:\n api.task_cancel(task_id)\n except:\n pass", "def delTask(self, hostNum):\n try:\n url = self.baseURL+'fog/host/'+str(hostNum)+'/cancel'\n req = requests.delete(url, headers=self.header)\n if req.status_code == 200:\n self.log.info(\"%s\", \"successfully deleted image task\")\n except Exception:\n self.log.exception(\"Failed to delete the imaging task!\")", "def clear_tasks(except_task_id=None):\n from contentcuration.celery import app\n\n # remove any other tasks\n qs = TaskResult.objects.all()\n if except_task_id:\n qs = qs.exclude(task_id=except_task_id)\n for task_id in qs.values_list(\"task_id\", flat=True):\n app.control.revoke(task_id, terminate=True)\n qs.update(status=states.REVOKED)", "def cancel_import_task(DryRun=None, ImportTaskId=None, CancelReason=None):\n pass", "def delete(ctx):\n delete_script = \"\"\"\n rm -r $OUTPUT_PATH/fhir/IG || true > /dev/null\n gsutil -m rm -r gs://$GOOGLE_BUCKET/fhir/IG \n \"\"\"\n run_cmd(delete_script)", "def cancel(self,\n headers=None,\n **query_parameters):\n return self.cancel_backup(\n headers=headers,\n **query_parameters\n )", "async def delete(ctx, task_id: int):\n raise NotImplementedError", "def delete(task_file):\n\t\n\tos.remove(task_file)", "def delete(self, *args, **kwargs):\n try:\n self.terminate_task()\n self.periodic_task.delete()\n except:\n pass\n return super(ShoalScrapeTask, self).delete(*args, **kwargs)", "def _delete_staged_downloads(download_result):\n download_result[\"tarball_stage\"].destroy()\n download_result[\"specfile_stage\"].destroy()", "def abort(self):\n qparam = {DSORT_UUID: [self._dsort_id]}\n self._client.request(\n HTTP_METHOD_DELETE, path=f\"{URL_PATH_DSORT}/{DSORT_ABORT}\", params=qparam\n )", "def execute(self, env, args):\n\n # extract args\n task_name = args.task_name\n force = args.force\n\n if env.task.active and env.task.name == task_name:\n raise errors.ActiveTask\n\n if not env.task.exists(task_name):\n raise errors.TaskNotFound(task_name)\n\n if force:\n env.task.remove(task_name)\n\n else:\n try:\n while True:\n prompt = ('Are you sure you want to delete \"{0}\" (y/n)? 
'\n .format(task_name))\n resp = env.io.prompt(prompt, newline=False).lower()\n\n if resp in ('y', 'n'):\n if resp == 'y':\n env.task.remove(task_name)\n break\n\n except KeyboardInterrupt:\n pass", "def discard_task(self, task):\n self.tasks.discard(task)", "def cancel_task(hostname, task_id):\n return cancel_task_async(hostname, task_id).get_result()", "def cancel_transfer(transfertool_obj, transfer_id):\n\n try:\n transfertool_obj.cancel(transfer_ids=[transfer_id])\n except Exception:\n raise RucioException('Could not cancel FTS3 transfer %s on %s: %s' % (transfer_id, transfertool_obj, traceback.format_exc()))", "def cancel_conversion_task(DryRun=None, ConversionTaskId=None, ReasonMessage=None):\n pass", "def test_abortTransfers() -> json:\r\n\r\n # Action\r\n _, transfers = u.getTransfersFromManager()\r\n tr_id = \"\"\r\n try:\r\n active_transfers = [t[\"transferId\"] for t in transfers if t[\"percentCompleted\"] < 100]\r\n tr_id = active_transfers[0]\r\n except:\r\n tr_id = transfers[0][\"transferId\"]\r\n\r\n status, result = u.abortTransfers(tr_id)\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)", "def _delete_attachment(self, task):\n for each in self.cleaned_data.get('delete_attachment', []):\n each.delete()", "def cancel(self):\n self.log.info(\"Starting the cancel of transfer_wrapper %s\" % self)\n if self.dest.startswith('file:///'):\n dest = self.dest[7:]\n else:\n dest = self.dest\n if os.path.exists(dest):\n self.log.info(\"Unlinking partially complete dest file %s.\" % dest)\n try:\n os.unlink(dest)\n except Exception as exc:\n print_exc(exc)\n else:\n self.log.info(\"Destination path %s doesn't exist; not deleting.\" % \\\n dest)\n self._killflag = True\n if self.pid:\n self.log.info(\"Killing transfer process at PID %s.\" % str(self.pid))\n try:\n os.killpg(self.pid, signal.SIGTERM)\n self.log.info(\"Process return status: %s.\" % \\\n str(os.waitpid(self.pid, os.P_WAIT)))\n except:\n pass\n self.pid = None\n else:\n self.log.warning(\"I don't know what PID to kill! Doing nothing.\")\n self.log.info(\"Setting the kill flag, which should cause the \" \\\n \"transfer_wrapper to exit soon.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels an in-process import virtual machine or import snapshot task.
def cancel_import_task(DryRun=None, ImportTaskId=None, CancelReason=None): pass
[ "def cancel_task(api, task_id):\n logger.info(\"Canceling transfer\")\n try:\n api.task_cancel(task_id)\n except:\n pass", "def cancel_upgrade(self, upgrade_task, service):\n pass", "def cancel(self,widget,data=None):\n\n self.box.import_box.destroy()", "def _cancel_exec(self, reason=None):\n raise CancelScriptLoading(reason)", "def cancel_ingest(ingest_run_uuid=None):\n ingest_run = IngestRun.objects.get(pk=ingest_run_uuid)\n logger.info(f\"Canceling ingest run {ingest_run.pk}...\")\n ingest_run.cancel()\n ingest_run.save()", "def cancel_task(hostname, task_id):\n return cancel_task_async(hostname, task_id).get_result()", "def cancel_job(self, command):\n pass", "def cancel():\n\t\traise NotImplementedError()", "def cancel_conversion_task(DryRun=None, ConversionTaskId=None, ReasonMessage=None):\n pass", "def request_cancel(self, *args, **kwargs) -> None:\n self.connection.request_cancel_workflow_execution(self.domain.name, self.workflow_id, run_id=self.run_id)", "def cancel(self):\n payload = {\n \"command\": \"cancel\",\n \"options\": {}\n }\n self.log('INFO', 'Cancel workflow (instance ID: {})...'.format(self.instanceId))\n rsp = self.rest_put(self.uri+\"/action\", payload)\n\n if rsp.get('status', None) != 202:\n raise Exception('Cancel workflow (instance ID: {}) fail, http status: {}, response: {}'.\n format(self.instanceId, rsp.get('status', None), rsp.get('text', '')))\n else:\n self.log('INFO', 'Cancel workflow (instance ID: {}) done'.\n format(self.instanceId))", "def disassociate_task(self,\n load: Task) -> None:\n raise NotImplementedError", "def abort(self):\n\n self.progtrack.download_add_progress(0, -self.dlcurrent)\n self.progtrack.upload_add_progress(-self.ulcurrent)\n self.completed = True", "def delTask(self, hostNum):\n try:\n url = self.baseURL+'fog/host/'+str(hostNum)+'/cancel'\n req = requests.delete(url, headers=self.header)\n if req.status_code == 200:\n self.log.info(\"%s\", \"successfully deleted image task\")\n except Exception:\n self.log.exception(\"Failed to delete the imaging task!\")", "def cancel(self, future: RuntimeFuture) -> None:\n ...", "def SystemAbort(self):\n debug('GCSCommands.SystemAbort()')\n self.__msgs.send(chr(27))", "def cancel(self):\n self.sa_session.rollback()", "def cancel_bundle_task(DryRun=None, BundleId=None):\n pass", "def cancelUploadFile(self, uploadSessionId):\n return self.session.request('replicationcomms/slave/sessions/%s/'\n % (uploadSessionId))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace. For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.
def cancel_reserved_instances_listing(ReservedInstancesListingId=None): pass
[ "def stop():\n local('aws ec2 stop-instances --instance-ids %s'%(AWS_INSTANCE_ID))", "def delete_instance(self, instance_crn):\n\n safe_crn = urllib.parse.quote(instance_crn, \"\")\n resp = self.session.delete(\n \"{0}/v2/resource_instances/{1}\".format(self.endpoint_url, safe_crn)\n )\n\n if resp.status_code != 204:\n raise Exception(\n \"Delete instance failed: code=%d body=%s\"\n % (resp.status_code, resp.text)\n )", "def cancel(self):\n payload = {\n \"command\": \"cancel\",\n \"options\": {}\n }\n self.log('INFO', 'Cancel workflow (instance ID: {})...'.format(self.instanceId))\n rsp = self.rest_put(self.uri+\"/action\", payload)\n\n if rsp.get('status', None) != 202:\n raise Exception('Cancel workflow (instance ID: {}) fail, http status: {}, response: {}'.\n format(self.instanceId, rsp.get('status', None), rsp.get('text', '')))\n else:\n self.log('INFO', 'Cancel workflow (instance ID: {}) done'.\n format(self.instanceId))", "def create_reserved_instances_listing(ReservedInstancesId=None, InstanceCount=None, PriceSchedules=None, ClientToken=None):\n pass", "def terminate_instance(instance_id):\n\n client = boto3.client('ec2')\n response = client.terminate_instances(InstanceIds=instance_id)", "def cancel_uber():\n\n request_id = flask.request.form['request_id']\n response = uber_client.cancel_ride(request_id)\n return render_template(\"ride_cancelled.html\")", "def terminate_instance(id):\n for region in boto.ec2.regions():\n conn = region.connect()\n for reservation in conn.get_all_instances():\n for instance in reservation.instances:\n if instance.id == id:\n print \"Terminating instance: {0}\".format(id)\n instance.terminate()\n return\n print \"Unable to terminate instance: {0}\".format(id)", "def cancel_rebalance(self) -> Dict:\n return GsIndexApi.cancel_rebalance(self.id, CustomBasketsRebalanceAction.default_instance())", "def TerminateInstance(*, session, instanceid):\n ec2conn = session.connect_to(\"ec2\")\n return ec2conn.terminate_instances(instance_ids=[instanceid,])", "def modify_reserved_instances(ClientToken=None, ReservedInstancesIds=None, TargetConfigurations=None):\n pass", "def StopInstance(*, session, instanceid):\n ec2conn = session.connect_to(\"ec2\")\n ret = ec2.stop_instances(instance_ids=[instanceid,])\n return True", "def terminate(session):\n logging.info(\"Terminating instances\")\n session.clients[\"ec2\"].terminate_instances(InstanceIds=list(session.instances))", "def terminate(self):\n ips_to_remove = self.floating_ips[:]\n for ip in ips_to_remove:\n self.remove_floating_ip(ip)\n _id = self.id\n self._instance.delete()\n self.log.debug('Waiting for instance (%s) to be terminated.' % _id)\n\n try:\n while self.instance:\n self.log.debug('Nova instance %s has status %s...' % (_id, self.status))\n sleep(5)\n except:\n self.log.debug('Nova instance %s deleted.' 
% _id)\n\n if self.key_pair:\n self.log.debug('Removing key pair: %s' % self.key_pair.name)\n\n key_dir = os.path.expanduser('~/') + '.ssh/'\n try:\n self.stack_env.nova.keypairs.delete(self.key_pair)\n os.remove(key_dir + self.key_pair.name)\n os.remove(key_dir + self.key_pair.name + '.pub')\n except:\n self.log.exception('Unable to remove key pair %s%s' % (key_dir, self.key_pair.name))", "def purchase_reserved_instances_offering(DryRun=None, ReservedInstancesOfferingId=None, InstanceCount=None, LimitPrice=None):\n pass", "def stopinstances():\n username, conn = _getbotoconn(auth_user)\n print \"stopping instances running under the %s account\" % username\n\n running_instances = _getrunninginstances(conn)\n for instid, instance in running_instances.iteritems():\n instance.stop()\n print \"instance %s stopped\" % instid", "def stop_instance(stackName, instanceName=None):\n control_instance(stackName=stackName, action='stop', instanceName=instanceName)", "def select_instance(self):\n\n try:\n for instance in self.nova_client.servers.list():\n if 'novacheck' in str(instance.name) \\\n and getattr(instance, 'OS-EXT-AZ:availability_zone') \\\n == self.zone and \\\n instance.status == 'ACTIVE':\n self.instance = instance\n if self.instance:\n self.logger.warning(\"Selected Instance %s : %s\" %\n (self.instance.id, self.instance.name))\n self.success = True\n else:\n self.logger.error(\"No Instance Available\")\n self.success, self.overall_success = False, False\n self.failure = \"No Instance Available\"\n self.instance.delete()\n self.logger.error(\"Deleting instance\")\n exit(1)\n except nova_exceptions.NotFound:\n self.logger.error(\"404 instance not found\")\n self.success, self.overall_success = False, False\n self.failure = \"Not Found\"\n self.instance.delete()\n self.logger.error(\"Deleting instance\")\n exit(1)\n except Exception as e:\n self.logger.error(\"Selecting Instance Failed\")\n self.success, self.overall_success = False, False\n self.failure = e\n self.instance.delete()\n self.logger.error(\"Deleting instance\")\n exit(1)", "def cancel(self):\n url = self._path.format(self.custom_model_id, self.custom_model_version_id)\n self._client.delete(url)", "def describe_reserved_instances(DryRun=None, ReservedInstancesIds=None, Filters=None, OfferingType=None, OfferingClass=None):\n pass", "def stop(self):\n c = Controller()\n instance_id = c.instance.id\n c.terminate_instance()\n\n print('Successfully shut down instance: ' + instance_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels the specified Spot fleet requests. After you cancel a Spot fleet request, the Spot fleet launches no new Spot instances. You must specify whether the Spot fleet should also terminate its Spot instances. If you terminate the instances, the Spot fleet request enters the cancelled_terminating state. Otherwise, the Spot fleet request enters the cancelled_running state and the instances continue to run until they are interrupted or you terminate them manually.
def cancel_spot_fleet_requests(DryRun=None, SpotFleetRequestIds=None, TerminateInstances=None): pass
[ "async def futures_cancel_orders(self, **params):\r\n return await self.client_helper(\"futures_cancel_orders\", **params)", "def cancel_steps(ClusterId=None, StepIds=None, StepCancellationOption=None):\n pass", "def request_cancel(self, *args, **kwargs) -> None:\n self.connection.request_cancel_workflow_execution(self.domain.name, self.workflow_id, run_id=self.run_id)", "async def futures_cancel_order(self, **params):\r\n return await self.client_helper(\"futures_cancel_order\", **params)", "def cancel_orders(self, reqs: Sequence[CancelRequest]) -> None:\n for req in reqs:\n self.cancel_order(req)", "def cancel(self):\n payload = {\n \"command\": \"cancel\",\n \"options\": {}\n }\n self.log('INFO', 'Cancel workflow (instance ID: {})...'.format(self.instanceId))\n rsp = self.rest_put(self.uri+\"/action\", payload)\n\n if rsp.get('status', None) != 202:\n raise Exception('Cancel workflow (instance ID: {}) fail, http status: {}, response: {}'.\n format(self.instanceId, rsp.get('status', None), rsp.get('text', '')))\n else:\n self.log('INFO', 'Cancel workflow (instance ID: {}) done'.\n format(self.instanceId))", "def _cancel_tasks(self):\n logger.debug(f\"{self} cancelling incomplete tasks\")\n cancel_count = 0\n for wrapper in self._wrappers:\n future = wrapper.future\n if future.cancellable:\n future.cancel()\n cancel_count += 1\n logger.debug(f\"{self} cancelled {cancel_count} tasks\")", "def cancel_job(self, batch_request: BatchStatisticalRequestType) -> Json:\n return self._call_job(batch_request, \"cancel\")", "def cancel_hunts(self):\n for hunt in self._hunts: # order doesn't matter here\n try:\n if hunt.running:\n logging.info(\"cancelling {hunt}\")\n hunt.cancel()\n hunt.wait()\n except Exception as e:\n logging.info(\"unable to cancel {hunt}: {e}\")", "def order_cancel_request(message, futures):\n order_id = message[2][0] # uses id, if no cid given\n order_cid = message[2][2]\n future_id = f\"oc_{order_id}\"\n future_id_cid = f\"oc_{order_cid}\"\n # print(\"Cancel requst started!\")\n if future_id in futures.keys():\n future = futures[future_id]\n elif future_id_cid in futures.keys():\n future = futures[future_id_cid]\n # print(\"requst future\", future)\n future.set_result({\n \"status\": message[6], # Error/Sucess\n \"id\": message[4][0],\n \"cid\": message[4][2],\n \"response\": message[4],\n \"comment\": message[7]\n })\n if future_id in futures:\n del futures[future_id]\n elif future_id_cid in futures:\n del futures[future_id_cid]", "def cancel_request(self, requestid):\n # TODO: return to SedmDb.py because of how much sql \"understanding\" it requires?\n self.db.update_request({'id': requestid, 'status': 'CANCELED'})\n # cancel the associated atomicrequests\n # TODO: allow more nuanced update function inputs (e.g. 
add a where_dict)?\n self.db.execute_sql(\"UPDATE atomicrequest SET status='CANCELED' WHERE request_id='%s'\" % (requestid,))\n return (0, \"Request canceled\")", "async def futures_cancel_all_open_orders(self, **params):\r\n return await self.client_helper(\"futures_cancel_all_open_orders\", **params)", "def cancel_uber():\n\n request_id = flask.request.form['request_id']\n response = uber_client.cancel_ride(request_id)\n return render_template(\"ride_cancelled.html\")", "def cancel_all(context, vo_name=None):\n submitter = Submitter(context)\n return submitter.cancel_all(vo_name)", "def cancel(self) -> None:\n if not self.called:\n for deferred in self._deferredList:\n try:\n deferred.cancel()\n except BaseException:\n log.failure(\"Exception raised from user supplied canceller\")", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n\n if game:\n if game.game_over == False:\n game.game_over = True\n game.attempts_remaining = 0\n game.put()\n return StringMessage(message=\"Game cancelled!\")\n else:\n raise endpoints.BadRequestException('You cannot cancel a finished game.')\n else:\n raise endpoints.NotFoundException('Game not found!')", "def cancel_orders(order_ids=None, conids=None, strategies=None, cancel_all=None):\n params = {}\n if order_ids:\n params[\"order_ids\"] = order_ids\n if conids:\n params[\"conids\"] = conids\n if strategies:\n params[\"strategies\"] = strategies\n if cancel_all:\n params[\"cancel_all\"] = cancel_all\n\n response = houston.delete(\"/blotter/orders\", params=params)\n houston.raise_for_status_with_json(response)\n return response.json()", "def request_cancel_slow_command(self, req, msg):\n fut = self._slow_futures.pop(req.client_connection, None)\n if fut:\n fut.set_result(None)\n return req.make_reply('ok')", "def cancel_game(self, request):\n game = get_by_urlsafe(request.key, Game)\n if not game:\n raise endpoints.NotFoundException(\"Game does not exist\")\n if game.over:\n raise endpoints.ForbiddenException(\n \"You cannot cancel a completed game\")\n else:\n game.key.delete()\n return StringMessage(message=\"Game deleted!\")", "def abort_request(self, request):\n self.timedout = True\n if not request.called:\n try:\n request.cancel()\n except error.AlreadyCancelled:\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels one or more Spot instance requests. Spot instances are instances that Amazon EC2 starts on your behalf when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.
def cancel_spot_instance_requests(DryRun=None, SpotInstanceRequestIds=None): pass
[ "def request_spot_instances(DryRun=None, SpotPrice=None, ClientToken=None, InstanceCount=None, Type=None, ValidFrom=None, ValidUntil=None, LaunchGroup=None, AvailabilityZoneGroup=None, BlockDurationMinutes=None, LaunchSpecification=None):\n pass", "def wait_for_fulfillment(self, timeout=50, request_ids=None):\n logger.debug(\"waiting for requests to be fulfilled\") \n\n if request_ids is None:\n spot_req_ids = self.spot_req_ids\n else:\n spot_req_ids = request_ids\n\n processed_dict=dict()\n for sir_id in spot_req_ids:\n processed_dict[sir_id] = False\n #status_dict[sir_id] = None\n\n ### wait for a disposition for each spot request (basically when sir.state is not open)\n loop_count=0\n while not all( processed_dict.values()) and loop_count <= timeout:\n loop_count+=1\n try:\n spot_reqs = self.conn.get_all_spot_instance_requests(request_ids = spot_req_ids)\n except boto.exception.EC2ResponseError:\n ### need to wait a little time for AWS to register the requests, if this function called\n ### right after create_spot_instances\n time.sleep(3)\n continue\n for sir in spot_reqs:\n if sir.state != 'open':\n processed_dict[sir.id] = True\n\n if not all ( processed_dict.values()):\n time.sleep(15)\n\n\n ### get disposition of each spot instance request\n spot_reqs = self.conn.get_all_spot_instance_requests(request_ids = spot_req_ids)\n instance_ids = list()\n instance_ready = dict()\n for sir in spot_reqs:\n if sir.state == 'open':\n self.request_status_dict[sir.id] = 'timed out'\n else:\n self.request_status_dict[sir.id] = sir.status.code\n\n if sir.status.code == 'fulfilled':\n instance_ids.append(sir.instance_id)\n instance_ready[sir.instance_id] = False\n else:\n self.failed_req_ids.append(sir.id)\n \n ### wait for ready states in the fulfilled instances\n while not all ( instance_ready.values()) and loop_count <= timeout:\n loop_count+=1\n instances = self.conn.get_only_instances(instance_ids = instance_ids)\n for inst in instances:\n if inst.state != 'pending':\n instance_ready[inst.id] = True\n \n if not all (instance_ready.values()):\n time.sleep(15)\n\n ### get final dispositions of instances\n good_instances =0\n instances = self.conn.get_only_instances(instance_ids = instance_ids)\n for inst in instances:\n if inst.state != 'running':\n sir_id = inst.spot_instance_request_id\n self.failed_req_ids.append(sir_id)\n if inst.state == 'pending':\n self.request_status_dict[sir_id] = 'timed out'\n else:\n self.request_status_dict[sir_id] = 'post-fulfillment premature instance termination'\n else:\n if self.use_private_ips:\n ipaddr=inst.private_ip_address\n else:\n ipaddr=inst.ip_address\n self.instance_ids.append(inst.id)\n self.ip_dict[inst.id] = ipaddr\n self.rev_ip_dict[ipaddr] = inst.id\n self.request_status_dict[sir_id] = 'running'\n good_instances+=1\n\n\n ### might have to sleep a little bit after running status toggles before it can accept ssh connections\n # put a 30 second delay in\n time.sleep(30)\n\n return (len (spot_req_ids), good_instances) \n\n ### to retrieve good instances: awsobj.instance_ids[-good_instances:]", "def delete_ec2_instances():\n print('Deleting EC2 instances')\n ec2 = boto3.resource('ec2')\n\n active_ec2_instance_count = 0\n for instance in ec2.instances.all():\n disable_api_termination = instance.describe_attribute(\n Attribute='disableApiTermination'\n )\n if disable_api_termination['DisableApiTermination']['Value']:\n print('Stopping instance to enable API termination - {}'.format(instance.instance_id))\n instance.stop()\n active_ec2_instance_count = 
active_ec2_instance_count + 1\n else:\n if instance.state['Code'] != 48: # code 48 is 'terminated'\n print('Terminating instance - {}'.format(instance.instance_id))\n instance.terminate()\n active_ec2_instance_count = active_ec2_instance_count + 1\n\n if active_ec2_instance_count > 0:\n print('Waiting for ec2 instances to stop or terminate')\n while [instance for instance in ec2.instances.all()]:\n all_terminated = True\n for instance in ec2.instances.all():\n disable_api_termination = instance.describe_attribute(\n Attribute='disableApiTermination'\n )\n if (disable_api_termination['DisableApiTermination']['Value'] and\n instance.state['Code'] == 80):\n # code 80 is 'stopped'\n # instance has termination protection switched on and is stopped\n # switch it off and terminate the instance\n instance.modify_attribute(\n DisableApiTermination={\n 'Value': False\n }\n )\n instance.terminate()\n if instance.state['Code'] != 48: # code 48 is 'terminated'\n all_terminated = False\n\n if all_terminated:\n break\n else:\n time.sleep(5)\n\n print('EC2 instances deleted')", "def stop():\n local('aws ec2 stop-instances --instance-ids %s'%(AWS_INSTANCE_ID))", "def get(self):\n try:\n right_now = datetime.now() # let's assume datetime is the class\n except AttributeError:\n # App Engine sometimes imports datetime as a module...\n # Has been reported to GOOG: http://code.google.com/p/googleappengine/issues/detail?id=7341\n right_now = datetime.datetime.now()\n\n if self.request.get('early', False):\n right_now = right_now + datetime.timedelta(days=1)\n\n expired_instances = SIBTInstance.all()\\\n .filter('end_datetime <=', right_now)\\\n .filter('is_live =', True)\n\n for instance in expired_instances:\n taskqueue.add(\n url=url('RemoveExpiredSIBTInstance'),\n params={\n 'instance_uuid': instance.uuid\n }\n )\n msg = 'expiring %d instances' % expired_instances.count()\n logging.info(msg)\n self.response.out.write(msg)", "def terminate(session):\n logging.info(\"Terminating instances\")\n session.clients[\"ec2\"].terminate_instances(InstanceIds=list(session.instances))", "def cancel_orders(self, reqs: Sequence[CancelRequest]) -> None:\n for req in reqs:\n self.cancel_order(req)", "def check_terminated_instance_request_consistency(self):\n\n ret=False\n ### check consistency of supposedly active instances.\n if len(self.instance_ids) > 0:\n instances = self.conn.get_only_instances(instance_ids = self.instance_ids)\n for inst in instances:\n if inst.state == 'terminated':\n self.terminate_instance(inst.id)\n\n ### check consistency of sirs related to terminated instances.\n if len(self.terminated_ids) > 0:\n removed_from_terminated=list()\n for inst_id in self.terminated_ids:\n try:\n inst = self.conn.get_only_instances(instance_ids = [ inst_id] )[0]\n except:\n ### means the instance is no longer tracked in aws so we can delete from terminated\n removed_from_terminated.append(inst_id)\n continue\n\n sir_id=inst.spot_instance_request_id\n if sir_id:\n sir = self.conn.get_all_spot_instance_requests(request_ids = [sir_id])[0]\n if not (sir.state == 'canceled' or sir.state =='closed'):\n ### forcibly send a cancel.\n self.conn.cancel_spot_instance_requests( request_ids = [sir_id])\n ret=True\n else:\n ### we can stop tracking the instance\n removed_from_terminated.append(inst_id)\n else:\n ### not an instances generated from spot request. 
so it got added here by mistake\n ### so do the same stop tracking\n removed_from_terminated.append(inst_id)\n\n\n for r in removed_from_terminated:\n self.terminated_ids.remove(r)\n\n return ret", "def action_cancel_salepoint(self):\n for rec in self:\n # send Email to big manager for cancel process\n user_email_list = []\n user_obj = self.env['res.users']\n from_mail = user_obj.browse(self._uid) and user_obj.login or ''\n big_manager_grp = self.env.ref(\"big_general.group_big_manager\")\n for user in big_manager_grp.users:\n user_email_list.append(user.partner_id.email\n if user.partner_id.email else '')\n email_template = self.env.ref(\n 'big_new_registration.email_surrender_connection_request')\n if email_template and user_email_list:\n user_email = ','.join(user_email_list)\n email_template.sudo().write({\n 'email_from': from_mail,\n 'email_to': user_email\n })\n email_template.send_mail(self.id, force_send=True)\n rec.state = 'cancel_sales_point'\n if rec.new_connection_id.cylinder_qty == 0:\n rec.new_connection_id.state = 'cancel_sales_point'", "def wait_for_instances_to_stop(conn, instance_ids, pending_ids):\n reservations = conn.get_all_instances(instance_ids=pending_ids)\n for reservation in reservations:\n for instance in reservation.instances:\n print \"State: \" + instance.state\n if instance.state == 'terminated':\n print \"instance `{\" + instance.id + \"}` terminated!\"\n pending_ids.pop(pending_ids.index(instance.id))\n else:\n print \"instance `{\" + instance.id + \"}` stopping...\"\n if len(pending_ids) == 0:\n print \"all instances terminated!\"\n else:\n time.sleep(10)\n wait_for_instances_to_stop(conn, instance_ids, pending_ids)", "def stopinstances():\n username, conn = _getbotoconn(auth_user)\n print \"stopping instances running under the %s account\" % username\n\n running_instances = _getrunninginstances(conn)\n for instid, instance in running_instances.iteritems():\n instance.stop()\n print \"instance %s stopped\" % instid", "def describe_spot_instance_requests(DryRun=None, SpotInstanceRequestIds=None, Filters=None):\n pass", "def run_instances(self):\n # create an entry in the s3 log for the start of this task \n self.log_to_s3('run-instances-start.log', 'start')\n\n session = botocore.session.get_session()\n client = session.create_client('ec2', region_name=self.aws_region)\n\n # convert user-data to base64\n user_data = ''\n # NOTE conversion of file to string, then string to bytes, the bytes encoded \n # base64 - then decode the base64 bytes into base64 string\n with open(self.ec2_user_data, 'r') as f:\n user_data = base64.b64encode(bytes(f.read(), \"utf-8\")).decode(\"utf-8\")\n\n if self.ec2_type in (CONST.VALID_EC2_INSTANCE_TYPES_EBS_ONLY).split('|'):\n # block device mapping for ebs backed instances\n # creates an ephemeral EBS volume (delete on terminate)\n # Note that gp2 instance type is EBS SSD\n custom_block_device_mapping = [{\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0',\n 'Ebs':{\n 'VolumeSize': self.ec2_ebs_only_volume_size,\n 'VolumeType': self.ec2_ebs_only_volume_type,\n },\n }]\n else:\n # block device mapping allows for 2 extra drives\n # - works for either single ssd or 2 ssd's\n custom_block_device_mapping = [ \n {\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0'\n },\n {\n 'DeviceName': '/dev/sdc',\n 'VirtualName': 'ephemeral1'\n }\n ]\n\n r = client.request_spot_instances(\n InstanceCount=self.ec2_count,\n SpotPrice=self.ec2_spot_price,\n LaunchSpecification= {\n 'SecurityGroupIds': [\n self.ec2_security_group_id,\n 
],\n 'SecurityGroups': [\n self.ec2_security_groups,\n ],\n 'Placement': {\n 'AvailabilityZone': self.ec2_availability_zone,\n },\n 'BlockDeviceMappings': custom_block_device_mapping,\n 'IamInstanceProfile': {\n 'Arn': self.ec2_arn_id,\n },\n 'UserData': user_data,\n 'ImageId': self.ec2_image_id,\n 'InstanceType': self.ec2_type,\n 'KeyName': self.ec2_security_key,\n },\n )\n\n # get the spot instance request ids\n spot_ids = []\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Spot request ids:'))\n for i, spot_inst in enumerate(r['SpotInstanceRequests']):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, \n inst_str + '\\t' + spot_inst['SpotInstanceRequestId']))\n spot_ids.append(spot_inst['SpotInstanceRequestId'])\n utility.list_to_file(CONST.SPOT_REQUEST_IDS, spot_ids)\n\n # create a list of spot instance statuses - so we can print out\n # some updates to the user\n spot_status = ['']*len(spot_ids)\n # Expecting status codes of \"pending-evaluation\", \"pending-fulfillment\", or \n # fulfilled. Any other status-code should be printed out & the program \n # terminated.\n expected_status = ['fulfilled', 'pending-evaluation', 'pending-fulfillment']\n instance_ids = [None]*len(spot_ids)\n\n # check the status of the spot requests\n while True:\n fulfilled = 0\n for i, id in enumerate(spot_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_spot_instance_requests(SpotInstanceRequestIds=[id])\n status_code = r['SpotInstanceRequests'][0]['Status']['Code']\n if status_code not in expected_status:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, 'Unexpected status for spot request ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': ') +\n colour_msg(Colour.PURPLE, status_code))\n sys.exit(1)\n if status_code != spot_status[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Spot instance request: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tStatus: ') +\n colour_msg(Colour.PURPLE, status_code))\n spot_status[i] = status_code\n if status_code == 'fulfilled':\n fulfilled += 1\n # record the instance id\n instance_ids[i] = r['SpotInstanceRequests'][0]['InstanceId']\n if fulfilled == len(spot_ids):\n break\n time.sleep(1)\n\n utility.list_to_file(CONST.INSTANCE_IDS, instance_ids)\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ids:'))\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n tag_val = self.ec2_instance_tag + str(i)\n client.create_tags(Resources=[id], Tags=[{'Key':'Name', 'Value':tag_val}])\n\n # monitor the instances until all running\n instance_states = ['']*len(instance_ids)\n expected_states = ['running', 'pending']\n instance_ips = [None]*len(instance_ids)\n running = 0\n while True:\n running = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instances(InstanceIds=[id])\n state = r['Reservations'][0]['Instances'][0]['State']['Name']\n if state not in expected_states:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, \n 'Unexpected instance state for instance-id ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': \\t') +\n colour_msg(Colour.PURPLE, state))\n sys.exit(1)\n if state != instance_states[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tState: ') +\n colour_msg(Colour.PURPLE, state))\n instance_states[i] = 
state\n if state == 'running':\n running += 1\n # record the instance id\n instance_ips[i] = r['Reservations'][0]['Instances'][0]['PublicDnsName']\n if running == len(instance_ids):\n break\n time.sleep(10)\n\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ips:'))\n for i, id in enumerate(instance_ips):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n \n utility.list_to_file(CONST.INSTANCE_IPS_FILE, instance_ips)\n # need to at least wait until all the instances are reachable\n # possible statuses: (passed | failed | initializing | insufficient-data )\n reachability = ['']*len(instance_ids)\n while True:\n passed = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instance_status(InstanceIds=[id])\n state = r['InstanceStatuses'][0]['InstanceStatus']['Details'][0]['Status']\n if state != reachability[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tReachability: ') +\n colour_msg(Colour.PURPLE, state))\n reachability[i] = state\n if state == 'passed':\n passed += 1\n if passed == len(instance_ids):\n break\n time.sleep(10)\n \n lgr.info(CONST.INFO + colour_msg(Colour.GREEN, 'Instances are reachable'))\n \n # if user-data configuration file supplied - check that it has worked\n # Note that this checker is run once on each instance\n if self.ec2_user_data:\n lgr.info(CONST.INFO + colour_msg(Colour.CYAN, \n 'Starting job to monitor user-data configuration...'))\n # at the moment is calling a local script that does the checking\n result = subprocess.call('./' + self.ec2_user_data_check) \n if result:\n lgr.error(CONST.ERROR + colour_msg(Colour.CYAN, \n 'user data checker FAILED'))\n sys.exit(1)\n\n # create an entry in the s3 log for finish this task \n self.log_to_s3('run-instances-finish.log', 'finish')\n\n # return the list of ip's for the newly created instances\n return utility.file_to_list(CONST.INSTANCE_IPS_FILE)", "def terminate_instances(self):\n\n if self._reservation and self._reservation.instances:\n for instance in self._reservation.instances:\n instance.terminate()\n msg = 'EC2 instance terminated.'\n log.info(msg)\n self._store_message(msg)", "def spare_cancel(self,cr,uid,ids,context=None):\n\n exchange = self.pool.get('exchange.order')\n wf_service = netsvc.LocalService(\"workflow\")\n for rec in self.browse(cr , uid ,ids):\n exchange_ref = rec.ir_ref\n exchange_id = exchange.search(cr , uid , [('name' , '=' , exchange_ref)])\n for exchange_record in exchange.browse(cr ,uid , exchange_id):\n wf_service.trg_validate(uid, 'exchange.order', exchange_record.id, 'exchange_cancel', cr)\n \n return self.write(cr, uid, ids, {'state':'spare_cancel'}, context=context)", "def modify_spot_fleet_request(SpotFleetRequestId=None, TargetCapacity=None, ExcessCapacityTerminationPolicy=None):\n pass", "def purchase_scheduled_instances(DryRun=None, ClientToken=None, PurchaseRequests=None):\n pass", "def control_instance(stackName, action, instanceName=None):\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. 
%s\" % error))\n return 1\n\n stackName = stackName.lower()\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n stackId = [stack['StackId'] for stack in stacks['Stacks'] if stack['Name'] == stackName]\n if stackId == []:\n print(_red(\"stack %s not found\" % stackName))\n return 1\n instances = opsworks.describe_instances(stack_id=stackId[0])['Instances']\n if instanceName is not None:\n instances = [instance for instance in instances if instance['Hostname'] == instanceName]\n\n ec2 = connect_to_ec2()\n for instance in instances:\n if action == 'start':\n print(_green(\"starting instance: %s\" % instance['Hostname']))\n try:\n opsworks.start_instance(instance_id=instance['InstanceId'])\n except ValidationException:\n pass\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n spinner = Spinner(_yellow(\"[%s]Waiting for reservation \" % myinstance['Hostname']), hide_cursor=False)\n while myinstance['Status'] == 'requested':\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks instance status: %s\" % (myinstance['Hostname'], myinstance['Status'])))\n ec2Instance = ec2.get_only_instances(instance_ids=[myinstance['Ec2InstanceId']])[0]\n spinner = Spinner(_yellow(\"[%s]Booting ec2 instance \" % myinstance['Hostname']), hide_cursor=False)\n while ec2Instance.state != u'running':\n spinner.next()\n time.sleep(1)\n ec2Instance.update()\n print(_green(\"\\n[%s]ec2 Instance state: %s\" % (myinstance['Hostname'], ec2Instance.state)))\n spinner = Spinner(_yellow(\"[%s]Running OpsWorks setup \" % myinstance['Hostname']), hide_cursor=False)\n while myinstance['Status'] != 'online':\n if myinstance['Status'] == 'setup_failed':\n print(_red(\"\\n[%s]OpsWorks instance failed\" % myinstance['Hostname']))\n return 1\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks Instance state: %s\" % (myinstance['Hostname'], myinstance['Status'])))\n getec2instances()\n elif action == 'stop':\n if 'Ec2InstanceId' in instance.keys():\n print(_green(\"Stopping instance %s\" % instance['Hostname']))\n opsworks.stop_instance(instance_id=instance['InstanceId'])\n ec2Instance = ec2.get_only_instances(instance_ids=[instance['Ec2InstanceId']])[0]\n spinner = Spinner(_yellow(\"[%s]Waiting for ec2 instance to stop \" % instance['Hostname']), hide_cursor=False)\n while ec2Instance.state != u'stopped':\n spinner.next()\n time.sleep(1)\n ec2Instance.update()\n print(_green(\"\\n[%s]ec2 Instance state: %s\" % (instance['Hostname'], ec2Instance.state)))\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n spinner = Spinner(_yellow(\"[%s]Stopping OpsWorks Instance \" % instance['Hostname']), hide_cursor=False)\n while myinstance['Status'] != 'stopped':\n spinner.next()\n time.sleep(1)\n myinstance = opsworks.describe_instances(instance_ids=[instance['InstanceId']])['Instances'][0]\n print(_green(\"\\n[%s]OpsWorks Instance state: %s\" % (instance['Hostname'], myinstance['Status'])))\n else:\n print(_green(\"%s in %s already stopped\" % (instance['Hostname'], stackName)))\n try:\n print(_green(\"removing %s from ssh config...\" % instance['PublicDns']))\n removefromsshconfig(dns=instance['PublicDns'])\n except Exception:\n pass", "def terminate_instance(id):\n for region in boto.ec2.regions():\n conn = 
region.connect()\n for reservation in conn.get_all_instances():\n for instance in reservation.instances:\n if instance.id == id:\n print \"Terminating instance: {0}\".format(id)\n instance.terminate()\n return\n print \"Unable to terminate instance: {0}\".format(id)", "def order_cancel_request(message, futures):\n order_id = message[2][0] # uses id, if no cid given\n order_cid = message[2][2]\n future_id = f\"oc_{order_id}\"\n future_id_cid = f\"oc_{order_cid}\"\n # print(\"Cancel requst started!\")\n if future_id in futures.keys():\n future = futures[future_id]\n elif future_id_cid in futures.keys():\n future = futures[future_id_cid]\n # print(\"requst future\", future)\n future.set_result({\n \"status\": message[6], # Error/Sucess\n \"id\": message[4][0],\n \"cid\": message[4][2],\n \"response\": message[4],\n \"comment\": message[7]\n })\n if future_id in futures:\n del futures[future_id]\n elif future_id_cid in futures:\n del futures[future_id_cid]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner needs to verify whether another user's instance is eligible for support.
def confirm_product_instance(DryRun=None, ProductCode=None, InstanceId=None): pass
[ "def product_exist(self, code):\r\n\r\n if code in self.products:\r\n return True\r\n else:\r\n return False", "def instance_exists(self, instance: RuntimeInstance.Params, env: RuntimeEnvironment.Params, **kwargs) -> bool:", "def requested_instance_inspection(self):\n return bool(self._unit.received[\"enable-instance-inspection\"])", "def instance_exists(self, instance):\n if instance[0:3] == \"WD_\":\n instance = get_wbs_local_id(instance[3:])\n entity_number = wb_SQL_query(self.label, \"item\")\n for ID in entity_number:\n item = WBItem(ID=ID)\n item_array = item.get_value('WD_P31')\n for instance_item in item_array:\n if instance_item == instance:\n return ID\n return None", "def has_instance(name):\n vms = list_instances()\n return name in vms", "def instance_exists(self):\n requested_start = datetime.combine(date.today(), self.send_time)\n instance_set = self.instances.filter(requested_start=requested_start)\n if instance_set.exists():\n return True\n return False", "def is_known_product_class(product_class):\n return product_class in PRODUCT_PLATFORM_MAP", "def check_if_vendor_exists(vend_code:int):\n \n with StoreDatabase() as db:\n rows = db.query(f'SELECT * FROM vendor WHERE vend_code = {vend_code}')\n if rows:\n return True\n else:\n return False", "def is_using_instance(self) -> bool:\n tasks: Dict = self.current_file.get(\"tasks\", {})\n for task in tasks.values():\n scriptargs = task.get(\"scriptarguments\", {})\n if scriptargs and scriptargs.get(\"using\", {}):\n error_message, error_code = Errors.using_instance_in_playbook()\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self.is_valid = False\n return False\n return True", "def hasPermissionToUse(mhlUser, product_code):\n\t# check if product code is valid in Sales_products table\n\n\tproducts = Products.objects.filter(code=product_code)\n\tif (products.count() != 1):\n\t# log incorrect code sent error\n\t\tlogger.warning('Error product code sent %s. 
Number of rows in sales_products with '\n\t\t\t'code is %s' % (product_code, products.count()))\n\t\treturn True # for now customer gets a free pass, until we fix data issue\n\t# process file sharing product\n\tif (product_code == 'fsh_srv'):\n\t\t# now we need to get either Provider or OfficeStaff\n\t\ttry:\n\t\t\tuser_with_practices = OfficeStaff.objects.get(user=mhlUser)\n\t\texcept ObjectDoesNotExist:\n\t\t\ttry:\n\t\t\t\tuser_with_practices = Provider.objects.get(user=mhlUser)\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\treturn False # not provider, not office staff NOT going to be subscribed \n\t\t\t# to file sharing\n\n\t\t# take user and check his practices, if find active file sharing product in one \n\t\t# of the practice locations, return true\n\t\tpractices_set = user_with_practices.practices.all()\n\t\treturn_val = Subscription.objects.filter(product=products[0], \n\t\t\tpractice_location__in=practices_set, is_active=1, \n\t\t\tstart_date__lte=datetime.now()).exists()\n\t\treturn return_val\n\telse:\n\t\t# for all the rest of products if checked for will return True until we \n\t\t# define business logic for each product subscription\n\t\treturn True", "def check_product_existence_in_db(self, product_ref):\n product_in_db = Product.objects.filter(ref=product_ref).exists()\n return product_in_db", "def is_application(self):\n\n elements = self.get(CPE.KEY_APP)\n return len(elements) > 0", "def check_instance_tag(tag_key, tag_value, app):\n if tag_key == 'app_id':\n return tag_value == app['_id']\n if tag_key == 'app':\n return tag_value == app['name']\n if tag_key == 'env':\n return tag_value == app['env']\n if tag_key == 'role':\n return tag_value == app['role']\n if tag_key == 'color':\n return tag_value == app.get('blue_green', {}).get('color')\n if tag_key.startswith('aws:'):\n return True\n instance_tags = {t['tag_name']: t['tag_value'] for t in app.get('environment_infos', {}).get('instance_tags', [])}\n return tag_value == instance_tags.get(tag_key)", "def __check_instance(hostname, pid):\n\n # Instances running on a remote host with a filesystem shared with us can\n # not usually be checked (/proc is rarely shared across computers...),\n # so we consider them \"alive\" servers.\n if hostname != socket.gethostname():\n return True\n\n try:\n proc = psutil.Process(pid)\n\n cli = os.path.join(\"codechecker_common\", \"cli.py\")\n return cli in proc.cmdline()[1] and \\\n proc.username() == getpass.getuser()\n except psutil.NoSuchProcess:\n # If the process does not exist, it cannot be valid.\n return False", "def has_a_product(obj):\n return \"products\" in obj and len(obj[\"products\"]) > 0", "def _uses_single_instance_type(self):\n return len(self._compute_resource_config[\"Instances\"]) == 1", "def _is_valid(product):\n return hasattr(product, 'name') and hasattr(product, 'desc')", "def _kc_ident_in_resource(self):\n kc_sys_ids = [\n ident for ident in self.resource.get('identifier', []) if\n ident['system'] == self.user.kc_identifier_system]\n if not kc_sys_ids:\n return False\n if len(kc_sys_ids) != 1:\n raise ValueError(\n \"unexpected multiple KC identifiers on Patient \"\n f\"{self.resource['id']}\")\n result = kc_sys_ids[0]['value'] == self.user.kc_identifier_value\n # Cache internals in self.user if this happens to be the owners\n if result:\n self.user.extract_internals()\n return result", "def verify_identity(self):\n global static_model\n if self.identity == None:\n return False\n if isinstance(self.identity, str):\n if len(self.identity) > 0:\n for pen in 
static_model.available_pens:\n if self.identity.upper() == pen.identity.upper():\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to. Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter. For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.
def copy_snapshot(DryRun=None, SourceRegion=None, SourceSnapshotId=None, Description=None, DestinationRegion=None, PresignedUrl=None, Encrypted=None, KmsKeyId=None): pass
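A minimal usage sketch, assuming a boto3 EC2 client; the region names, snapshot ID, and KMS key ARN below are placeholders rather than values from the reference above. Note that the request is sent to the destination region's endpoint.

import boto3

# copy_snapshot is called against the destination region (placeholder regions/IDs)
ec2 = boto3.client('ec2', region_name='us-east-1')

response = ec2.copy_snapshot(
    SourceRegion='us-west-2',                   # region that holds the source snapshot
    SourceSnapshotId='snap-0123456789abcdef0',  # placeholder snapshot ID
    Description='Cross-region copy of data volume snapshot',
    Encrypted=True,                             # produce an encrypted copy
    KmsKeyId='arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID',  # placeholder non-default CMK
)
print(response['SnapshotId'])  # ID of the new snapshot in the destination region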
[ "def make_snapshot(ec2,vol,retention,description):\n\n snap = ec2.create_snapshot(VolumeId=vol,Description=description)\n return snap", "def add_snapshot(self, *params):\n if not params or len(params)==0:\n raise TypeError(\"add_snapshot takes at lease 1 argument 0 given.\")\n elif params and len(params)>2:\n raise TypeError(\"add_snapshot takes at lease 1 argument %u given.\" %(len(params)))\n destdisk=params[0]\n sourcedisk=params[1]\n properties=destdisk.getProperties()\n properties.setProperty(\"vdisk\", sourcedisk.getAttribute(\"name\"))\n return self._add(\"snapshot\", destdisk.getAttribute(\"name\"), properties)", "def test_copy_vm_disks_with_snapshot(self, storage):\n testflow.step(\"Taking snapshot of VM %s\", self.vm_name)\n assert ll_vms.addSnapshot(\n True, self.vm_name, self.snapshot_description\n ), (\"Failed to create snapshot for vm %s\" % self.vm_name)\n ll_jobs.wait_for_jobs([config.JOB_CREATE_SNAPSHOT])\n\n self.basic_copy(self.vm_name)\n helpers.attach_new_disks_to_vm(self.test_vm_name, self.new_disks)\n assert helpers.check_file_existence(\n self.test_vm_name, storage_type=storage\n )", "def copy_db_snapshot(rds_client, source_db_snapshot_identifier,\n destination_db_snapshot_identifier, snapshot_tags, kms_key=None):\n copy_db_snapshot_parameters = {\n 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,\n 'TargetDBSnapshotIdentifier': destination_db_snapshot_identifier,\n 'Tags': snapshot_tags\n }\n if kms_key:\n copy_db_snapshot_parameters['KmsKeyId'] = kms_key\n try:\n print(\"Copying DB snapshot with the following parameters: \")\n print(json.dumps(copy_db_snapshot_parameters))\n\n destination_snapshot_response = check_snapshot_exists(\n rds_client,\n destination_db_snapshot_identifier)\n if not destination_snapshot_response:\n copy_db_snapshot_response = rds_client.copy_db_snapshot(\n **copy_db_snapshot_parameters)\n print(f\"Successfully copied DB snapshot: {destination_db_snapshot_identifier}\")\n return copy_db_snapshot_response['DBSnapshot']\n\n print(f'{destination_db_snapshot_identifier} already exists. Using existing snapshot.')\n return destination_snapshot_response['DBSnapshots'][0]\n\n except ClientError as err:\n raise MaskopyResourceException(\"Could not copy snapshot: %s\" % err)", "def copy_db_cluster_snapshot(self, SourceDBClusterSnapshotIdentifier: str, TargetDBClusterSnapshotIdentifier: str, KmsKeyId: str = None, PreSignedUrl: str = None, CopyTags: bool = None, Tags: List = None) -> Dict:\n pass", "def Process(self, container: containers.AWSSnapshot\n ) -> None: # pytype: disable=signature-mismatch\n\n # Aws accounts have thread safety issues. 
Create a unique one per thread\n aws_account = account.AWSAccount(self._PickAvailabilityZone(self.subnet))\n try:\n result = forensics.CopyEBSSnapshotToS3Process(aws_account,\n self.bucket,\n container.id,\n self.iam_details['profile']['arn'],\n subnet_id=self.subnet)\n\n self.StoreContainer(containers.AWSS3Object(result['image']))\n for h in result['hashes']:\n self.StoreContainer(containers.AWSS3Object(h))\n except ResourceCreationError as exception:\n self.ModuleError(\n f'Exception during copy operation: {exception!s}', critical=True)", "def create_snapshot(self, snapshot, share_server):", "def create_snapshot(self, name, snapshot_id=None):\n if snapshot_id is None:\n wrap_popen('collie', 'vdi', 'snapshot', name)\n else:\n wrap_popen('collie', 'vdi', 'snapshot', '-s', snapshot_id, name)", "def test_create_snapshot(self):\n mox = self.mox\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n\n mox.StubOutWithMock(self._driver, '_create_file')\n self._driver._create_file(self.TEST_SNAPPATH, vol_size)\n mox.StubOutWithMock(self._driver, '_copy_file')\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_SNAPPATH)\n\n mox.ReplayAll()\n\n self._driver.create_snapshot(self.TEST_SNAPSHOT)", "def create_snapshot(self, snapshot_name, description='',\n dump_memory=False, quiesce=False):\n return self.machine.cloud.ctl.compute.create_machine_snapshot(\n self.machine, snapshot_name, description, dump_memory, quiesce)", "def test_csi_volumesnapshot_basic(set_random_backupstore, # NOQA\n volumesnapshotclass, # NOQA\n volumesnapshot, # NOQA\n client, # NOQA\n core_api, # NOQA\n volume_name, # NOQA\n csi_pv, # NOQA\n pvc, # NOQA\n pod_make, # NOQA\n volsnapshotclass_delete_policy, # NOQA\n backup_is_deleted,\n csi_snapshot_type=None): # NOQA\n\n csisnapclass = \\\n volumesnapshotclass(name=\"snapshotclass\",\n deletepolicy=volsnapshotclass_delete_policy,\n snapshot_type=csi_snapshot_type)\n\n pod_name, pv_name, pvc_name, md5sum = \\\n prepare_pod_with_data_in_mb(client, core_api,\n csi_pv, pvc, pod_make,\n volume_name,\n data_path=\"/data/test\")\n\n # Create volumeSnapshot test\n csivolsnap = volumesnapshot(volume_name + \"-volumesnapshot\",\n \"default\",\n csisnapclass[\"metadata\"][\"name\"],\n \"persistentVolumeClaimName\",\n pvc_name)\n\n volume = client.by_id_volume(volume_name)\n\n for i in range(RETRY_COUNTS):\n snapshots = volume.snapshotList()\n if len(snapshots) == 2:\n break\n time.sleep(RETRY_INTERVAL)\n\n lh_snapshot = None\n snapshots = volume.snapshotList()\n for snapshot in snapshots:\n if snapshot[\"name\"] == \"snapshot-\" + csivolsnap[\"metadata\"][\"uid\"]:\n lh_snapshot = snapshot\n assert lh_snapshot is not None\n\n wait_for_volumesnapshot_ready(csivolsnap[\"metadata\"][\"name\"],\n csivolsnap[\"metadata\"][\"namespace\"])\n\n bv1, b = find_backup(client, volume_name, lh_snapshot[\"name\"])\n\n assert b[\"snapshotName\"] == lh_snapshot[\"name\"]\n\n restore_pvc_name = pvc[\"metadata\"][\"name\"] + \"-restore\"\n restore_pvc_size = pvc[\"spec\"][\"resources\"][\"requests\"][\"storage\"]\n\n restore_csi_volume_snapshot(core_api,\n client,\n csivolsnap,\n restore_pvc_name,\n restore_pvc_size)\n\n restore_pod = pod_make()\n restore_pod_name = restore_pod[\"metadata\"][\"name\"]\n restore_pod['spec']['volumes'] = [create_pvc_spec(restore_pvc_name)]\n\n create_and_wait_pod(core_api, restore_pod)\n restore_md5sum = \\\n get_pod_data_md5sum(core_api, restore_pod_name, path=\"/data/test\")\n assert restore_md5sum == md5sum\n\n # Delete volumeSnapshot test\n 
delete_volumesnapshot(csivolsnap[\"metadata\"][\"name\"], \"default\")\n\n if backup_is_deleted is False:\n find_backup(client, volume_name, b[\"snapshotName\"])\n else:\n wait_for_backup_delete(client, volume_name, b[\"name\"])", "def create_volume_snapshot(self, volume, name):\r\n snapshot_data = {}\r\n snapshot_data['name'] = name\r\n request = '/zones/%s/disks/%s/createSnapshot' % (\r\n volume.extra['zone'].name, volume.name)\r\n self.connection.async_request(request, method='POST',\r\n data=snapshot_data)\r\n\r\n return self.ex_get_snapshot(name)", "def snapshot(ctx, name, snap_id):\n body = {\"name\": name, \"id\": snap_id}\n resp = consume_task(ctx.obj.vlab_api,\n endpoint='/api/1/inf/snapshot',\n message='Destorying snapshot {} on VM {}'.format(snap_id, name),\n body=body,\n method='DELETE',\n timeout=900,\n pause=5)\n typewriter('OK!')", "def ex_copy_to(self, image, volume):\r\n url = REST_BASE + '/storage/%s' % (volume.id)\r\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\r\n data = {'imageId': image.id}\r\n resp = self.connection.request(action=url,\r\n method='PUT',\r\n headers=headers,\r\n data=data)\r\n return int(resp.status) == 200", "def backup_volume(ec2,instances):\n\n for instance in instances:\n retention = get_retention(instance)\n if not is_master(instance['PrivateIpAddress']):\n #make snapshot only on primary\n continue\n\n for dev in instance['BlockDeviceMappings']:\n if dev.get('Ebs', None) is None:\n # skip non-EBS volumes\n continue\n\n retention = get_retention(instance)\n now = datetime.today()\n delete_date_days = (now + timedelta(days=retention['days'])).strftime('%Y-%m-%d')\n delete_date_weeks = (now + timedelta(weeks=retention['weeks'])).strftime('%Y-%m-%d')\n delete_date_months = (now + relativedelta(months=retention['months'])).strftime('%Y-%m-%d')\n desc_date = now.strftime('%Y-%m-%d.%H:%M:%S')\n\n\n # all mongo disks are sdf\n if dev['DeviceName'] == '/dev/sdf':\n vol_id = dev['Ebs']['VolumeId']\n\n # Make sure that only one snapshot is taken, whether daily, weekly or monthly.\n if now.strftime('%d') == '01':\n print \"Creating snapshot of %s volume that will be retain for %d months\" % (vol_id, retention['months'])\n snap = make_snapshot(ec2,vol_id, retention['months'], \"MongoMonthlyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_months)\n elif now.strftime('%a') == 'Sun':\n print \"Creating snapshot of %s volume that will be retain for %d weeks\" % (vol_id, retention['weeks'])\n snap = make_snapshot(ec2,vol_id, retention['weeks'], \"MongoWeeklyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_weeks)\n else:\n print \"Creating snapshot of %s volume that will be retain for %d days\" % (vol_id, retention['days'])\n snap = make_snapshot(ec2,vol_id, retention['days'], \"MongoDailyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_days)\n\n return True", "def test_copy_volume_to_image(self):\n self.mox.StubOutWithMock(image_utils, 'upload_volume')\n\n image_utils.upload_volume(context,\n self.TEST_IMAGE_SERVICE,\n self.TEST_IMAGE_META,\n self.TEST_VOLPATH)\n\n self.mox.ReplayAll()\n\n self._driver.copy_volume_to_image(context,\n self.TEST_VOLUME,\n self.TEST_IMAGE_SERVICE,\n self.TEST_IMAGE_META)", "def snapshot(self):\n ts = datetime.datetime.now() # grab the current timestamp\n filename = \"{}.png\".format(ts.strftime(\n \"%Y-%m-%d_%H-%M-%S\")) # construct filename\n\n ok, frame = self.cap.read()\n image = cv2.cvtColor(frame, 
cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n\n # save image as jpeg file\n image.save('exports/snapshots/' + filename, \"PNG\")\n print(\"[INFO] saved {}\".format(filename))", "def _temp_snapshot(rds, snapshot_name: str, src_instance_name: str):\n log.info(\n \"Creating snapshot %r from instance %r\",\n snapshot_name, src_instance_name)\n\n try:\n rds.create_db_snapshot(\n DBSnapshotIdentifier=snapshot_name,\n DBInstanceIdentifier=src_instance_name)\n except Exception:\n log.exception(\"Unable to create snapshot %r\", snapshot_name)\n exit(SNAPSHOT_FAILED_ERR)\n\n try:\n waiter = rds.get_waiter('db_snapshot_completed')\n waiter.wait(DBSnapshotIdentifier=snapshot_name)\n except Exception:\n log.exception(\n \"Snapshot creation of %r didn't finish. \"\n \"Snapshot may or may not exist; manual intervention is needed.\",\n snapshot_name)\n exit(SNAPSHOT_FAILED_ERR)\n\n try:\n yield\n finally:\n rds.delete_db_snapshot(DBSnapshotIdentifier=snapshot_name)\n log.debug(\"Deleted snapshot %r\", snapshot_name)", "def test_edit_volume_snapshot(self, snapshot, volumes_steps_ui):\n new_snapshot_name = snapshot.name + '(updated)'\n with snapshot.put(name=new_snapshot_name):\n volumes_steps_ui.update_snapshot(snapshot.name, new_snapshot_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132. Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
def create_dhcp_options(DryRun=None, DhcpConfigurations=None): pass
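A short sketch of how this operation is typically paired with associate_dhcp_options, assuming a boto3 EC2 client; the domain name and VPC ID are placeholders.

import boto3

ec2 = boto3.client('ec2')

# Create the option set; keys follow the RFC 2132 option names
options = ec2.create_dhcp_options(
    DhcpConfigurations=[
        {'Key': 'domain-name-servers', 'Values': ['AmazonProvidedDNS']},
        {'Key': 'domain-name', 'Values': ['example.internal']},  # placeholder domain
    ]
)
dhcp_options_id = options['DhcpOptions']['DhcpOptionsId']

# The set has no effect until it is associated with a VPC (placeholder VPC ID)
ec2.associate_dhcp_options(DhcpOptionsId=dhcp_options_id, VpcId='vpc-0123456789abcdef0')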
[ "def create_dhcp_options(\n domain_name=None,\n domain_name_servers=None,\n ntp_servers=None,\n netbios_name_servers=None,\n netbios_node_type=None,\n dhcp_options_name=None,\n tags=None,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"dhcp_options\",\n name=dhcp_options_name,\n domain_name=domain_name,\n domain_name_servers=domain_name_servers,\n ntp_servers=ntp_servers,\n netbios_name_servers=netbios_name_servers,\n netbios_node_type=netbios_node_type,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.associate_dhcp_options(r[\"id\"], vpc_id)\n log.info(\"Associated options %s to VPC %s\", r[\"id\"], vpc_name or vpc_id)\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def associate_dhcp_options(DryRun=None, DhcpOptionsId=None, VpcId=None):\n pass", "def DhcpOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpoptions_3rvy29su3rhy2svzghjce9wdglvbnm import DhcpOptions\n\t\treturn DhcpOptions(self)", "def describe_dhcp_options(DryRun=None, DhcpOptionsIds=None, Filters=None):\n pass", "def configure_dhcp():\n dhcp_config = {}\n dhcp_config_content = \"\"\"\nddns-update-style none;\ndefault-lease-time 600;\nmax-lease-time 7200;\noption domain-name-servers 84.200.69.80, 84.200.70.40;\noption domain-name \"pikube.local\";\nauthorative;\nlog-facility local7;\n\nsubnet 10.12.29.0 netmask 255.255.255.0 {\n range 10.12.29.10 10.12.29.100;\n}\n\"\"\"\n\n dhcp_config['path'] = r'/etc/dhcp/dhcpd.conf'\n dhcp_config['encoding'] = \"b64\"\n dhcp_config['content'] = base64.b64encode(\n bytes(dhcp_config_content, \"utf-8\"))\n return dhcp_config", "def associate_dhcp_options_to_vpc(\n dhcp_options_id,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if conn.associate_dhcp_options(dhcp_options_id, vpc_id):\n log.info(\n \"DHCP options with id %s were associated with VPC %s\",\n dhcp_options_id,\n vpc_id,\n )\n return {\"associated\": True}\n else:\n log.warning(\n \"DHCP options with id %s were not associated with VPC %s\",\n dhcp_options_id,\n vpc_id,\n )\n return {\n \"associated\": False,\n \"error\": {\"message\": \"DHCP options could not be associated.\"},\n }\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def _PopulateFromDhcpOptions(self, host, client):\n for option in client.OptionList.values():\n # DHCP Options tags\n if option.Tag == dhcp.HN:\n host['HostName'] = option.Value\n elif option.Tag == dhcp.CL:\n host['ClientID'] = option.Value\n elif option.Tag == dhcp.UC:\n host['UserClassID'] = option.Value\n elif option.Tag == dhcp.VC:\n host['VendorClassID'] = option.Value", "def delete_dhcp_options(DryRun=None, DhcpOptionsId=None):\n 
pass", "def create(\n hostname,\n refresh_key,\n authorization_host,\n org_id,\n sddc_id,\n type,\n dhcp_profile_id,\n verify_ssl=True,\n cert=None,\n server_addresses=None,\n tags=vmc_constants.VMC_NONE,\n lease_time=None,\n):\n\n log.info(\"Creating DHCP profile %s for SDDC %s\", dhcp_profile_id, sddc_id)\n\n profile_type = vmc_constants.DHCP_CONFIGS.format(type)\n api_url_base = vmc_request.set_base_url(hostname)\n api_url = (\n \"{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/\"\n \"policy/api/v1/infra/{profile_type}/{profile_id}\"\n )\n api_url = api_url.format(\n base_url=api_url_base,\n org_id=org_id,\n sddc_id=sddc_id,\n profile_type=profile_type,\n profile_id=dhcp_profile_id,\n )\n\n allowed_dict = {\"server_addresses\": server_addresses, \"tags\": tags, \"lease_time\": lease_time}\n\n req_data = vmc_request._filter_kwargs(\n allowed_kwargs=allowed_dict.keys(), allow_none=[\"tags\"], **allowed_dict\n )\n\n payload = _create_payload_for_dhcp_profile(dhcp_profile_id, type, req_data)\n return vmc_request.call_api(\n method=vmc_constants.PUT_REQUEST_METHOD,\n url=api_url,\n refresh_key=refresh_key,\n authorization_host=authorization_host,\n description=\"vmc_dhcp_profiles.create\",\n data=payload,\n verify_ssl=verify_ssl,\n cert=cert,\n )", "async def begin_create_dhcp(\n self,\n resource_group_name: str,\n private_cloud_name: str,\n dhcp_id: str,\n workload_network_dhcp: _models.WorkloadNetworkDhcp,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> AsyncLROPoller[_models.WorkloadNetworkDhcp]:", "def dhcp_options_exists(\n dhcp_options_id=None,\n name=None,\n dhcp_options_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.dhcp_options_exists: name parameter is deprecated \"\n \"use dhcp_options_name instead.\"\n )\n dhcp_options_name = name\n\n return resource_exists(\n \"dhcp_options\",\n name=dhcp_options_name,\n resource_id=dhcp_options_id,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def DhcpHostsOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcphostsoptions_u3rhy2svzghjcehvc3rzt3b0aw9ucw import DhcpHostsOptions\n\t\treturn DhcpHostsOptions(self)", "def modify_vpc_attribute(VpcId=None, EnableDnsSupport=None, EnableDnsHostnames=None):\n pass", "async def begin_create_dhcp(\n self,\n resource_group_name: str,\n private_cloud_name: str,\n dhcp_id: str,\n workload_network_dhcp: IO,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> AsyncLROPoller[_models.WorkloadNetworkDhcp]:", "def CreateAddressSet(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_dhcp_server(self, config):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM, 2)\n if not self.s.is_element_present(self.info['loc_cfg_system_dhcps_fieldset']):\n raise Exception('The DHCP Server configurate field is invisible')\n\n else:\n enable_checkbox = self.info['loc_cfg_system_dhcps_enable_checkbox']\n enable_server = False if not config.has_key('enable') else config['enable']\n\n if not enable_server:\n # Uncheck the enable DHCPs server checkbox\n if self.s.is_checked(enable_checkbox):\n self.s.click_and_wait(enable_checkbox)\n\n # Click apply button\n self.s.click_and_wait(self.info['loc_cfg_system_dhcps_apply_button'], 3)\n\n return\n\n # Check the enable DHCPs server 
checkbox\n if not self.s.is_checked(enable_checkbox):\n self.s.click_and_wait(enable_checkbox)\n\n # Set starting ip value\n if config.has_key('start_ip'):\n self.s.type_text(self.info['loc_cfg_system_dhcps_starting_ip_textbox'], config['start_ip'])\n\n # Set ip range value\n if config.has_key('number_ip'):\n self.s.type_text(self.info['loc_cfg_system_dhcps_number_ip_textbox'], str(config['number_ip']))\n\n # Set lease time value\n if config.has_key('leasetime'):\n self.s.select_option(self.info['loc_cfg_system_dhcps_leasetime_options'], config['leasetime'])\n\n # Click 'Cancel' on the confirmation dialog to ZD do nothing if not ZD will auto correct the setting value.\n self.s.choose_cancel_on_next_confirmation()\n # Click apply button\n self.s.click_and_wait(self.info['loc_cfg_system_dhcps_apply_button'], 3)\n msg = ''\n # The ZD will be genarate an alert or an confirm dialog if there are any invalid or wrong setting value is setted.\n # Get any exist alert message\n if self.s.is_alert_present(5):\n msg = self.s.get_alert()\n\n # Get any confirmation message\n elif self.s.is_confirmation_present(5):\n msg = self.s.get_confirmation()\n\n if msg:\n raise Exception(msg)", "def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def _create_dhcp_entries_for_many_instances(instances, ip_or_network):\n for instance in instances:\n # when IP is assigned to many instances, mac is not provided through\n # form and first non-mgmt mac should be used\n ethernet = _get_non_mgmt_ethernets(instance).values_list(\n 'id', flat=True\n ).first() # TODO: is first the best choice here?\n yield _create_dhcp_entries_for_single_instance(\n instance, ip_or_network, ethernet\n )", "def init_set(new_set, **set_options):\n _ipset('create', new_set, 'hash:ip',\n # Below expands to a list of k, v, one after the other\n *[str(i) for item in set_options.items() for i in item])\n flush_set(new_set)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[IPv6 only] Creates an egress-only Internet gateway for your VPC. An egress-only Internet gateway is used to enable outbound communication over IPv6 from instances in your VPC to the Internet, and prevents hosts outside of your VPC from initiating an IPv6 connection with your instance.
def create_egress_only_internet_gateway(DryRun=None, VpcId=None, ClientToken=None): pass
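A hedged sketch assuming a boto3 EC2 client; the VPC and route table IDs are placeholders. The second call illustrates the usual follow-up of routing all IPv6 traffic (::/0) through the new gateway.

import boto3

ec2 = boto3.client('ec2')

# The gateway applies only to IPv6 traffic (placeholder VPC ID)
response = ec2.create_egress_only_internet_gateway(VpcId='vpc-0123456789abcdef0')
gateway_id = response['EgressOnlyInternetGateway']['EgressOnlyInternetGatewayId']

# Send outbound IPv6 traffic through the gateway (placeholder route table ID)
ec2.create_route(
    RouteTableId='rtb-0123456789abcdef0',
    DestinationIpv6CidrBlock='::/0',
    EgressOnlyInternetGatewayId=gateway_id,
)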
[ "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def create_ipv6(self):\n int1 = Interface('eth1/1')\n int2 = Interface('eth1/2')\n pc1 = PortChannel('211')\n ipv6 = IPV6()\n ipv6.add_interface_address(int1, '2004:0DB8::1/10', link_local='FE83::1')\n ipv6.add_interface_address(int2, '2104:0DB8::1/11')\n ipv6.add_interface_address(int2, '2002:0DB8::1/12')\n ipv6.add_interface_address(pc1, '2022:0DB8::1/13')\n return ipv6", "def create_ipv6_route(self):\n # Create Interfaces\n int1 = Interface('eth1/1')\n int2 = Interface('eth1/2')\n # Create a L3 port channel\n pc1 = PortChannel('211', layer='Layer3')\n route = IPV6Route('2000:0::0/12')\n route.add_next_hop('234E:44::1', int1, vrf='default', track_id='0',\n tag='1')\n route.add_next_hop('234E:44::2', int2)\n route.add_next_hop('234E:44::4', pc1, vrf='default', track_id='1',\n tag='2')\n return route", "def BgpIPv6EvpnVpws(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvpws import BgpIPv6EvpnVpws\n return BgpIPv6EvpnVpws(self)", "def ipv6_gateway(self):\n try:\n return ipaddress.ip_address(self._ipv6['gateway'])\n except (KeyError, ValueError, TypeError):\n return None", "def BgpIPv6EvpnEvi(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnevi import BgpIPv6EvpnEvi\n return BgpIPv6EvpnEvi(self)", "def enable_ipv6(self):\n return self.act_on_droplets(type='enable_ipv6')", "def create_nat_gateway(SubnetId=None, AllocationId=None, ClientToken=None):\n pass", "def keepalived_ipv6_supported():\n\n br_name, ha_port, gw_port = common_utils.get_related_rand_device_names(\n ['ka-test-', ha_router.HA_DEV_PREFIX, namespaces.INTERNAL_DEV_PREFIX])\n gw_vip = 'fdf8:f53b:82e4::10/64'\n expected_default_gw = 'fe80:f816::1'\n\n with ovs_lib.OVSBridge(br_name,\n datapath_type=cfg.CONF.OVS.datapath_type) as br:\n with KeepalivedIPv6Test(ha_port, gw_port, gw_vip,\n expected_default_gw) as ka:\n br.add_port(ha_port, ('type', 'internal'))\n br.add_port(gw_port, ('type', 'internal'))\n\n ha_dev = ip_lib.IPDevice(ha_port)\n gw_dev = ip_lib.IPDevice(gw_port)\n\n ha_dev.link.set_netns(ka.nsname)\n gw_dev.link.set_netns(ka.nsname)\n\n ha_dev.link.set_up()\n gw_dev.link.set_up()\n ha_dev.addr.add('169.254.192.8/18')\n\n ka.configure()\n\n ka.start_keepalived_process()\n\n ka.verify_ipv6_address_assignment(gw_dev)\n\n default_gw = gw_dev.route.get_gateway(ip_version=6)\n if default_gw:\n default_gw = default_gw['via']\n\n return expected_default_gw == default_gw", "def describe_egress_only_internet_gateways(DryRun=None, EgressOnlyInternetGatewayIds=None, MaxResults=None, NextToken=None):\n pass", "def test_create_network_invalid_gateway_ip(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1::/48', ip_version=6,\n gateway_ip='192.168.0.1')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with 
self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def ex_create_network_interface(self, subnet, name=None,\r\n description=None,\r\n private_ip_address=None):\r\n raise NotImplementedError(self._not_implemented_msg)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ipv6_egress_rule_name: Optional[pulumi.Input[str]] = None,\n ipv6_gateway_id: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None) -> 'Ipv6EgressRule':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _Ipv6EgressRuleState.__new__(_Ipv6EgressRuleState)\n\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"instance_type\"] = instance_type\n __props__.__dict__[\"ipv6_egress_rule_name\"] = ipv6_egress_rule_name\n __props__.__dict__[\"ipv6_gateway_id\"] = ipv6_gateway_id\n __props__.__dict__[\"status\"] = status\n return Ipv6EgressRule(resource_name, opts=opts, __props__=__props__)", "def create_igw(conn, name, region, vpc_id):\n\n try:\n igw = conn.create_internet_gateway()\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n else:\n conn.attach_internet_gateway(igw.id, vpc_id)\n t = Tag(name, 'igw', region); t.tag_resource(conn, igw.id)\n\n return igw.id", "def create(self, networkipv6s):\n\n data = {'networks': networkipv6s}\n return super(ApiNetworkIPv6, self).post('api/v3/networkv6/', data)", "def BgpIpv6AdL2Vpn(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6adl2vpn import BgpIpv6AdL2Vpn\n return BgpIpv6AdL2Vpn(self)", "def BgpIPv6EvpnVXLAN(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvxlan import BgpIPv6EvpnVXLAN\n return BgpIPv6EvpnVXLAN(self)", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def vpnglobal_intranetip6_bindings(self) :\n\t\ttry :\n\t\t\treturn self._vpnglobal_intranetip6_binding\n\t\texcept Exception as e:\n\t\t\traise e" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs. In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.
def create_flow_logs(ResourceIds=None, ResourceType=None, TrafficType=None, LogGroupName=None, DeliverLogsPermissionArn=None, ClientToken=None): pass
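A minimal sketch assuming a boto3 EC2 client and an existing IAM role that is allowed to publish to CloudWatch Logs; the VPC ID, log group name, and role ARN are placeholders.

import boto3

ec2 = boto3.client('ec2')

response = ec2.create_flow_logs(
    ResourceIds=['vpc-0123456789abcdef0'],  # placeholder VPC ID
    ResourceType='VPC',                     # or 'Subnet' / 'NetworkInterface'
    TrafficType='ALL',                      # 'ACCEPT', 'REJECT', or 'ALL'
    LogGroupName='my-vpc-flow-logs',        # placeholder log group
    DeliverLogsPermissionArn='arn:aws:iam::111122223333:role/flowlogsRole',  # placeholder role
)
print(response['FlowLogIds'])               # IDs of the created flow logs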
[ "def create_flows(vpc_id, keyid, secret, region):\n\n session = Session(aws_access_key_id = keyid, aws_secret_access_key = secret, region_name = region)\n\n iam = session.client('iam')\n logs = session.client('logs')\n ec2 = session.client('ec2')\n\n # Check for an existing Role with standard name\n try:\n role = iam.get_role(RoleName='flowlogsRole')\n except:\n role = 'None'\n\n # Create VPC Flows Logs IAM Role\n if role != 'None':\n role_arn = role['Role']['Arn']\n error = 'None'\n else:\n try:\n role = iam.create_role(\n Path = '/',\n RoleName = 'flowlogsRole',\n AssumeRolePolicyDocument = json.dumps(Template.RolePolicy))\n except Exception as e:\n error = e.message; print(error)\n flow_id = 'null'\n else:\n error = 'None'\n\n # Create VPC Flow Logs policy\n policy = iam.create_policy(\n Path = '/',\n PolicyName = 'flowlogsPolicy',\n Description = 'Grants access to CloudWatch Logs.',\n PolicyDocument = json.dumps(Template.LogsPolicy))\n\n role_name = role['Role']['RoleName']\n role_arn = role['Role']['Arn']\n policy_arn = policy['Policy']['Arn']\n\n # Attach policy to the IAM Role\n attach = iam.attach_role_policy(\n RoleName = role_name,\n PolicyArn = policy_arn)\n\n if error == 'None':\n logs_name = 'flowlogsGroup' + '-' + vpc_id\n\n # Create CloudWatch Logs group\n group = logs.create_log_group(logGroupName = logs_name)\n retention = logs.put_retention_policy(logGroupName=logs_name, retentionInDays=14)\n\n # Enable VPC Flow Logs\n flow_id = ec2.create_flow_logs(\n ResourceIds = [vpc_id],\n ResourceType = 'VPC',\n TrafficType = 'ALL',\n LogGroupName = logs_name,\n DeliverLogsPermissionArn = role_arn)\n\n return flow_id", "def CreateFlowLog(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateFlowLog\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateFlowLogResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_log(self, **kwargs):\n uri = '/log/logs/'\n post_data = {'log': kwargs}\n return self.create_resource(uri, post_data)", "async def publish_logs(self, log_batch: dict) -> None:\n req = self._create_log_request(log_batch)\n await self._stub.GcsPublish(req)", "def DescribeFlowLogs(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeFlowLogs\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeFlowLogsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_group(ctx, env=None):\n for app_env in self.envs_from_string(env):\n log_group_name = self.get_log_group_name(app_env)\n self.create_log_group(log_group_name)\n ctx.info('Log group \"{}\" successfully created.'.format(log_group_name))", "def EnableFlowLogs(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"EnableFlowLogs\", params, headers=headers)\n response = json.loads(body)\n model = models.EnableFlowLogsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def 
describe_flow_logs(FlowLogIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass", "async def _send_log_entry(log_entry, context):\n entry_type = _get_entry_type(log_entry)\n\n context = {\n \"function_name\": context.function_name,\n \"invoked_function_arn\": context.invoked_function_arn,\n \"log_group_name\": context.log_group_name,\n \"log_stream_name\": context.log_stream_name,\n }\n\n session_timeout = _calculate_session_timeout()\n\n async with aiohttp.ClientSession(\n timeout=aiohttp.ClientTimeout(total=session_timeout), trust_env=True\n ) as session:\n # Both Infrastructure and Logging require a \"LICENSE_KEY\" environment variable.\n # In order to send data to the Infrastructure Pipeline, the customer doesn't need\n # to do anything. To disable it, they'll set \"INFRA_ENABLED\" to \"false\".\n # To send data to the Logging Pipeline, an environment variable called \"LOGGING_ENABLED\"\n # is required and needs to be set to \"true\". To disable it, they don't need to do anything,\n # it is disabled by default\n # Instruction for how to find these keys are in the README.md\n requests = []\n if _infra_enabled():\n if entry_type == EntryType.LAMBDA:\n # If this is one of our lambda entries, we should only send the log lines we\n # actually care about\n data = {\n \"context\": context,\n \"entry\": json.dumps(_filter_log_lines(log_entry)),\n }\n else:\n # VPC logs are infra requests that aren't Lambda invocations\n data = {\"context\": context, \"entry\": json.dumps(log_entry)}\n for payload in _generate_payloads(data, _split_infra_payload):\n requests.append(\n _send_payload(\n _get_infra_request_creator(entry_type, payload), session, True\n )\n )\n\n if _logging_enabled():\n data = {\"context\": context, \"entry\": json.dumps(log_entry)}\n for payload in _generate_payloads(\n _package_log_payload(data), _split_log_payload\n ):\n requests.append(\n _send_payload(_get_logging_request_creator(payload), session)\n )\n\n logger.debug(\"Sending data to New Relic.....\")\n ini = time.perf_counter()\n result = await asyncio.gather(*requests)\n elapsed_millis = (time.perf_counter() - ini) * 1000\n logger.debug(f\"Time elapsed to send to New Relic: {elapsed_millis:0.2f}ms\")\n return result", "def create_log(self):\n pass", "def Process(self) -> None:\n logs_containers = self.GetContainers(containers.GCPLogs)\n for logs_container in logs_containers:\n self._ProcessLogContainer(logs_container)", "def get_log_body(group_name: str, start_time: int, end_time: int, log_stream, regex=None):\n client = get_logs_client()\n stream_name = log_stream['logStreamName']\n\n # ログを取得\n logs = client.get_log_events(\n logGroupName=group_name,\n logStreamName=stream_name,\n startTime=start_time,\n endTime=end_time,\n startFromHead=True\n )\n\n body = logs['events']\n\n result_str = ''\n\n for line in body:\n message = line['message']\n # if match, reformat message with timestamp and add to result str.\n if regex:\n index = regex.search(message)\n if index:\n message = '[{}] {}'.format(datetime.fromtimestamp(int(str(line['timestamp'])[:10])), message)\n result_str += message\n else:\n message = '[{}] {}'.format(datetime.fromtimestamp(int(str(line['timestamp'])[:10])), message)\n result_str += message\n return result_str", "def _log_in_tensorboard(self, steps: int, logs: List[LogData]):\n\n for log in logs:\n if log.type == \"scalar\":\n self._summary_writer.add_scalar(log.name, log.value, steps)\n if log.type == \"image\":\n self._summary_writer.add_image(log.name, log.value, steps)", "def 
list_log_streams(self):\n logging.info(\"Get image %s build log streams.\", self.image_id)\n command = [\"pcluster\", \"list-image-log-streams\", \"--region\", self.region, \"--image-id\", self.image_id]\n result = run_pcluster_command(command).stdout\n response = json.loads(result)\n return response", "def get_log_events(log_group, start_time=None, end_time=None):\n client = boto3.client('logs')\n kwargs = {'logGroupName': log_group, \n 'limit' : 10000,}\n \n if start_time:\n kwargs['start_time'] = start_time\n if end_time:\n kwargs['end_time'] = end_time\n\n count = 0\n while count < 2:\n resp = client.filter_log_events(**kwargs)\n #print (json.dumps(resp['events']))\n yield from resp['events']\n try:\n kwargs['nextToken'] = resp['nextToken']\n count += 1\n except:\n break", "def flow_action_log(\n action_id: str = typer.Argument(...),\n flow_id: str = typer.Option(\n ...,\n help=\"The ID for the Flow which triggered the Action.\",\n prompt=True,\n ),\n flow_scope: str = typer.Option(\n None,\n help=\"The scope this Flow uses to authenticate requests.\",\n callback=url_validator_callback,\n ),\n reverse: bool = typer.Option(\n # Defaulting to any boolean value will reverse output - so we use None\n None,\n \"--reverse\",\n help=\"Display logs starting from most recent and proceeding in reverse chronological order\",\n ),\n limit: int = typer.Option(\n None,\n help=\"Set a maximum number of events from the log to return\",\n min=0,\n max=100,\n ),\n marker: str = typer.Option(\n None,\n \"--marker\",\n \"-m\",\n help=\"A pagination token for iterating through returned data.\",\n ),\n per_page: int = typer.Option(\n None,\n \"--per-page\",\n \"-p\",\n help=\"The page size to return. Only valid when used without providing a marker.\",\n min=1,\n max=50,\n ),\n output_format: FlowDisplayFormat = typer.Option(\n FlowDisplayFormat.json,\n \"--format\",\n \"-f\",\n help=\"Output display format.\",\n case_sensitive=False,\n show_default=True,\n ),\n flows_endpoint: str = typer.Option(\n PROD_FLOWS_BASE_URL,\n hidden=True,\n callback=flows_endpoint_envvar_callback,\n ),\n verbose: bool = verbosity_option,\n):\n fc = create_flows_client(CLIENT_ID, flows_endpoint)\n resp = fc.flow_action_log(\n flow_id, flow_scope, action_id, limit, reverse, marker, per_page\n )\n\n if verbose:\n display_http_details(resp)\n\n if output_format in (FlowDisplayFormat.json, FlowDisplayFormat.yaml):\n _format_and_display_flow(resp, output_format, verbose)\n elif output_format in (FlowDisplayFormat.graphviz, FlowDisplayFormat.image):\n flow_def_resp = fc.get_flow(flow_id)\n flow_def = flow_def_resp.data[\"definition\"]\n colors = state_colors_for_log(resp.data[\"entries\"])\n graphviz_out = graphviz_format(flow_def, colors)\n\n if output_format == FlowDisplayFormat.graphviz:\n typer.echo(graphviz_out.source)\n else:\n graphviz_out.render(\"flows-output/graph\", view=True, cleanup=True)", "def list_flow_log_collectors(self,\n *,\n start: str = None,\n limit: int = None,\n resource_group_id: str = None,\n name: str = None,\n vpc_id: str = None,\n vpc_crn: str = None,\n vpc_name: str = None,\n target_id: str = None,\n target_resource_type: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_flow_log_collectors')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation,\n 'start': start,\n 'limit': limit,\n 'resource_group.id': resource_group_id,\n 
'name': name,\n 'vpc.id': vpc_id,\n 'vpc.crn': vpc_crn,\n 'vpc.name': vpc_name,\n 'target.id': target_id,\n 'target.resource_type': target_resource_type\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/flow_log_collectors'\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def create_log(self, log_form):\n return # osid.logging.Log", "def get_logs_multipart(\n w3: \"Web3\",\n start_block: BlockNumber,\n stop_block: BlockNumber,\n address: Union[Address, ChecksumAddress, List[Union[Address, ChecksumAddress]]],\n topics: List[Optional[Union[_Hash32, List[_Hash32]]]],\n max_blocks: int,\n) -> Iterable[List[LogReceipt]]:\n _block_ranges = block_ranges(start_block, stop_block, max_blocks)\n for from_block, to_block in _block_ranges:\n params = {\n \"fromBlock\": from_block,\n \"toBlock\": to_block,\n \"address\": address,\n \"topics\": topics,\n }\n yield w3.eth.get_logs(cast(FilterParams, drop_items_with_none_value(params)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an Amazon FPGA Image (AFI) from the specified design checkpoint (DCP). The create operation is asynchronous. To verify that the AFI is ready for use, check the output logs. An AFI contains the FPGA bitstream that is ready to download to an FPGA. You can securely deploy an AFI on one or more FPGA-accelerated instances. For more information, see the AWS FPGA Hardware Development Kit.
def create_fpga_image(DryRun=None, InputStorageLocation=None, LogsStorageLocation=None, Description=None, Name=None, ClientToken=None): pass
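A sketch assuming a boto3 EC2 client and a DCP tarball already uploaded to S3; the bucket names, object keys, and AFI name are placeholders.

import boto3

ec2 = boto3.client('ec2')

response = ec2.create_fpga_image(
    InputStorageLocation={'Bucket': 'my-dcp-bucket', 'Key': 'dcp/my-design.tar'},  # placeholder DCP location
    LogsStorageLocation={'Bucket': 'my-dcp-bucket', 'Key': 'logs/'},               # where build logs are written
    Name='my-afi',
    Description='AFI built from my-design checkpoint',
)

# The call is asynchronous; poll describe_fpga_images until the AFI state is 'available'
print(response['FpgaImageId'], response['FpgaImageGlobalId'])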
[ "def aws_create_afi(self) -> Optional[bool]:\n local_deploy_dir = get_deploy_dir()\n local_results_dir = f\"{local_deploy_dir}/results-build/{self.build_config.get_build_dir_name()}\"\n\n afi = None\n agfi = None\n s3bucket = self.s3_bucketname\n afiname = self.build_config.name\n\n description = self.get_metadata_string()\n\n # if we're unlucky, multiple vivado builds may launch at the same time. so we\n # append the build node IP + a random string to diff them in s3\n global_append = \"-\" + str(env.host_string) + \"-\" + ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(10)) + \".tar\"\n\n with lcd(f\"{local_results_dir}/cl_{self.build_config.get_chisel_quintuplet()}/build/checkpoints/to_aws/\"):\n files = local('ls *.tar', capture=True)\n rootLogger.debug(files)\n rootLogger.debug(files.stderr)\n tarfile = files.split()[-1]\n s3_tarfile = tarfile + global_append\n localcap = local('aws s3 cp ' + tarfile + ' s3://' + s3bucket + '/dcp/' + s3_tarfile, capture=True)\n rootLogger.debug(localcap)\n rootLogger.debug(localcap.stderr)\n agfi_afi_ids = local(f\"\"\"aws ec2 create-fpga-image --input-storage-location Bucket={s3bucket},Key={\"dcp/\" + s3_tarfile} --logs-storage-location Bucket={s3bucket},Key={\"logs/\"} --name \"{afiname}\" --description \"{description}\" \"\"\", capture=True)\n rootLogger.debug(agfi_afi_ids)\n rootLogger.debug(agfi_afi_ids.stderr)\n rootLogger.debug(\"create-fpge-image result: \" + str(agfi_afi_ids))\n ids_as_dict = json.loads(agfi_afi_ids)\n agfi = ids_as_dict[\"FpgaImageGlobalId\"]\n afi = ids_as_dict[\"FpgaImageId\"]\n rootLogger.info(\"Resulting AGFI: \" + str(agfi))\n rootLogger.info(\"Resulting AFI: \" + str(afi))\n\n rootLogger.info(\"Waiting for create-fpga-image completion.\")\n checkstate = \"pending\"\n with lcd(local_results_dir):\n while checkstate == \"pending\":\n imagestate = local(f\"aws ec2 describe-fpga-images --fpga-image-id {afi} | tee AGFI_INFO\", capture=True)\n state_as_dict = json.loads(imagestate)\n checkstate = state_as_dict[\"FpgaImages\"][0][\"State\"][\"Code\"]\n rootLogger.info(\"Current state: \" + str(checkstate))\n time.sleep(10)\n\n\n if checkstate == \"available\":\n # copy the image to all regions for the current user\n copy_afi_to_all_regions(afi)\n\n message_title = \"FireSim FPGA Build Completed\"\n agfi_entry = afiname + \":\\n\"\n agfi_entry += \" agfi: \" + agfi + \"\\n\"\n agfi_entry += \" deploy_quintuplet_override: null\\n\"\n agfi_entry += \" custom_runtime_config: null\\n\"\n message_body = \"Your AGFI has been created!\\nAdd\\n\\n\" + agfi_entry + \"\\nto your config_hwdb.yaml to use this hardware configuration.\"\n\n send_firesim_notification(message_title, message_body)\n\n rootLogger.info(message_title)\n rootLogger.info(message_body)\n\n # for convenience when generating a bunch of images. you can just\n # cat all the files in this directory after your builds finish to get\n # all the entries to copy into config_hwdb.yaml\n hwdb_entry_file_location = f\"{local_deploy_dir}/built-hwdb-entries/\"\n local(\"mkdir -p \" + hwdb_entry_file_location)\n with open(hwdb_entry_file_location + \"/\" + afiname, \"w\") as outputfile:\n outputfile.write(agfi_entry)\n\n if self.build_config.post_build_hook:\n localcap = local(f\"{self.build_config.post_build_hook} {local_results_dir}\", capture=True)\n rootLogger.debug(\"[localhost] \" + str(localcap))\n rootLogger.debug(\"[localhost] \" + str(localcap.stderr))\n\n rootLogger.info(f\"Build complete! AFI ready. 
See {os.path.join(hwdb_entry_file_location,afiname)}.\")\n return True\n else:\n return None", "async def create_prediction(\n *,\n db: Session = Depends(deps.get_db),\n cephalo_in: schemas.CephaloCreate = Depends(schemas.CephaloCreate.as_form),\n file: UploadFile = File(...),\n) -> Any:\n saved_file_path = os.path.join(UPLOAD_FOLDER, file.filename)\n with open(saved_file_path, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n\n # downsize the file\n im = Image.open(saved_file_path)\n max_dimension_large = max(im.size)\n im.thumbnail((512, 512))\n max_dimension_thumbnail = max(im.size)\n im.save(saved_file_path)\n cephalo_in.px_per_cm = int(round((cephalo_in.px_per_cm / (max_dimension_large/max_dimension_thumbnail))))\n\n cephalo = crud.cephalo.create(db=db, obj_in=cephalo_in, file_path=saved_file_path)\n\n for i in range(0, 20):\n celery_app.send_task(\"app.worker.cephalo_celery\", args=[cephalo.id, saved_file_path, i])\n return cephalo", "def createfpg(self, cpgname, fpgname, size,\n comment=None, node=None,\n full=False, wait=False):", "def create_base_image(driver,\n source_image_family='debian-10', source_image_project='debian-cloud',\n repository_url='https://github.com/google/android-cuttlefish.git',\n repository_branch='main', build_branch='aosp-master',\n build_target='aosp_cf_x86_phone-userdebug', build_id='',\n build_instance='halyard-build', build_zone='europe-west4-a', tags=[],\n dest_image='', dest_family='', image_disk='halyard-image-disk', respin=False):\n\n # SETUP\n \n build_node = utils.find_instance(driver, build_instance, build_zone)\n if build_node:\n driver.destroy_node(build_node)\n print('successfully deleted', build_instance)\n\n build_volume = utils.find_disk(driver, image_disk, build_zone)\n if build_volume:\n driver.destroy_volume(build_volume)\n print('successfully deleted', image_disk)\n\n\n # BUILD INSTANCE CREATION\n\n build_volume = driver.create_volume(\n 30, image_disk,\n location=build_zone,\n ex_image_family=source_image_family)\n\n print('built', source_image_family, 'disk')\n\n gpu_type='nvidia-tesla-p100-vws'\n gpu = utils.find_gpu(driver, gpu_type, build_zone)\n if not gpu:\n utils.fatal_error(f'Please use a zone with {gpu_type} GPUs available')\n\n build_node = driver.create_node(\n build_instance,\n 'n1-standard-16',\n None,\n location=build_zone,\n ex_image_family=source_image_family,\n ex_accelerator_type=gpu_type,\n ex_on_host_maintenance='TERMINATE',\n ex_accelerator_count=1,\n ex_service_accounts=[{'scopes':['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n print('successfully created', build_instance)\n\n utils.wait_for_instance(build_instance, build_zone)\n\n driver.attach_volume(build_node, build_volume)\n\n src_files = ['create_base_image_gce.sh', 'download_artifacts.sh']\n src_files = [PATH + '/' + file for file in src_files]\n src = ' '.join(list(map(str,src_files)))\n\n os.system(f'gcloud compute scp {src} {build_instance}: \\\n --zone={build_zone}')\n\n\n # IMAGE CREATION\n\n os.system(f'gcloud compute ssh --zone={build_zone} \\\n {build_instance} -- ./create_base_image_gce.sh \\\n {repository_url} {repository_branch} \\\n {build_branch} {build_target} {build_id}')\n\n dest_names = get_dest_names(\n build_branch, build_target, build_id,\n build_instance, build_zone, dest_image, dest_family)\n \n dest_image = dest_names['dest_image']\n dest_family = dest_names['dest_family']\n\n try:\n build_image = driver.ex_get_image(dest_image)\n except:\n build_image = None\n\n if build_image:\n if respin:\n 
driver.ex_delete_image(build_image)\n else:\n utils.fatal_error(f'''Image {dest_image} already exists.\n (To replace run with flag --respin)''')\n\n driver.destroy_node(build_node)\n\n driver.ex_create_image(\n dest_image,\n build_volume,\n ex_licenses=['https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx'],\n family=dest_family\n )\n\n print(f'Created image {dest_image} in {dest_family} family')\n\n driver.destroy_volume(build_volume)\n\n return {\"name\": dest_image, \"family\": dest_family}", "def create_new_image(self):\n logging.info('Starting image \\'' + self.name + '\\' creation')", "def TrackerKCF_create(parameters=...) -> retval:\n ...", "def aaf2fcp(terminate=int, waitCompletion=int, progress=int, getFileName=int, dstPath=\"string\", deleteFile=bool, srcFile=\"string\"):\n pass", "def AMI_builder(\n AWS_access_key_id,\n AWS_secret_access_key,\n region_name,\n base_image_id,\n os,\n security_group_id,\n AMI_name,\n RPM_package_version,\n APT_OSS_version,\n):\n try:\n instance = Instance(\n AWS_access_key_id=AWS_access_key_id,\n AWS_secret_access_key=AWS_secret_access_key,\n region_name=region_name,\n base_image_id=base_image_id,\n os=os, # ubuntu, amazonLinux\n security_group_id=security_group_id,\n AMI_name=AMI_name,\n RPM_package_version=RPM_package_version,\n APT_OSS_version=APT_OSS_version,\n )\n except Exception as err:\n logging.error(\"Could not bring up the instance. \" + str(err))\n sys.exit(-1)\n AMI_id = \"\"\n installation_failed = False\n try:\n instance.wait_until_ready()\n except Exception as err:\n logging.error(\n \"Could not bring the instance to ready state. \" + str(err))\n installation_failed = True\n else:\n try:\n instance.install_ODFE()\n AMI_id = instance.create_AMI()\n except Exception as err:\n installation_failed = True\n logging.error(\n \"AMI creation failed there was an error see the logs. \" + str(err))\n finally:\n try:\n instance.cleanup_instance()\n except Exception as err:\n logging.error(\n \"Could not cleanup the instance. There could be an instance currently running, terminate it. 
\" + str(err))\n installation_failed = True\n if installation_failed:\n sys.exit(-1)\n # copy the AMI to the required regions\n ec2_client = boto3.client(\n \"ec2\",\n aws_access_key_id=AWS_access_key_id,\n aws_secret_access_key=AWS_secret_access_key,\n region_name=region_name,\n )\n AMI_copy_regions = [region[\"RegionName\"]\n for region in ec2_client.describe_regions()[\"Regions\"]]\n AMI_copy_regions.remove(region_name) # since AMI is created here\n copy_AMI_to_regions(\n AWS_access_key_id=AWS_access_key_id,\n AWS_secret_access_key=AWS_secret_access_key,\n AMI_id=AMI_id,\n AMI_name=AMI_name,\n AMI_source_region=region_name,\n AMI_copy_regions=AMI_copy_regions,\n )", "def create_ami(self,instance_id,instance_name):\n #instance_name = conn.get_instance_attribute(instance_id, 'name')\n\n root_device = '/dev/sda1'\n\n block_map = self.block_device_map_for_instance(instance_id) # all the action starts here\n #description for daily\n if options.daily:\n b=(time.strftime('%A %d %b'))\n c=instance_id\n AMI_description = \"{} {} {}\".format('daily', b, c)\n\n #description for weekly\n if options.weekly:\n b=(datetime.now().strftime('%U'))\n c=instance_id\n AMI_description = \"{} {} {}\".format('weekly', b, c)\n\n #description for monthly\n if options.monthly:\n b=(datetime.now().strftime('%B %Y'))\n c=instance_id\n AMI_description = \"{} {} {}\".format('monthly', b, c)\n\n logger.info(\"AMI-Name [%s] AMI-Description [%s]\" % (AMI_description, AMI_description))\n\n instkernel = self.get_instance_kernel(instance_id)\n\n image_id = conn.register_image(name=AMI_description, description=AMI_description, root_device_name=root_device, block_device_map=block_map, architecture='x86_64', kernel_id=instkernel)\n logger.info(\"AMI Registered Successfully with AMI-ID [%s]\" % (image_id))\n\n #we sleep a little to be sure that the next query for the ami-id will return successfully - we got some errors that AMI-ID is not found, even it was successfully created...\n time.sleep(5)\n\n images = conn.get_all_images(image_ids=[image_id]) # get again the image id as object, because the first is string and is not valid for add_tag...\n for image in images:\n if instance_name != '':\n image.add_tag('Name', instance_name)\n else:\n image.add_tag('Name', instance_id)\n return image_id", "def main(input_file, output_file):\n\n LOGGER.info(\"Reading input file: %s\" % input_file)\n LOGGER.info(\"Seaking to: 0x%s\" % format(START_ADDR, 'x'))\n LOGGER.info(\"Reading image size: 0x%s\" % format(IMAGE_SIZE, 'x'))\n with open(input_file, \"rb\") as complete_image_file:\n complete_image_file.seek(START_ADDR)\n fpga_image = complete_image_file.read(IMAGE_SIZE)\n\n LOGGER.info(\"Writing file: %s\" % output_file)\n with open(output_file, \"wb\") as output_image_file:\n output_image_file.write(fpga_image)\n LOGGER.info(\"Finished extracting flash section\")", "def add_ficon_fe6sp(faked_cpc):\n\n # Adapter properties that will be auto-set:\n # - object-uri\n # - storage-port-uris\n faked_ficon_adapter = faked_cpc.adapters.add({\n 'object-id': 'fake-ficon6s-oid',\n 'parent': faked_cpc.uri,\n 'class': 'adapter',\n 'name': 'fake-ficon6s-name',\n 'description': 'FICON Express 6S+ #1',\n 'status': 'active',\n 'type': 'not-configured',\n 'adapter-id': '124',\n 'adapter-family': 'ficon',\n 'detected-card-type': 'ficon-express-16s-plus',\n 'card-location': 'vvvv-wwww',\n 'port-count': 1,\n 'state': 'online',\n 'configured-capacity': 254,\n 'used-capacity': 0,\n 'allowed-capacity': 254,\n 'maximum-total-capacity': 254,\n 'channel-path-id': 
None,\n 'physical-channel-status': 'not-defined',\n })\n\n # Port properties that will be auto-set:\n # - element-uri\n # Properties in parent adapter that will be auto-set:\n # - storage-port-uris\n faked_ficon_adapter.ports.add({\n 'element-id': 'fake-port11-oid',\n 'parent': faked_ficon_adapter.uri,\n 'class': 'storage-port',\n 'index': 0,\n 'name': 'fake-port11-name',\n 'description': 'FICON #1 Port #1',\n })\n return faked_ficon_adapter", "def create_image(self, **kw):\n cmd = \"rbd create \" + kw.get(\"image_name\") + \" -s 1G\"\n if kw.get(\"features\"):\n cmd = cmd + \" --image-feature \" + kw[\"features\"]\n self.exec_cmd(cmd)", "def insert_image(session, dataset, freq_eff, freq_bw, taustart_ts, tau_time, beam_smaj_pix, beam_smin_pix,\n beam_pa_rad, deltax, deltay, url, centre_ra, centre_decl, xtr_radius, rms_qc, freq_bw_max=0.0,\n rms_min=None, rms_max=None, detection_thresh=None, analysis_thresh=None):\n # this looks a bit weird, but this simplifies backwards compatibility\n dataset_id = dataset\n dataset = session.query(Dataset).filter(Dataset.id == dataset_id).one()\n\n skyrgn = get_skyregion(session, dataset, centre_ra, centre_decl, xtr_radius)\n band = get_band(session, dataset, freq_eff, freq_bw, freq_bw_max)\n rb_smaj = beam_smaj_pix * math.fabs(deltax)\n rb_smin = beam_smin_pix * math.fabs(deltay)\n rb_pa = 180 * beam_pa_rad / math.pi\n\n args = ['dataset', 'band', 'tau_time', 'freq_eff', 'freq_bw', 'taustart_ts', 'skyrgn', 'rb_smaj', 'rb_smin',\n 'rb_pa', 'deltax', 'deltay', 'url', 'rms_qc', 'rms_min', 'rms_max', 'detection_thresh', 'analysis_thresh']\n\n l = locals()\n kwargs = {arg: l[arg] for arg in args}\n image = Image(**kwargs)\n session.add(image)\n return image", "def cfn_create(test=False):\n stack_name = get_stack_name(new=True)\n cfn_config = get_config()\n\n cfn = get_connection(Cloudformation)\n if test:\n print cfn_config.process()\n return\n # Upload any SSL certs that we may need for the stack.\n if 'ssl' in cfn_config.data:\n print green(\"Uploading SSL certificates to stack\")\n iam = get_connection(IAM)\n iam.upload_ssl_certificate(cfn_config.ssl(), stack_name)\n # Useful for debug\n # print cfn_config.process()\n # Inject security groups in stack template and create stacks.\n try:\n stack = cfn.create(stack_name, cfn_config.process(), tags=get_cloudformation_tags())\n except:\n # cleanup ssl certificates if any\n if 'ssl' in cfn_config.data:\n print red(\"Deleting SSL certificates from stack\")\n iam.delete_ssl_certificate(cfn_config.ssl(), stack_name)\n import traceback\n abort(red(\"Failed to create: {error}\".format(error=traceback.format_exc())))\n\n print green(\"\\nSTACK {0} CREATING...\\n\").format(stack_name)\n\n if not env.blocking:\n print 'Running in non blocking mode. 
Exiting.'\n sys.exit(0)\n\n tail(cfn, stack_name)\n stack_evt = cfn.get_last_stack_event(stack)\n\n if stack_evt.resource_status == 'CREATE_COMPLETE':\n print 'Successfully built stack {0}.'.format(stack)\n else:\n # So delete the SSL cert that we uploaded\n if 'ssl' in cfn_config.data:\n iam.delete_ssl_certificate(cfn_config.ssl(), stack_name)\n abort('Failed to create stack: {0}'.format(stack))", "def MACE_create(IMGSIZE=None): # real signature unknown; restored from __doc__\n pass", "def create_dpa(self):\n oadp_data = templating.load_yaml(constants.ACM_DPA)\n oadp_data[\"spec\"][\"backupLocations\"][0][\"velero\"][\"objectStorage\"][\n \"bucket\"\n ] = self.meta_obj.bucket_name\n oadp_yaml = tempfile.NamedTemporaryFile(mode=\"w+\", prefix=\"oadp\", delete=False)\n templating.dump_data_to_temp_yaml(oadp_data, oadp_yaml.name)\n run_cmd(f\"oc create -f {oadp_yaml.name}\")\n # Validation\n self.validate_dpa()", "def __flavor_create__(self,**kwargs):\n\n\t\tself.validate_args(**kwargs)\n\t\tvcpus = kwargs[\"vcpus\"]\n\t\tdisk = kwargs[\"disk\"]\n\t\tname = kwargs[\"name\"]\n\t\tram = kwargs[\"ram\"]\n\t\tis_public = kwargs[\"is_public\"] if \"is_public\" in kwargs else \"true\"\n\t\trx_tx = kwargs[\"rxtx_factor\"] if \"rxtx_factor\" in kwargs else \"1.0\"\n\t\t#flavor_id = kwargs[\"id\"] if \"id\" in kwargs else \"auto\"\n\t\tephemeral = kwargs[\"OS-FLV-EXT-DATA:ephemeral\"] if \"OS-FLV-EXT-DATA:ephemeral\" in kwargs else '0'\n\t\tswap = kwargs[\"swap\"] if \"swap\" in kwargs else '0'\n\n\t\treturn self.novaClient.flavor_create(vcpus,disk,name,ram,is_public,rx_tx,ephemeral,swap)", "def create_efs(config):\n efs = boto.client('efs', config)\n\n print('Creating EFS')\n token = f'{config.vpc_name}-efs'\n response = efs.create_file_system(CreationToken=token)\n efs_id = response['FileSystemId']\n # Sleep for a second because the the object is created asynchronously. It's\n # not created when the response comes back from the server.\n time.sleep(1)\n efs.create_tags(\n FileSystemId=efs_id, Tags=[{'Key': 'Name', 'Value': token}]\n )\n # Wait until it's in the available state\n while True:\n response = efs.describe_file_systems(FileSystemId=efs_id)\n if response['FileSystems'][0]['LifeCycleState'] == 'available':\n break\n efs.create_mount_target(\n FileSystemId=efs_id,\n SubnetId=config.subnet_id,\n SecurityGroups=[config.efs_security_group_id]\n )\n config.efs_id = efs_id\n config.save()", "def acquire_image(self,\n fname='Test.fits',\n exptime=None,\n filt=None,\n binning=None,\n subarray=None,\n ACP_obj=None,\n **ObsClassArgs):\n assert self.MC.CCDCamera.GuiderRunning, 'Guider must be running. 
You can start it with PrecisionGuide.MC.guider_start()'\n\n # Here might be where we make the choice to use ACP's\n # TakePicture or record it ourselves based on whether or\n # not ACP's objects are present\n if ACP_obj:\n # Eventually we would read the file from the disk\n # Consider using ACP's TakePicture\n HDUList = fits.open(fname)\n O = self.create_ObsData(HDUList, **ObsClassArgs)\n else:\n HDUList = self.MC.acquire_im(fname=fname,\n exptime=exptime,\n filt=filt,\n binning=binning,\n subarray=subarray)\n #HDUList = self.MC.take_im(exptime, filt, binning)\n ## Write image to disk right away in case something goes wrong\n #if not self.MC.CCDCamera.SaveImage(fname):\n # raise EnvironmentError('Failed to save file ' + fname)\n #log.debug('Saved file: ' + fname)\n # Use the version of our image in HDUList for\n # processing so we don't have to read it off the disk\n # again\n O = self.create_ObsData(HDUList, **ObsClassArgs)\n return self.update_flex_pix_rate(O)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exports a running or stopped instance to an S3 bucket. For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting an Instance as a VM Using VM Import/Export in the VM Import/Export User Guide.
def create_instance_export_task(Description=None, InstanceId=None, TargetEnvironment=None, ExportToS3Task=None): pass
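A minimal usage sketch of this call through a boto3 EC2 client (not the stub above): the region, instance ID, and bucket name are placeholder assumptions, and the target bucket must already grant the VM Import/Export service write access for the task to succeed.

import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")  # region is an assumption

# Export a stopped instance as an OVA containing a VMDK disk image to S3.
task = ec2.create_instance_export_task(
    Description="nightly VM export",
    InstanceId="i-0123456789abcdef0",      # placeholder instance ID
    TargetEnvironment="vmware",
    ExportToS3Task={
        "ContainerFormat": "ova",
        "DiskImageFormat": "VMDK",
        "S3Bucket": "my-export-bucket",    # placeholder bucket name
        "S3Prefix": "exports/",
    },
)
print(task["ExportTask"]["ExportTaskId"])  # poll later with describe_export_tasks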
[ "def run_instances(self):\n # create an entry in the s3 log for the start of this task \n self.log_to_s3('run-instances-start.log', 'start')\n\n session = botocore.session.get_session()\n client = session.create_client('ec2', region_name=self.aws_region)\n\n # convert user-data to base64\n user_data = ''\n # NOTE conversion of file to string, then string to bytes, the bytes encoded \n # base64 - then decode the base64 bytes into base64 string\n with open(self.ec2_user_data, 'r') as f:\n user_data = base64.b64encode(bytes(f.read(), \"utf-8\")).decode(\"utf-8\")\n\n if self.ec2_type in (CONST.VALID_EC2_INSTANCE_TYPES_EBS_ONLY).split('|'):\n # block device mapping for ebs backed instances\n # creates an ephemeral EBS volume (delete on terminate)\n # Note that gp2 instance type is EBS SSD\n custom_block_device_mapping = [{\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0',\n 'Ebs':{\n 'VolumeSize': self.ec2_ebs_only_volume_size,\n 'VolumeType': self.ec2_ebs_only_volume_type,\n },\n }]\n else:\n # block device mapping allows for 2 extra drives\n # - works for either single ssd or 2 ssd's\n custom_block_device_mapping = [ \n {\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0'\n },\n {\n 'DeviceName': '/dev/sdc',\n 'VirtualName': 'ephemeral1'\n }\n ]\n\n r = client.request_spot_instances(\n InstanceCount=self.ec2_count,\n SpotPrice=self.ec2_spot_price,\n LaunchSpecification= {\n 'SecurityGroupIds': [\n self.ec2_security_group_id,\n ],\n 'SecurityGroups': [\n self.ec2_security_groups,\n ],\n 'Placement': {\n 'AvailabilityZone': self.ec2_availability_zone,\n },\n 'BlockDeviceMappings': custom_block_device_mapping,\n 'IamInstanceProfile': {\n 'Arn': self.ec2_arn_id,\n },\n 'UserData': user_data,\n 'ImageId': self.ec2_image_id,\n 'InstanceType': self.ec2_type,\n 'KeyName': self.ec2_security_key,\n },\n )\n\n # get the spot instance request ids\n spot_ids = []\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Spot request ids:'))\n for i, spot_inst in enumerate(r['SpotInstanceRequests']):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, \n inst_str + '\\t' + spot_inst['SpotInstanceRequestId']))\n spot_ids.append(spot_inst['SpotInstanceRequestId'])\n utility.list_to_file(CONST.SPOT_REQUEST_IDS, spot_ids)\n\n # create a list of spot instance statuses - so we can print out\n # some updates to the user\n spot_status = ['']*len(spot_ids)\n # Expecting status codes of \"pending-evaluation\", \"pending-fulfillment\", or \n # fulfilled. 
Any other status-code should be printed out & the program \n # terminated.\n expected_status = ['fulfilled', 'pending-evaluation', 'pending-fulfillment']\n instance_ids = [None]*len(spot_ids)\n\n # check the status of the spot requests\n while True:\n fulfilled = 0\n for i, id in enumerate(spot_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_spot_instance_requests(SpotInstanceRequestIds=[id])\n status_code = r['SpotInstanceRequests'][0]['Status']['Code']\n if status_code not in expected_status:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, 'Unexpected status for spot request ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': ') +\n colour_msg(Colour.PURPLE, status_code))\n sys.exit(1)\n if status_code != spot_status[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Spot instance request: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tStatus: ') +\n colour_msg(Colour.PURPLE, status_code))\n spot_status[i] = status_code\n if status_code == 'fulfilled':\n fulfilled += 1\n # record the instance id\n instance_ids[i] = r['SpotInstanceRequests'][0]['InstanceId']\n if fulfilled == len(spot_ids):\n break\n time.sleep(1)\n\n utility.list_to_file(CONST.INSTANCE_IDS, instance_ids)\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ids:'))\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n tag_val = self.ec2_instance_tag + str(i)\n client.create_tags(Resources=[id], Tags=[{'Key':'Name', 'Value':tag_val}])\n\n # monitor the instances until all running\n instance_states = ['']*len(instance_ids)\n expected_states = ['running', 'pending']\n instance_ips = [None]*len(instance_ids)\n running = 0\n while True:\n running = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instances(InstanceIds=[id])\n state = r['Reservations'][0]['Instances'][0]['State']['Name']\n if state not in expected_states:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, \n 'Unexpected instance state for instance-id ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': \\t') +\n colour_msg(Colour.PURPLE, state))\n sys.exit(1)\n if state != instance_states[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tState: ') +\n colour_msg(Colour.PURPLE, state))\n instance_states[i] = state\n if state == 'running':\n running += 1\n # record the instance id\n instance_ips[i] = r['Reservations'][0]['Instances'][0]['PublicDnsName']\n if running == len(instance_ids):\n break\n time.sleep(10)\n\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ips:'))\n for i, id in enumerate(instance_ips):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n \n utility.list_to_file(CONST.INSTANCE_IPS_FILE, instance_ips)\n # need to at least wait until all the instances are reachable\n # possible statuses: (passed | failed | initializing | insufficient-data )\n reachability = ['']*len(instance_ids)\n while True:\n passed = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instance_status(InstanceIds=[id])\n state = r['InstanceStatuses'][0]['InstanceStatus']['Details'][0]['Status']\n if state != reachability[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n 
colour_msg(Colour.CYAN, '\\tReachability: ') +\n colour_msg(Colour.PURPLE, state))\n reachability[i] = state\n if state == 'passed':\n passed += 1\n if passed == len(instance_ids):\n break\n time.sleep(10)\n \n lgr.info(CONST.INFO + colour_msg(Colour.GREEN, 'Instances are reachable'))\n \n # if user-data configuration file supplied - check that it has worked\n # Note that this checker is run once on each instance\n if self.ec2_user_data:\n lgr.info(CONST.INFO + colour_msg(Colour.CYAN, \n 'Starting job to monitor user-data configuration...'))\n # at the moment is calling a local script that does the checking\n result = subprocess.call('./' + self.ec2_user_data_check) \n if result:\n lgr.error(CONST.ERROR + colour_msg(Colour.CYAN, \n 'user data checker FAILED'))\n sys.exit(1)\n\n # create an entry in the s3 log for finish this task \n self.log_to_s3('run-instances-finish.log', 'finish')\n\n # return the list of ip's for the newly created instances\n return utility.file_to_list(CONST.INSTANCE_IPS_FILE)", "def save_results(output, dest_bucket, eventime):\n try:\n print(\"Saving results into: \" + dest_bucket)\n out='output/inference-{}.csv'.format(eventime)\n s3.put_object(Body=output, Bucket=dest_bucket, Key=out) \n\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n raise e", "def backup_instance(self, instance):\n image_id = self._connection.create_image(\n instance.id,\n self._create_AMI_name(instance)\n )\n self._connection.create_tags([image_id],\n {'instance': instance.id,\n 'created_at': datetime.date.today(),\n 'created_by': 'cloudsnap',\n })\n return image_id", "def stop():\n local('aws ec2 stop-instances --instance-ids %s'%(AWS_INSTANCE_ID))", "def export_image(image: ee.Image, bucket: str, name: str) -> ee.batch.Task:\n # Define the export configuration\n exportconfig = {\n # Export Image\n \"image\": image,\n\n # Export Tranforms and Bounds\n \"scale\": 1,\n \"region\": image.geometry(),\n \"crs\": image.projection().crs(),\n \n # Destination Filename and Bucket\n \"bucket\": bucket,\n \"fileNamePrefix\": name,\n\n # Export Description\n \"description\": f\"export\"[0:100],\n \n # Export Constraints\n \"maxPixels\": 1e10,\n \"skipEmptyTiles\": True,\n \"fileFormat\": \"GeoTIFF\"\n }\n\n try:\n # Create an export task with the export configuration\n task = ee.batch.Export.image.toCloudStorage(**exportconfig)\n # Return the task\n return task\n\n except Exception as e:\n raise RuntimeError(f\"could not create image export task. 
{e}.\")", "def launch_instance(key_name, security_group):\n # Create Key Pair if it does not already exist\n key_names = get_key_pairs()\n if key_name not in key_names:\n create_key_pair(key_name, True)\n print()\n elif not os.path.isfile(key_name):\n delete_key_pair(key_name, True)\n print()\n create_key_pair(key_name, True)\n print()\n\n # Create Security Group if it does not already exist\n names = get_security_group_names()\n if security_group not in names:\n group_id = create_security_group(security_group)\n\n # Create EC2 Instance\n ec2 = boto3.client('ec2', AVAILABILITY_ZONE)\n response = ec2.run_instances(\n ImageId=AMI_IMAGE_ID,\n InstanceType=AMI_INSTANCE_TYPE,\n KeyName=key_name,\n MinCount=1,\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='terminate',\n SecurityGroups=[\n security_group\n ],\n )\n instance = response['Instances'][0]\n instance_id = instance['InstanceId']\n print(f\"Launched EC2 Instance with: ID={instance_id}\")\n print(f\"Terminate this instance with the script: terminate_ec2_{instance_id[-4:]}.sh\")\n with open(f\"terminate_ec2_{instance_id[-4:]}.sh\", \"w\") as f:\n f.write(f\"python {sys.argv[0]} --terminate_id {instance_id}\")\n\n print(\"Waiting for public dns\", end='')\n while True:\n instance_info = describe_instances([instance_id])\n public_dns = instance_info['Reservations'][0]['Instances'][0]['PublicDnsName']\n if public_dns != '':\n print(f\"\\nPublic DNS: {public_dns}\")\n break\n print('.', end='')\n sys.stdout.flush()\n time.sleep(1)\n\n ssh_command = f'ssh -i {key_name} ec2-user@{public_dns}'\n with open('ssh_to_ec2.sh', 'w') as f:\n f.write(ssh_command)\n\n print('Access the EC2 instance with ssh_to_ec2.sh, or run following command directly:')\n print(ssh_command)\n return response", "def s3bucket(ec2, env, source):\n\tmime_types = {\n\t\t\"eot\" : \"application/vnd.ms-fontobject\",\n\t\t\"ttf\" : \"font/truetype\",\n\t\t\"otf\" : \"font/opentype\",\n\t\t\"woff\": \"font/woff\",\n\t}\n\ts3b = boto.connect_s3(ec2.access_key,ec2.secret_key)\n\tfor machine in env:\n\t\tif 's3bucket' in machine.keys():\n\t\t\tprint 'Copying static media for %s' % machine['name']\n\t\t\ts3bucket = machine['s3bucket']\n\n\t\t\t# Get the expires\n\t\t\ttime_format = '%a, %d %b %Y %H:%M:%S'\n\t\t\tnow = datetime.datetime.now().strftime(time_format)\n\t\t\texpires = s3bucket.get('expires',datetime.datetime.utcnow().strftime(time_format))\n\t\t\ttry:\n\t\t\t\tdatetime.datetime.strptime(expires,time_format)\n\t\t\texcept:\n\t\t\t\terror('Improperly formatted datetime: %s' % expires)\n\n\t\t\t# Get or create bucket using the name\n\t\t\tname = s3bucket.get('name','s3%s'%machine['name'])\n\t\t\ttry: b = s3b.get_bucket(name)\n\t\t\texcept: b = s3b.create_bucket(name)\n\t\t\t\n\t\t\t# Set ACL Public for all items in the bucket\n\t\t\tb.set_acl('public-read')\n\n\t\t\tk = Key(b)\n\t\t\tstatic_dir = os.path.join(source,'project','static')\n\t\t\tfor root, dirs, files in os.walk(static_dir):\n\t\t\t\tif '.svn' in dirs: dirs.remove('.svn')\n\t\t\t\tkey_root = root.split('static')[1]\n\n\t\t\t\tfor file in files:\n\t\t\t\t\tfilename = os.path.join(root,file)\n\n\t\t\t\t\t# Set the headers\n\t\t\t\t\theaders = {'Expires':expires}\n\t\t\t\t\tif '.gz' in file:\n\t\t\t\t\t\theaders.update({'Content-Encoding':'gzip'})\n\n\t\t\t\t\tif os.path.isfile(filename):\n\t\t\t\t\t\t# Set the mime-type\n\t\t\t\t\t\text = file.split('.')[-1]\n\t\t\t\t\t\tif ext in mime_types.keys():\n\t\t\t\t\t\t\tk.content_type = mime_types[ext]\n\n\t\t\t\t\t\t# Send the file\n\t\t\t\t\t\tk.key = 
os.path.join(key_root,file)\n\t\t\t\t\t\tprint '\\nTransfering %s' % filename\n\t\t\t\t\t\tk.set_contents_from_filename(filename, headers=headers, cb=s3_percent_cb, num_cb=10)\n\t\t\tprint '\\nTransfer complete'\n\n\tinvalidate_cache(ec2, env, source)", "def lambda_handler(event, context):\n client = boto3.client(\"ec2\", region_name=event[\"region\"])\n\n instance_id = event[\"instance_id\"]\n get_status = client.describe_instances(InstanceIds=[instance_id])\n current_status = get_status[\"Reservations\"][0][\"Instances\"][0][\"State\"]\n\n if current_status[\"Name\"] == \"running\":\n response = client.stop_instances(InstanceIds=[instance_id])\n if current_status[\"Name\"] == \"stopped\":\n response = client.start_instances(InstanceIds=[instance_id])\n\n return response", "def _deploy_to_s3():\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt put gzip/ %s'\n\n for bucket in env.s3_buckets:\n env.s3_bucket = bucket\n local(s3cmd % ('s3://%(s3_bucket)s/' % env))\n local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))", "def stopinstances():\n username, conn = _getbotoconn(auth_user)\n print \"stopping instances running under the %s account\" % username\n\n running_instances = _getrunninginstances(conn)\n for instid, instance in running_instances.iteritems():\n instance.stop()\n print \"instance %s stopped\" % instid", "def export_data_to_s3(tgw_routetable_id, bucket):\n client = boto3.client(\"ec2\")\n try:\n client.export_transit_gateway_routes(\n TransitGatewayRouteTableId=tgw_routetable_id, \\\n S3Bucket=bucket\n )\n except botocore.exceptions.ClientError as error:\n print(error)", "def lambda_handler(event, context):\n instance_id = event[\"instance_id\"]\n # Capture all the info about the instance so we can extract the ASG name later\n response = ec2_client.describe_instances(\n Filters=[\n {\"Name\": \"instance-id\", \"Values\": [instance_id]},\n ],\n )\n\n # Get the ASG name from the response JSON\n tags = response[\"Reservations\"][0][\"Instances\"][0][\"Tags\"]\n autoscaling_name = next(\n t[\"Value\"] for t in tags if t[\"Key\"] == \"aws:autoscaling:groupName\"\n )\n\n # Put the instance in standby\n response = asg_client.exit_standby(\n InstanceIds=[\n instance_id,\n ],\n AutoScalingGroupName=autoscaling_name,\n )\n\n response = asg_client.describe_auto_scaling_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n while response[\"AutoScalingInstances\"][0][\"LifecycleState\"] != \"InService\":\n print(\" The node is not yet in service state, waiting for 5 more seconds\")\n time.sleep(5)\n response = asg_client.describe_auto_scaling_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n if response[\"AutoScalingInstances\"][0][\"LifecycleState\"] == \"InService\":\n break\n # Detach the instance\n response = asg_client.detach_instances(\n InstanceIds=[\n instance_id,\n ],\n AutoScalingGroupName=autoscaling_name,\n ShouldDecrementDesiredCapacity=True,\n )\n\n response = ec2_client.describe_instances(\n Filters=[\n {\"Name\": \"instance-id\", \"Values\": [instance_id]},\n ],\n )\n\n while response[\"Reservations\"][0][\"Instances\"][0][\"Tags\"] == autoscaling_name:\n # sleep added to reduce the number of api calls for checking the status\n print(\" The node is not yet detached, waiting for 10 more seconds\")\n time.sleep(10)\n response = 
ec2_client.describe_instances(\n Filters=[\n {\"Name\": \"instance-id\", \"Values\": [instance_id]},\n ],\n )\n if response[\"Reservations\"][0][\"Instances\"][0][\"Tags\"] != autoscaling_name:\n break\n\n # if the node is detqched then stop the instance\n\n response = ec2_client.stop_instances(\n InstanceIds=[\n instance_id,\n ],\n )", "def backup_volume(ec2,instances):\n\n for instance in instances:\n retention = get_retention(instance)\n if not is_master(instance['PrivateIpAddress']):\n #make snapshot only on primary\n continue\n\n for dev in instance['BlockDeviceMappings']:\n if dev.get('Ebs', None) is None:\n # skip non-EBS volumes\n continue\n\n retention = get_retention(instance)\n now = datetime.today()\n delete_date_days = (now + timedelta(days=retention['days'])).strftime('%Y-%m-%d')\n delete_date_weeks = (now + timedelta(weeks=retention['weeks'])).strftime('%Y-%m-%d')\n delete_date_months = (now + relativedelta(months=retention['months'])).strftime('%Y-%m-%d')\n desc_date = now.strftime('%Y-%m-%d.%H:%M:%S')\n\n\n # all mongo disks are sdf\n if dev['DeviceName'] == '/dev/sdf':\n vol_id = dev['Ebs']['VolumeId']\n\n # Make sure that only one snapshot is taken, whether daily, weekly or monthly.\n if now.strftime('%d') == '01':\n print \"Creating snapshot of %s volume that will be retain for %d months\" % (vol_id, retention['months'])\n snap = make_snapshot(ec2,vol_id, retention['months'], \"MongoMonthlyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_months)\n elif now.strftime('%a') == 'Sun':\n print \"Creating snapshot of %s volume that will be retain for %d weeks\" % (vol_id, retention['weeks'])\n snap = make_snapshot(ec2,vol_id, retention['weeks'], \"MongoWeeklyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_weeks)\n else:\n print \"Creating snapshot of %s volume that will be retain for %d days\" % (vol_id, retention['days'])\n snap = make_snapshot(ec2,vol_id, retention['days'], \"MongoDailyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_days)\n\n return True", "def delete_ec2_instances():\n print('Deleting EC2 instances')\n ec2 = boto3.resource('ec2')\n\n active_ec2_instance_count = 0\n for instance in ec2.instances.all():\n disable_api_termination = instance.describe_attribute(\n Attribute='disableApiTermination'\n )\n if disable_api_termination['DisableApiTermination']['Value']:\n print('Stopping instance to enable API termination - {}'.format(instance.instance_id))\n instance.stop()\n active_ec2_instance_count = active_ec2_instance_count + 1\n else:\n if instance.state['Code'] != 48: # code 48 is 'terminated'\n print('Terminating instance - {}'.format(instance.instance_id))\n instance.terminate()\n active_ec2_instance_count = active_ec2_instance_count + 1\n\n if active_ec2_instance_count > 0:\n print('Waiting for ec2 instances to stop or terminate')\n while [instance for instance in ec2.instances.all()]:\n all_terminated = True\n for instance in ec2.instances.all():\n disable_api_termination = instance.describe_attribute(\n Attribute='disableApiTermination'\n )\n if (disable_api_termination['DisableApiTermination']['Value'] and\n instance.state['Code'] == 80):\n # code 80 is 'stopped'\n # instance has termination protection switched on and is stopped\n # switch it off and terminate the instance\n instance.modify_attribute(\n DisableApiTermination={\n 'Value': False\n }\n )\n instance.terminate()\n if instance.state['Code'] != 48: # code 48 is 'terminated'\n all_terminated = 
False\n\n if all_terminated:\n break\n else:\n time.sleep(5)\n\n print('EC2 instances deleted')", "def create_spot_instance(config, job_id, sched_time, docker_image, env_vars):\n\n client = boto3.client('ec2')\n\n # Get my own public fqdn by quering metadata\n my_own_name = urllib2.urlopen(\n \"http://169.254.169.254/latest/meta-data/public-hostname\").read()\n\n user_data = (\n \"#!/bin/bash\\n\"\n \"touch /tmp/start.txt\\n\"\n \"curl -i -H 'Content-Type: application/json' \"\n \"'http://%s/v1/notifications/%s?status=started' -X PUT\\n\"\n \"yum -y update\\n\"\n \"yum install docker -y\\n\"\n \"sudo service docker start\\n\"\n \"sudo docker run %s %s\\n\"\n \"touch /tmp/executing.txt\\n\"\n \"sleep 180\\n\"\n \"curl -i -H 'Content-Type: application/json' \"\n \"'http://%s/v1/notifications/%s?status=finished' -X PUT\\n\" %\n (my_own_name, job_id, env_vars, docker_image, my_own_name, job_id))\n\n response = client.request_spot_instances(\n SpotPrice=\"%s\" % config[\"spot-price\"],\n InstanceCount=1,\n Type='one-time',\n ValidFrom=sched_time,\n LaunchSpecification={\n 'ImageId': config[\"ami-id\"],\n 'InstanceType': config[\"instance-type\"],\n 'KeyName': config[\"key-name\"],\n 'SecurityGroups': ['default', config[\"sg-name\"]],\n 'UserData': base64.b64encode(user_data)\n }\n )\n\n req_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']\n req_state = response['SpotInstanceRequests'][0][\n 'State'] # open/failed/active/cancelled/closed\n req_status_code = response['SpotInstanceRequests'][0][\n 'Status']['Code'] # pending-evaluation/price-too-low/etc\n\n return [req_id, req_state, req_status_code]", "def generate_ansible_file():\n\n generate_instance_data()\n\n ansible_output_file = {\"vpc\": set()}\n ansible_output_file[\"region\"] = set()\n ansible_output_file[\"az\"] = {}\n\n for i in AwsInstance.instances_by_id.values():\n ansible_output_file[\"vpc\"].add(i.vpc)\n\n region = i.vpc + \"--\" + i.region\n ansible_output_file[\"region\"].add(region)\n\n az = i.vpc + \"--\" + i.az\n if az not in ansible_output_file[\"az\"]:\n ansible_output_file[\"az\"][az] = []\n ansible_output_file[\"az\"][az].append(i.hostname)\n\n vpcs = ansible_output_file[\"vpc\"]\n regions = ansible_output_file[\"region\"]\n azs = ansible_output_file[\"az\"]\n\n # generates ansible inventory file with nested grouping VPC, Region,\n # availability zone and instances in each of those\n for vpc in vpcs:\n print(\"\\n[\" + vpc + \":children]\")\n for region in regions:\n if region.startswith(vpc):\n print(region)\n\n for region in regions:\n print(\"\\n[\" + region + \":children]\")\n for az in azs:\n if az.startswith(region):\n print(az)\n\n for az in azs:\n print(\"\\n[\" + az + \"]\")\n for instance in AwsInstance.instances_by_id.values():\n if az.endswith(instance.az) and az.startswith(instance.vpc):\n tags = \"\"\n for td in instance.taglist:\n if all(\n [\n td[\"Key\"] != \"Name\",\n \" \" not in td[\"Key\"],\n \" \" not in td[\"Value\"],\n ]\n ):\n tags += td[\"Key\"] + \"=\" + '\"' + td[\"Value\"] + '\" '\n print(instance.hostname, tags, \"#\", instance.ip)", "def backup():\n\n # assume your AWS access key and secret key is present in ~/.boto\n REGION = 'us-east-1'\n SRCTABLE = '*' # all tables\n LOG = 'DEBUG' # DEBUG|INFO|WARNING|ERROR|CRITICAL\n local('./dynamodump/dynamodump.py -m backup -r {REGION} -s \"{SRCTABLE}\" --log {LOG}'.format(**locals()))", "def instance_backup(instance_name='', i=None, debug=False, logging=True):\n global ec2_backup_config, count_str\n # check the include list to see if 
it's always backed up\n for j in ec2_backup_config.include_instances:\n try:\n match = re.search(j, instance_name)\n except:\n if debug and logging:\n print 'problem with include_instances regex \"' + str(j) + '\"'\n else:\n error_report(\n count_str + ' problem with include_instances regex \"' + str(j) + '\": ' + str(sys.exc_info()[0]))\n match = False\n if match:\n if debug and logging:\n print instance_name + ' will be included with regex \"' + str(j) + '\"'\n elif logging:\n log(count_str + ' instance ' + instance_name + ' will be included by regex', 'notice')\n return True\n\n # check the exclude list to see if it's always skipped\n for j in ec2_backup_config.exclude_instances:\n try:\n match = re.search(j, instance_name)\n except:\n if debug and logging:\n print 'problem with exclude_instances regex \"' + str(j) + '\"'\n else:\n error_report(\n count_str + ' problem with exclude_instances regex \"' + str(j) + '\": ' + str(sys.exc_info()[0]))\n match = False\n if match:\n if debug and logging:\n print instance_name + ' will be excluded with regex \"' + str(j) + '\"'\n elif logging:\n log(count_str + ' instance ' + instance_name + ' will be excluded by regex', 'notice')\n return False\n\n # see if it's running and back it up if so\n if ec2_backup_config.backup_running_instances:\n try:\n if str(i.state) == 'running':\n if debug and logging:\n print instance_name + ' is running and will be included'\n elif logging:\n log(count_str + ' instance ' + instance_name + ' is running and will be included', 'notice')\n return True\n else:\n if debug and logging:\n print instance_name + ' is ' + str(i.state) + ' and will be excluded'\n elif logging:\n log(count_str + ' instance ' + instance_name + ' is ' + str(i.state) + ' and will be excluded',\n 'notice')\n return False\n except:\n if debug and logging:\n print instance_name + ' will be included if it is running'\n elif logging:\n log('cannot get state for ' + instance_name + ', excluding', 'notice')\n return False\n\n if debug and logging:\n print instance_name + ' will be excluded by default'\n elif logging:\n log(count_str + ' instance ' + instance_name + ' will be excluded by default', 'notice')\n return False", "def make_snapshot(ec2,vol,retention,description):\n\n snap = ec2.create_snapshot(VolumeId=vol,Description=description)\n return snap" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a NAT gateway in the specified subnet. A NAT gateway can be used to enable instances in a private subnet to connect to the Internet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.
def create_nat_gateway(SubnetId=None, AllocationId=None, ClientToken=None): pass
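A hedged sketch of the typical boto3 call sequence: a NAT gateway needs an Elastic IP allocation first, and the subnet ID below is a placeholder for a public subnet in the VPC.

import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")  # region is an assumption

# Allocate an Elastic IP, then create the NAT gateway in a public subnet.
eip = ec2.allocate_address(Domain="vpc")
nat = ec2.create_nat_gateway(
    SubnetId="subnet-0123456789abcdef0",  # placeholder public subnet ID
    AllocationId=eip["AllocationId"],
)

# Block until the gateway is usable before pointing 0.0.0.0/0 routes at it.
ec2.get_waiter("nat_gateway_available").wait(
    NatGatewayIds=[nat["NatGateway"]["NatGatewayId"]]
)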
[ "def create_nat_gateway(\n subnet_id=None,\n subnet_name=None,\n allocation_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\",\n subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"Subnet {} does not exist.\".format(subnet_name)\n },\n }\n else:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if not allocation_id:\n address = conn3.allocate_address(Domain=\"vpc\")\n allocation_id = address.get(\"AllocationId\")\n\n # Have to go to boto3 to create NAT gateway\n r = conn3.create_nat_gateway(SubnetId=subnet_id, AllocationId=allocation_id)\n return {\"created\": True, \"id\": r.get(\"NatGateway\", {}).get(\"NatGatewayId\")}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def ex_create_network_interface(self, subnet, name=None,\r\n description=None,\r\n private_ip_address=None):\r\n raise NotImplementedError(self._not_implemented_msg)", "def create_network_gateway(self, body=None):\n return self._post(self.network_gateways_path, body=body)", "def create_nat_gw(dmz_id) :\n\t\n\text_ip = client.allocate_address(\n \t#Domain='vpc'|'standard',\n\t #Address='string',\n \t#DryRun=True|False\n\t )\n\text_ip = client.describe_addresses(\n\t\tFilters=[\n \t{\n \t'Name': 'public-ip',\n 'Values': [ext_ip['PublicIp']]\n \t}\n ]\n \t\t)['Addresses'][0] # good part\n\n\tnat_gw = client.create_nat_gateway(\n \tAllocationId=ext_ip['AllocationId'],\n\t SubnetId=dmz_id\n \t)['NatGateway']\n\t\n\treturn ext_ip, nat_gw", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def create_internet_gateway(\n internet_gateway_name=None,\n vpc_id=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"internet_gateway\",\n name=internet_gateway_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.attach_internet_gateway(r[\"id\"], vpc_id)\n log.info(\n \"Attached internet gateway %s to VPC %s\", r[\"id\"], vpc_name or vpc_id\n )\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_vpn_gateway(DryRun=None, Type=None, 
AvailabilityZone=None):\n pass", "def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None):\n pass", "def create_network(request):\n cloud_id = request.matchdict['cloud']\n\n params = params_from_request(request)\n network_params = params.get('network')\n subnet_params = params.get('subnet')\n\n auth_context = auth_context_from_request(request)\n\n if not network_params:\n raise RequiredParameterMissingError('network')\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner, id=cloud_id)\n except me.DoesNotExist:\n raise CloudNotFoundError\n\n network = methods.create_network(auth_context.owner, cloud, network_params)\n network_dict = network.as_dict()\n\n # Bundling Subnet creation in this call because it is required\n # for backwards compatibility with the current UI\n if subnet_params:\n try:\n subnet = create_subnet(auth_context.owner, cloud,\n network, subnet_params)\n except Exception as exc:\n # Cleaning up the network object in case subnet creation\n # fails for any reason\n network.ctl.delete()\n raise exc\n network_dict['subnet'] = subnet.as_dict()\n\n return network.as_dict()", "def create_subnet(body=None):\n return IMPL.create_subnet(body)", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def add_subnet(tag_name, ip_part, route_table, az, realm):\n template_name = tag_name.title().replace('-', '')\n subnet = ec2.Subnet(\n template_name,\n VpcId=Ref(self.vpc),\n CidrBlock=_(Ref(self.vpc_base_net), \".{}.0/24\".format(ip_part)),\n AvailabilityZone=Select(az, GetAZs()),\n Tags=self.get_tags(tag_name, realm=realm)\n )\n subnet = self.t.add_resource(subnet)\n\n self.t.add_resource(ec2.SubnetRouteTableAssociation(\n \"{}RouteTableAssociation\".format(template_name),\n SubnetId=Ref(subnet),\n RouteTableId=Ref(route_table)\n ))\n\n return subnet", "def create_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def create_subnet(\n vpc_id=None,\n cidr_block=None,\n vpc_name=None,\n availability_zone=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n auto_assign_public_ipv4=False,\n):\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n subnet_object_dict = _create_resource(\n \"subnet\",\n name=subnet_name,\n tags=tags,\n vpc_id=vpc_id,\n availability_zone=availability_zone,\n cidr_block=cidr_block,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # if auto_assign_public_ipv4 is requested set that to true using boto3\n if auto_assign_public_ipv4:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n conn3.modify_subnet_attribute(\n MapPublicIpOnLaunch={\"Value\": True}, SubnetId=subnet_object_dict[\"id\"]\n )\n return subnet_object_dict", "def _create_interface(name, ip, route_dst=None):\n\n logging.debug(\"Creating %s 
interface.\", name)\n _ipr.link(\"add\", ifname=name, kind=\"dummy\")\n\n logging.debug(\"Assigning %s address to %s interface.\", ip, name)\n index = _ipr.link_lookup(ifname=name)[0]\n _ipr.link(\"set\", index=index, state=\"down\")\n _ipr.addr(\"add\", index=index, address=ip)\n _ipr.link(\"set\", index=index, state=\"up\")\n\n if route_dst is not None:\n # Adding new route\n _add_route(route_dst, name)", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def add_natgw(self, idx: int, nat_eips: Ref = None):\n if nat_eips:\n eip = Select(idx, nat_eips)\n else:\n self.nat_eip = self.t.add_resource(EIP(\n f'NatEip{self.idx}',\n Domain='vpc',\n ))\n eip = GetAtt(self.nat_eip, 'AllocationId')\n\n self.natgw = self.t.add_resource(NatGateway(\n f'NatGw{self.idx}',\n AllocationId=eip,\n SubnetId=Ref(self.subnet),\n ))\n\n self.t.add_output(Output(\n f'NatEip{self.idx}',\n Value=eip,\n Description=f'Nat Gateway Elastic IP for {self.az}',\n ))", "def _create_tunnel(name, ip, gre_local, gre_remote, route_dst=None):\n\n logging.debug(\"Creating %s interface.\", name)\n _ipr.link(\"add\", ifname=name, kind=\"gre\",\n gre_local=gre_local,\n gre_remote=gre_remote,\n gre_ttl=255)\n\n logging.debug(\"Assigning %s address to %s interface.\", ip, name)\n index = _ipr.link_lookup(ifname=name)[0]\n _ipr.link(\"set\", index=index, state=\"down\")\n _ipr.addr(\"add\", index=index, address=ip)\n _ipr.link(\"set\", index=index, state=\"up\")\n\n if route_dst is not None:\n # Adding new route\n _add_route(route_dst, name)", "def create_network_acl(\n vpc_id=None,\n vpc_name=None,\n network_acl_name=None,\n subnet_id=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n _id = vpc_name or 
vpc_id\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"VPC {} does not exist.\".format(_id)},\n }\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n elif subnet_id:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n r = _create_resource(\n \"network_acl\",\n name=network_acl_name,\n vpc_id=vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if r.get(\"created\") and subnet_id:\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(r[\"id\"], subnet_id)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n r[\"association_id\"] = association_id\n return r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.
def create_network_acl(DryRun=None, VpcId=None): pass
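A minimal boto3 sketch, assuming a placeholder VPC ID. A custom network ACL starts with only the default deny entries, so associated subnets pass no traffic until explicit rules are added (see create_network_acl_entry below).

import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")  # region is an assumption

# Create a network ACL in the VPC; it contains only the default deny rules
# until explicit numbered entries are added.
acl = ec2.create_network_acl(VpcId="vpc-0123456789abcdef0")  # placeholder VPC ID
print(acl["NetworkAcl"]["NetworkAclId"])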
[ "def create_network_acl(\n vpc_id=None,\n vpc_name=None,\n network_acl_name=None,\n subnet_id=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n _id = vpc_name or vpc_id\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"VPC {} does not exist.\".format(_id)},\n }\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n elif subnet_id:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n r = _create_resource(\n \"network_acl\",\n name=network_acl_name,\n vpc_id=vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if r.get(\"created\") and subnet_id:\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(r[\"id\"], subnet_id)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n r[\"association_id\"] = association_id\n return r", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def create_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Protocol=None, RuleAction=None, Egress=None, CidrBlock=None, Ipv6CidrBlock=None, IcmpTypeCode=None, PortRange=None):\n pass", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def create(\n cidr_block,\n instance_tenancy=None,\n vpc_name=None,\n enable_dns_support=None,\n enable_dns_hostnames=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc = conn.create_vpc(cidr_block, instance_tenancy=instance_tenancy)\n if vpc:\n log.info(\"The newly created VPC id is %s\", vpc.id)\n\n _maybe_set_name_tag(vpc_name, vpc)\n _maybe_set_tags(tags, vpc)\n _maybe_set_dns(conn, vpc.id, enable_dns_support, enable_dns_hostnames)\n _maybe_name_route_table(conn, vpc.id, vpc_name)\n if vpc_name:\n _cache_id(\n vpc_name,\n vpc.id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"created\": True, \"id\": 
vpc.id}\n else:\n log.warning(\"VPC was not created\")\n return {\"created\": False}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def _create_security_group(client, vpc_id):\n\n res = client.create_security_group(\n Description=\"Allow ssh from user public IP address\",\n GroupName=f'ssh-from-public-ip-{_rand_chars(10)}',\n VpcId=vpc_id,\n )\n\n group_id = res['GroupId']\n\n try:\n public_ip = f'{requests.get(\"https://checkip.amazonaws.com/\").text.strip()}/32'\n except Exception:\n print('encountered error getting public ip; using 0.0.0.0/0 instead')\n public_ip = '0.0.0.0/0'\n\n res = client.authorize_security_group_ingress(\n CidrIp=public_ip,\n FromPort=22,\n GroupId=group_id,\n IpProtocol='tcp',\n ToPort=22,\n )\n\n return group_id", "def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def create_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n protocol=None,\n rule_action=None,\n cidr_block=None,\n egress=None,\n network_acl_name=None,\n icmp_code=None,\n icmp_type=None,\n port_range_from=None,\n port_range_to=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n kwargs = locals()\n return _create_network_acl_entry(**kwargs)", "def __init__(self,\n vpc: 'VPCIdentity',\n *,\n name: str = None,\n resource_group: 'ResourceGroupIdentity' = None) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['NetworkACLPrototypeNetworkACLByRules', 'NetworkACLPrototypeNetworkACLBySourceNetworkACL']))\n raise Exception(msg)", "def create_network(request):\n cloud_id = request.matchdict['cloud']\n\n params = params_from_request(request)\n network_params = params.get('network')\n subnet_params = params.get('subnet')\n\n auth_context = auth_context_from_request(request)\n\n if not network_params:\n raise RequiredParameterMissingError('network')\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner, id=cloud_id)\n except me.DoesNotExist:\n raise CloudNotFoundError\n\n network = methods.create_network(auth_context.owner, cloud, network_params)\n network_dict = network.as_dict()\n\n # Bundling Subnet creation in this call because it is required\n # for backwards compatibility with the current UI\n if subnet_params:\n try:\n subnet = create_subnet(auth_context.owner, cloud,\n network, subnet_params)\n except Exception as exc:\n # Cleaning up the network object in case subnet creation\n # fails for any reason\n network.ctl.delete()\n raise exc\n network_dict['subnet'] = subnet.as_dict()\n\n return network.as_dict()", "def AddVpcNetworkFlags(parser, resource_kind='service'):\n parser.add_argument(\n '--network',\n metavar='NETWORK',\n help=(\n 'The VPC network that the Cloud Run {kind} will be able to send'\n ' traffic to. If --subnet is also specified, subnet must be a'\n ' subnetwork of the network specified by this --network flag. 
To'\n ' clear existing VPC network settings, use --clear-network.'.format(\n kind=resource_kind\n )\n ),\n )", "def create_nacls(self) -> None:\n selection_sagsnl = _ec2.SubnetSelection(subnet_group_name=SwiftComponents.SAGSNL)\n selection_amh = _ec2.SubnetSelection(subnet_group_name=SwiftComponents.AMH)\n\n self.create_nacl(cid=SwiftComponents.SAGSNL + \"NACL\", name=SwiftComponents.SAGSNL + \"NACL\",\n description=\"NACL for SAGSNL Subnet\",\n subnet_selection=selection_sagsnl)\n self.create_nacl(cid=SwiftComponents.AMH + \"NACL\", name=SwiftComponents.AMH + \"NACL\",\n description=\"NACL For AMMH Subnet\",\n subnet_selection=selection_amh)\n\n self.add_nacl_entry(cid=SwiftComponents.SAGSNL + \"NACL\",\n nacl_id=\"SAGSNLNACLEntry1\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.EGRESS)\n self.add_nacl_entry(cid=SwiftComponents.SAGSNL + \"NACL\",\n nacl_id=\"SAGSNLNACLEntry2\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.INGRESS)\n\n self.add_nacl_entry(cid=SwiftComponents.AMH + \"NACL\",\n nacl_id=\"AMHNACLEntry1\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.EGRESS)\n self.add_nacl_entry(cid=SwiftComponents.AMH + \"NACL\",\n nacl_id=\"AMHNACLEntry2\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.INGRESS)", "def create_network_acl(self,\n *,\n network_acl_prototype: 'NetworkACLPrototype' = None,\n **kwargs\n ) -> DetailedResponse:\n\n if network_acl_prototype is not None and isinstance(network_acl_prototype, NetworkACLPrototype):\n network_acl_prototype = convert_model(network_acl_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_network_acl')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(network_acl_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/network_acls'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def create_network_acl_rule(self, **kwargs):\n args = [\"acl\", \"action\", \"destination\", \"direction\", \"source\"]\n check_args(args, **kwargs)\n\n # Build dict of argument and assign default value when needed\n args = {\n \"acl\": kwargs.get('acl'),\n 'name': kwargs.get('name'),\n 'action': kwargs.get('action'),\n 'destination': kwargs.get('destination'),\n 'direction': kwargs.get('direction'),\n 'source': kwargs.get('source'),\n 'before': kwargs.get('before'),\n 'protocol': kwargs.get('protocol'),\n 'destination_port_max': kwargs.get('destination_port_max'),\n 'destination_port_min': kwargs.get('destination_port_min'),\n 'source_port_max': kwargs.get('source_port_max'),\n 'source_port_min': kwargs.get('source_port_min'),\n }\n\n # Construct payload\n payload = {}\n for key, value in args.items():\n # acl argument should not be in the payload\n if key != \"acl\" and value is not None:\n if key == \"before\":\n rg_info = self.rg.get_resource_group(\n args[\"resource_group\"])\n payload[\"resource_group\"] = {\"id\": rg_info[\"id\"]}\n else:\n 
payload[key] = value\n\n # Retrieve network ACL information to get the ID\n # (mostly useful if a name is provided)\n acl_info = self.get_network_acl(args[\"acl\"])\n if \"errors\" in acl_info:\n return acl_info\n\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules?version={}\"\n \"&generation={}\".format(acl_info[\"id\"],\n self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"POST\", path, headers(),\n json.dumps(payload))[\"data\"]\n\n except Exception as error:\n print(\"Error creating network ACL rule. {}\".format(error))\n raise", "def main(azs, region, keyid, secret, cidr, owner, env):\n\n # Validate the region\n myregion = boto.ec2.get_region(region_name=region)\n if myregion == None:\n print(\"Unknown region.\")\n exit(1)\n\n # Establish a VPC service connection\n try:\n conn = boto.vpc.VPCConnection(aws_access_key_id=keyid, aws_secret_access_key=secret, region=myregion)\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n\n # Grab the availability-zones\n zones = []\n all_zones = conn.get_all_zones()\n for zone in all_zones:\n if zone.state != 'available':\n continue\n zones.append(zone.name)\n\n subnets = subnet_sizes(azs, cidr) # Calculate the subnet sizes\n name = owner.lower() + '-' + env.lower() + '-' # Used for tagging\n\n vpc_id = create_vpc(conn, name, region, cidr)\n igw_id = create_igw(conn, name, region, vpc_id)\n sub_ids = create_sub(conn, name, region, vpc_id, azs, subnets, zones)\n rtb_ids = create_rtb(conn, name, region, vpc_id, azs, sub_ids, igw_id)\n acl_ids = create_acl(conn, name, region, vpc_id, azs, sub_ids, cidr)\n flow_id = create_flows(vpc_id, keyid, secret, region)", "def associate_network_acl_to_subnet(\n network_acl_id=None,\n subnet_id=None,\n network_acl_name=None,\n subnet_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if network_acl_name:\n network_acl_id = _get_resource_id(\n \"network_acl\",\n network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not network_acl_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"Network ACL {} does not exist.\".format(network_acl_name)\n },\n }\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"associated\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(network_acl_id, subnet_id)\n if association_id:\n log.info(\n \"Network ACL with id %s was associated with subnet %s\",\n network_acl_id,\n subnet_id,\n )\n\n return {\"associated\": True, \"id\": association_id}\n else:\n log.warning(\n \"Network ACL with id %s was not associated with subnet %s\",\n network_acl_id,\n subnet_id,\n )\n return {\n \"associated\": False,\n \"error\": {\"message\": \"ACL could not be assocaited.\"},\n }\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_subnets(ec2, vpc, subnets):\n # Generate candidate subnet CIDRs by shifting the VPC's prefix by 4 bits, yielding 16 possible subnet\n # CIDRs.\n vpc_cidr = ipaddress.ip_network(vpc.cidr_block)\n subnet_cidrs = list(vpc_cidr.subnets(prefixlen_diff=4))\n\n # The set difference between the availability zones that already have subnets and the 
availability zones\n # available in the region yields the set of availability zones where subnets must be created.\n subnet_azs = frozenset(map(lambda subnet: subnet.availability_zone, subnets))\n available_azs = frozenset(map(\n lambda az: az[\"ZoneName\"], ec2.meta.client.describe_availability_zones()[\"AvailabilityZones\"]))\n\n for az in (available_azs - subnet_azs):\n # If subnets already exist, their CIDRs may conflict with the candidate CIDRs that were generated.\n # Loop through the candidate list until subnet creation does not fail with a CIDR conflict error, or\n # until no candidates remain.\n while len(subnet_cidrs) > 0:\n try:\n cidr = subnet_cidrs.pop(0)\n subnet = vpc.create_subnet(AvailabilityZone=az, CidrBlock=cidr.with_prefixlen)\n # Ensure that the new subnet has the MapPublicIpOnLaunch attribute set\n ec2.meta.client.modify_subnet_attribute(SubnetId=subnet.id,\n MapPublicIpOnLaunch={\"Value\": True})\n click.echo(f\"Created new subnet: {subnet.id}\")\n break\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"InvalidSubnet.Conflict\":\n continue\n raise\n else:\n raise CraftingTableError(f\"Could not find valid CIDR to create subnet in {az}\")", "def create_security_group():\n conn = boto.connect_ec2()\n sec_group = conn.create_security_group(\"shopply\", \"Shopply servers security group\")\n sec_group.authorize('tcp', 80, 80, '0.0.0.0/0')\n sec_group.authorize('tcp', 22, 22, '0.0.0.0/0')\n sec_group.authorize('tcp', 8080, 8080, '0.0.0.0/0')\n sec_group.authorize('tcp', 9001, 9001, '0.0.0.0/0')", "async def create_vpc(self, tag_name, cidr_block):\n if not await self.exists(tag_name):\n vpc = self._resource.create_vpc(CidrBlock=cidr_block)\n vpc.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": tag_name}])\n vpc.wait_until_available()\n else:\n raise VpcNameAlreadyExists", "def rbac_create(self, event_type, payload, timestamp):\n valid_types = [\"network\"]\n\n event_type = payload['rbac_policy']['object_type']\n action = payload['rbac_policy']['action']\n if action not in RBAC_VALID_ACTIONS or event_type not in valid_types:\n # I'm bored. Nothing that concerns nor interests us.\n return\n\n network_id = payload['rbac_policy']['object_id']\n target_tenant = payload['rbac_policy']['target_tenant']\n policy_id = payload['rbac_policy']['id']\n LOG.debug(\"Adding RBAC policy for network %s with tenant %s\",\n network_id, target_tenant)\n\n # Read, modify, write an existing network document. Grab and modify\n # the admin version of the document. When saving the document it will\n # be indexed for both admin and user.\n doc = self.index_helper.get_document(network_id, for_admin=True)\n\n if not doc or not doc['_source']:\n LOG.error(_LE('Error adding rule to network. Network %(id)s '\n 'does not exist.') % {'id': network_id})\n return\n\n body = doc['_source']\n\n # Update network with RBAC policy.\n add_rbac(body, target_tenant, policy_id)\n\n # Bump version for race condition prevention. Use doc and not\n # body, since '_version' is outside of '_source'.\n version = doc['_version'] + 1\n self.index_helper.save_document(body, version=version)\n return pipeline.IndexItem(self.index_helper.plugin,\n event_type,\n payload,\n body)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules. After you add an entry, you can't modify it; you must either replace it, or create a new entry and delete the old one. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.
def create_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Protocol=None, RuleAction=None, Egress=None, CidrBlock=None, Ipv6CidrBlock=None, IcmpTypeCode=None, PortRange=None): pass
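A minimal usage sketch, assuming a boto3 EC2 client; the ACL ID and CIDR below are illustrative placeholders, not real resources.

import boto3

ec2 = boto3.client("ec2")

# Allow inbound HTTPS from a single /24. The rule number is left spaced
# (110 rather than 101) so later rules can be slotted in between.
ec2.create_network_acl_entry(
    NetworkAclId="acl-0123456789abcdef0",  # placeholder ACL ID
    RuleNumber=110,
    Protocol="6",                          # protocol number 6 = TCP
    RuleAction="allow",
    Egress=False,                          # False = ingress rule set
    CidrBlock="203.0.113.0/24",            # placeholder source CIDR
    PortRange={"From": 443, "To": 443},
)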
[ "def create_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n protocol=None,\n rule_action=None,\n cidr_block=None,\n egress=None,\n network_acl_name=None,\n icmp_code=None,\n icmp_type=None,\n port_range_from=None,\n port_range_to=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n kwargs = locals()\n return _create_network_acl_entry(**kwargs)", "def replace_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n protocol=None,\n rule_action=None,\n cidr_block=None,\n egress=None,\n network_acl_name=None,\n icmp_code=None,\n icmp_type=None,\n port_range_from=None,\n port_range_to=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n kwargs = locals()\n return _create_network_acl_entry(replace=True, **kwargs)", "def create_network_acl_rule(self, **kwargs):\n args = [\"acl\", \"action\", \"destination\", \"direction\", \"source\"]\n check_args(args, **kwargs)\n\n # Build dict of argument and assign default value when needed\n args = {\n \"acl\": kwargs.get('acl'),\n 'name': kwargs.get('name'),\n 'action': kwargs.get('action'),\n 'destination': kwargs.get('destination'),\n 'direction': kwargs.get('direction'),\n 'source': kwargs.get('source'),\n 'before': kwargs.get('before'),\n 'protocol': kwargs.get('protocol'),\n 'destination_port_max': kwargs.get('destination_port_max'),\n 'destination_port_min': kwargs.get('destination_port_min'),\n 'source_port_max': kwargs.get('source_port_max'),\n 'source_port_min': kwargs.get('source_port_min'),\n }\n\n # Construct payload\n payload = {}\n for key, value in args.items():\n # acl argument should not be in the payload\n if key != \"acl\" and value is not None:\n if key == \"before\":\n rg_info = self.rg.get_resource_group(\n args[\"resource_group\"])\n payload[\"resource_group\"] = {\"id\": rg_info[\"id\"]}\n else:\n payload[key] = value\n\n # Retrieve network ACL information to get the ID\n # (mostly useful if a name is provided)\n acl_info = self.get_network_acl(args[\"acl\"])\n if \"errors\" in acl_info:\n return acl_info\n\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules?version={}\"\n \"&generation={}\".format(acl_info[\"id\"],\n self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"POST\", path, headers(),\n json.dumps(payload))[\"data\"]\n\n except Exception as error:\n print(\"Error creating network ACL rule. 
{}\".format(error))\n raise", "def add_ingress_rule(self, rule):\n self.ingress_rules.append(rule)", "def create_network_acl_rule(self,\n network_acl_id: str,\n network_acl_rule_prototype: 'NetworkACLRulePrototype',\n **kwargs\n ) -> DetailedResponse:\n\n if network_acl_id is None:\n raise ValueError('network_acl_id must be provided')\n if network_acl_rule_prototype is None:\n raise ValueError('network_acl_rule_prototype must be provided')\n if isinstance(network_acl_rule_prototype, NetworkACLRulePrototype):\n network_acl_rule_prototype = convert_model(network_acl_rule_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_network_acl_rule')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(network_acl_rule_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['network_acl_id']\n path_param_values = self.encode_path_vars(network_acl_id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/network_acls/{network_acl_id}/rules'.format(**path_param_dict)\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def rbac_create(self, event_type, payload, timestamp):\n valid_types = [\"network\"]\n\n event_type = payload['rbac_policy']['object_type']\n action = payload['rbac_policy']['action']\n if action not in RBAC_VALID_ACTIONS or event_type not in valid_types:\n # I'm bored. Nothing that concerns nor interests us.\n return\n\n network_id = payload['rbac_policy']['object_id']\n target_tenant = payload['rbac_policy']['target_tenant']\n policy_id = payload['rbac_policy']['id']\n LOG.debug(\"Adding RBAC policy for network %s with tenant %s\",\n network_id, target_tenant)\n\n # Read, modify, write an existing network document. Grab and modify\n # the admin version of the document. When saving the document it will\n # be indexed for both admin and user.\n doc = self.index_helper.get_document(network_id, for_admin=True)\n\n if not doc or not doc['_source']:\n LOG.error(_LE('Error adding rule to network. Network %(id)s '\n 'does not exist.') % {'id': network_id})\n return\n\n body = doc['_source']\n\n # Update network with RBAC policy.\n add_rbac(body, target_tenant, policy_id)\n\n # Bump version for race condition prevention. 
Use doc and not\n # body, since '_version' is outside of '_source'.\n version = doc['_version'] + 1\n self.index_helper.save_document(body, version=version)\n return pipeline.IndexItem(self.index_helper.plugin,\n event_type,\n payload,\n body)", "def replace_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Protocol=None, RuleAction=None, Egress=None, CidrBlock=None, Ipv6CidrBlock=None, IcmpTypeCode=None, PortRange=None):\n pass", "def ex_create_balancer_access_rule_no_poll(self, balancer, rule):\r\n uri = '/loadbalancers/%s/accesslist' % (balancer.id)\r\n resp = self.connection.request(\r\n uri, method='POST',\r\n data=json.dumps({'networkItem': rule._to_dict()})\r\n )\r\n\r\n return resp.status == httplib.ACCEPTED", "def add(self, rule):\r\n self.insertRule(rule, index=None)", "def create_rule(connection, rule_info):\n connection.command_path = 'rule'\n extra_headers = {\n connection.header_key: connection.token,\n 'Content-Type': 'text/xml'\n }\n url = connection.build_url()\n rule_data = _build_rule_payload(rule_info)\n verify_ssl = connection.verify_ssl\n res = requests.post(url, headers=extra_headers,\n data=rule_data,\n verify=verify_ssl)\n if res.status_code == 201:\n return rules.parse_rule(res.content)\n\n if res.status_code == 403 and \"Rule already exists\" in res.text:\n raise RuleCreationDuplicateRule(\"Rule already exists\")\n\n raise RuleCreationException(\"Error creating rule: {0} => {0}\".format(\n res.status_code, res.content\n ))", "def delete_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n egress=None,\n network_acl_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not _exactly_one((network_acl_name, network_acl_id)):\n raise SaltInvocationError(\n \"One (but not both) of network_acl_id or network_acl_name must be provided.\"\n )\n\n for v in (\"rule_number\", \"egress\"):\n if locals()[v] is None:\n raise SaltInvocationError(\"{} is required.\".format(v))\n\n if network_acl_name:\n network_acl_id = _get_resource_id(\n \"network_acl\",\n network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not network_acl_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"Network ACL {} does not exist.\".format(\n network_acl_name or network_acl_id\n )\n },\n }\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n deleted = conn.delete_network_acl_entry(\n network_acl_id, rule_number, egress=egress\n )\n if deleted:\n log.info(\"Network ACL entry was deleted\")\n else:\n log.warning(\"Network ACL was not deleted\")\n return {\"deleted\": deleted}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def add_ipv4_rule_acl_bulk(self, **kwargs):\n if 'acl_rules' not in kwargs or not kwargs['acl_rules']:\n return True\n\n acl_rules = kwargs['acl_rules']\n\n if len(acl_rules) > 64:\n raise ValueError(\"On MLX device maximum 64 rules can be bulked \"\n \"while rule creation\")\n\n # Parse params\n acl_name = self.ip.parse_acl_name(**kwargs)\n ret = self.get_acl_address_and_acl_type(acl_name)\n acl_type = ret['type']\n address_type = ret['protocol']\n\n if address_type != 'ip':\n raise ValueError(\"IPv4 Rule can not be added to non-ip ACL.\"\n \"ACL {} is of type {}\"\n .format(acl_name, address_type))\n\n # Get already configured seq_ids\n configured_seq_ids = self.get_configured_seq_ids(acl_name,\n address_type)\n\n # if there are already configured rules. 
Make sure that they are\n # not overlapping with new rules to be configured\n self.set_seq_id_for_bulk_rules(configured_seq_ids, acl_rules)\n\n # Parse parameters\n if acl_type == 'standard':\n user_data_list = self.validate_std_rules(acl_name, acl_rules)\n cmd = acl_template.add_ip_standard_acl_rule_template\n elif acl_type == 'extended':\n user_data_list = self.validate_ext_rules(acl_name, acl_rules)\n cmd = acl_template.add_ip_extended_acl_rule_template\n else:\n raise ValueError('{} not supported'.format(acl_type))\n\n configured_count = 0\n\n cli_arr = ['ip access-list ' + ' ' + acl_type + ' ' + acl_name]\n for user_data in user_data_list:\n t = jinja2.Template(cmd)\n config = t.render(**user_data)\n config = ' '.join(config.split())\n cli_arr.append(config)\n try:\n output = self._callback(cli_arr, handler='cli-set')\n if 'Failed to initialize dns request' in output:\n raise ValueError('ACL DNS: Errno(5) Failed '\n 'to initialize dns request')\n if 'are undefined' in output:\n raise ValueError('Invlaid icmp filter: {}'\n .format(user_data['icmp_filter']))\n self._process_cli_output(inspect.stack()[0][3], config, output)\n configured_count = configured_count + 1\n cli_arr.pop()\n except Exception as err:\n raise ValueError(err)\n return True", "def _add_dnat_rule_cmd(self, cmd_list, rule_num, ext_if_id,\n dest_addr, translation_addr):\n\n nat_cmd = self._get_nat_cmd()\n\n # Execute the commands\n cmd_list.append(\n SetCmd(\"{0}/destination/rule/{1}\".format(nat_cmd, rule_num)))\n cmd_list.append(SetCmd(\"{0}/destination/rule/{1}/inbound-interface/{2}\"\n .format(nat_cmd, rule_num, ext_if_id)))\n cmd_list.append(SetCmd(\"{0}/destination/rule/{1}/destination/\"\n \"address/{2}\".format(nat_cmd, rule_num,\n urllib.quote_plus(dest_addr))))\n cmd_list.append(SetCmd(\"{0}/destination/rule/{1}/translation/\"\n \"address/{2}\".format(nat_cmd, rule_num,\n urllib.quote_plus(translation_addr))))", "def create_acl_rule(self, acl_name_list, mac_list, acl_policy):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_CONTROLS)\n\n for acl_name in acl_name_list:\n self._create_acl_rule(acl_name, acl_policy, mac_list)\n time.sleep(4)", "def delete_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Egress=None):\n pass", "def create_or_update(\n self,\n resource_group_name: str,\n network_security_perimeter_name: str,\n profile_name: str,\n access_rule_name: str,\n parameters: IO,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.NspAccessRule:", "def add_ipv4_rule_acl(self, **parameters):\n params_validator.validate_params_mlx_add_ipv4_rule_acl(**parameters)\n\n acl_name = parameters['acl_name']\n ret = self.get_acl_address_and_acl_type(acl_name)\n acl_type = ret['type']\n address_type = ret['protocol']\n\n if address_type != 'ip':\n raise ValueError('{} not supported'.format(address_type))\n\n cli_arr = ['ip access-list ' + ' ' + acl_type + ' ' + acl_name]\n\n if acl_type == 'standard':\n user_data = self.parse_params_for_add_ipv4_standard(**parameters)\n cmd = acl_template.add_ip_standard_acl_rule_template\n elif acl_type == 'extended':\n user_data = self.parse_params_for_add_ipv4_extended(**parameters)\n cmd = acl_template.add_ip_extended_acl_rule_template\n else:\n raise ValueError('{} not supported'.format(acl_type))\n\n t = jinja2.Template(cmd)\n config = t.render(**user_data)\n config = ' '.join(config.split())\n cli_arr.append(config)\n\n output = self._callback(cli_arr, handler='cli-set')\n if 'Failed to initialize dns request' in output:\n 
raise ValueError('ACL DNS: Errno(5) Failed '\n 'to initialize dns request')\n if 'are undefined' in output:\n raise ValueError('Invlaid icmp filter: {}'\n .format(parameters['icmp_filter']))\n return self._process_cli_output(inspect.stack()[0][3], config, output)", "def _add_nat_rule(ctx, gateway, rule_type, original_ip, translated_ip):\n any_type = \"any\"\n\n ctx.logger.info(\"Create floating ip NAT rule: original_ip '{0}',\"\n \"translated_ip '{1}', rule type '{2}'\"\n .format(original_ip, translated_ip, rule_type))\n\n gateway.add_nat_rule(\n rule_type, original_ip, any_type, translated_ip, any_type, any_type)", "def add_secgroup_rule(self,\n name=None, # group name\n port=None,\n protocol=None,\n ip_range=None):\n\n try:\n portmin, portmax = port.split(\":\")\n except ValueError:\n portmin = -1\n portmax = -1\n\n try:\n data = self.ec2_client.authorize_security_group_ingress(\n GroupName=name,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': int(portmin),\n 'ToPort': int(portmax),\n 'IpRanges': [{'CidrIp': ip_range}]},\n ])\n Console.ok(f'Ingress Successfully Set as {data}')\n except ClientError as e:\n Console.info(\"Rule couldn't be added to security group\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a placement group that you launch cluster instances into. You must give the group a name that's unique within the scope of your account. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.
def create_placement_group(DryRun=None, GroupName=None, Strategy=None): pass
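A minimal usage sketch, assuming a boto3 EC2 client; the group name is an illustrative placeholder and must be unique within the account.

import boto3

ec2 = boto3.client("ec2")

# The "cluster" strategy packs instances close together for
# low-latency, high-throughput networking.
ec2.create_placement_group(
    GroupName="analytics-cluster",  # placeholder name, unique per account
    Strategy="cluster",
)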
[ "def create_group():\r\n new_group = input(\"| Enter the name of the Group |\")\r\n adgroup.ADGroup.create(new_group, security_enabled=True, scope='GLOBAL')\r\n return \"| Group created |\"", "def create(self, group):\n self.request.mongo_connection.shinken.hostgroups.insert(\n group.as_dict()\n )", "def Create(iam,groupname: str,tag='/'):\n\t\t\t\treturn iam.resource.Group(groupname).create(Path=AWS.preptag(tag))", "def create_group(c, runner, group):\n if group_exists(c, group, runner=runner):\n return True\n\n cmd = \"groupadd {}\".format(group)\n return runner(cmd, hide=True, warn=True).ok", "def _create_consistencygroup(self, context, group):\n name = group['id']\n\n LOG.debug(_(\"Creating consistencygroup %(id)s for %(display_name)s\") %\n {'id': name, 'display_name': group['name']})\n\n ans = self.vmem_mg.snapshot.create_snapgroup(name)\n\n if not ans['success']:\n msg = (_(\"Failed to create consistencygroup %(name)s: %(msg)s\") %\n {'name': group['name'], 'msg': ans['msg']})\n raise exception.ViolinBackendErr(message=msg)", "def test_create_eip_group_with_name(self):\n name = 'test_eip_group'\n self.client.create_eip_group(eip_count=2,\n bandwidth_in_mbps=10,\n name=name, config=None)", "def create_group(self, group, **kwargs):\n\n status, data = self.run_gerrit_command('create-group', group, **kwargs)\n\n return status, data", "def createfsgroup(self, groupname, gid=None, memberlist=None):", "def create_nodegroup(ctx, name, node_name, region, verbosity, node_subnets, tags, kubeconf, node_min, node_max,\n node_role, node_type, node_sg_ingress, keyname, ssh_public_key, ami, bootstrap_opt, no_user_data,\n yes):\n cp = ControlPlane(name, region=region)\n cluster_info = cp.query()\n\n if not kubeconf:\n files = os.environ.get('KUBECONFIG', '~/.kube/config')\n kubeconf = os.path.expanduser(files.split(':')[0])\n if not yes:\n if not click.confirm('Are you sure to create the EKS cluster in '\n 'region[{}] with kubeconfig[{}]'.format(region, kubeconf)):\n exit(0)\n ng = NodeGroup(node_name, cluster_info=cluster_info, region=region, ami=ami, keypair=keyname, subnets=node_subnets,\n role=node_role, sg_ingresses=node_sg_ingress, ssh_public_key=ssh_public_key, tags=tags,\n kubeconf=kubeconf, min_nodes=node_min, max_nodes=node_max, instance_type=node_type,\n no_user_data=no_user_data)\n ng.create()", "async def create_group(self, userid, gameid):\n raise NotImplementedError()", "def create_group_with_given_permissions(perm_pks, group_name, Group):\n group = Group.objects.create(name=group_name)\n group.permissions.add(*perm_pks)\n return group", "def create_group(self):\n group_name = self.line_grp.text().strip() # removes whitespaces from left and right\n\n if group_name == '':\n display_msg(MsgIcon.WARNING, \"Warning\", \"Please choose a group name\")\n return\n\n self.line_grp.setText(\"\")\n if self.db.insert_group(group_name): # if creation was successful:\n self.list_grp.addItem(group_name) # adds new group to the list.\n self.db.notify_stats() # update stats tab", "def create_group(self, group):\n path = \"api/groups/\"\n return Group.from_dict(self._post(path, group._asdict()))", "def __create_resource_group(args):\n\n resource_client = __create_resource_management_client()\n resource_client.resource_groups.create_or_update(\n args.resource_group_name,\n {\"location\": \"westus\"}\n ).result()", "def create_person_group(self):\n url = self.base_url + \"persongroups/\" + self.pg_name\n response = requests.put(url, headers=self.headers, json={\"name\" : self.pg_name})\n if 
response.status_code == 200 :\n print(\"added \" + self.pg_name + \" person group\")\n else:\n print(response.json())", "def _create_hostgroup(self, hostgroupname):\n cli_cmd = 'createhostgroup -n %(name)s' % {'name': hostgroupname}\n out = self._execute_cli(cli_cmd)\n\n self._assert_cli_operate_out('_create_hostgroup',\n ('Failed to Create hostgroup %s.'\n % hostgroupname),\n cli_cmd, out)", "def create_node_group(node_group_name, node_group_owner, node_group_description):\n\n data = {'node_group_name': node_group_name,\n 'node_group_owner': node_group_owner,\n 'node_group_description': node_group_description,\n }\n\n log.info('Creating node_group node_group_name={0},node_group_owner={1},node_group_description={2}'.format(node_group_name, node_group_owner, node_group_description))\n return api_submit('/api/node_groups', data, method='put')", "def create_group(gid: str):\n if subprocess.run('getent group {}'.format(gid), shell=True).returncode != 0:\n # create group with gid if not exist\n logger.info('Adding group with gid {}'.format(gid))\n subprocess.run(\n 'groupadd --gid {} group_{}'.format(gid, gid), shell=True)", "def test_001(self):\n HEADING()\n command = \"cm secgroup create --cloud={cloud} test-group\"\n result = run(command.format(**self.data))\n assert \"Created a new security group [test-group]\" in result\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your Standard Reserved Instances, you can use the DescribeReservedInstances operation. The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances. To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation. For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.
def create_reserved_instances_listing(ReservedInstancesId=None, InstanceCount=None, PriceSchedules=None, ClientToken=None): pass
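A minimal usage sketch, assuming a boto3 EC2 client; the Reserved Instance ID and prices are illustrative placeholders. Each price schedule entry sets the upfront price while that many months remain on the reservation, and the client token makes the request idempotent.

import uuid
import boto3

ec2 = boto3.client("ec2")

# The RI ID would normally come from a describe_reserved_instances() call.
ec2.create_reserved_instances_listing(
    ReservedInstancesId="11111111-2222-3333-4444-555555555555",  # placeholder
    InstanceCount=1,
    PriceSchedules=[
        {"Term": 11, "Price": 120.0, "CurrencyCode": "USD"},  # months 11 down to 6
        {"Term": 5, "Price": 60.0, "CurrencyCode": "USD"},    # final 5 months
    ],
    ClientToken=str(uuid.uuid4()),  # idempotency token
)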
[ "def getReservedInstances(verbose):\n lres = {}\n jResp = EC2C.describe_reserved_instances()\n for reserved in jResp['ReservedInstances']:\n if reserved['State'] == 'active':\n if verbose:\n lres[reserved['InstanceType']] = str(reserved['Start'])+\";\"+\\\n str(reserved['End'])+\";\"+\\\n str(reserved['InstanceCount'])+\";\"+\\\n reserved['ProductDescription']+\";\"+\\\n str(reserved['UsagePrice'])\n else:\n if re.search(\"win\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"windows\"\n elif re.search(\"red hat\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"redhat\"\n elif re.search(\"suse\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"suse\"\n else:\n os = \"linux\"\n lres[reserved['InstanceType']+\";\"+os] = str(reserved['InstanceCount'])\n return lres", "def describe_reserved_instances(DryRun=None, ReservedInstancesIds=None, Filters=None, OfferingType=None, OfferingClass=None):\n pass", "def purchase_reserved_instances_offering(DryRun=None, ReservedInstancesOfferingId=None, InstanceCount=None, LimitPrice=None):\n pass", "def describe_reserved_instances_offerings(DryRun=None, ReservedInstancesOfferingIds=None, InstanceType=None, AvailabilityZone=None, ProductDescription=None, Filters=None, InstanceTenancy=None, OfferingType=None, NextToken=None, MaxResults=None, IncludeMarketplace=None, MinDuration=None, MaxDuration=None, MaxInstanceCount=None, OfferingClass=None):\n pass", "def modify_reserved_instances(ClientToken=None, ReservedInstancesIds=None, TargetConfigurations=None):\n pass", "def create_ec2_instances(count=1):\n conn = get_ec2_connection()\n user_data = get_user_data()\n reservation = conn.run_instances(image_id=settings.EC2_IMAGE_ID,\n min_count=count,\n max_count=count,\n instance_type=settings.EC2_INSTANCE_TYPE,\n user_data=user_data)\n return reservation.instances", "def request_spot_instances(DryRun=None, SpotPrice=None, ClientToken=None, InstanceCount=None, Type=None, ValidFrom=None, ValidUntil=None, LaunchGroup=None, AvailabilityZoneGroup=None, BlockDurationMinutes=None, LaunchSpecification=None):\n pass", "def get_listing():\n\n ec2 = boto3.client('ec2')\n listing = []\n\n try:\n full_listing = ec2.describe_instances(\n Filters=[\n {\n 'Name': 'instance-state-name',\n 'Values': [ 'running' ]\n }\n ],\n MaxResults=1000)\n except Exception as e:\n print(e)\n sys.exit(1)\n\n for reservation in full_listing['Reservations']:\n for instance in reservation['Instances']:\n listing.append(instance)\n\n return listing", "def ex_list_reserved_nodes(self):\r\n params = {'Action': 'DescribeReservedInstances'}\r\n\r\n response = self.connection.request(self.path, params=params).object\r\n\r\n return self._to_reserved_nodes(response, 'reservedInstancesSet/item')", "def run_instances(self):\n # create an entry in the s3 log for the start of this task \n self.log_to_s3('run-instances-start.log', 'start')\n\n session = botocore.session.get_session()\n client = session.create_client('ec2', region_name=self.aws_region)\n\n # convert user-data to base64\n user_data = ''\n # NOTE conversion of file to string, then string to bytes, the bytes encoded \n # base64 - then decode the base64 bytes into base64 string\n with open(self.ec2_user_data, 'r') as f:\n user_data = base64.b64encode(bytes(f.read(), \"utf-8\")).decode(\"utf-8\")\n\n if self.ec2_type in (CONST.VALID_EC2_INSTANCE_TYPES_EBS_ONLY).split('|'):\n # block device mapping for ebs backed instances\n # creates an ephemeral EBS volume (delete on terminate)\n # Note that gp2 instance type is EBS SSD\n 
custom_block_device_mapping = [{\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0',\n 'Ebs':{\n 'VolumeSize': self.ec2_ebs_only_volume_size,\n 'VolumeType': self.ec2_ebs_only_volume_type,\n },\n }]\n else:\n # block device mapping allows for 2 extra drives\n # - works for either single ssd or 2 ssd's\n custom_block_device_mapping = [ \n {\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0'\n },\n {\n 'DeviceName': '/dev/sdc',\n 'VirtualName': 'ephemeral1'\n }\n ]\n\n r = client.request_spot_instances(\n InstanceCount=self.ec2_count,\n SpotPrice=self.ec2_spot_price,\n LaunchSpecification= {\n 'SecurityGroupIds': [\n self.ec2_security_group_id,\n ],\n 'SecurityGroups': [\n self.ec2_security_groups,\n ],\n 'Placement': {\n 'AvailabilityZone': self.ec2_availability_zone,\n },\n 'BlockDeviceMappings': custom_block_device_mapping,\n 'IamInstanceProfile': {\n 'Arn': self.ec2_arn_id,\n },\n 'UserData': user_data,\n 'ImageId': self.ec2_image_id,\n 'InstanceType': self.ec2_type,\n 'KeyName': self.ec2_security_key,\n },\n )\n\n # get the spot instance request ids\n spot_ids = []\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Spot request ids:'))\n for i, spot_inst in enumerate(r['SpotInstanceRequests']):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, \n inst_str + '\\t' + spot_inst['SpotInstanceRequestId']))\n spot_ids.append(spot_inst['SpotInstanceRequestId'])\n utility.list_to_file(CONST.SPOT_REQUEST_IDS, spot_ids)\n\n # create a list of spot instance statuses - so we can print out\n # some updates to the user\n spot_status = ['']*len(spot_ids)\n # Expecting status codes of \"pending-evaluation\", \"pending-fulfillment\", or \n # fulfilled. Any other status-code should be printed out & the program \n # terminated.\n expected_status = ['fulfilled', 'pending-evaluation', 'pending-fulfillment']\n instance_ids = [None]*len(spot_ids)\n\n # check the status of the spot requests\n while True:\n fulfilled = 0\n for i, id in enumerate(spot_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_spot_instance_requests(SpotInstanceRequestIds=[id])\n status_code = r['SpotInstanceRequests'][0]['Status']['Code']\n if status_code not in expected_status:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, 'Unexpected status for spot request ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': ') +\n colour_msg(Colour.PURPLE, status_code))\n sys.exit(1)\n if status_code != spot_status[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Spot instance request: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tStatus: ') +\n colour_msg(Colour.PURPLE, status_code))\n spot_status[i] = status_code\n if status_code == 'fulfilled':\n fulfilled += 1\n # record the instance id\n instance_ids[i] = r['SpotInstanceRequests'][0]['InstanceId']\n if fulfilled == len(spot_ids):\n break\n time.sleep(1)\n\n utility.list_to_file(CONST.INSTANCE_IDS, instance_ids)\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ids:'))\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n tag_val = self.ec2_instance_tag + str(i)\n client.create_tags(Resources=[id], Tags=[{'Key':'Name', 'Value':tag_val}])\n\n # monitor the instances until all running\n instance_states = ['']*len(instance_ids)\n expected_states = ['running', 'pending']\n instance_ips = [None]*len(instance_ids)\n running = 0\n while True:\n running = 0\n for i, id in 
enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instances(InstanceIds=[id])\n state = r['Reservations'][0]['Instances'][0]['State']['Name']\n if state not in expected_states:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, \n 'Unexpected instance state for instance-id ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': \\t') +\n colour_msg(Colour.PURPLE, state))\n sys.exit(1)\n if state != instance_states[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tState: ') +\n colour_msg(Colour.PURPLE, state))\n instance_states[i] = state\n if state == 'running':\n running += 1\n # record the instance id\n instance_ips[i] = r['Reservations'][0]['Instances'][0]['PublicDnsName']\n if running == len(instance_ids):\n break\n time.sleep(10)\n\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ips:'))\n for i, id in enumerate(instance_ips):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n \n utility.list_to_file(CONST.INSTANCE_IPS_FILE, instance_ips)\n # need to at least wait until all the instances are reachable\n # possible statuses: (passed | failed | initializing | insufficient-data )\n reachability = ['']*len(instance_ids)\n while True:\n passed = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instance_status(InstanceIds=[id])\n state = r['InstanceStatuses'][0]['InstanceStatus']['Details'][0]['Status']\n if state != reachability[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tReachability: ') +\n colour_msg(Colour.PURPLE, state))\n reachability[i] = state\n if state == 'passed':\n passed += 1\n if passed == len(instance_ids):\n break\n time.sleep(10)\n \n lgr.info(CONST.INFO + colour_msg(Colour.GREEN, 'Instances are reachable'))\n \n # if user-data configuration file supplied - check that it has worked\n # Note that this checker is run once on each instance\n if self.ec2_user_data:\n lgr.info(CONST.INFO + colour_msg(Colour.CYAN, \n 'Starting job to monitor user-data configuration...'))\n # at the moment is calling a local script that does the checking\n result = subprocess.call('./' + self.ec2_user_data_check) \n if result:\n lgr.error(CONST.ERROR + colour_msg(Colour.CYAN, \n 'user data checker FAILED'))\n sys.exit(1)\n\n # create an entry in the s3 log for finish this task \n self.log_to_s3('run-instances-finish.log', 'finish')\n\n # return the list of ip's for the newly created instances\n return utility.file_to_list(CONST.INSTANCE_IPS_FILE)", "def list_instances(ec2):\n\n reservations = ec2.describe_instances(\n Filters=[\n {'Name': 'tag-key', 'Values': ['backup', 'Backup']},\n ]\n )['Reservations']\n\n instances = sum(\n [\n [i for i in r['Instances']]\n for r in reservations\n ], [])\n\n return instances", "def create_instance(config):\n\n try:\n client = boto3.client('ec2')\n except Exception as e:\n print(f'An error occurred while creating the boto3 client: {e}')\n sys.exit(1)\n\n ami_id = _get_ami_id(client, config.ami_type, config.architecture, config.root_device_type, config.virtualization_type)\n default_vpc_id = _ensure_default_vpc(client)\n key_pair_names = _create_key_pairs(client, config)\n\n blockDeviceMappings = []\n for volume in config.volumes:\n blockDeviceMappings.append({\n 'DeviceName': volume.device,\n 'Ebs': {\n 
'DeleteOnTermination': True,\n 'VolumeSize': volume.size_gb,\n 'VolumeType': 'gp2',\n },\n })\n\n res = client.run_instances(\n BlockDeviceMappings=blockDeviceMappings,\n\n ImageId=ami_id,\n InstanceType=config.instance_type,\n\n MaxCount=config.max_count,\n MinCount=config.min_count,\n\n SecurityGroupIds=[\n _create_security_group(client, default_vpc_id)\n ],\n\n UserData=_user_data_script(config),\n )\n\n ec2 = boto3.resource('ec2')\n instances = res['Instances']\n\n for i, instance in enumerate(instances):\n public_ip = ec2.Instance(instance['InstanceId']).public_ip_address\n print(f'instance {i} public ip address = {public_ip}')", "def create_ec2_instace(name=\"shopply\", security_group=\"dwd\"):\n conn = boto.connect_ec2()\n reservation = conn.run_instances(\n AMI,\n key_name = KEYPAIR,\n instance_type = 't1.micro',\n security_groups = [security_group],\n instance_initiated_shutdown_behavior = \"stop\"\n )\n \n instance = reservation.instances[0]\n instance.add_tag(\"Name\", name)\n \n \n print \"Launching instance: \", instance.public_dns_name", "def _menu_aws_compute_new_instance():\n # AMIs dict\n amis = {'1': 'ami-31328842', '2': 'ami-8b8c57f8', '3': 'ami-f95ef58a', '4': 'ami-c6972fb5'}\n # Ask user to enter instance name\n i_name = raw_input(\"Enter instance name: \")\n # Ask user to choose OS\n print \"Creating new instance.. Choose OS:\"\n print \"\\t1. Amazon Linux\"\n print \"\\t2. Red Hat Enterprise Linux 7.2\"\n print \"\\t3. Ubuntu Server 14.04 LTS\"\n print \"\\t4. Microsoft Windows Server 2012 R2 Base\"\n op = raw_input(\"Enter option: \")\n # Validating entered option\n op = __op_validation(r'^([1-4]|\\\\q)$', op)\n if op == \"\\\\q\":\n _menu_aws_compute()\n else:\n # Create new fresh instance\n ec2i.start_new_instance(ec2conn, amis[op], i_name)", "def create_spot_instance(config, job_id, sched_time, docker_image, env_vars):\n\n client = boto3.client('ec2')\n\n # Get my own public fqdn by quering metadata\n my_own_name = urllib2.urlopen(\n \"http://169.254.169.254/latest/meta-data/public-hostname\").read()\n\n user_data = (\n \"#!/bin/bash\\n\"\n \"touch /tmp/start.txt\\n\"\n \"curl -i -H 'Content-Type: application/json' \"\n \"'http://%s/v1/notifications/%s?status=started' -X PUT\\n\"\n \"yum -y update\\n\"\n \"yum install docker -y\\n\"\n \"sudo service docker start\\n\"\n \"sudo docker run %s %s\\n\"\n \"touch /tmp/executing.txt\\n\"\n \"sleep 180\\n\"\n \"curl -i -H 'Content-Type: application/json' \"\n \"'http://%s/v1/notifications/%s?status=finished' -X PUT\\n\" %\n (my_own_name, job_id, env_vars, docker_image, my_own_name, job_id))\n\n response = client.request_spot_instances(\n SpotPrice=\"%s\" % config[\"spot-price\"],\n InstanceCount=1,\n Type='one-time',\n ValidFrom=sched_time,\n LaunchSpecification={\n 'ImageId': config[\"ami-id\"],\n 'InstanceType': config[\"instance-type\"],\n 'KeyName': config[\"key-name\"],\n 'SecurityGroups': ['default', config[\"sg-name\"]],\n 'UserData': base64.b64encode(user_data)\n }\n )\n\n req_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']\n req_state = response['SpotInstanceRequests'][0][\n 'State'] # open/failed/active/cancelled/closed\n req_status_code = response['SpotInstanceRequests'][0][\n 'Status']['Code'] # pending-evaluation/price-too-low/etc\n\n return [req_id, req_state, req_status_code]", "def optimizeReservation(verbose,region):\n print(\"WARNING: As it's not possible to get OS through AWS API, All \"\\\n \"Linux are reported as Linux (no RedHat, Suse, etc)\\n\"\\\n \"This issue will be address in 
a future update\\n\\n\")\n shouldReserved = {}\n dreserved = getReservedInstances(False)\n dinstances = listInstances(False)\n dflavors = getInstanceTypes(region)\n count_by_type_os = countInstanceByTypeByOS(False, dinstances)\n resp = \"\"\n for typos, nb in count_by_type_os.items():\n if typos in dreserved:\n if int(count_by_type_os[typos]) - int(dreserved[typos]) >= 0:\n count_by_type_os[typos] = int(count_by_type_os[typos]) - int(dreserved[typos])\n resp += \"Reservation fully used for \"+typos+\"\\n\"\n else:\n print(\"Reservation not fully used for \"+typos+\": \"+dreserved[typos]+\"reserved but only \"+count_by_type_os[typos]+\" instances\")\n for typos, nb in dreserved.items():\n if typos not in count_by_type_os:\n resp += \"Reservation is not used for \"+typos+\"\\n\"\n #Provide tips for better reservations\n #Begin by removing instances that have reservation\n for instanceId in list(dinstances):\n if dinstances[instanceId]['flavor'] in dreserved:\n if int(dreserved[dinstances[instanceId]['flavor']]) > 0:\n dreserved[dinstances[instanceId]['flavor']] -= 1\n del dinstances[instanceId]\n today = datetime.datetime.now(datetime.timezone.utc)\n months6 = today-datetime.timedelta(days=180)\n for k, v in dinstances.items():\n if v['LaunchTime'] < months6:\n try:\n shouldReserved[v['flavor']+\";\"+v['platform']] += 1\n except:\n shouldReserved[v['flavor']+\";\"+v['platform']] = 1\n resp += \"\\nBased on instances older than 6 months, you should buy following reservations:\\n\"\n saveno, savepa = 0, 0\n for k, v in shouldReserved.items():\n resp += k+\":\"+str(v)+\"\\n\"\n saveno += (float(dflavors[k]['ondemand']) - float(dflavors[k]['reserved1yno'])) * v\n savepa += (float(dflavors[k]['ondemand']) - float(dflavors[k]['reserved1ypa'])) * v\n resp += \"You can save up to \"+str(saveno)+\"$/hour with no upfront reservation\\n\"\n resp += \"You can save up to \"+str(savepa)+\"$/hour with partial upfront reservation\\n\"\n if verbose:\n resp += \"\\nInstances below doesn't have reservation:\\n\"\n for k, v in count_by_type_os.items():\n resp += k+\":\"+str(v)+\"\\n\"\n return saveno, resp", "def create_vm(request: VMRequest):\n id = len(deployments)\n\n item = VMInstance(id=id)\n item.vmname = \"vg1111yr\"\n item.az = request.az\n item.dc = request.dc\n item.rd = request.rd\n item.size = request.size\n item.os = request.os\n print(item.dict())\n\n deployments.append(item.dict())\n return item", "def list_instances(name):\n\titems = []\n\ttry:\n\t\tif len(name) <2:\n\t\t\titems.append(alp.Item(\n\t\t\t\ttitle='Searching',\n\t\t\t\tsubtitle='Please type more then one character to start searching',\n\t\t\t\tvalid=False\n\t\t\t))\n\t\telse:\n\t\t\tec2 = boto.connect_ec2()\n\t\t\tfor r in ec2.get_all_instances():\n\t\t\t\tgroups = ';'.join([g.name or g.id for g in r.groups])\n\t\t\t\tfor instance in r.instances:\n\t\t\t\t\tinstance_name = instance.tags.get('Name', instance.tags.get('name', ''))\n\t\t\t\t\tif not name.lower() in instance_name.lower():\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif instance.public_dns_name:\n\t\t\t\t\t\targ = 'ssh ~/.ssh/%s.pem %s\\n' % (instance.key_name, instance.public_dns_name)\n\t\t\t\t\telse:\n\t\t\t\t\t\targ = 'ssh vpc\\nssh %s\\n' % instance.private_ip_address\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\titems.append(alp.Item(\n\t\t\t\t\t\ttitle=instance_name,\n\t\t\t\t\t\tsubtitle='[%s]: %s' % (instance.id, groups),\n\t\t\t\t\t\tvalid=True,\n\t\t\t\t\t\targ=arg\n\t\t\t\t\t))\n\t\t\t\t\n\t\tif len(items) == 0:\n\t\t\titems.append(alp.Item(\n\t\t\t\ttitle='No Results 
Found',\n\t\t\t\tsubtitle='Please refine your search and try again'\n\t\t\t))\n\texcept Exception, e:\n\t\talp.log(str(e))\n\t\titems = [alp.Item(\n\t\t\ttitle='Problem Searching',\n\t\t\tsubtitle='%s' % str(e).replace(\"'\", ''),\n\t\t\tvalid=False\n\t\t)]\n\t\talp.log(items[0].get())\n\talp.feedback(items)", "def _show_instances(self):\n conn = ec2.connect_to_region(\n self.availability_zone,\n aws_access_key_id=self.access_key_id,\n aws_secret_access_key=self.secret_access_key,\n )\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n print reservation\n for instance in reservation.instances:\n print instance\n print '- AMI ID:', instance.image_id\n print '- Instance Type:', instance.instance_type\n print '- Availability Zone:', instance.placement" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a route in a route table within a VPC.
def create_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, GatewayId=None, DestinationIpv6CidrBlock=None, EgressOnlyInternetGatewayId=None, InstanceId=None, NetworkInterfaceId=None, VpcPeeringConnectionId=None, NatGatewayId=None): pass
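A minimal usage sketch, assuming a boto3 EC2 client; the route table and gateway IDs are illustrative placeholders. This adds a default route that sends all outbound traffic to an internet gateway.

import boto3

ec2 = boto3.client("ec2")

ec2.create_route(
    RouteTableId="rtb-0123456789abcdef0",  # placeholder route table ID
    DestinationCidrBlock="0.0.0.0/0",      # default route
    GatewayId="igw-0123456789abcdef0",     # placeholder internet gateway ID
)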
[ "def create_route(\n route_table_id=None,\n destination_cidr_block=None,\n route_table_name=None,\n gateway_id=None,\n internet_gateway_name=None,\n instance_id=None,\n interface_id=None,\n vpc_peering_connection_id=None,\n vpc_peering_connection_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n nat_gateway_id=None,\n nat_gateway_subnet_name=None,\n nat_gateway_subnet_id=None,\n):\n\n if not _exactly_one((route_table_name, route_table_id)):\n raise SaltInvocationError(\n \"One (but not both) of route_table_id or route_table_name must be provided.\"\n )\n\n if not _exactly_one(\n (\n gateway_id,\n internet_gateway_name,\n instance_id,\n interface_id,\n vpc_peering_connection_id,\n nat_gateway_id,\n nat_gateway_subnet_id,\n nat_gateway_subnet_name,\n vpc_peering_connection_name,\n )\n ):\n raise SaltInvocationError(\n \"Only one of gateway_id, internet_gateway_name, instance_id, interface_id,\"\n \" vpc_peering_connection_id, nat_gateway_id, nat_gateway_subnet_id,\"\n \" nat_gateway_subnet_name or vpc_peering_connection_name may be provided.\"\n )\n\n if destination_cidr_block is None:\n raise SaltInvocationError(\"destination_cidr_block is required.\")\n\n try:\n if route_table_name:\n route_table_id = _get_resource_id(\n \"route_table\",\n route_table_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not route_table_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"route table {} does not exist.\".format(\n route_table_name\n )\n },\n }\n\n if internet_gateway_name:\n gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gateway_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if vpc_peering_connection_name:\n vpc_peering_connection_id = _get_resource_id(\n \"vpc_peering_connection\",\n vpc_peering_connection_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_peering_connection_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC peering connection {} does not exist.\".format(\n vpc_peering_connection_name\n )\n },\n }\n\n if nat_gateway_subnet_name:\n gws = describe_nat_gateways(\n subnet_name=nat_gateway_subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gws:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"nat gateway for {} does not exist.\".format(\n nat_gateway_subnet_name\n )\n },\n }\n nat_gateway_id = gws[0][\"NatGatewayId\"]\n\n if nat_gateway_subnet_id:\n gws = describe_nat_gateways(\n subnet_id=nat_gateway_subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gws:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"nat gateway for {} does not exist.\".format(\n nat_gateway_subnet_id\n )\n },\n }\n nat_gateway_id = gws[0][\"NatGatewayId\"]\n\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not nat_gateway_id:\n return _create_resource(\n \"route\",\n route_table_id=route_table_id,\n destination_cidr_block=destination_cidr_block,\n gateway_id=gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=vpc_peering_connection_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # for nat gateway, boto3 is required\n try:\n conn3 = 
_get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n ret = conn3.create_route(\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n NatGatewayId=nat_gateway_id,\n )\n return {\"created\": True, \"id\": ret.get(\"NatGatewayId\")}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def add_route_tgw_nh(route_table_id, destination_cidr_block, transit_gateway_id):\n ec2 = boto3.client('ec2')\n\n resp = ec2.create_route(\n DryRun=False,\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n TransitGatewayId=transit_gateway_id,\n )\n logger.info(\"Got response to add_route_tgw_nh {} \".format(resp))\n return resp", "def configure_routing(vpc):\n internet_gateways = list(vpc.internet_gateways.all())\n if len(internet_gateways) == 1:\n internet_gateway = internet_gateways[0]\n elif len(internet_gateways) == 0:\n raise CraftingTableError(\"No internet gateway found\")\n else:\n raise CraftingTableError(f\"Multiple internet gateways found: {id_list(internet_gateways)}\")\n\n route_tables = list(vpc.route_tables.filter(Filters=[{\"Name\": \"association.main\", \"Values\": [\"true\"]}]))\n if len(route_tables) == 1:\n route_table = route_tables[0]\n elif len(route_tables) == 0:\n raise CraftingTableError(\"No route table found\")\n if len(route_tables) != 1:\n raise CraftingTableError(f\"Multiple route tables found: {id_list(route_tables)}\")\n\n for route in route_table.routes:\n if route.gateway_id == internet_gateway.id:\n break\n else:\n route_table.create_route(DestinationCidrBlock=\"0.0.0.0/0\", GatewayId=internet_gateway.id)\n click.echo(f\"Created default route to {internet_gateway.id}\")", "def create_vpc_endpoint(self, vpc_id, route_table_id, service_name):\n params = {'VpcId': vpc_id, 'RouteTableId.1': route_table_id,\n 'ServiceName': service_name}\n return self.get_object('CreateVpcEndpoint', params, VPCEndpoint,\n verb='POST')", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def create_route_entry(self, route_tables, vpc_id):\n params = {}\n results = []\n changed = False \n vrouter_table_id = None\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n\n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n for vroute in route_tables:\n self.build_list_params(params, vrouter_table_id , 'RouteTableId') \n if \"next_hop_id\" in vroute:\n if (\"dest\" in vroute) or (\"destination_cidrblock\" in vroute):\n fixed_dest_cidr_block = None\n if 'dest' in vroute:\n 
fixed_dest_cidr_block = vroute[\"dest\"]\n if 'destination_cidrblock' in vroute:\n fixed_dest_cidr_block = vroute[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n\n if 'next_hop_type' in vroute:\n self.build_list_params(params, vroute[\"next_hop_type\"], 'NextHopType')\n\n if 'next_hop_id' in vroute:\n self.build_list_params(params, vroute[\"next_hop_id\"], 'NextHopId')\n \n try:\n instance_result = self.get_instance_info()\n flag = False\n if instance_result:\n for instances in instance_result[0][u'Instances'][u'Instance']:\n if vroute[\"next_hop_id\"] == instances['InstanceId']:\n flag = True\n break\n if flag: \n response = self.get_status('CreateRouteEntry', params)\n results.append(response)\n changed = True\n time.sleep(10)\n else:\n results.append({\"Error Message\": str(vroute[\"next_hop_id\"])+\" Instance not found\"})\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n results.append({\"Error Message\": \"destination_cidrblock is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"vpc_id is not valid\"})\n \n return changed, results", "def create_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def associate_route_table(\n route_table_id=None,\n subnet_id=None,\n route_table_name=None,\n subnet_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"associated\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n\n if all((route_table_id, route_table_name)):\n raise SaltInvocationError(\n \"Only one of route_table_name or route_table_id may be provided.\"\n )\n if route_table_name:\n route_table_id = _get_resource_id(\n \"route_table\",\n route_table_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not route_table_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"Route table {} does not exist.\".format(route_table_name)\n },\n }\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_route_table(route_table_id, subnet_id)\n log.info(\n \"Route table %s was associated with subnet %s\", route_table_id, subnet_id\n )\n return {\"association_id\": association_id}\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def associate_route_table(DryRun=None, SubnetId=None, RouteTableId=None):\n pass", "def insert_route(self, match_vRouter_number,\n match_ipv4address,\n action_dest_mac,\n action_egress_port):\n\n entry = shell.TableEntry(\"MyIngress.ipv4NextHopLPM\")(\n action=\"MyIngress.ipv4Forward\")\n entry.match[\"vRouterNumber\"] = str(match_vRouter_number)\n entry.match[\"hdr.ipv4.dstAddr\"] = str(match_ipv4address)\n entry.action[\"port\"] = str(action_egress_port)\n entry.action[\"dstAddr\"] = str(action_dest_mac)\n entry.insert()", "def createRoute(arg1: 'SoNode', eventout: 'char const *', to: 
'SoNode', eventin: 'char const *') -> \"void\":\n return _coin.SoDB_createRoute(arg1, eventout, to, eventin)", "def delete_route(route_table_id, destination_cidr_block):\n ec2 = boto3.client('ec2')\n resp = ec2.delete_route(\n DestinationCidrBlock=destination_cidr_block,\n RouteTableId=route_table_id,\n )\n logger.info(\"Got response to delete_route {} \".format(resp))\n return resp", "def route_table_exists(\n route_table_id=None,\n name=None,\n route_table_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.route_table_exists: name parameter is deprecated \"\n \"use route_table_name instead.\"\n )\n route_table_name = name\n\n return resource_exists(\n \"route_table\",\n name=route_table_name,\n resource_id=route_table_id,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def SoDB_createRoute(arg2: 'SoNode', eventout: 'char const *', to: 'SoNode', eventin: 'char const *') -> \"void\":\n return _coin.SoDB_createRoute(arg2, eventout, to, eventin)", "def create_new_route(route_name, mtype, value):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/routes/{route_name}\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"route_name\": route_name\r\n })\r\n\r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"Flowroute SDK 1.0\",\r\n \"content-type\": \"application/json; charset=utf-8\",\r\n }\r\n\r\n body = '{\"type\": \"%s\", \"value\": \"%s\"}' % (mtype, value)\r\n\r\n # Prepare and invoke the API call request to fetch the response\r\n response = CustomAuthUtility.appendCustomAuthParams(method='PUT',\r\n query_url=query_url,\r\n body=body,\r\n headers=headers)\r\n\r\n # Error handling using HTTP status codes\r\n if response.code == 400:\r\n raise APIException(\"USER ERROR\", 400, response.body)\r\n\r\n elif response.code == 500:\r\n raise APIException(\"APPLICATION/SERVER ERROR\", 500, response.body)\r\n\r\n elif response.code < 200 or response.code > 206: # 200 = HTTP OK\r\n raise APIException(\"HTTP Response Not OK\", response.code, response.body)\r\n \r\n return response.body", "def describe_route_tables(\n route_table_id=None,\n route_table_name=None,\n vpc_id=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((route_table_id, route_table_name, tags, vpc_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: \"\n \"route table id, route table name, vpc_id, or tags.\"\n )\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"Filters\": []}\n\n if route_table_id:\n filter_parameters[\"RouteTableIds\"] = [route_table_id]\n\n if vpc_id:\n filter_parameters[\"Filters\"].append({\"Name\": \"vpc-id\", \"Values\": [vpc_id]})\n\n if route_table_name:\n filter_parameters[\"Filters\"].append(\n {\"Name\": \"tag:Name\", \"Values\": [route_table_name]}\n )\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"Filters\"].append(\n {\"Name\": \"tag:{}\".format(tag_name), \"Values\": [tag_value]}\n )\n\n route_tables = conn3.describe_route_tables(**filter_parameters).get(\n \"RouteTables\", []\n )\n\n if not route_tables:\n return []\n\n tables = []\n keys = {\n \"id\": 
\"RouteTableId\",\n \"vpc_id\": \"VpcId\",\n \"tags\": \"Tags\",\n \"routes\": \"Routes\",\n \"associations\": \"Associations\",\n }\n route_keys = {\n \"destination_cidr_block\": \"DestinationCidrBlock\",\n \"gateway_id\": \"GatewayId\",\n \"instance_id\": \"Instance\",\n \"interface_id\": \"NetworkInterfaceId\",\n \"nat_gateway_id\": \"NatGatewayId\",\n \"vpc_peering_connection_id\": \"VpcPeeringConnectionId\",\n }\n assoc_keys = {\n \"id\": \"RouteTableAssociationId\",\n \"main\": \"Main\",\n \"route_table_id\": \"RouteTableId\",\n \"SubnetId\": \"subnet_id\",\n }\n for item in route_tables:\n route_table = {}\n for outkey, inkey in keys.items():\n if inkey in item:\n if outkey == \"routes\":\n route_table[outkey] = _key_remap(inkey, route_keys, item)\n elif outkey == \"associations\":\n route_table[outkey] = _key_remap(inkey, assoc_keys, item)\n elif outkey == \"tags\":\n route_table[outkey] = {}\n for tagitem in item.get(inkey, []):\n route_table[outkey][tagitem.get(\"Key\")] = tagitem.get(\n \"Value\"\n )\n else:\n route_table[outkey] = item.get(inkey)\n tables.append(route_table)\n return tables\n\n except BotoServerError as e:\n return {\"error\": __utils__[\"boto.get_error\"](e)}", "def create_vpc_endpoint(DryRun=None, VpcId=None, ServiceName=None, PolicyDocument=None, RouteTableIds=None, ClientToken=None):\n pass", "def add_subnet(tag_name, ip_part, route_table, az, realm):\n template_name = tag_name.title().replace('-', '')\n subnet = ec2.Subnet(\n template_name,\n VpcId=Ref(self.vpc),\n CidrBlock=_(Ref(self.vpc_base_net), \".{}.0/24\".format(ip_part)),\n AvailabilityZone=Select(az, GetAZs()),\n Tags=self.get_tags(tag_name, realm=realm)\n )\n subnet = self.t.add_resource(subnet)\n\n self.t.add_resource(ec2.SubnetRouteTableAssociation(\n \"{}RouteTableAssociation\".format(template_name),\n SubnetId=Ref(subnet),\n RouteTableId=Ref(route_table)\n ))\n\n return subnet", "def make_route(self, *args, **kargs):\n return Route(*args, **kargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.
def create_route_table(DryRun=None, VpcId=None): pass
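A minimal usage sketch, assuming a boto3 EC2 client; the VPC and subnet IDs are illustrative placeholders. The new table's routes only take effect for a subnet once the table is associated with it.

import boto3

ec2 = boto3.client("ec2")

resp = ec2.create_route_table(VpcId="vpc-0123456789abcdef0")  # placeholder VPC ID
route_table_id = resp["RouteTable"]["RouteTableId"]

# Routes added to this table apply to a subnet only after association.
ec2.associate_route_table(
    RouteTableId=route_table_id,
    SubnetId="subnet-0123456789abcdef0",  # placeholder subnet ID
)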
[ "def create_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, GatewayId=None, DestinationIpv6CidrBlock=None, EgressOnlyInternetGatewayId=None, InstanceId=None, NetworkInterfaceId=None, VpcPeeringConnectionId=None, NatGatewayId=None):\n pass", "def configure_routing(vpc):\n internet_gateways = list(vpc.internet_gateways.all())\n if len(internet_gateways) == 1:\n internet_gateway = internet_gateways[0]\n elif len(internet_gateways) == 0:\n raise CraftingTableError(\"No internet gateway found\")\n else:\n raise CraftingTableError(f\"Multiple internet gateways found: {id_list(internet_gateways)}\")\n\n route_tables = list(vpc.route_tables.filter(Filters=[{\"Name\": \"association.main\", \"Values\": [\"true\"]}]))\n if len(route_tables) == 1:\n route_table = route_tables[0]\n elif len(route_tables) == 0:\n raise CraftingTableError(\"No route table found\")\n if len(route_tables) != 1:\n raise CraftingTableError(f\"Multiple route tables found: {id_list(route_tables)}\")\n\n for route in route_table.routes:\n if route.gateway_id == internet_gateway.id:\n break\n else:\n route_table.create_route(DestinationCidrBlock=\"0.0.0.0/0\", GatewayId=internet_gateway.id)\n click.echo(f\"Created default route to {internet_gateway.id}\")", "def create_route(\n route_table_id=None,\n destination_cidr_block=None,\n route_table_name=None,\n gateway_id=None,\n internet_gateway_name=None,\n instance_id=None,\n interface_id=None,\n vpc_peering_connection_id=None,\n vpc_peering_connection_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n nat_gateway_id=None,\n nat_gateway_subnet_name=None,\n nat_gateway_subnet_id=None,\n):\n\n if not _exactly_one((route_table_name, route_table_id)):\n raise SaltInvocationError(\n \"One (but not both) of route_table_id or route_table_name must be provided.\"\n )\n\n if not _exactly_one(\n (\n gateway_id,\n internet_gateway_name,\n instance_id,\n interface_id,\n vpc_peering_connection_id,\n nat_gateway_id,\n nat_gateway_subnet_id,\n nat_gateway_subnet_name,\n vpc_peering_connection_name,\n )\n ):\n raise SaltInvocationError(\n \"Only one of gateway_id, internet_gateway_name, instance_id, interface_id,\"\n \" vpc_peering_connection_id, nat_gateway_id, nat_gateway_subnet_id,\"\n \" nat_gateway_subnet_name or vpc_peering_connection_name may be provided.\"\n )\n\n if destination_cidr_block is None:\n raise SaltInvocationError(\"destination_cidr_block is required.\")\n\n try:\n if route_table_name:\n route_table_id = _get_resource_id(\n \"route_table\",\n route_table_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not route_table_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"route table {} does not exist.\".format(\n route_table_name\n )\n },\n }\n\n if internet_gateway_name:\n gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gateway_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if vpc_peering_connection_name:\n vpc_peering_connection_id = _get_resource_id(\n \"vpc_peering_connection\",\n vpc_peering_connection_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_peering_connection_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC peering connection {} does not exist.\".format(\n vpc_peering_connection_name\n )\n },\n }\n\n if 
nat_gateway_subnet_name:\n gws = describe_nat_gateways(\n subnet_name=nat_gateway_subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gws:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"nat gateway for {} does not exist.\".format(\n nat_gateway_subnet_name\n )\n },\n }\n nat_gateway_id = gws[0][\"NatGatewayId\"]\n\n if nat_gateway_subnet_id:\n gws = describe_nat_gateways(\n subnet_id=nat_gateway_subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not gws:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"nat gateway for {} does not exist.\".format(\n nat_gateway_subnet_id\n )\n },\n }\n nat_gateway_id = gws[0][\"NatGatewayId\"]\n\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not nat_gateway_id:\n return _create_resource(\n \"route\",\n route_table_id=route_table_id,\n destination_cidr_block=destination_cidr_block,\n gateway_id=gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=vpc_peering_connection_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # for nat gateway, boto3 is required\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n ret = conn3.create_route(\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n NatGatewayId=nat_gateway_id,\n )\n return {\"created\": True, \"id\": ret.get(\"NatGatewayId\")}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def add_route_tgw_nh(route_table_id, destination_cidr_block, transit_gateway_id):\n ec2 = boto3.client('ec2')\n\n resp = ec2.create_route(\n DryRun=False,\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n TransitGatewayId=transit_gateway_id,\n )\n logger.info(\"Got response to add_route_tgw_nh {} \".format(resp))\n return resp", "def create_route_entry(self, route_tables, vpc_id):\n params = {}\n results = []\n changed = False \n vrouter_table_id = None\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n\n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n for vroute in route_tables:\n self.build_list_params(params, vrouter_table_id , 'RouteTableId') \n if \"next_hop_id\" in vroute:\n if (\"dest\" in vroute) or (\"destination_cidrblock\" in vroute):\n fixed_dest_cidr_block = None\n if 'dest' in vroute:\n fixed_dest_cidr_block = vroute[\"dest\"]\n if 'destination_cidrblock' in vroute:\n fixed_dest_cidr_block = vroute[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n\n if 'next_hop_type' in vroute:\n self.build_list_params(params, vroute[\"next_hop_type\"], 'NextHopType')\n\n if 'next_hop_id' in vroute:\n self.build_list_params(params, vroute[\"next_hop_id\"], 
'NextHopId')\n \n try:\n instance_result = self.get_instance_info()\n flag = False\n if instance_result:\n for instances in instance_result[0][u'Instances'][u'Instance']:\n if vroute[\"next_hop_id\"] == instances['InstanceId']:\n flag = True\n break\n if flag: \n response = self.get_status('CreateRouteEntry', params)\n results.append(response)\n changed = True\n time.sleep(10)\n else:\n results.append({\"Error Message\": str(vroute[\"next_hop_id\"])+\" Instance not found\"})\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n results.append({\"Error Message\": \"destination_cidrblock is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"vpc_id is not valid\"})\n \n return changed, results", "def associate_route_table(\n route_table_id=None,\n subnet_id=None,\n route_table_name=None,\n subnet_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"associated\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n\n if all((route_table_id, route_table_name)):\n raise SaltInvocationError(\n \"Only one of route_table_name or route_table_id may be provided.\"\n )\n if route_table_name:\n route_table_id = _get_resource_id(\n \"route_table\",\n route_table_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not route_table_id:\n return {\n \"associated\": False,\n \"error\": {\n \"message\": \"Route table {} does not exist.\".format(route_table_name)\n },\n }\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_route_table(route_table_id, subnet_id)\n log.info(\n \"Route table %s was associated with subnet %s\", route_table_id, subnet_id\n )\n return {\"association_id\": association_id}\n except BotoServerError as e:\n return {\"associated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_vpc_endpoint(self, vpc_id, route_table_id, service_name):\n params = {'VpcId': vpc_id, 'RouteTableId.1': route_table_id,\n 'ServiceName': service_name}\n return self.get_object('CreateVpcEndpoint', params, VPCEndpoint,\n verb='POST')", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def associate_route_table(DryRun=None, SubnetId=None, RouteTableId=None):\n pass", "def describe_route_tables(\n route_table_id=None,\n route_table_name=None,\n vpc_id=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((route_table_id, route_table_name, tags, vpc_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: \"\n \"route table id, route table 
name, vpc_id, or tags.\"\n )\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"Filters\": []}\n\n if route_table_id:\n filter_parameters[\"RouteTableIds\"] = [route_table_id]\n\n if vpc_id:\n filter_parameters[\"Filters\"].append({\"Name\": \"vpc-id\", \"Values\": [vpc_id]})\n\n if route_table_name:\n filter_parameters[\"Filters\"].append(\n {\"Name\": \"tag:Name\", \"Values\": [route_table_name]}\n )\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"Filters\"].append(\n {\"Name\": \"tag:{}\".format(tag_name), \"Values\": [tag_value]}\n )\n\n route_tables = conn3.describe_route_tables(**filter_parameters).get(\n \"RouteTables\", []\n )\n\n if not route_tables:\n return []\n\n tables = []\n keys = {\n \"id\": \"RouteTableId\",\n \"vpc_id\": \"VpcId\",\n \"tags\": \"Tags\",\n \"routes\": \"Routes\",\n \"associations\": \"Associations\",\n }\n route_keys = {\n \"destination_cidr_block\": \"DestinationCidrBlock\",\n \"gateway_id\": \"GatewayId\",\n \"instance_id\": \"Instance\",\n \"interface_id\": \"NetworkInterfaceId\",\n \"nat_gateway_id\": \"NatGatewayId\",\n \"vpc_peering_connection_id\": \"VpcPeeringConnectionId\",\n }\n assoc_keys = {\n \"id\": \"RouteTableAssociationId\",\n \"main\": \"Main\",\n \"route_table_id\": \"RouteTableId\",\n \"SubnetId\": \"subnet_id\",\n }\n for item in route_tables:\n route_table = {}\n for outkey, inkey in keys.items():\n if inkey in item:\n if outkey == \"routes\":\n route_table[outkey] = _key_remap(inkey, route_keys, item)\n elif outkey == \"associations\":\n route_table[outkey] = _key_remap(inkey, assoc_keys, item)\n elif outkey == \"tags\":\n route_table[outkey] = {}\n for tagitem in item.get(inkey, []):\n route_table[outkey][tagitem.get(\"Key\")] = tagitem.get(\n \"Value\"\n )\n else:\n route_table[outkey] = item.get(inkey)\n tables.append(route_table)\n return tables\n\n except BotoServerError as e:\n return {\"error\": __utils__[\"boto.get_error\"](e)}", "def create_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def create_subnets(ec2, vpc, subnets):\n # Generate candidate subnet CIDRs by shifting the VPC's prefix by 4 bits, yielding 16 possible subnet\n # CIDRs.\n vpc_cidr = ipaddress.ip_network(vpc.cidr_block)\n subnet_cidrs = list(vpc_cidr.subnets(prefixlen_diff=4))\n\n # The set difference between the availability zones that already have subnets and the availability zones\n # available in the region yields the set of availability zones where subnets must be created.\n subnet_azs = frozenset(map(lambda subnet: subnet.availability_zone, subnets))\n available_azs = frozenset(map(\n lambda az: az[\"ZoneName\"], ec2.meta.client.describe_availability_zones()[\"AvailabilityZones\"]))\n\n for az in (available_azs - subnet_azs):\n # If subnets already exist, their CIDRs may conflict with the candidate CIDRs that were generated.\n # Loop through the candidate list until subnet creation does not fail with a CIDR conflict error, or\n # until no candidates remain.\n while len(subnet_cidrs) > 0:\n try:\n cidr = subnet_cidrs.pop(0)\n subnet = vpc.create_subnet(AvailabilityZone=az, CidrBlock=cidr.with_prefixlen)\n # Ensure that the new subnet has the MapPublicIpOnLaunch attribute set\n ec2.meta.client.modify_subnet_attribute(SubnetId=subnet.id,\n MapPublicIpOnLaunch={\"Value\": True})\n click.echo(f\"Created new subnet: {subnet.id}\")\n break\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == 
\"InvalidSubnet.Conflict\":\n continue\n raise\n else:\n raise CraftingTableError(f\"Could not find valid CIDR to create subnet in {az}\")", "def create_network_acl(\n vpc_id=None,\n vpc_name=None,\n network_acl_name=None,\n subnet_id=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n _id = vpc_name or vpc_id\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"VPC {} does not exist.\".format(_id)},\n }\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n elif subnet_id:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n r = _create_resource(\n \"network_acl\",\n name=network_acl_name,\n vpc_id=vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if r.get(\"created\") and subnet_id:\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(r[\"id\"], subnet_id)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n r[\"association_id\"] = association_id\n return r", "def create_vpc_endpoint(DryRun=None, VpcId=None, ServiceName=None, PolicyDocument=None, RouteTableIds=None, ClientToken=None):\n pass", "def add_subnet(tag_name, ip_part, route_table, az, realm):\n template_name = tag_name.title().replace('-', '')\n subnet = ec2.Subnet(\n template_name,\n VpcId=Ref(self.vpc),\n CidrBlock=_(Ref(self.vpc_base_net), \".{}.0/24\".format(ip_part)),\n AvailabilityZone=Select(az, GetAZs()),\n Tags=self.get_tags(tag_name, realm=realm)\n )\n subnet = self.t.add_resource(subnet)\n\n self.t.add_resource(ec2.SubnetRouteTableAssociation(\n \"{}RouteTableAssociation\".format(template_name),\n SubnetId=Ref(subnet),\n RouteTableId=Ref(route_table)\n ))\n\n return subnet", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def route_table_exists(\n route_table_id=None,\n name=None,\n route_table_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.route_table_exists: name parameter is deprecated \"\n \"use route_table_name instead.\"\n )\n route_table_name = name\n\n return resource_exists(\n \"route_table\",\n name=route_table_name,\n resource_id=route_table_id,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n 
nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def create_subnet(\n vpc_id=None,\n cidr_block=None,\n vpc_name=None,\n availability_zone=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n auto_assign_public_ipv4=False,\n):\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n subnet_object_dict = _create_resource(\n \"subnet\",\n name=subnet_name,\n tags=tags,\n vpc_id=vpc_id,\n availability_zone=availability_zone,\n cidr_block=cidr_block,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # if auto_assign_public_ipv4 is requested set that to true using boto3\n if auto_assign_public_ipv4:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n conn3.modify_subnet_attribute(\n MapPublicIpOnLaunch={\"Value\": True}, SubnetId=subnet_object_dict[\"id\"]\n )\n return subnet_object_dict", "def create_table(table_name, hash):\n print(\"[+] Creating Table {}...\".format(table_name))\n params = {\n \"TableName\": table_name,\n \"KeySchema\": [\n {\n 'AttributeName': str(hash),\n 'KeyType': 'HASH'\n }\n ],\n \"AttributeDefinitions\": [\n {\n 'AttributeName': str(hash),\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': \"email\",\n 'AttributeType': 'S'\n }\n ],\n \"ProvisionedThroughput\": {\n 'ReadCapacityUnits': 5,\n 'WriteCapacityUnits': 5\n },\n \"GlobalSecondaryIndexes\": [\n {\n 'IndexName': 'email-gsi',\n 'KeySchema': [\n {\n 'AttributeName': 'email',\n 'KeyType': 'HASH'\n },\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 5,\n 'WriteCapacityUnits': 5\n }\n },\n ]\n }\n table = dynamodb.create_table(**params)\n table.meta.client.get_waiter('table_exists').wait(TableName=table_name)\n\n return table" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a data feed for Spot instances, enabling you to view Spot instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.
def create_spot_datafeed_subscription(DryRun=None, Bucket=None, Prefix=None): pass
[ "def create_spot_instance(config, job_id, sched_time, docker_image, env_vars):\n\n client = boto3.client('ec2')\n\n # Get my own public fqdn by quering metadata\n my_own_name = urllib2.urlopen(\n \"http://169.254.169.254/latest/meta-data/public-hostname\").read()\n\n user_data = (\n \"#!/bin/bash\\n\"\n \"touch /tmp/start.txt\\n\"\n \"curl -i -H 'Content-Type: application/json' \"\n \"'http://%s/v1/notifications/%s?status=started' -X PUT\\n\"\n \"yum -y update\\n\"\n \"yum install docker -y\\n\"\n \"sudo service docker start\\n\"\n \"sudo docker run %s %s\\n\"\n \"touch /tmp/executing.txt\\n\"\n \"sleep 180\\n\"\n \"curl -i -H 'Content-Type: application/json' \"\n \"'http://%s/v1/notifications/%s?status=finished' -X PUT\\n\" %\n (my_own_name, job_id, env_vars, docker_image, my_own_name, job_id))\n\n response = client.request_spot_instances(\n SpotPrice=\"%s\" % config[\"spot-price\"],\n InstanceCount=1,\n Type='one-time',\n ValidFrom=sched_time,\n LaunchSpecification={\n 'ImageId': config[\"ami-id\"],\n 'InstanceType': config[\"instance-type\"],\n 'KeyName': config[\"key-name\"],\n 'SecurityGroups': ['default', config[\"sg-name\"]],\n 'UserData': base64.b64encode(user_data)\n }\n )\n\n req_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']\n req_state = response['SpotInstanceRequests'][0][\n 'State'] # open/failed/active/cancelled/closed\n req_status_code = response['SpotInstanceRequests'][0][\n 'Status']['Code'] # pending-evaluation/price-too-low/etc\n\n return [req_id, req_state, req_status_code]", "def run_instances(self):\n # create an entry in the s3 log for the start of this task \n self.log_to_s3('run-instances-start.log', 'start')\n\n session = botocore.session.get_session()\n client = session.create_client('ec2', region_name=self.aws_region)\n\n # convert user-data to base64\n user_data = ''\n # NOTE conversion of file to string, then string to bytes, the bytes encoded \n # base64 - then decode the base64 bytes into base64 string\n with open(self.ec2_user_data, 'r') as f:\n user_data = base64.b64encode(bytes(f.read(), \"utf-8\")).decode(\"utf-8\")\n\n if self.ec2_type in (CONST.VALID_EC2_INSTANCE_TYPES_EBS_ONLY).split('|'):\n # block device mapping for ebs backed instances\n # creates an ephemeral EBS volume (delete on terminate)\n # Note that gp2 instance type is EBS SSD\n custom_block_device_mapping = [{\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0',\n 'Ebs':{\n 'VolumeSize': self.ec2_ebs_only_volume_size,\n 'VolumeType': self.ec2_ebs_only_volume_type,\n },\n }]\n else:\n # block device mapping allows for 2 extra drives\n # - works for either single ssd or 2 ssd's\n custom_block_device_mapping = [ \n {\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0'\n },\n {\n 'DeviceName': '/dev/sdc',\n 'VirtualName': 'ephemeral1'\n }\n ]\n\n r = client.request_spot_instances(\n InstanceCount=self.ec2_count,\n SpotPrice=self.ec2_spot_price,\n LaunchSpecification= {\n 'SecurityGroupIds': [\n self.ec2_security_group_id,\n ],\n 'SecurityGroups': [\n self.ec2_security_groups,\n ],\n 'Placement': {\n 'AvailabilityZone': self.ec2_availability_zone,\n },\n 'BlockDeviceMappings': custom_block_device_mapping,\n 'IamInstanceProfile': {\n 'Arn': self.ec2_arn_id,\n },\n 'UserData': user_data,\n 'ImageId': self.ec2_image_id,\n 'InstanceType': self.ec2_type,\n 'KeyName': self.ec2_security_key,\n },\n )\n\n # get the spot instance request ids\n spot_ids = []\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Spot request ids:'))\n for i, spot_inst in 
enumerate(r['SpotInstanceRequests']):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, \n inst_str + '\\t' + spot_inst['SpotInstanceRequestId']))\n spot_ids.append(spot_inst['SpotInstanceRequestId'])\n utility.list_to_file(CONST.SPOT_REQUEST_IDS, spot_ids)\n\n # create a list of spot instance statuses - so we can print out\n # some updates to the user\n spot_status = ['']*len(spot_ids)\n # Expecting status codes of \"pending-evaluation\", \"pending-fulfillment\", or \n # fulfilled. Any other status-code should be printed out & the program \n # terminated.\n expected_status = ['fulfilled', 'pending-evaluation', 'pending-fulfillment']\n instance_ids = [None]*len(spot_ids)\n\n # check the status of the spot requests\n while True:\n fulfilled = 0\n for i, id in enumerate(spot_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_spot_instance_requests(SpotInstanceRequestIds=[id])\n status_code = r['SpotInstanceRequests'][0]['Status']['Code']\n if status_code not in expected_status:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, 'Unexpected status for spot request ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': ') +\n colour_msg(Colour.PURPLE, status_code))\n sys.exit(1)\n if status_code != spot_status[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Spot instance request: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tStatus: ') +\n colour_msg(Colour.PURPLE, status_code))\n spot_status[i] = status_code\n if status_code == 'fulfilled':\n fulfilled += 1\n # record the instance id\n instance_ids[i] = r['SpotInstanceRequests'][0]['InstanceId']\n if fulfilled == len(spot_ids):\n break\n time.sleep(1)\n\n utility.list_to_file(CONST.INSTANCE_IDS, instance_ids)\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ids:'))\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n tag_val = self.ec2_instance_tag + str(i)\n client.create_tags(Resources=[id], Tags=[{'Key':'Name', 'Value':tag_val}])\n\n # monitor the instances until all running\n instance_states = ['']*len(instance_ids)\n expected_states = ['running', 'pending']\n instance_ips = [None]*len(instance_ids)\n running = 0\n while True:\n running = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instances(InstanceIds=[id])\n state = r['Reservations'][0]['Instances'][0]['State']['Name']\n if state not in expected_states:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, \n 'Unexpected instance state for instance-id ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': \\t') +\n colour_msg(Colour.PURPLE, state))\n sys.exit(1)\n if state != instance_states[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tState: ') +\n colour_msg(Colour.PURPLE, state))\n instance_states[i] = state\n if state == 'running':\n running += 1\n # record the instance id\n instance_ips[i] = r['Reservations'][0]['Instances'][0]['PublicDnsName']\n if running == len(instance_ids):\n break\n time.sleep(10)\n\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ips:'))\n for i, id in enumerate(instance_ips):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n \n utility.list_to_file(CONST.INSTANCE_IPS_FILE, instance_ips)\n # need to at least wait until all 
the instances are reachable\n # possible statuses: (passed | failed | initializing | insufficient-data )\n reachability = ['']*len(instance_ids)\n while True:\n passed = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instance_status(InstanceIds=[id])\n state = r['InstanceStatuses'][0]['InstanceStatus']['Details'][0]['Status']\n if state != reachability[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tReachability: ') +\n colour_msg(Colour.PURPLE, state))\n reachability[i] = state\n if state == 'passed':\n passed += 1\n if passed == len(instance_ids):\n break\n time.sleep(10)\n \n lgr.info(CONST.INFO + colour_msg(Colour.GREEN, 'Instances are reachable'))\n \n # if user-data configuration file supplied - check that it has worked\n # Note that this checker is run once on each instance\n if self.ec2_user_data:\n lgr.info(CONST.INFO + colour_msg(Colour.CYAN, \n 'Starting job to monitor user-data configuration...'))\n # at the moment is calling a local script that does the checking\n result = subprocess.call('./' + self.ec2_user_data_check) \n if result:\n lgr.error(CONST.ERROR + colour_msg(Colour.CYAN, \n 'user data checker FAILED'))\n sys.exit(1)\n\n # create an entry in the s3 log for finish this task \n self.log_to_s3('run-instances-finish.log', 'finish')\n\n # return the list of ip's for the newly created instances\n return utility.file_to_list(CONST.INSTANCE_IPS_FILE)", "def _create_feeds(input_signature, input_dataset=None):\n if input_dataset is None:\n inputs = [array_ops.zeros(s.shape, s.dtype) for s in input_signature]\n input_dataset = dataset_ops.Dataset.from_tensors(tuple(inputs))\n input_dataset = input_dataset.repeat()\n\n infeed = ipu_infeed_queue.IPUInfeedQueue(input_dataset)\n outfeed = ipu_outfeed_queue.IPUOutfeedQueue()\n return (infeed, outfeed)", "def feed():\n scrape_data()\n with open(os.path.join(REFERENCE_DIR, \"profile_spider.json\"), \"r\") as f:\n twimages = json.load(f)\n\n status = tweets()\n twimages.extend(t for t in tweets())\n shuffle(twimages)\n\n return render_template('feed.html', twimages=twimages, status=status)", "def request_spot_instances(DryRun=None, SpotPrice=None, ClientToken=None, InstanceCount=None, Type=None, ValidFrom=None, ValidUntil=None, LaunchGroup=None, AvailabilityZoneGroup=None, BlockDurationMinutes=None, LaunchSpecification=None):\n pass", "def _add_feeding_entry(self):\n method = choice(models.Feeding._meta.get_field(\"method\").choices)[0]\n amount = None\n if method == \"bottle\":\n amount = Decimal(\"%d.%d\" % (randint(0, 6), randint(0, 9)))\n start = self.time + timedelta(minutes=randint(1, 60))\n end = start + timedelta(minutes=randint(5, 20))\n\n notes = \"\"\n if choice([True, False, False, False]):\n notes = \" \".join(self.faker.sentences(randint(1, 5)))\n\n if end < self.time_now:\n instance = models.Feeding.objects.create(\n child=self.child,\n start=start,\n end=end,\n type=choice(models.Feeding._meta.get_field(\"type\").choices)[0],\n method=method,\n amount=amount,\n notes=notes,\n )\n instance.save()\n self._add_tags(instance)\n self.time = end", "def _getFeed(self):\n feed = FEED_DATA.get(self.data.url,None)\n if feed is None:\n # create it\n print 'Creating FEED_DATA[%s]'%self.data.url\n feed = FEED_DATA[self.data.url] = ItsatripFeed(self.data.url,\n self.data.timeout)\n return feed", "def TemporarySpotFetch():\n print 'View call to store new spot data'\n spot.StoreNewData()\n 
return 'success!'", "def createDataPoints():\n usersList = files.readUsers()\n beersList = files.readBeers()\n points = []\n i = 1\n for hashId, user in usersList.iteritems():\n if 'lat' in user.location and user.ratings:\n for bid, rating in user.ratings.iteritems():\n country = None\n if 'country' in user.location:\n country = user.location['country']\n pointAttribs = {'lat': user.location['lat'], 'lng': user.location['lng'],\n 'country': country, 'abv': beersList[str(hash(bid))].abv, 'rating': rating,\n 'style': beersList[str(hash(bid))].style}\n point = dp.dataPoint(pointAttribs)\n points.append(point)\n if i % 1000 == 0:\n print \"Points added: \" + str(i)\n i += 1\n data = dp.dataPoints(points)\n writeJSONFile('../data/dataPoints.json', data)", "def create_feed(self, feed):\n path = \"api/feeds/\"\n return Feed.from_dict(self._post(path, feed._asdict()))", "def create_daily_stats(context: CallbackContext, session: scoped_session) -> None:\n try:\n today = date.today()\n tomorrow = today + timedelta(days=1)\n for stat_date in [today, tomorrow]:\n statistic = session.query(DailyStatistic).get(stat_date)\n\n if statistic is None:\n statistic = DailyStatistic(stat_date)\n session.add(statistic)\n\n session.commit()\n except Exception as e:\n sentry.capture_job_exception(e)", "def create_ec2_instances(count=1):\n conn = get_ec2_connection()\n user_data = get_user_data()\n reservation = conn.run_instances(image_id=settings.EC2_IMAGE_ID,\n min_count=count,\n max_count=count,\n instance_type=settings.EC2_INSTANCE_TYPE,\n user_data=user_data)\n return reservation.instances", "def add_to_feed(feed_user_id, aggr_key, activity, version):\n feed_key = \"activity_feed:%s:%s\" % (version, feed_user_id,)\n write_aggregate_to_feed(aggr_key, feed_key, activity['timestamp'])", "def spotify(self, app_region, template):\n if app_region.spot is None:\n return\n\n resources = template['Resources']\n parameters = template['Parameters']\n self._add_spot_fleet(app_region, resources, parameters)\n self._clean_up_asg(template)", "def generate(app, category, torrents):\n feed = FeedGenerator()\n if category:\n url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))\n else:\n url = util.fullSiteURL(app, 'feed', 'all.rss')\n feed.link(href=url, rel=\"self\")\n feed.id(url)\n if category:\n title = \"new {} torrents on index ex invisibilis\".format(category)\n else:\n title = \"new torrents on index ex invisibilis\"\n feed.title(title)\n feed.description(title)\n feed.author({\"name\": \"anonymous\"})\n feed.language(\"en\")\n for torrent in torrents:\n item = feed.add_entry()\n url = util.fullSiteURL(app, torrent.downloadURL())\n item.id(torrent.infohash)\n item.link(href=url)\n item.title(torrent.title)\n item.description(torrent.summary(100))\n return feed", "def generate_feed(entries):\n entries = list(entries)\n if len(entries) == 0:\n return None\n\n # Get the update date. 
Assumption: most recent entry is first.\n updated = entries[0]['created']\n\n with contextlib.closing(stringio.StringIO()) as out:\n builder = utils.XMLBuilder(out)\n build_feed(\n builder,\n web.config.app.site_name,\n updated,\n web.config.app.tag_uri,\n entries)\n return out.getvalue()", "def create_data(self, feed, data):\n path = \"api/feeds/{0}/data\".format(feed)\n return Data.from_dict(self._post(path, data._asdict()))", "def feed_instance(self, data, instance):\n for prop, val in data.items():\n setattr(instance, prop, val)\n return instance", "def upload_datapoints_live(logger, sensor, api_key, project_name, log):\n sensor_values = sensor\n name = sensor_values[\"Name\"]\n\n points = []\n t = sensor_values[\"LocalTimestamp\"]\n\n if \",\" in (sensor_values[\"PresentValue\"]):\n val = float(sensor_values[\"PresentValue\"].replace(\",\", \".\"))\n points.append(Datapoint((int(t * 1000)), val))\n elif sensor_values[\"PresentValue\"] == \"inactive\":\n val = (sensor_values[\"PresentValue\"])\n points.append(Datapoint((int(t * 1000)), val))\n elif sensor_values[\"PresentValue\"] == \"active\":\n val = (sensor_values[\"PresentValue\"])\n points.append(Datapoint((int(t * 1000)), val))\n else:\n val = float(sensor_values[\"PresentValue\"])\n points.append(Datapoint((int(t * 1000)), val))\n try:\n timeseries.post_datapoints(name, points, api_key=api_key, project=project_name)\n log.info(\"Posting last data\")\n except ConnectionError as err:\n logger.error(\"upload_datapoints_historical: \" + str(err))\n except TimeoutError as err:\n logger.error(\"upload_datapoints_historical: \" + str(err))\n except Exception as err:\n logger.error(\"upload_datapoints_historical: \" + str(err))\n else:\n log.info(\"Posting last data\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a subnet in an existing VPC. When you create each subnet, you provide the VPC ID and the CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The subnet's IPv4 CIDR block can be the same as the VPC's IPv4 CIDR block (assuming you want only a single subnet in the VPC), or a subset of the VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length. If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle. If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available. For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.
def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None): pass
[ "def create_subnets(ec2, vpc, subnets):\n # Generate candidate subnet CIDRs by shifting the VPC's prefix by 4 bits, yielding 16 possible subnet\n # CIDRs.\n vpc_cidr = ipaddress.ip_network(vpc.cidr_block)\n subnet_cidrs = list(vpc_cidr.subnets(prefixlen_diff=4))\n\n # The set difference between the availability zones that already have subnets and the availability zones\n # available in the region yields the set of availability zones where subnets must be created.\n subnet_azs = frozenset(map(lambda subnet: subnet.availability_zone, subnets))\n available_azs = frozenset(map(\n lambda az: az[\"ZoneName\"], ec2.meta.client.describe_availability_zones()[\"AvailabilityZones\"]))\n\n for az in (available_azs - subnet_azs):\n # If subnets already exist, their CIDRs may conflict with the candidate CIDRs that were generated.\n # Loop through the candidate list until subnet creation does not fail with a CIDR conflict error, or\n # until no candidates remain.\n while len(subnet_cidrs) > 0:\n try:\n cidr = subnet_cidrs.pop(0)\n subnet = vpc.create_subnet(AvailabilityZone=az, CidrBlock=cidr.with_prefixlen)\n # Ensure that the new subnet has the MapPublicIpOnLaunch attribute set\n ec2.meta.client.modify_subnet_attribute(SubnetId=subnet.id,\n MapPublicIpOnLaunch={\"Value\": True})\n click.echo(f\"Created new subnet: {subnet.id}\")\n break\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"InvalidSubnet.Conflict\":\n continue\n raise\n else:\n raise CraftingTableError(f\"Could not find valid CIDR to create subnet in {az}\")", "def create_subnet(\n vpc_id=None,\n cidr_block=None,\n vpc_name=None,\n availability_zone=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n auto_assign_public_ipv4=False,\n):\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n subnet_object_dict = _create_resource(\n \"subnet\",\n name=subnet_name,\n tags=tags,\n vpc_id=vpc_id,\n availability_zone=availability_zone,\n cidr_block=cidr_block,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # if auto_assign_public_ipv4 is requested set that to true using boto3\n if auto_assign_public_ipv4:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n conn3.modify_subnet_attribute(\n MapPublicIpOnLaunch={\"Value\": True}, SubnetId=subnet_object_dict[\"id\"]\n )\n return subnet_object_dict", "def create_subnet(body=None):\n return IMPL.create_subnet(body)", "def subnet_create_api():\r\n try:\r\n req = models.Subnet(request.json)\r\n req.validate()\r\n except Exception as e:\r\n return err_return('Parameter Invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n try:\r\n if not req.network_id:\r\n return err_return('networkid is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.subnet_id:\r\n req_id = str(uuid.uuid4())\r\n else:\r\n req_id = req.subnet_id\r\n sb_name = subnet_db_get_one('name', id=req_id)\r\n if sb_name:\r\n return err_return('id(%s) in use by %s' % (req_id, sb_name),\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n if req.subnet_name:\r\n if len(req.subnet_name) > NAME_MAX_LEN:\r\n return err_return('Length of name must be less than 255',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n else:\r\n 
req.subnet_name = ''\r\n\r\n external = network_db_get_one('external', id=req.network_id)\r\n if external is None:\r\n return err_return(\"networkid does not exist\",\r\n \"ParameterInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if not req.dns_nameservers:\r\n req.dns_nameservers = []\r\n if not req.allocation_pools:\r\n req.allocation_pools = []\r\n allocation_pools = []\r\n for all_pool in req.allocation_pools:\r\n allocation_pools.append(all_pool.to_primitive())\r\n req.allocation_pools = allocation_pools\r\n for pool in req.allocation_pools:\r\n if ip_to_bin(pool['start']) > ip_to_bin(pool['end']):\r\n return err_return(\"end_ip must be more than start_ip\",\r\n \"IPRangeError\", \"\", HTTP_BAD_REQUEST)\r\n\r\n if external == 0:\r\n if not req.cidr:\r\n return err_return('cidr is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not validate_cidr(req.cidr):\r\n return err_return('cidr invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.gateway_ip:\r\n return err_return('gateway ip is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n vl2lcid = yynetworkid_to_lcvl2id(req.network_id)\r\n log.debug('vl2lcid=%s' % vl2lcid)\r\n nets = [{\"prefix\": VFW_TOR_LINK_NET_PRE,\r\n \"netmask\": VFW_TOR_LINK_NET_MASK}]\r\n cidr = str(req.cidr).split('/')\r\n new_prf = cidr[0]\r\n new_mask = int(cidr[1])\r\n subnets = get_subnets_by_network(req.network_id)\r\n for subnet in subnets:\r\n cidr = subnet['cidr'].split('/')\r\n old_prf = cidr[0]\r\n old_mask = int(cidr[1])\r\n if subnet_equ(new_prf, old_prf, new_mask, old_mask):\r\n log.error('cidr is the same')\r\n return err_return('subnet already exist',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n nets.append({\"prefix\": old_prf, \"netmask\": old_mask})\r\n nets.append({\"prefix\": new_prf, \"netmask\": new_mask})\r\n log.debug('nets=%s' % nets)\r\n nw_name = network_db_get_one('name', id=req.network_id)\r\n payload = json.dumps({\"name\": nw_name, \"nets\": nets})\r\n r = lcapi.patch(conf.livecloud_url + '/v1/vl2s/%s' % vl2lcid,\r\n data=payload)\r\n if r.status_code != HTTP_OK:\r\n return Response(json.dumps(NEUTRON_400)), HTTP_NOT_FOUND\r\n nets = r.json()['DATA']['NETS']\r\n for net in nets:\r\n if subnet_equ(net['PREFIX'], new_prf,\r\n net['NETMASK'], new_mask):\r\n sb_lcuuid = net['LCUUID']\r\n sb_idx = net['NET_INDEX']\r\n break\r\n else:\r\n log.error('sb_lcuuid no found')\r\n sb_lcuuid = 'sb_lcuuid no found'\r\n sb_idx = -1\r\n else:\r\n subnetid = subnet_db_get_one('id', network_id=req.network_id)\r\n if subnetid:\r\n return err_return('subnet(%s) already exists' % subnetid,\r\n 'Fail', '', HTTP_BAD_REQUEST)\r\n # ISP\r\n if not req.allocation_pools:\r\n return err_return('allocation_pools can not be empty',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n id = subnet_db_get_one('id', network_id=req.network_id)\r\n if id:\r\n return subnet_get(subnetid=id)\r\n lcuuid = network_db_get_one('lcuuid', id=req.network_id)\r\n isp = lc_vl2_db_get_one('isp', lcuuid=lcuuid)\r\n items = lc_ip_res_db_get_all(req='ip, netmask, gateway, userid',\r\n isp=isp)\r\n if not items:\r\n return err_return(\"No ISP IP found\", \"BadRequest\",\r\n \"Please add ISP IP to system first\",\r\n HTTP_BAD_REQUEST)\r\n req.gateway_ip = items[0]['gateway']\r\n req.cidr = ip_mask_to_cidr(items[0]['ip'], items[0]['netmask'])\r\n isp_all_ips = []\r\n ip_to_userid = {}\r\n for it in items:\r\n isp_all_ips.append(it['ip'])\r\n ip_to_userid[it['ip']] = it['userid']\r\n req_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n for req_ip 
in req_ips:\r\n if req_ip not in isp_all_ips:\r\n return err_return(\"%s does not exist\" % req_ip,\r\n \"IPInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if ip_to_userid[req_ip] != 0:\r\n return err_return(\"%s in use\" % req_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n sb_lcuuid = str(uuid.uuid4())\r\n sb_idx = -1\r\n\r\n sql = (\"INSERT INTO neutron_subnets \"\r\n \"VALUES('%s','%s','%s','%s','%s','%s','%s','%s',%d)\" %\r\n (req_id, req.subnet_name, req.network_id,\r\n req.cidr, json.dumps(req.allocation_pools),\r\n req.gateway_ip, json.dumps(req.dns_nameservers),\r\n sb_lcuuid, sb_idx))\r\n log.debug('add subnet sql=%s' % sql)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n if external:\r\n sql = \"UPDATE ip_resource_v2_2 SET userid=%s WHERE ip in ('-1',\"\r\n for req_ip in req_ips:\r\n sql += \"'%s',\" % req_ip\r\n sql = sql[:-1]\r\n sql += \")\"\r\n log.debug('sql=%s' % sql)\r\n with MySQLdb.connect(**LCDB_INFO) as cursor:\r\n cursor.execute(sql, conf.livecloud_userid)\r\n\r\n resp, code = subnet_get(subnetid=req_id)\r\n return resp, HTTP_CREATED\r\n\r\n except Exception as e:\r\n log.error(e)\r\n return Response(json.dumps(NEUTRON_500)), HTTP_INTERNAL_SERVER_ERROR", "def test_create_subnet_no_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n self.assertEqual(\n webob.exc.HTTPClientError.code, res.status_int)", "def create_network_acl(\n vpc_id=None,\n vpc_name=None,\n network_acl_name=None,\n subnet_id=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n _id = vpc_name or vpc_id\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"VPC {} does not exist.\".format(_id)},\n }\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n elif subnet_id:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n r = _create_resource(\n \"network_acl\",\n name=network_acl_name,\n vpc_id=vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if r.get(\"created\") and subnet_id:\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(r[\"id\"], subnet_id)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n r[\"association_id\"] = association_id\n return r", "def 
_provide_subnets(self):\n if not self.cfg.aws.subnet:\n logging.debug(\"Subnets are not provided\")\n # Try to get subnet from default VPC or VPC set in aws-vpc config parameter\n vpc = self._provide_vpc()\n if vpc:\n subnet_list = vpc.subnets.all()\n self.vpc_id = vpc.id\n self.subnets = ','.join(map(lambda x: x.id, subnet_list))\n else:\n # Ensure that VPC is set and that subnets provided belong to it\n subnets = [x.strip() for x in self.cfg.aws.subnet.split(',')]\n # If aws-vpc parameter is set, use this VPC, otherwise use VPC of the\n # first subnet\n logging.debug(f\"Subnets are provided: {' ,'.join(subnets)}\")\n vpc = None\n if self.vpc_id:\n if self.vpc_id.lower() == 'none':\n return None\n vpc = self.ec2.Vpc(self.vpc_id)\n for subnet_name in subnets:\n subnet = self.ec2.Subnet(subnet_name)\n if not vpc:\n vpc = subnet.vpc # if subnet is invalid - will throw an exception botocore.exceptions.ClientError with InvalidSubnetID.NotFound\n else:\n if subnet.vpc != vpc:\n raise UserReportError(returncode=INPUT_ERROR, message=\"Subnets set in aws-subnet parameter belong to different VPCs\")\n self.vpc_id = vpc.id\n self.subnets = ','.join(subnets)\n logging.debug(f\"Using VPC {self.vpc_id}, subnet(s) {self.subnets}\")", "def test_create_subnet_with_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'cidr': '10.0.0.0/24',\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n subnet = self.deserialize(self.fmt, res)['subnet']\n self.assertIsNone(subnet['subnetpool_id'])", "def subnet(action=None, cidr=None, vlan=None):\n base_url = '%s/subnets' % (server)\n r = None\n if action == 'list':\n r = call('get', '%s' % base_url)\n elif action == 'create':\n if not vlan:\n print 'Missing vlan to create'\n sys.exit(1)\n\n vlan_id = fetch_id('vlans', vlan)\n r = call('post', '%s/subnets' % (server),\n data=json.dumps({'cidr': cidr, 'vlan_id': vlan_id})\n )\n elif action == 'delete':\n r = call('delete', '%s/%s' %\n (base_url, fetch_id('subnets', cidr))\n )\n elif action == 'info':\n r = call('get', '%s/by-cidr/%s' %\n (base_url, cidr.replace('/', '_'))\n )\n else:\n baker.usage(sys._getframe().f_code.co_name)\n sys.exit(1)\n pretty_output(r)", "def subnet_put_api(subnetid=None):\r\n try:\r\n if not subnetid:\r\n return err_return('subnetId is required', \"ParameterInvalid\",\r\n \"\", HTTP_BAD_REQUEST)\r\n db_subnet = subnet_db_get_one('*', id=subnetid)\r\n if not db_subnet:\r\n return err_return('subnetId does not exist', \"ParameterInvalid\",\r\n \"\", HTTP_NOT_FOUND)\r\n cidr = db_subnet['cidr']\r\n try:\r\n req = models.Subnet(request.json)\r\n req.validate()\r\n except Exception as e:\r\n log.error(e)\r\n return err_return('Parameter Invalid', \"ParameterInvalid\",\r\n \"\", HTTP_BAD_REQUEST)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n if req.subnet_name is not None:\r\n if len(req.subnet_name) > NAME_MAX_LEN:\r\n return err_return('Length of name must be less than 255',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n sql = \"UPDATE neutron_subnets SET name=%s WHERE id=%s\"\r\n cursor.execute(sql, (req.subnet_name, subnetid))\r\n if req.dns_nameservers is not 
None:\r\n sql = (\"UPDATE neutron_subnets SET \"\r\n \"dns_nameservers=%s WHERE id=%s\")\r\n cursor.execute(sql,\r\n (json.dumps(req.dns_nameservers), subnetid))\r\n if req.allocation_pools is not None:\r\n allocation_pools = []\r\n for all_pool in req.allocation_pools:\r\n allocation_pools.append(all_pool.to_primitive())\r\n req.allocation_pools = allocation_pools\r\n for pool in req.allocation_pools:\r\n if ip_to_bin(pool['start']) > ip_to_bin(pool['end']):\r\n return err_return(\"end_ip must be more than start_ip\",\r\n \"IPRangeError\", \"\", HTTP_BAD_REQUEST)\r\n networkid = subnetid_to_networkid(subnetid)\r\n db_network = network_db_get_one('*', id=networkid)\r\n external = db_network['external']\r\n log.debug('external=%s' % external)\r\n if external:\r\n if req.allocation_pools is not None:\r\n old_alloc_pools = json.loads(db_subnet['allocation_pools'])\r\n old_alloc_ips = alloc_pools_to_ip_list(old_alloc_pools)\r\n new_alloc_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n tmp_nips = copy.deepcopy(new_alloc_ips)\r\n for new_ip in tmp_nips:\r\n if new_ip in old_alloc_ips:\r\n new_alloc_ips.remove(new_ip)\r\n old_alloc_ips.remove(new_ip)\r\n isp = lc_vl2_db_get_one('isp', lcuuid=db_network['lcuuid'])\r\n items = lc_ip_res_db_get_all(req='ip, userid, vifid',\r\n isp=isp)\r\n isp_all_ips = []\r\n ip_to_userid = {}\r\n ip_to_vifid = {}\r\n for it in items:\r\n isp_all_ips.append(it['ip'])\r\n ip_to_userid[it['ip']] = it['userid']\r\n ip_to_vifid[it['ip']] = it['vifid']\r\n for new_alloc_ip in new_alloc_ips:\r\n if new_alloc_ip not in isp_all_ips:\r\n return err_return(\"%s invalid\" % new_alloc_ip,\r\n \"IPInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if ip_to_userid[new_alloc_ip] != 0:\r\n return err_return(\"%s in use\" % new_alloc_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n for old_alloc_ip in old_alloc_ips:\r\n if ip_to_vifid[old_alloc_ip] != 0:\r\n return err_return(\"%s in use\" % old_alloc_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n sql = (\"UPDATE neutron_subnets SET allocation_pools='%s' \"\r\n \"WHERE id='%s'\" % (json.dumps(req.allocation_pools),\r\n subnetid))\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n sql = (\"UPDATE ip_resource_v2_2 SET userid=0 \"\r\n \"WHERE ip in ('-1',\")\r\n for ip in old_alloc_ips:\r\n sql += \"'%s',\" % ip\r\n sql = sql[:-1]\r\n sql += \")\"\r\n sql2 = (\"UPDATE ip_resource_v2_2 SET userid=%s \"\r\n \"WHERE ip in ('-1',\")\r\n for ip in new_alloc_ips:\r\n sql2 += \"'%s',\" % ip\r\n sql2 = sql2[:-1]\r\n sql2 += \")\"\r\n with MySQLdb.connect(**LCDB_INFO) as cursor:\r\n cursor.execute(sql)\r\n cursor.execute(sql2, conf.livecloud_userid)\r\n return subnet_get(subnetid=subnetid)\r\n\r\n if req.gateway_ip is not None:\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n sql = \"UPDATE neutron_subnets SET gateway_ip=%s WHERE id=%s\"\r\n cursor.execute(sql, (req.gateway_ip, subnetid))\r\n log.debug('old_cidr=%s, new_cidr=%s' % (cidr, req.cidr))\r\n if req.cidr and cidr != req.cidr:\r\n vl2lcid = yynetworkid_to_lcvl2id(networkid)\r\n nets = [{\"prefix\": VFW_TOR_LINK_NET_PRE,\r\n \"netmask\": VFW_TOR_LINK_NET_MASK}]\r\n subnets = get_subnets_by_network(networkid)\r\n for subnet in subnets:\r\n if str(subnet['id']) == subnetid:\r\n continue\r\n cidr = subnet['cidr'].split('/')\r\n nets.append({\"prefix\": cidr[0], \"netmask\": int(cidr[1])})\r\n cidr = str(req.cidr).split('/')\r\n log.debug('netmask=%s' % cidr[1])\r\n nets.append({\"prefix\": cidr[0], \"netmask\": int(cidr[1])})\r\n nw_name = network_db_get_one('name', 
id=networkid)\r\n payload = json.dumps({\"name\": nw_name, \"nets\": nets})\r\n log.debug('patch vl2 data=%s' % payload)\r\n r = lcapi.patch(conf.livecloud_url + '/v1/vl2s/%s' % vl2lcid,\r\n data=payload)\r\n if r.status_code != HTTP_OK:\r\n err = r.json()['DESCRIPTION']\r\n log.error(err)\r\n return err_return(err, 'Fail', '', HTTP_BAD_REQUEST)\r\n nets = r.json()['DATA']['NETS']\r\n for net in nets:\r\n if subnet_equ(net['PREFIX'], cidr[0],\r\n net['NETMASK'], int(cidr[1])):\r\n sb_lcuuid = net['LCUUID']\r\n sb_idx = net['NET_INDEX']\r\n break\r\n else:\r\n log.error('sb_lcuuid no found')\r\n return Response(json.dumps(NEUTRON_500)), \\\r\n HTTP_INTERNAL_SERVER_ERROR\r\n if req.allocation_pools is None:\r\n req.allocation_pools = []\r\n else:\r\n req.cidr = db_subnet['cidr']\r\n sb_lcuuid = db_subnet['lcuuid']\r\n sb_idx = db_subnet['net_idx']\r\n if req.allocation_pools is None:\r\n return subnet_get(subnetid=subnetid)\r\n new_alloc_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n vl2id = lc_vl2_db_get_one('id', lcuuid=sb_lcuuid)\r\n used_ips = lc_vif_ip_db_get_all('ip', vl2id=vl2id,\r\n net_index=sb_idx)\r\n for used_ip in used_ips:\r\n ip = used_ip['ip']\r\n if ip not in new_alloc_ips:\r\n return err_return('used ip(%s) not in alloc pool' % ip,\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n\r\n sql = (\"UPDATE neutron_subnets SET cidr='%s', \"\r\n \"allocation_pools='%s', lcuuid='%s', net_idx=%s \"\r\n \"WHERE id='%s'\" %\r\n (req.cidr, json.dumps(req.allocation_pools),\r\n sb_lcuuid, sb_idx, subnetid))\r\n log.debug('sql=%s' % sql)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n return subnet_get(subnetid=subnetid)\r\n except Exception as e:\r\n log.error(e)\r\n return Response(json.dumps(NEUTRON_500)), HTTP_INTERNAL_SERVER_ERROR", "def subnet_exists(\n subnet_id=None,\n name=None,\n subnet_name=None,\n cidr=None,\n tags=None,\n zones=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if name:\n log.warning(\n \"boto_vpc.subnet_exists: name parameter is deprecated \"\n \"use subnet_name instead.\"\n )\n subnet_name = name\n\n if not any((subnet_id, subnet_name, cidr, tags, zones)):\n raise SaltInvocationError(\n \"At least one of the following must be \"\n \"specified: subnet id, cidr, subnet_name, \"\n \"tags, or zones.\"\n )\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n except BotoServerError as err:\n return {\"error\": __utils__[\"boto.get_error\"](err)}\n\n filter_parameters = {\"filters\": {}}\n if subnet_id:\n filter_parameters[\"subnet_ids\"] = [subnet_id]\n if subnet_name:\n filter_parameters[\"filters\"][\"tag:Name\"] = subnet_name\n if cidr:\n filter_parameters[\"filters\"][\"cidr\"] = cidr\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n if zones:\n filter_parameters[\"filters\"][\"availability_zone\"] = zones\n\n try:\n subnets = conn.get_all_subnets(**filter_parameters)\n except BotoServerError as err:\n boto_err = __utils__[\"boto.get_error\"](err)\n if boto_err.get(\"aws\", {}).get(\"code\") == \"InvalidSubnetID.NotFound\":\n # Subnet was not found: handle the error and return False.\n return {\"exists\": False}\n return {\"error\": boto_err}\n\n log.debug(\n \"The filters criteria %s matched the following subnets:%s\",\n filter_parameters,\n subnets,\n )\n if subnets:\n log.info(\"Subnet %s exists.\", subnet_name or subnet_id)\n return {\"exists\": True}\n else:\n log.info(\"Subnet %s does not 
exist.\", subnet_name or subnet_id)\n return {\"exists\": False}", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def _create_subnet(self, network):\n cfg = self.config.network\n tenant_cidr = netaddr.IPNetwork(cfg.tenant_network_cidr)\n result = None\n # Repeatedly attempt subnet creation with sequential cidr\n # blocks until an unallocated block is found.\n for subnet_cidr in tenant_cidr.subnet(cfg.tenant_network_mask_bits):\n body = dict(\n subnet=dict(\n ip_version=4,\n network_id=network.id,\n tenant_id=network.tenant_id,\n cidr=str(subnet_cidr),\n ),\n )\n try:\n result = self.network_client.create_subnet(body=body)\n break\n except exc.QuantumClientException as e:\n is_overlapping_cidr = 'overlaps with another subnet' in str(e)\n if not is_overlapping_cidr:\n raise\n self.assertIsNotNone(result, 'Unable to allocate tenant network')\n subnet = DeletableSubnet(client=self.network_client,\n **result['subnet'])\n self.assertEqual(subnet.cidr, str(subnet_cidr))\n self.set_resource(rand_name('subnet-smoke-'), subnet)\n return subnet", "def create_subnet(self,\n subnet_prototype: 'SubnetPrototype',\n **kwargs\n ) -> DetailedResponse:\n\n if subnet_prototype is None:\n raise ValueError('subnet_prototype must be provided')\n if isinstance(subnet_prototype, SubnetPrototype):\n subnet_prototype = convert_model(subnet_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_subnet')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(subnet_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n 
headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/subnets'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def allocate_subnet(self, request):", "def associate_subnet_cidr_block(SubnetId=None, Ipv6CidrBlock=None):\n pass", "def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def validate_subnets(subnet_spec):\n exit_if_none(subnet_spec, \"Missing subnets\")\n actual_subnets = {}\n paginator = boto3.client('ec2').get_paginator('describe_subnets')\n for page in paginator.paginate():\n for subnet in page['Subnets']:\n actual_subnets[subnet['SubnetId']] = subnet['VpcId']\n subnets = []\n vpcs = set()\n for subnet_id in subnet_spec.split(\",\"):\n vpc_id = actual_subnets.get(subnet_id)\n exit_if_none(vpc_id, f\"invalid subnet: {subnet_id}\")\n subnets.append(subnet_id)\n vpcs.add(vpc_id)\n if (len(vpcs) > 1):\n exit_if_none(None, \"subnets belong to different VPCs\")\n return subnets" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information, see Regions and Endpoints. You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume. You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide. You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources. For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.
def create_volume(DryRun=None, Size=None, SnapshotId=None, AvailabilityZone=None, VolumeType=None, Iops=None, Encrypted=None, KmsKeyId=None, TagSpecifications=None): pass
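A minimal usage sketch of the call described above, written against the boto3 EC2 client, which exposes the same parameters as this stub; the region, Availability Zone, size, volume type, and tag values are assumptions chosen for illustration, not part of the dataset entry.

import boto3

# Assumed region and AZ; substitute your own.
ec2 = boto3.client('ec2', region_name='us-east-1')

# Create a new, empty, encrypted 100 GiB gp3 volume and tag it at creation time.
response = ec2.create_volume(
    AvailabilityZone='us-east-1a',
    Size=100,
    VolumeType='gp3',
    Encrypted=True,  # uses the account's default KMS key unless KmsKeyId is given
    TagSpecifications=[{
        'ResourceType': 'volume',
        'Tags': [{'Key': 'Name', 'Value': 'example-volume'}],
    }],
)
print(response['VolumeId'])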
[ "def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def addvol(tag, region, size, snapshot=None):\n print 'Creating {0}GB volume in {1} ...'.format(size, region)\n conn = _ec2connect()\n vol = conn.create_volume(size, region, snapshot)\n vol.add_tag(TAG_NAME, tag)\n return vol", "def create_volume(self, compartment_id, availability_domain, size, display_name=None, wait=True):\n _logger.debug('%s', where_am_i())\n bsc = self.get_block_storage_client()\n cvds = oci_sdk.core.models.CreateVolumeDetails(availability_domain=availability_domain,\n compartment_id=compartment_id,\n size_in_gbs=size,\n display_name=display_name)\n try:\n vol_data = bsc.create_volume(create_volume_details=cvds).data\n if wait:\n get_vol_state = bsc.get_volume(volume_id=vol_data.id)\n oci_sdk.wait_until(bsc, get_vol_state, 'lifecycle_state', 'AVAILABLE')\n ocivol = OCIVolume(self, vol_data)\n return ocivol\n except oci_sdk.exceptions.ServiceError as e:\n raise Exception('Failed to create volume') from e", "def attach_disk(self, instance, size=10, volume_type=None, iops=None, device=None):\n conn = self.conn or self.vpc_conn\n # Add EBS volume DONE\n ebs_vol = conn.create_volume(size, self.zone, volume_type=volume_type, iops=iops)\n self.wait_for_state(ebs_vol, 'status', 'available')\n if not device:\n device = '/dev/sdx'\n conn.attach_volume(ebs_vol.id, instance.id, device=device)\n self.ebs_vols.append(ebs_vol)\n return ebs_vol", "def do_create_volume(sess, size, display_name, attach_it, chap_credentials, mode):\n\n try:\n _logger.info(\"Creating a new %d GB volume %s\", size, display_name)\n inst = sess.this_instance()\n if inst is None:\n raise Exception(\"OCI SDK error: couldn't get instance info\")\n _logger.debug('\\n availability_domain %s\\n compartment_id %s',\n inst.get_availability_domain_name(), inst.get_compartment_id())\n #\n # GT\n # vol = sess.create_volume(inst.get_compartment_id(),\n vol = sess.create_volume(sess.this_compartment().get_ocid(),\n inst.get_availability_domain_name(),\n size=size,\n display_name=display_name,\n wait=True)\n except Exception as e:\n _logger.debug(\"Failed to create volume\", exc_info=True)\n raise Exception(\"Failed to create volume\") from e\n\n _logger.info(\"Volume [%s] created\", vol.get_display_name())\n\n if not attach_it:\n return\n\n compat_info_message(gen_msg=\"Attaching the volume to this instance\", mode=mode)\n try:\n if chap_credentials:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=True)\n else:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=False)\n except Exception as e:\n _logger.debug('Cannot attach BV', exc_info=True)\n vol.destroy()\n raise Exception('Cannot attach BV') from e\n #\n # attach using iscsiadm commands\n compat_info_message(gen_msg=\"Attaching iSCSI device.\", mode=mode)\n\n vol_portal_ip = vol.get_portal_ip()\n 
vol_portal_port = vol.get_portal_port()\n vol_iqn = vol.get_iqn()\n vol_username = vol.get_user()\n vol_password = vol.get_password()\n retval = iscsiadm.attach(ipaddr=vol_portal_ip,\n port=vol_portal_port,\n iqn=vol_iqn,\n username=vol_username,\n password=vol_password,\n auto_startup=True)\n compat_info_message(compat_msg=\"iscsiadm attach Result: %s\" % iscsiadm.error_message_from_code(retval),\n gen_msg=\"Volume [%s] is attached.\" % vol.get_display_name(), mode=mode)\n if retval == 0:\n _logger.debug('Creation successful')\n if chap_credentials:\n _logger.debug('Attachment OK: saving chap credentials.')\n add_chap_secret(vol_iqn, vol_username, vol_password)\n return\n\n # here because of error case\n try:\n _logger.debug('Destroying the volume')\n vol.destroy()\n except Exception as e:\n _logger.debug(\"Failed to destroy volume\", exc_info=True)\n _logger.error(\"Failed to destroy volume: %s\", str(e))\n\n raise Exception('Failed to attach created volume: %s' % iscsiadm.error_message_from_code(retval))", "def create_volume(self, xml_bytes):\n root = XML(xml_bytes)\n volume_id = root.findtext(\"volumeId\")\n size = int(root.findtext(\"size\"))\n snapshot_id = root.findtext(\"snapshotId\")\n availability_zone = root.findtext(\"availabilityZone\")\n status = root.findtext(\"status\")\n create_time = root.findtext(\"createTime\")\n create_time = datetime.strptime(\n create_time[:19], \"%Y-%m-%dT%H:%M:%S\")\n volume = model.Volume(\n volume_id, size, status, create_time, availability_zone,\n snapshot_id)\n return volume", "def create_volume():\n with settings(warn_only=True):\n run(f'docker volume create {db_volume}')", "def _create_boot_volume(self, context, instance):\n LOG.debug('Creating boot volume')\n boot_vol_az = CONF.solariszones.boot_volume_az\n boot_vol_type = CONF.solariszones.boot_volume_type\n try:\n vol = self._volume_api.create(\n context, instance['root_gb'],\n instance['hostname'] + \"-\" + self._rootzpool_suffix,\n \"Boot volume for instance '%s' (%s)\"\n % (instance['name'], instance['uuid']),\n volume_type=boot_vol_type, availability_zone=boot_vol_az)\n # TODO(npower): Polling is what nova/compute/manager also does when\n # creating a new volume, so we do likewise here.\n while True:\n volume = self._volume_api.get(context, vol['id'])\n if volume['status'] != 'creating':\n return volume\n greenthread.sleep(1)\n\n except Exception as reason:\n LOG.exception(_(\"Unable to create root zpool volume for instance \"\n \"'%s': %s\") % (instance['name'], reason))\n raise", "def test_create_volume(self):\n ret = self._driver.create_volume(self.TEST_VOLUME)\n self.assertEqual(ret['provider_location'],\n os.path.join(self.TEST_VOLDIR,\n self.TEST_VOLNAME))\n self.assertTrue(os.path.isfile(self.TEST_VOLPATH))\n self.assertEqual(os.stat(self.TEST_VOLPATH).st_size,\n 1 * units.Gi)", "def create_volume(self,\n volume_prototype: 'VolumePrototype',\n **kwargs\n ) -> DetailedResponse:\n\n if volume_prototype is None:\n raise ValueError('volume_prototype must be provided')\n if isinstance(volume_prototype, VolumePrototype):\n volume_prototype = convert_model(volume_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_volume')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(volume_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n 
headers['Accept'] = 'application/json'\n\n url = '/volumes'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def _create_vol_req(self, size, name, location=None, snapshot=None,\r\n image=None):\r\n volume_data = {}\r\n params = None\r\n volume_data['name'] = name\r\n if size:\r\n volume_data['sizeGb'] = str(size)\r\n if image:\r\n if not hasattr(image, 'name'):\r\n image = self.ex_get_image(image)\r\n params = {'sourceImage': image.extra['selfLink']}\r\n volume_data['description'] = 'Image: %s' % (\r\n image.extra['selfLink'])\r\n if snapshot:\r\n if not hasattr(snapshot, 'name'):\r\n # Check for full URI to not break backward-compatibility\r\n if snapshot.startswith('https'):\r\n snapshot = self._get_components_from_path(snapshot)['name']\r\n snapshot = self.ex_get_snapshot(snapshot)\r\n snapshot_link = snapshot.extra['selfLink']\r\n volume_data['sourceSnapshot'] = snapshot_link\r\n volume_data['description'] = 'Snapshot: %s' % (snapshot_link)\r\n location = location or self.zone\r\n if not hasattr(location, 'name'):\r\n location = self.ex_get_zone(location)\r\n request = '/zones/%s/disks' % (location.name)\r\n\r\n return request, volume_data, params", "def attach_volume(self, node, volume, device=None, ex_mode=None,\r\n ex_boot=False):\r\n volume_data = {}\r\n if volume is None:\r\n volume_data['type'] = 'SCRATCH'\r\n else:\r\n volume_data['type'] = 'PERSISTENT'\r\n volume_data['source'] = volume.extra['selfLink']\r\n volume_data['kind'] = 'compute#attachedDisk'\r\n volume_data['mode'] = ex_mode or 'READ_WRITE'\r\n\r\n if device:\r\n volume_data['deviceName'] = device\r\n else:\r\n volume_data['deviceName'] = volume.name\r\n\r\n volume_data['boot'] = ex_boot\r\n\r\n request = '/zones/%s/instances/%s/attachDisk' % (\r\n node.extra['zone'].name, node.name)\r\n self.connection.async_request(request, method='POST',\r\n data=volume_data)\r\n return True", "def attach_to_instance(self, instance, mountpoint):\r\n instance_id = _resolve_id(instance)\r\n try:\r\n resp = self._nova_volumes.create_server_volume(instance_id,\r\n self.id, mountpoint)\r\n except Exception as e:\r\n raise exc.VolumeAttachmentFailed(\"%s\" % e)", "def create_volume_from_image(self, context, instance, image_id):\n pass", "def test_volume_create(self):\n pass", "def create_volume_snapshot(self, volume, name):\r\n snapshot_data = {}\r\n snapshot_data['name'] = name\r\n request = '/zones/%s/disks/%s/createSnapshot' % (\r\n volume.extra['zone'].name, volume.name)\r\n self.connection.async_request(request, method='POST',\r\n data=snapshot_data)\r\n\r\n return self.ex_get_snapshot(name)", "def create_namespaced_persistent_volume(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_persistent_volume\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_persistent_volume`\")\n\n resource_path = '/api/v1/persistentvolumes'.replace('{format}', 'json')\n method = 'POST'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n 
header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1PersistentVolume',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def volume_add(self, volume, disk_size_with_unit=None, mirror_disks=None, disk_size=None, force=None, disks=None, raid_group=None, disk_count=None):\n return self.request( \"volume-add\", {\n 'disk_size_with_unit': [ disk_size_with_unit, 'disk-size-with-unit', [ basestring, 'None' ], False ],\n 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],\n 'disk_size': [ disk_size, 'disk-size', [ int, 'None' ], False ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n 'disks': [ disks, 'disks', [ DiskInfo, 'None' ], True ],\n 'raid_group': [ raid_group, 'raid-group', [ basestring, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'disk_count': [ disk_count, 'disk-count', [ int, 'None' ], False ],\n }, {\n 'bad-disks': [ DiskInfo, True ],\n } )", "def test_create_volume_no_noncustomized_offering_with_size(self):\n\n location = self.driver.list_locations()[0]\n\n self.assertRaises(\n LibcloudError,\n self.driver.create_volume,\n 'vol-0', location, 11)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide. You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC. By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide. You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.
def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None): pass
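A hedged boto3 sketch of the same operation; the region, CIDR block, and tenancy are illustrative assumptions.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')  # assumed region

# Create a /16 VPC with default tenancy and an Amazon-provided IPv6 block.
response = ec2.create_vpc(
    CidrBlock='10.0.0.0/16',
    InstanceTenancy='default',         # cannot be changed after creation
    AmazonProvidedIpv6CidrBlock=True,  # /56 allocated from Amazon's pool
)
print(response['Vpc']['VpcId'])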
[ "def create(\n cidr_block,\n instance_tenancy=None,\n vpc_name=None,\n enable_dns_support=None,\n enable_dns_hostnames=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc = conn.create_vpc(cidr_block, instance_tenancy=instance_tenancy)\n if vpc:\n log.info(\"The newly created VPC id is %s\", vpc.id)\n\n _maybe_set_name_tag(vpc_name, vpc)\n _maybe_set_tags(tags, vpc)\n _maybe_set_dns(conn, vpc.id, enable_dns_support, enable_dns_hostnames)\n _maybe_name_route_table(conn, vpc.id, vpc_name)\n if vpc_name:\n _cache_id(\n vpc_name,\n vpc.id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"created\": True, \"id\": vpc.id}\n else:\n log.warning(\"VPC was not created\")\n return {\"created\": False}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "async def create_vpc(self, tag_name, cidr_block):\n if not await self.exists(tag_name):\n vpc = self._resource.create_vpc(CidrBlock=cidr_block)\n vpc.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": tag_name}])\n vpc.wait_until_available()\n else:\n raise VpcNameAlreadyExists", "def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None):\n pass", "def create_subnet(\n vpc_id=None,\n cidr_block=None,\n vpc_name=None,\n availability_zone=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n auto_assign_public_ipv4=False,\n):\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n subnet_object_dict = _create_resource(\n \"subnet\",\n name=subnet_name,\n tags=tags,\n vpc_id=vpc_id,\n availability_zone=availability_zone,\n cidr_block=cidr_block,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n # if auto_assign_public_ipv4 is requested set that to true using boto3\n if auto_assign_public_ipv4:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n conn3.modify_subnet_attribute(\n MapPublicIpOnLaunch={\"Value\": True}, SubnetId=subnet_object_dict[\"id\"]\n )\n return subnet_object_dict", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def create_subnets(ec2, vpc, subnets):\n # Generate candidate subnet CIDRs by shifting the VPC's prefix by 4 bits, yielding 16 possible subnet\n # CIDRs.\n vpc_cidr = ipaddress.ip_network(vpc.cidr_block)\n subnet_cidrs = list(vpc_cidr.subnets(prefixlen_diff=4))\n\n # The set difference between the availability zones that already have subnets and the availability zones\n # available in the region yields the set of availability zones where subnets must be created.\n subnet_azs = frozenset(map(lambda subnet: subnet.availability_zone, subnets))\n available_azs = frozenset(map(\n lambda az: az[\"ZoneName\"], ec2.meta.client.describe_availability_zones()[\"AvailabilityZones\"]))\n\n for az in 
(available_azs - subnet_azs):\n # If subnets already exist, their CIDRs may conflict with the candidate CIDRs that were generated.\n # Loop through the candidate list until subnet creation does not fail with a CIDR conflict error, or\n # until no candidates remain.\n while len(subnet_cidrs) > 0:\n try:\n cidr = subnet_cidrs.pop(0)\n subnet = vpc.create_subnet(AvailabilityZone=az, CidrBlock=cidr.with_prefixlen)\n # Ensure that the new subnet has the MapPublicIpOnLaunch attribute set\n ec2.meta.client.modify_subnet_attribute(SubnetId=subnet.id,\n MapPublicIpOnLaunch={\"Value\": True})\n click.echo(f\"Created new subnet: {subnet.id}\")\n break\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"InvalidSubnet.Conflict\":\n continue\n raise\n else:\n raise CraftingTableError(f\"Could not find valid CIDR to create subnet in {az}\")", "def create_vpc_endpoint(DryRun=None, VpcId=None, ServiceName=None, PolicyDocument=None, RouteTableIds=None, ClientToken=None):\n pass", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def associate_vpc_cidr_block(VpcId=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def get_usable_vpc(config):\n _, _, compute, _ = construct_clients_from_provider_config(config[\"provider\"])\n\n # For backward compatibility, reuse the VPC if the VM is launched.\n resource = GCPCompute(\n compute,\n config[\"provider\"][\"project_id\"],\n config[\"provider\"][\"availability_zone\"],\n config[\"cluster_name\"],\n )\n node = resource._list_instances(label_filters=None, status_filter=None)\n if len(node) > 0:\n netInterfaces = node[0].get(\"networkInterfaces\", [])\n if len(netInterfaces) > 0:\n vpc_name = netInterfaces[0][\"network\"].split(\"/\")[-1]\n return vpc_name\n\n vpcnets_all = _list_vpcnets(config, compute)\n\n usable_vpc_name = None\n for vpc in vpcnets_all:\n if _check_firewall_rules(vpc[\"name\"], config, compute):\n usable_vpc_name = vpc[\"name\"]\n break\n\n proj_id = config[\"provider\"][\"project_id\"]\n if usable_vpc_name is None:\n logger.info(f\"Creating a default VPC network, {SKYPILOT_VPC_NAME}...\")\n\n # Create a SkyPilot VPC network if it doesn't exist\n vpc_list = _list_vpcnets(config, compute, filter=f\"name={SKYPILOT_VPC_NAME}\")\n if len(vpc_list) == 0:\n body = VPC_TEMPLATE.copy()\n body[\"name\"] = body[\"name\"].format(VPC_NAME=SKYPILOT_VPC_NAME)\n body[\"selfLink\"] = body[\"selfLink\"].format(\n PROJ_ID=proj_id, VPC_NAME=SKYPILOT_VPC_NAME\n )\n _create_vpcnet(config, compute, body)\n\n _create_rules(\n config, compute, FIREWALL_RULES_TEMPLATE, SKYPILOT_VPC_NAME, proj_id\n )\n\n usable_vpc_name = SKYPILOT_VPC_NAME\n logger.info(f\"A VPC network {SKYPILOT_VPC_NAME} created.\")\n\n # Configure user specified rules\n ports = config[\"provider\"].get(\"ports\", [])\n user_rules = []\n for port in ports:\n cluster_name_hash = common_utils.truncate_and_hash_cluster_name(\n config[\"cluster_name\"]\n )\n name = f\"user-ports-{cluster_name_hash}-{port}\"\n user_rules.append(\n {\n \"name\": name,\n \"description\": f\"Allow user-specified port {port} for cluster 
{config['cluster_name']}\",\n \"network\": \"projects/{PROJ_ID}/global/networks/{VPC_NAME}\",\n \"selfLink\": \"projects/{PROJ_ID}/global/firewalls/\" + name,\n \"direction\": \"INGRESS\",\n \"priority\": 65534,\n \"allowed\": [\n {\n \"IPProtocol\": \"tcp\",\n \"ports\": [str(port)],\n },\n ],\n \"sourceRanges\": [\"0.0.0.0/0\"],\n \"targetTags\": [config[\"cluster_name\"]],\n }\n )\n\n _create_rules(config, compute, user_rules, usable_vpc_name, proj_id)\n\n return usable_vpc_name", "def create_instance(config):\n\n try:\n client = boto3.client('ec2')\n except Exception as e:\n print(f'An error occurred while creating the boto3 client: {e}')\n sys.exit(1)\n\n ami_id = _get_ami_id(client, config.ami_type, config.architecture, config.root_device_type, config.virtualization_type)\n default_vpc_id = _ensure_default_vpc(client)\n key_pair_names = _create_key_pairs(client, config)\n\n blockDeviceMappings = []\n for volume in config.volumes:\n blockDeviceMappings.append({\n 'DeviceName': volume.device,\n 'Ebs': {\n 'DeleteOnTermination': True,\n 'VolumeSize': volume.size_gb,\n 'VolumeType': 'gp2',\n },\n })\n\n res = client.run_instances(\n BlockDeviceMappings=blockDeviceMappings,\n\n ImageId=ami_id,\n InstanceType=config.instance_type,\n\n MaxCount=config.max_count,\n MinCount=config.min_count,\n\n SecurityGroupIds=[\n _create_security_group(client, default_vpc_id)\n ],\n\n UserData=_user_data_script(config),\n )\n\n ec2 = boto3.resource('ec2')\n instances = res['Instances']\n\n for i, instance in enumerate(instances):\n public_ip = ec2.Instance(instance['InstanceId']).public_ip_address\n print(f'instance {i} public ip address = {public_ip}')", "def test_create_subnet_with_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'cidr': '10.0.0.0/24',\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n subnet = self.deserialize(self.fmt, res)['subnet']\n self.assertIsNone(subnet['subnetpool_id'])", "def create_network_acl(\n vpc_id=None,\n vpc_name=None,\n network_acl_name=None,\n subnet_id=None,\n subnet_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n _id = vpc_name or vpc_id\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"VPC {} does not exist.\".format(_id)},\n }\n\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_name)},\n }\n elif subnet_id:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n 
r = _create_resource(\n \"network_acl\",\n name=network_acl_name,\n vpc_id=vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if r.get(\"created\") and subnet_id:\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.associate_network_acl(r[\"id\"], subnet_id)\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}\n r[\"association_id\"] = association_id\n return r", "def test_create_subnet_no_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n self.assertEqual(\n webob.exc.HTTPClientError.code, res.status_int)", "def create_network(request):\n cloud_id = request.matchdict['cloud']\n\n params = params_from_request(request)\n network_params = params.get('network')\n subnet_params = params.get('subnet')\n\n auth_context = auth_context_from_request(request)\n\n if not network_params:\n raise RequiredParameterMissingError('network')\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner, id=cloud_id)\n except me.DoesNotExist:\n raise CloudNotFoundError\n\n network = methods.create_network(auth_context.owner, cloud, network_params)\n network_dict = network.as_dict()\n\n # Bundling Subnet creation in this call because it is required\n # for backwards compatibility with the current UI\n if subnet_params:\n try:\n subnet = create_subnet(auth_context.owner, cloud,\n network, subnet_params)\n except Exception as exc:\n # Cleaning up the network object in case subnet creation\n # fails for any reason\n network.ctl.delete()\n raise exc\n network_dict['subnet'] = subnet.as_dict()\n\n return network.as_dict()", "def subnet_create_api():\r\n try:\r\n req = models.Subnet(request.json)\r\n req.validate()\r\n except Exception as e:\r\n return err_return('Parameter Invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n try:\r\n if not req.network_id:\r\n return err_return('networkid is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.subnet_id:\r\n req_id = str(uuid.uuid4())\r\n else:\r\n req_id = req.subnet_id\r\n sb_name = subnet_db_get_one('name', id=req_id)\r\n if sb_name:\r\n return err_return('id(%s) in use by %s' % (req_id, sb_name),\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n if req.subnet_name:\r\n if len(req.subnet_name) > NAME_MAX_LEN:\r\n return err_return('Length of name must be less than 255',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n else:\r\n req.subnet_name = ''\r\n\r\n external = network_db_get_one('external', id=req.network_id)\r\n if external is None:\r\n return err_return(\"networkid does not exist\",\r\n \"ParameterInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if not req.dns_nameservers:\r\n req.dns_nameservers = []\r\n if not req.allocation_pools:\r\n req.allocation_pools = []\r\n allocation_pools = []\r\n for all_pool in req.allocation_pools:\r\n allocation_pools.append(all_pool.to_primitive())\r\n req.allocation_pools = allocation_pools\r\n for pool in 
req.allocation_pools:\r\n if ip_to_bin(pool['start']) > ip_to_bin(pool['end']):\r\n return err_return(\"end_ip must be more than start_ip\",\r\n \"IPRangeError\", \"\", HTTP_BAD_REQUEST)\r\n\r\n if external == 0:\r\n if not req.cidr:\r\n return err_return('cidr is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not validate_cidr(req.cidr):\r\n return err_return('cidr invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.gateway_ip:\r\n return err_return('gateway ip is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n vl2lcid = yynetworkid_to_lcvl2id(req.network_id)\r\n log.debug('vl2lcid=%s' % vl2lcid)\r\n nets = [{\"prefix\": VFW_TOR_LINK_NET_PRE,\r\n \"netmask\": VFW_TOR_LINK_NET_MASK}]\r\n cidr = str(req.cidr).split('/')\r\n new_prf = cidr[0]\r\n new_mask = int(cidr[1])\r\n subnets = get_subnets_by_network(req.network_id)\r\n for subnet in subnets:\r\n cidr = subnet['cidr'].split('/')\r\n old_prf = cidr[0]\r\n old_mask = int(cidr[1])\r\n if subnet_equ(new_prf, old_prf, new_mask, old_mask):\r\n log.error('cidr is the same')\r\n return err_return('subnet already exist',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n nets.append({\"prefix\": old_prf, \"netmask\": old_mask})\r\n nets.append({\"prefix\": new_prf, \"netmask\": new_mask})\r\n log.debug('nets=%s' % nets)\r\n nw_name = network_db_get_one('name', id=req.network_id)\r\n payload = json.dumps({\"name\": nw_name, \"nets\": nets})\r\n r = lcapi.patch(conf.livecloud_url + '/v1/vl2s/%s' % vl2lcid,\r\n data=payload)\r\n if r.status_code != HTTP_OK:\r\n return Response(json.dumps(NEUTRON_400)), HTTP_NOT_FOUND\r\n nets = r.json()['DATA']['NETS']\r\n for net in nets:\r\n if subnet_equ(net['PREFIX'], new_prf,\r\n net['NETMASK'], new_mask):\r\n sb_lcuuid = net['LCUUID']\r\n sb_idx = net['NET_INDEX']\r\n break\r\n else:\r\n log.error('sb_lcuuid no found')\r\n sb_lcuuid = 'sb_lcuuid no found'\r\n sb_idx = -1\r\n else:\r\n subnetid = subnet_db_get_one('id', network_id=req.network_id)\r\n if subnetid:\r\n return err_return('subnet(%s) already exists' % subnetid,\r\n 'Fail', '', HTTP_BAD_REQUEST)\r\n # ISP\r\n if not req.allocation_pools:\r\n return err_return('allocation_pools can not be empty',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n id = subnet_db_get_one('id', network_id=req.network_id)\r\n if id:\r\n return subnet_get(subnetid=id)\r\n lcuuid = network_db_get_one('lcuuid', id=req.network_id)\r\n isp = lc_vl2_db_get_one('isp', lcuuid=lcuuid)\r\n items = lc_ip_res_db_get_all(req='ip, netmask, gateway, userid',\r\n isp=isp)\r\n if not items:\r\n return err_return(\"No ISP IP found\", \"BadRequest\",\r\n \"Please add ISP IP to system first\",\r\n HTTP_BAD_REQUEST)\r\n req.gateway_ip = items[0]['gateway']\r\n req.cidr = ip_mask_to_cidr(items[0]['ip'], items[0]['netmask'])\r\n isp_all_ips = []\r\n ip_to_userid = {}\r\n for it in items:\r\n isp_all_ips.append(it['ip'])\r\n ip_to_userid[it['ip']] = it['userid']\r\n req_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n for req_ip in req_ips:\r\n if req_ip not in isp_all_ips:\r\n return err_return(\"%s does not exist\" % req_ip,\r\n \"IPInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if ip_to_userid[req_ip] != 0:\r\n return err_return(\"%s in use\" % req_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n sb_lcuuid = str(uuid.uuid4())\r\n sb_idx = -1\r\n\r\n sql = (\"INSERT INTO neutron_subnets \"\r\n \"VALUES('%s','%s','%s','%s','%s','%s','%s','%s',%d)\" %\r\n (req_id, req.subnet_name, req.network_id,\r\n req.cidr, json.dumps(req.allocation_pools),\r\n 
req.gateway_ip, json.dumps(req.dns_nameservers),\r\n sb_lcuuid, sb_idx))\r\n log.debug('add subnet sql=%s' % sql)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n if external:\r\n sql = \"UPDATE ip_resource_v2_2 SET userid=%s WHERE ip in ('-1',\"\r\n for req_ip in req_ips:\r\n sql += \"'%s',\" % req_ip\r\n sql = sql[:-1]\r\n sql += \")\"\r\n log.debug('sql=%s' % sql)\r\n with MySQLdb.connect(**LCDB_INFO) as cursor:\r\n cursor.execute(sql, conf.livecloud_userid)\r\n\r\n resp, code = subnet_get(subnetid=req_id)\r\n return resp, HTTP_CREATED\r\n\r\n except Exception as e:\r\n log.error(e)\r\n return Response(json.dumps(NEUTRON_500)), HTTP_INTERNAL_SERVER_ERROR", "def from_dict(cls, _dict: Dict) -> 'VPC':\n args = {}\n if 'classic_access' in _dict:\n args['classic_access'] = _dict.get('classic_access')\n else:\n raise ValueError('Required property \\'classic_access\\' not present in VPC JSON')\n if 'created_at' in _dict:\n args['created_at'] = string_to_datetime(_dict.get('created_at'))\n else:\n raise ValueError('Required property \\'created_at\\' not present in VPC JSON')\n if 'crn' in _dict:\n args['crn'] = _dict.get('crn')\n else:\n raise ValueError('Required property \\'crn\\' not present in VPC JSON')\n if 'cse_source_ips' in _dict:\n args['cse_source_ips'] = [VPCCSESourceIP.from_dict(x) for x in _dict.get('cse_source_ips')]\n if 'default_network_acl' in _dict:\n args['default_network_acl'] = NetworkACLReference.from_dict(_dict.get('default_network_acl'))\n else:\n raise ValueError('Required property \\'default_network_acl\\' not present in VPC JSON')\n if 'default_routing_table' in _dict:\n args['default_routing_table'] = RoutingTableReference.from_dict(_dict.get('default_routing_table'))\n else:\n raise ValueError('Required property \\'default_routing_table\\' not present in VPC JSON')\n if 'default_security_group' in _dict:\n args['default_security_group'] = SecurityGroupReference.from_dict(_dict.get('default_security_group'))\n else:\n raise ValueError('Required property \\'default_security_group\\' not present in VPC JSON')\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n else:\n raise ValueError('Required property \\'href\\' not present in VPC JSON')\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in VPC JSON')\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n else:\n raise ValueError('Required property \\'name\\' not present in VPC JSON')\n if 'resource_group' in _dict:\n args['resource_group'] = ResourceGroupReference.from_dict(_dict.get('resource_group'))\n else:\n raise ValueError('Required property \\'resource_group\\' not present in VPC JSON')\n if 'status' in _dict:\n args['status'] = _dict.get('status')\n else:\n raise ValueError('Required property \\'status\\' not present in VPC JSON')\n return cls(**args)", "def _create_security_group(client, vpc_id):\n\n res = client.create_security_group(\n Description=\"Allow ssh from user public IP address\",\n GroupName=f'ssh-from-public-ip-{_rand_chars(10)}',\n VpcId=vpc_id,\n )\n\n group_id = res['GroupId']\n\n try:\n public_ip = f'{requests.get(\"https://checkip.amazonaws.com/\").text.strip()}/32'\n except Exception:\n print('encountered error getting public ip; using 0.0.0.0/0 instead')\n public_ip = '0.0.0.0/0'\n\n res = client.authorize_security_group_ingress(\n CidrIp=public_ip,\n FromPort=22,\n GroupId=group_id,\n IpProtocol='tcp',\n ToPort=22,\n )\n\n return group_id", "def 
__create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a VPC endpoint for a specified AWS service. An endpoint enables you to create a private connection between your VPC and another AWS service in your account. You can attach an endpoint policy to the endpoint, which controls access to the service from your VPC. You can also specify the VPC route tables that use the endpoint. Use DescribeVpcEndpointServices to get a list of supported AWS services.
def create_vpc_endpoint(DryRun=None, VpcId=None, ServiceName=None, PolicyDocument=None, RouteTableIds=None, ClientToken=None): pass
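A minimal boto3 sketch of creating a gateway endpoint for S3, assuming the boto3 client and placeholder VPC and route table IDs; the policy shown is a permissive example, not a recommendation.

import boto3
import json

ec2 = boto3.client('ec2', region_name='us-east-1')  # assumed region

# Example endpoint policy controlling access to the service from the VPC.
policy = {
    'Version': '2012-10-17',
    'Statement': [{'Effect': 'Allow', 'Principal': '*', 'Action': '*', 'Resource': '*'}],
}

# Gateway endpoint for S3, associated with one route table.
response = ec2.create_vpc_endpoint(
    VpcId='vpc-0123456789abcdef0',            # placeholder
    ServiceName='com.amazonaws.us-east-1.s3',
    RouteTableIds=['rtb-0123456789abcdef0'],  # placeholder
    PolicyDocument=json.dumps(policy),
)
print(response['VpcEndpoint']['VpcEndpointId'])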
[ "def create_vpc_endpoint(self, vpc_id, route_table_id, service_name):\n params = {'VpcId': vpc_id, 'RouteTableId.1': route_table_id,\n 'ServiceName': service_name}\n return self.get_object('CreateVpcEndpoint', params, VPCEndpoint,\n verb='POST')", "def vpc_stack_with_endpoints(region, request, key_name):\n\n logging.info(\"Creating VPC stack with endpoints\")\n credential = request.config.getoption(\"credential\")\n stack_factory = CfnStacksFactory(request.config.getoption(\"credential\"))\n\n def _create_stack(request, template, region, default_az_id, az_ids, stack_factory):\n # TODO: be able to reuse an existing VPC endpoint stack\n stack = CfnVpcStack(\n name=generate_stack_name(\"integ-tests-vpc-endpoints\", request.config.getoption(\"stackname_suffix\")),\n region=region,\n template=template.to_json(),\n default_az_id=default_az_id,\n az_ids=az_ids,\n )\n stack_factory.create_stack(stack)\n return stack\n\n # tests with VPC endpoints are not using multi-AZ\n default_az_id, default_az_name, _ = get_az_setup_for_region(region, credential)\n\n bastion_subnet = SubnetConfig(\n name=subnet_name(visibility=\"Public\", az_id=default_az_id),\n cidr=CIDR_FOR_PUBLIC_SUBNETS[0],\n map_public_ip_on_launch=True,\n has_nat_gateway=True,\n availability_zone=default_az_name,\n default_gateway=Gateways.INTERNET_GATEWAY,\n )\n\n no_internet_subnet = SubnetConfig(\n name=subnet_name(visibility=\"Private\", flavor=\"NoInternet\"),\n cidr=CIDR_FOR_PRIVATE_SUBNETS[0],\n map_public_ip_on_launch=False,\n has_nat_gateway=False,\n availability_zone=default_az_name,\n default_gateway=Gateways.NONE,\n )\n\n vpc_config = VPCConfig(\n cidr=\"192.168.0.0/17\",\n additional_cidr_blocks=[\"192.168.128.0/17\"],\n subnets=[\n bastion_subnet,\n no_internet_subnet,\n ],\n )\n\n with aws_credential_provider(region, credential):\n bastion_image_id = retrieve_latest_ami(region, \"alinux2\")\n\n template = NetworkTemplateBuilder(\n vpc_configuration=vpc_config,\n default_availability_zone=default_az_name,\n create_vpc_endpoints=True,\n bastion_key_name=key_name,\n bastion_image_id=bastion_image_id,\n region=region,\n ).build()\n\n yield _create_stack(request, template, region, default_az_id, [default_az_id], stack_factory)\n\n if not request.config.getoption(\"no_delete\"):\n stack_factory.delete_all_stacks()\n else:\n logging.warning(\"Skipping deletion of CFN VPC endpoints stack because --no-delete option is set\")", "def create_endpoint_gateway(self,\n target: 'EndpointGatewayTargetPrototype',\n vpc: 'VPCIdentity',\n *,\n ips: List['EndpointGatewayReservedIP'] = None,\n name: str = None,\n resource_group: 'ResourceGroupIdentity' = None,\n **kwargs\n ) -> DetailedResponse:\n\n if target is None:\n raise ValueError('target must be provided')\n if vpc is None:\n raise ValueError('vpc must be provided')\n target = convert_model(target)\n vpc = convert_model(vpc)\n if ips is not None:\n ips = [convert_model(x) for x in ips]\n if resource_group is not None:\n resource_group = convert_model(resource_group)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_endpoint_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = {\n 'target': target,\n 'vpc': vpc,\n 'ips': ips,\n 'name': name,\n 'resource_group': resource_group\n }\n data = {k: v for (k, v) in data.items() if v is not None}\n data = json.dumps(data)\n headers['content-type'] = 'application/json'\n\n if 'headers' in 
kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/endpoint_gateways'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def create_endpoint(\n self,\n request: registration_service.CreateEndpointRequest = None,\n *,\n parent: str = None,\n endpoint: gcs_endpoint.Endpoint = None,\n endpoint_id: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> gcs_endpoint.Endpoint:\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n if request is not None and any([parent, endpoint, endpoint_id]):\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = registration_service.CreateEndpointRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n\n if parent is not None:\n request.parent = parent\n if endpoint is not None:\n request.endpoint = endpoint\n if endpoint_id is not None:\n request.endpoint_id = endpoint_id\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.create_endpoint,\n default_timeout=None,\n client_info=_client_info,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)\n\n # Done; return the response.\n return response", "def modify_vpc_endpoint(DryRun=None, VpcEndpointId=None, ResetPolicy=None, PolicyDocument=None, AddRouteTableIds=None, RemoveRouteTableIds=None):\n pass", "def describe_vpc_endpoint_services(DryRun=None, MaxResults=None, NextToken=None):\n pass", "def test_endpoints_add(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.endpoints), 9)\n svc.endpoints.append(sm.Endpoint(\"foo\", \"bar\"))\n svc.endpoints.append(sm.Endpoint(\"bar\", \"baz\"))\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n if not \"foo\" in [ep.name for ep in svc.endpoints]:\n raise ValueError(\"Failed to alter endpoints.\")\n if not \"bar\" in [ep.name for ep in svc.endpoints]:\n raise ValueError(\"Failed to alter endpoints.\")\n for ep in svc.endpoints:\n if ep.name == \"foo\":\n self.assertEqual(ep.purpose, \"bar\")\n if ep.name == \"bar\":\n self.assertEqual(ep.purpose, \"baz\")\n self.assertEqual(len(svc.endpoints), 11)", "def delete_vpc_endpoint_resources():\n print('Deleting VPC endpoints')\n ec2 = boto3.client('ec2')\n endpoint_ids = []\n for endpoint in ec2.describe_vpc_endpoints()['VpcEndpoints']:\n print('Deleting VPC Endpoint - {}'.format(endpoint['ServiceName']))\n endpoint_ids.append(endpoint['VpcEndpointId'])\n\n if endpoint_ids:\n ec2.delete_vpc_endpoints(\n VpcEndpointIds=endpoint_ids\n )\n\n print('Waiting for VPC endpoints to get deleted')\n while ec2.describe_vpc_endpoints()['VpcEndpoints']:\n time.sleep(5)\n\n print('VPC endpoints deleted')\n\n # 
VPC endpoints connections\n print('Deleting VPC endpoint connections')\n service_ids = []\n for connection in ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n service_id = connection['ServiceId']\n state = connection['VpcEndpointState']\n\n if state in ['PendingAcceptance', 'Pending', 'Available', 'Rejected', 'Failed', 'Expired']:\n print('Deleting VPC Endpoint Service - {}'.format(service_id))\n service_ids.append(service_id)\n\n ec2.reject_vpc_endpoint_connections(\n ServiceId=service_id,\n VpcEndpointIds=[\n connection['VpcEndpointId'],\n ]\n )\n\n if service_ids:\n ec2.delete_vpc_endpoint_service_configurations(\n ServiceIds=service_ids\n )\n\n print('Waiting for VPC endpoint services to be destroyed')\n while ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n time.sleep(5)\n\n print('VPC endpoint connections deleted')", "def create_service(\n self,\n id=None, # type: Optional[str]\n endpoints=None, # type: Optional[List[\"models.MicrosoftGraphPrintServiceEndpoint\"]]\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphPrintService\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrintService\"]\n error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}))\n\n _body = models.MicrosoftGraphPrintService(id=id, endpoints=endpoints)\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_service.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n header_parameters['Accept'] = 'application/json'\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(_body, 'MicrosoftGraphPrintService')\n body_content_kwargs['content'] = body_content\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrintService', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def add_endpoint(self, endpoint):\n self.endpoints[endpoint.name] = endpoint", "def create(self, network, service_name, blueprint, template_vars, count):\n logger.debug('Creating service %s, %s with blueprint %s and ' 'template_vars %s',\n network.name, service_name, blueprint, template_vars)\n self.subnetwork.create(network.name, service_name,\n blueprint=blueprint)\n instances_blueprint = ServiceBlueprint.from_file(blueprint)\n az_count = instances_blueprint.availability_zone_count()\n availability_zones = list(itertools.islice(self._get_availability_zones(), az_count))\n if len(availability_zones) < az_count:\n raise DisallowedOperationException(\"Do not have %s availability zones: %s\" % (\n az_count, availability_zones))\n instance_count = 
az_count\n if count:\n instance_count = count\n\n def get_image(image_specifier):\n images = [image for image in self.driver.list_images() if re.match(image_specifier,\n image.name)]\n if not images:\n raise DisallowedOperationException(\"Could not find image named %s\"\n % image_specifier)\n if len(images) > 1:\n raise DisallowedOperationException(\"Found multiple images for specifier %s: %s\"\n % (image_specifier, images))\n return images[0]\n\n image = get_image(instances_blueprint.image())\n instance_type = get_fitting_instance(self, instances_blueprint)\n for availability_zone, instance_num in zip(itertools.cycle(availability_zones),\n range(0, instance_count)):\n full_subnetwork_name = \"%s-%s\" % (network.name, service_name)\n instance_name = \"%s-%s\" % (full_subnetwork_name, instance_num)\n metadata = [\n {\"key\": \"startup-script\", \"value\":\n instances_blueprint.runtime_scripts(template_vars)},\n {\"key\": \"network\", \"value\": network.name},\n {\"key\": \"subnetwork\", \"value\": service_name}\n ]\n logger.info('Creating instance %s in zone %s', instance_name, availability_zone.name)\n self.driver.create_node(instance_name, instance_type, image, location=availability_zone,\n ex_network=network.name, ex_subnetwork=full_subnetwork_name,\n external_ip=\"ephemeral\", ex_metadata=metadata,\n ex_tags=[full_subnetwork_name])\n return self.get(network, service_name)", "def create_vpn_service(self, **attrs):\n return self._create(_vpn_service.VpnService, **attrs)", "def get_or_create_endpoint(project_id: str,\n data_region: str,\n data_pipeline_root: str,\n display_name: str,\n create_if_not_exists: bool,\n endpoint: Output[Artifact]):\n\n aiplatform.init(\n project=project_id,\n location=data_region,\n staging_bucket=data_pipeline_root)\n\n # Check if the named endpoint exists\n endpoints = aiplatform.Endpoint.list(\n project=project_id,\n location=data_region,\n filter=f'display_name=\"{display_name}\"',\n order_by='create_time desc'\n )\n\n # If create_if_not_exists is True and no existing\n # endpoint with the display name, create one\n if endpoints:\n model_endpoint = endpoints[0]\n logging.info(f'Endpoint {model_endpoint.name} is found')\n elif create_if_not_exists:\n logging.info(\n f'Endpoint with display_name {display_name} is not found, create one.')\n model_endpoint = _create_endpoint(project_id, data_region, display_name)\n else:\n raise RuntimeError(\n 'Endpoint is not found and create_if_not_exists is False')\n\n endpoint.uri = f'aiplatform://v1/{model_endpoint.resource_name}'", "def create(\n cidr_block,\n instance_tenancy=None,\n vpc_name=None,\n enable_dns_support=None,\n enable_dns_hostnames=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc = conn.create_vpc(cidr_block, instance_tenancy=instance_tenancy)\n if vpc:\n log.info(\"The newly created VPC id is %s\", vpc.id)\n\n _maybe_set_name_tag(vpc_name, vpc)\n _maybe_set_tags(tags, vpc)\n _maybe_set_dns(conn, vpc.id, enable_dns_support, enable_dns_hostnames)\n _maybe_name_route_table(conn, vpc.id, vpc_name)\n if vpc_name:\n _cache_id(\n vpc_name,\n vpc.id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"created\": True, \"id\": vpc.id}\n else:\n log.warning(\"VPC was not created\")\n return {\"created\": False}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create(cls, service=None, endpoint=None, 
data=None, *args, **kwargs):\n cls.validate(data)\n\n if service is None and endpoint is None:\n raise InvalidArguments(service, endpoint)\n if endpoint is None:\n sid = service['id'] if isinstance(service, Entity) else service\n endpoint = '/services/{0}/integrations'.format(sid)\n\n # otherwise endpoint should contain the service path too\n getattr(Entity, 'create').im_func(cls, endpoint=endpoint, data=data,\n *args, **kwargs)", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def get_vpc_endpoint(self, vpc_id, route_table_id):\n vpc_endpoints = self.get_list('DescribeVpcEndpoints', {},\n [('item', VPCEndpoint)], verb='POST')\n\n for vpc_endpoint in vpc_endpoints:\n if (vpc_endpoint.vpc_id == vpc_id and\n vpc_endpoint.route_tables[0] == route_table_id):\n return vpc_endpoint\n\n return None", "def create_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, GatewayId=None, DestinationIpv6CidrBlock=None, EgressOnlyInternetGatewayId=None, InstanceId=None, NetworkInterfaceId=None, VpcPeeringConnectionId=None, NatGatewayId=None):\n pass", "def create_namespaced_endpoints(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_endpoints\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_endpoints`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_endpoints`\")\n\n resource_path = '/api/v1/namespaces/{namespace}/endpoints'.replace('{format}', 'json')\n method = 'POST'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1Endpoints',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1. The response includes information that you need to give to your network administrator to configure your customer gateway. If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call. This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error. For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.
def create_vpn_connection(DryRun=None, Type=None, CustomerGatewayId=None, VpnGatewayId=None, Options=None): pass
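A minimal usage sketch, assuming the boto3 EC2 client (whose create_vpn_connection signature the stub above mirrors); both gateway IDs are hypothetical placeholders:

import boto3

ec2 = boto3.client("ec2")

# Both gateways must already exist; ipsec.1 is the only supported type.
response = ec2.create_vpn_connection(
    Type="ipsec.1",
    CustomerGatewayId="cgw-0123456789abcdef0",
    VpnGatewayId="vgw-0123456789abcdef0",
    Options={"StaticRoutesOnly": True},
)

# CustomerGatewayConfiguration carries the details a network
# administrator needs to configure the customer gateway device.
print(response["VpnConnection"]["CustomerGatewayConfiguration"])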
[ "def create_vpn_gateway(DryRun=None, Type=None, AvailabilityZone=None):\n pass", "def CreateVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_vpn_gateway_connection(self,\n vpn_gateway_id: str,\n vpn_gateway_connection_prototype: 'VPNGatewayConnectionPrototype',\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_id is None:\n raise ValueError('vpn_gateway_id must be provided')\n if vpn_gateway_connection_prototype is None:\n raise ValueError('vpn_gateway_connection_prototype must be provided')\n if isinstance(vpn_gateway_connection_prototype, VPNGatewayConnectionPrototype):\n vpn_gateway_connection_prototype = convert_model(vpn_gateway_connection_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_vpn_gateway_connection')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(vpn_gateway_connection_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['vpn_gateway_id']\n path_param_values = self.encode_path_vars(vpn_gateway_id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{vpn_gateway_id}/connections'.format(**path_param_dict)\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def create_VPN(self, public_ip):\n\n self.debug(\"Creating VPN with public IP: %s\" % public_ip.ipaddress.id)\n try:\n # Assign VPN to Public IP\n vpn = Vpn.create(self.apiclient,\n self.public_ip.ipaddress.id,\n account=self.account.name,\n domainid=self.account.domainid)\n\n self.debug(\"Verifying the remote VPN access\")\n vpns = Vpn.list(self.apiclient,\n publicipid=public_ip.ipaddress.id,\n listall=True)\n self.assertEqual(\n isinstance(vpns, list),\n True,\n \"List VPNs shall return a valid response\"\n )\n return vpn\n except Exception as e:\n self.fail(\"Failed to create remote VPN access: %s\" % e)", "def attach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass", "def test_01_vpc_remote_access_vpn(self):\n # 1) Create VPC\n vpc = VPC.create(\n api_client=self.apiclient,\n services=self.services[\"vpc\"],\n networkDomain=\"vpc.vpn\",\n vpcofferingid=self.vpc_offering.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.domain.id\n )\n\n self.assertIsNotNone(vpc, \"VPC creation failed\")\n self.logger.debug(\"VPC %s created\" % (vpc.id))\n\n self.cleanup.append(vpc)\n\n # 2) Create network in VPC\n ntwk = Network.create(\n api_client=self.apiclient,\n services=self.services[\"network_1\"],\n accountid=self.account.name,\n domainid=self.domain.id,\n networkofferingid=self.network_offering.id,\n zoneid=self.zone.id,\n vpcid=vpc.id\n )\n\n self.assertIsNotNone(ntwk, \"Network failed to create\")\n self.logger.debug(\"Network %s created in VPC %s\" % (ntwk.id, vpc.id))\n\n self.cleanup.append(ntwk)\n\n # 3) Deploy a vm\n vm = 
VirtualMachine.create(self.apiclient, services=self.services[\"virtual_machine\"],\n templateid=self.template.id,\n zoneid=self.zone.id,\n accountid=self.account.name,\n domainid=self.domain.id,\n serviceofferingid=self.virtual_machine_offering.id,\n networkids=ntwk.id,\n hypervisor=self.hypervisor\n )\n self.assertIsNotNone(vm, \"VM failed to deploy\")\n self.assertEquals(vm.state, 'Running', \"VM is not running\")\n self.debug(\"VM %s deployed in VPC %s\" % (vm.id, vpc.id))\n\n self.logger.debug(\"Deployed virtual machine: OK\")\n self.cleanup.append(vm)\n\n # 4) Enable VPN for VPC\n src_nat_list = PublicIPAddress.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n listall=True,\n issourcenat=True,\n vpcid=vpc.id\n )\n ip = src_nat_list[0]\n\n self.logger.debug(\"Acquired public ip address: OK\")\n\n vpn = Vpn.create(self.apiclient,\n publicipid=ip.id,\n account=self.account.name,\n domainid=self.account.domainid,\n iprange=self.services[\"vpn\"][\"iprange\"],\n fordisplay=self.services[\"vpn\"][\"fordisplay\"]\n )\n\n self.assertIsNotNone(vpn, \"Failed to create Remote Access VPN\")\n self.logger.debug(\"Created Remote Access VPN: OK\")\n\n vpn_user = None\n # 5) Add VPN user for VPC\n vpn_user = VpnUser.create(self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n username=self.services[\"vpn\"][\"vpn_user\"],\n password=self.services[\"vpn\"][\"vpn_pass\"]\n )\n\n self.assertIsNotNone(vpn_user, \"Failed to create Remote Access VPN User\")\n self.logger.debug(\"Created VPN User: OK\")\n\n # TODO: Add an actual remote vpn connection test from a remote vpc\n\n # 9) Disable VPN for VPC\n vpn.delete(self.apiclient)\n\n self.logger.debug(\"Deleted the Remote Access VPN: OK\")", "def create_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def create(cls, api_client, publicipid, account=None, domainid=None,\n projectid=None, networkid=None, vpcid=None, openfirewall=None, iprange=None, fordisplay=False):\n cmd = {'publicipid': publicipid}\n if account:\n cmd['account'] = account\n if domainid:\n cmd['domainid'] = domainid\n if projectid:\n cmd['projectid'] = projectid\n if networkid:\n cmd['networkid'] = networkid\n if vpcid:\n cmd['vpcid'] = vpcid\n if iprange:\n cmd['iprange'] = iprange\n if openfirewall:\n cmd['openfirewall'] = openfirewall\n\n cmd['fordisplay'] = fordisplay\n cmd['fetch_result'] = True\n return Vpn(api_client.createRemoteAccessVpn(**cmd).get('remoteaccessvpn'))", "def create_nat_gateway(SubnetId=None, AllocationId=None, ClientToken=None):\n pass", "def create(cls, api_client, services=None, name=None, gateway=None, cidrlist=None, account=None, domainid=None,\n presharedkey=None, ikepolicy=None, esppolicy=None):\n cmd = {'name': name, 'gateway': gateway, 'cidrlist': cidrlist}\n\n if not services:\n services = {}\n if \"ipsecpsk\" in services:\n cmd['ipsecpsk'] = services[\"ipsecpsk\"]\n elif presharedkey:\n cmd['ipsecpsk'] = presharedkey\n\n if \"ikepolicy\" in services:\n cmd['ikepolicy'] = services[\"ikepolicy\"]\n elif ikepolicy:\n cmd['ikepolicy'] = ikepolicy\n\n if \"ikelifetime\" in services:\n cmd['ikelifetime'] = services[\"ikelifetime\"]\n\n if \"esppolicy\" in services:\n cmd['esppolicy'] = services[\"esppolicy\"]\n elif esppolicy:\n cmd['esppolicy'] = esppolicy\n\n if \"esplifetime\" in services:\n cmd['esplifetime'] = services[\"esplifetime\"]\n\n if \"dpd\" in services:\n cmd['dpd'] = services[\"dpd\"]\n\n if \"forceencap\" in services:\n cmd['forceencap'] = 
services[\"forceencap\"]\n\n if account:\n cmd['account'] = account\n if domainid:\n cmd['domainid'] = domainid\n cmd['fetch_result'] = True\n return VpnCustomerGateway(api_client.createVpnCustomerGateway(**cmd).get('vpncustomergateway'))", "def _set_vpn(self, str_vpn):\n # The VPN name is passed as a varaible to get the right commands to use.\n if str_vpn is not None:\n dict_vpn = VPN.dict_vpn\n self.vpn_status = dict_vpn['vpn_is_on'][str_vpn]\n self.vpn_connect = dict_vpn['vpn_new_connection'][str_vpn]\n\n request = ConnectionManager.request(url='https://nordvpn.com/api/server')\n soup = BeautifulSoup(request, 'lxml')\n lst_servers = json.loads(soup.text)\n\n servers = pd.DataFrame({'Country': [serv['country'] for serv in lst_servers],\n 'Domain': [serv['domain'] for serv in lst_servers]})\n\n servers['Flag'] = servers['Domain'].astype(str).str.extract(r'^([a-z]{2})\\d{2,4}.nordvpn.com$')\n servers['ID'] = servers['Domain'].astype(str).str.extract(r'^[a-z]{2}(\\d{2,4}).nordvpn.com$')\n\n self.servers = servers\n else:\n self.vpn_status = None\n self.vpn_connect = None", "def RenewVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"RenewVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.RenewVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def _vpn(port, config_file, user, privileged, ssh_port, host, verbose,\n openvpn_container, vpn_client, docker_cmd):\n\n if verbose:\n set_verbose()\n\n if privileged:\n os.environ[constants.privileged] = '1'\n\n if sys.platform == 'win32':\n logger.error(\"*** VPN is currently unsupported on Windows\")\n return 1\n\n if not is_privileged():\n logger.error(\"*** You don't have permission to run this command.\" +\n \"\\n{}\".format(constants.unprivileged_suggestion))\n return 1\n\n if not ((os.path.isfile(vpn_client) and os.access(vpn_client, os.X_OK)) or\n shutil.which(vpn_client)):\n msg = \"*** Not a valid executable: {}\"\n logger.error(msg.format(vpn_client))\n return 1\n\n port = validate_port(port, default=1194)\n if port is None:\n return 1\n client = sshclient(config_file, user, ssh_port, host)\n\n if not distutils.spawn.find_executable(vpn_client):\n msg = (\"You don't seem to have the '{}' executable. 
Please add it to \"\n \"your $PATH or equivalent.\")\n logger.error(msg.format(vpn_client))\n return 1\n\n docker_cmd = resolve_docker_cmd(client, docker_cmd)\n\n mesos_hosts, dns_hosts = gen_hosts(client)\n container_name = \"openvpn-{}\".format(rand_str(8))\n remote_openvpn_dir = \"/etc/openvpn\"\n remote_keyfile = \"{}/static.key\".format(remote_openvpn_dir)\n remote_clientfile = \"{}/client.ovpn\".format(remote_openvpn_dir)\n\n emitter.publish(\"\\nATTENTION: IF DNS DOESN'T WORK, add these DNS servers!\")\n for host in dns_hosts:\n emitter.publish(host)\n\n parsed_routes = ','.join(mesos_hosts)\n parsed_dns = ','.join(dns_hosts)\n\n with util.temptext() as server_tup, \\\n util.temptext() as key_tup, \\\n util.temptext() as config_tup, \\\n util.temptext() as client_tup:\n\n serverfile, serverpath = server_tup\n keyfile, keypath = key_tup\n clientconfigfile, clientconfigpath = config_tup\n clientfile, clientpath = client_tup\n\n scom = \"\"\"\\\n {} run --rm --cap-add=NET_ADMIN -p 0:1194 \\\n -e \"OPENVPN_ROUTES={}\" -e \"OPENVPN_DNS={}\" --name {} {}\\\n \"\"\".format(docker_cmd, parsed_routes, parsed_dns,\n container_name, openvpn_container)\n\n # FDs created when python opens a file have O_CLOEXEC set, which\n # makes them invalid in new threads (cloning). So we duplicate the\n # FD, which creates one without O_CLOEXEC.\n serverfile_dup = os.dup(serverfile)\n # XXX This FD is never closed because it would cause the vpn server\n # thread to crash\n\n vpn_server = threading.Thread(target=logging_exec,\n args=(client, scom, serverfile_dup,\n True),\n daemon=True)\n vpn_server.start()\n\n msg = \"\\nWaiting for VPN server in container '{}' to come up...\"\n emitter.publish(msg.format(container_name))\n\n scom = (\"until \"\n \"\"\"[ \"$(%s inspect --format='{{ .State.Running }}' \"\"\"\n \"\"\"%s 2>/dev/null)\" = \"true\" ] 2>/dev/null; do sleep 0.5; \"\"\"\n \"\"\"done\"\"\") % (docker_cmd, container_name)\n scom += (\" && \"\n \"\"\"{} exec {} sh -c 'until [ -s {} ]; do sleep 0.5; \"\"\"\n \"\"\"done' \"\"\").format(docker_cmd, container_name,\n remote_keyfile)\n scom += (\" && \"\n \"\"\"{} exec {} sh -c 'until [ -s {} ]; do sleep 0.5; \"\"\"\n \"\"\"done' \"\"\").format(docker_cmd, container_name,\n remote_clientfile)\n ssh_exec_fatal(client, scom)\n\n scom = (\"\"\"%s inspect --format='\"\"\"\n \"\"\"{{range $p, $conf := .NetworkSettings.Ports}}\"\"\"\n \"\"\"{{(index $conf 0).HostPort}}{{end}}' %s\"\"\"\n ) % (docker_cmd, container_name)\n remote_port = int(ssh_exec_fatal(client, scom).read().decode().strip())\n\n def tunnel_def():\n ssh_transport = client.get_transport()\n forward_tunnel(port, '127.0.0.1', remote_port, ssh_transport)\n tunnel = threading.Thread(target=tunnel_def, daemon=True)\n tunnel.start()\n\n container_cp(client, container_name, remote_keyfile, keyfile,\n docker_cmd)\n container_cp(client, container_name, remote_clientfile,\n clientconfigfile, docker_cmd)\n\n vpn_com = '{} --config {} --secret {} --port {}'\n vpn_com = vpn_com.format(vpn_client, clientconfigpath, keypath, port)\n\n emitter.publish(\"\\nVPN server output at {}\".format(serverpath))\n emitter.publish(\"VPN client output at {}\\n\".format(clientpath))\n ret = run_vpn(vpn_com, clientfile)\n\n client.close()\n input('Exited. 
Temporary files will be gone once you hit <Return>.')\n return ret", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def create_network_gateway(self, body=None):\n return self._post(self.network_gateways_path, body=body)", "def create_nat_gw(dmz_id) :\n\t\n\text_ip = client.allocate_address(\n \t#Domain='vpc'|'standard',\n\t #Address='string',\n \t#DryRun=True|False\n\t )\n\text_ip = client.describe_addresses(\n\t\tFilters=[\n \t{\n \t'Name': 'public-ip',\n 'Values': [ext_ip['PublicIp']]\n \t}\n ]\n \t\t)['Addresses'][0] # good part\n\n\tnat_gw = client.create_nat_gateway(\n \tAllocationId=ext_ip['AllocationId'],\n\t SubnetId=dmz_id\n \t)['NatGateway']\n\t\n\treturn ext_ip, nat_gw", "def create_vpn_gateway(self,\n vpn_gateway_prototype: 'VPNGatewayPrototype',\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_prototype is None:\n raise ValueError('vpn_gateway_prototype must be provided')\n if isinstance(vpn_gateway_prototype, VPNGatewayPrototype):\n vpn_gateway_prototype = convert_model(vpn_gateway_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_vpn_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(vpn_gateway_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/vpn_gateways'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def create_nat_gateway(\n subnet_id=None,\n subnet_name=None,\n allocation_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\",\n subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"Subnet {} does not exist.\".format(subnet_name)\n },\n }\n else:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if not allocation_id:\n address = conn3.allocate_address(Domain=\"vpc\")\n allocation_id = address.get(\"AllocationId\")\n\n # Have to go to boto3 to create NAT gateway\n r = 
conn3.create_nat_gateway(SubnetId=subnet_id, AllocationId=allocation_id)\n return {\"created\": True, \"id\": r.get(\"NatGateway\", {}).get(\"NatGatewayId\")}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway. For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.
def create_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None): pass
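A minimal sketch of adding a static route to an existing connection, again assuming the boto3 EC2 client; the connection ID and CIDR are hypothetical placeholders:

import boto3

ec2 = boto3.client("ec2")

# Traffic for this CIDR is routed from the virtual private gateway
# to the VPN customer gateway over the given connection.
ec2.create_vpn_connection_route(
    VpnConnectionId="vpn-0123456789abcdef0",
    DestinationCidrBlock="192.168.10.0/24",
)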
[ "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def create_vpn_gateway(DryRun=None, Type=None, AvailabilityZone=None):\n pass", "def create_vpn_connection(DryRun=None, Type=None, CustomerGatewayId=None, VpnGatewayId=None, Options=None):\n pass", "def add_static_route(self, gateway, destination, network=None):\n routing_node_gateway = RoutingNodeGateway(gateway,\n destinations=destination) \n return self._add_gateway_node('router', routing_node_gateway, network)", "def create_static_route(parent_mo, prefix, **args):\n args = args['optional_args'] if 'optional_args' in args.keys() else args\n\n ip_routep = RouteP(parent_mo, prefix)\n\n if is_valid_key(args, 'next_hop_address'):\n for ip in args['next_hop_address']:\n ip_nexthopp = NexthopP(ip_routep, ip)", "def create_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, GatewayId=None, DestinationIpv6CidrBlock=None, EgressOnlyInternetGatewayId=None, InstanceId=None, NetworkInterfaceId=None, VpcPeeringConnectionId=None, NatGatewayId=None):\n pass", "def create_nat_gateway(SubnetId=None, AllocationId=None, ClientToken=None):\n pass", "def add_route_tgw_nh(route_table_id, destination_cidr_block, transit_gateway_id):\n ec2 = boto3.client('ec2')\n\n resp = ec2.create_route(\n DryRun=False,\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n TransitGatewayId=transit_gateway_id,\n )\n logger.info(\"Got response to add_route_tgw_nh {} \".format(resp))\n return resp", "def configure_routing(vpc):\n internet_gateways = list(vpc.internet_gateways.all())\n if len(internet_gateways) == 1:\n internet_gateway = internet_gateways[0]\n elif len(internet_gateways) == 0:\n raise CraftingTableError(\"No internet gateway found\")\n else:\n raise CraftingTableError(f\"Multiple internet gateways found: {id_list(internet_gateways)}\")\n\n route_tables = list(vpc.route_tables.filter(Filters=[{\"Name\": \"association.main\", \"Values\": [\"true\"]}]))\n if len(route_tables) == 1:\n route_table = route_tables[0]\n elif len(route_tables) == 0:\n raise CraftingTableError(\"No route table found\")\n if len(route_tables) != 1:\n raise CraftingTableError(f\"Multiple route tables found: {id_list(route_tables)}\")\n\n for route in route_table.routes:\n if route.gateway_id == internet_gateway.id:\n break\n else:\n route_table.create_route(DestinationCidrBlock=\"0.0.0.0/0\", GatewayId=internet_gateway.id)\n click.echo(f\"Created default route to {internet_gateway.id}\")", "def l3route(name, gateway, ip_network):\n engine = Engine(name).load()\n return engine.add_route(gateway, ip_network)", "def create_static_routes(ADDR_TYPE, input_dict, tgen, CWD, topo):\n\n try:\n global frr_cfg\n for router in input_dict.keys():\n if \"static_routes\" in input_dict[router]:\n static_routes_list = []\n\n # Reset config for routers\n frr_cfg[router].reset_it()\n\n static_routes = input_dict[router][\"static_routes\"]\n for static_route in static_routes:\n network = static_route[\"network\"]\n # No of IPs\n if \"no_of_ip\" in static_route:\n no_of_ip = static_route[\"no_of_ip\"]\n else:\n no_of_ip = 0\n\n if \"admin_distance\" in static_route:\n 
admin_distance = static_route[\"admin_distance\"]\n else:\n admin_distance = 1\n\n if \"tag\" in static_route:\n tag = static_route[\"tag\"]\n else:\n tag = None\n\n if \"if_name\" in static_route:\n if_name = static_route[\"if_name\"]\n else:\n if_name = None\n\n next_hop = static_route[\"next_hop\"]\n\n ip_list = generate_ips(ADDR_TYPE, network, no_of_ip)\n for ip in ip_list:\n ip = str(ipaddress.ip_network(unicode(ip)))\n if ADDR_TYPE == \"ipv4\":\n addr = Address(ADDR_TYPE_IPv4, ip, None)\n route = Route(addr)\n nh = Address(ADDR_TYPE_IPv4, next_hop, None)\n else:\n addr = Address(ADDR_TYPE_IPv6, None, ip)\n route = Route(addr)\n nh = Address(ADDR_TYPE_IPv6, None, next_hop)\n route.add_nexthop(nh, None, admin_distance, if_name, tag)\n\n static_routes_list.append(route)\n frr_cfg[router].routing_pb.static_route = static_routes_list\n\n interfaces_cfg(frr_cfg[router])\n static_rt_cfg(frr_cfg[router])\n frr_cfg[router].print_common_config_to_file(topo)\n # Load configuration to router\n load_config_to_router(tgen, CWD, router)\n\n except Exception as e:\n errormsg = traceback.format_exc()\n logger.error(errormsg)\n return errormsg\n\n return True", "def attach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass", "def create_nat_gw(dmz_id) :\n\t\n\text_ip = client.allocate_address(\n \t#Domain='vpc'|'standard',\n\t #Address='string',\n \t#DryRun=True|False\n\t )\n\text_ip = client.describe_addresses(\n\t\tFilters=[\n \t{\n \t'Name': 'public-ip',\n 'Values': [ext_ip['PublicIp']]\n \t}\n ]\n \t\t)['Addresses'][0] # good part\n\n\tnat_gw = client.create_nat_gateway(\n \tAllocationId=ext_ip['AllocationId'],\n\t SubnetId=dmz_id\n \t)['NatGateway']\n\t\n\treturn ext_ip, nat_gw", "def CreateVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def _program_dcnm_static_route(self, tenant_id, tenant_name):\n in_ip_dict = self.get_in_ip_addr(tenant_id)\n in_gw = in_ip_dict.get('gateway')\n in_ip = in_ip_dict.get('subnet')\n if in_gw is None:\n LOG.error(_LE(\"No FW service GW present\"))\n return False\n out_ip_dict = self.get_out_ip_addr(tenant_id)\n out_ip = out_ip_dict.get('subnet')\n\n # Program DCNM to update profile's static IP address on OUT part\n excl_list = []\n excl_list.append(in_ip)\n excl_list.append(out_ip)\n subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list,\n excl_part=True)\n # This count is for telling DCNM to insert the static route in a\n # particular position. 
Total networks created - exclusive list as\n # above - the network that just got created.\n srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)\n ret = self.dcnm_obj.update_partition_static_route(\n tenant_name, fw_const.SERV_PART_NAME, subnet_lst,\n vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,\n service_node_ip=srvc_node_ip)\n if not ret:\n LOG.error(_LE(\"Unable to update DCNM ext profile with static \"\n \"route\"))\n return False\n return True", "def AddRouterStaticIdempotent(self, dst, device, gateway, comment=''):\n dst = str(dst)\n device = str(device)\n gateway = str(gateway)\n\n return_code = self.AddRouterStatic(dst, device, gateway, comment)\n if return_code != 200:\n # creation failed, check to see if the object already exists\n objects = [['dst', dst], ['device', device], ['gateway', gateway]]\n if self.Exists('cmdb/router/static/', objects):\n return_code = 200\n return return_code", "def test_01_vpc_remote_access_vpn(self):\n # 1) Create VPC\n vpc = VPC.create(\n api_client=self.apiclient,\n services=self.services[\"vpc\"],\n networkDomain=\"vpc.vpn\",\n vpcofferingid=self.vpc_offering.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.domain.id\n )\n\n self.assertIsNotNone(vpc, \"VPC creation failed\")\n self.logger.debug(\"VPC %s created\" % (vpc.id))\n\n self.cleanup.append(vpc)\n\n # 2) Create network in VPC\n ntwk = Network.create(\n api_client=self.apiclient,\n services=self.services[\"network_1\"],\n accountid=self.account.name,\n domainid=self.domain.id,\n networkofferingid=self.network_offering.id,\n zoneid=self.zone.id,\n vpcid=vpc.id\n )\n\n self.assertIsNotNone(ntwk, \"Network failed to create\")\n self.logger.debug(\"Network %s created in VPC %s\" % (ntwk.id, vpc.id))\n\n self.cleanup.append(ntwk)\n\n # 3) Deploy a vm\n vm = VirtualMachine.create(self.apiclient, services=self.services[\"virtual_machine\"],\n templateid=self.template.id,\n zoneid=self.zone.id,\n accountid=self.account.name,\n domainid=self.domain.id,\n serviceofferingid=self.virtual_machine_offering.id,\n networkids=ntwk.id,\n hypervisor=self.hypervisor\n )\n self.assertIsNotNone(vm, \"VM failed to deploy\")\n self.assertEquals(vm.state, 'Running', \"VM is not running\")\n self.debug(\"VM %s deployed in VPC %s\" % (vm.id, vpc.id))\n\n self.logger.debug(\"Deployed virtual machine: OK\")\n self.cleanup.append(vm)\n\n # 4) Enable VPN for VPC\n src_nat_list = PublicIPAddress.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n listall=True,\n issourcenat=True,\n vpcid=vpc.id\n )\n ip = src_nat_list[0]\n\n self.logger.debug(\"Acquired public ip address: OK\")\n\n vpn = Vpn.create(self.apiclient,\n publicipid=ip.id,\n account=self.account.name,\n domainid=self.account.domainid,\n iprange=self.services[\"vpn\"][\"iprange\"],\n fordisplay=self.services[\"vpn\"][\"fordisplay\"]\n )\n\n self.assertIsNotNone(vpn, \"Failed to create Remote Access VPN\")\n self.logger.debug(\"Created Remote Access VPN: OK\")\n\n vpn_user = None\n # 5) Add VPN user for VPC\n vpn_user = VpnUser.create(self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n username=self.services[\"vpn\"][\"vpn_user\"],\n password=self.services[\"vpn\"][\"vpn_pass\"]\n )\n\n self.assertIsNotNone(vpn_user, \"Failed to create Remote Access VPN User\")\n self.logger.debug(\"Created VPN User: OK\")\n\n # TODO: Add an actual remote vpn connection test from a remote vpc\n\n # 9) Disable VPN for VPC\n vpn.delete(self.apiclient)\n\n 
self.logger.debug(\"Deleted the Remote Access VPN: OK\")", "async def createvirtualrouter(self, id: (str, None) = None,\n **kwargs: {\"?routes\": [tuple_((cidr_nonstrict_type, ip_address_type))]}):\n if not id:\n id = str(uuid1())\n\n router = {\"id\":id}\n router.update(kwargs)\n\n return await self.createvirtualrouters([router])", "def create_nat_gateway(\n subnet_id=None,\n subnet_name=None,\n allocation_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError(\n \"Only one of subnet_name or subnet_id may be provided.\"\n )\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\",\n subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not subnet_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"Subnet {} does not exist.\".format(subnet_name)\n },\n }\n else:\n if not _get_resource(\n \"subnet\",\n resource_id=subnet_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n ):\n return {\n \"created\": False,\n \"error\": {\"message\": \"Subnet {} does not exist.\".format(subnet_id)},\n }\n\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if not allocation_id:\n address = conn3.allocate_address(Domain=\"vpc\")\n allocation_id = address.get(\"AllocationId\")\n\n # Have to go to boto3 to create NAT gateway\n r = conn3.create_nat_gateway(SubnetId=subnet_id, AllocationId=allocation_id)\n return {\"created\": True, \"id\": r.get(\"NatGateway\", {}).get(\"NatGatewayId\")}\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself. For more information about virtual private gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.
def create_vpn_gateway(DryRun=None, Type=None, AvailabilityZone=None): pass
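A minimal sketch, assuming the boto3 EC2 client; the VPC ID in the follow-up attachment is a hypothetical placeholder:

import boto3

ec2 = boto3.client("ec2")

# The gateway can be created before the VPC it will serve exists.
response = ec2.create_vpn_gateway(Type="ipsec.1")
vgw_id = response["VpnGateway"]["VpnGatewayId"]

# Attaching it to a VPC is a separate, later call.
ec2.attach_vpn_gateway(VpnGatewayId=vgw_id, VpcId="vpc-0123456789abcdef0")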
[ "def CreateVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_route(vserver_name: str, net_gateway_ip: str) -> None:\n \"\"\"The default destination will be set to \"0.0.0.0/0\" for IPv4 gateway addresses\"\"\" \n\n data = {\n 'gateway': net_gateway_ip,\n 'svm': {'name': vserver_name}\n }\n\n route = NetworkRoute(**data)\n\n try:\n route.post()\n print(\"Route %s created successfully\" % route.gateway)\n except NetAppRestError as err:\n print(\"Error: Route was not created: %s\" % err)\n return", "def test_01_vpc_remote_access_vpn(self):\n # 1) Create VPC\n vpc = VPC.create(\n api_client=self.apiclient,\n services=self.services[\"vpc\"],\n networkDomain=\"vpc.vpn\",\n vpcofferingid=self.vpc_offering.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.domain.id\n )\n\n self.assertIsNotNone(vpc, \"VPC creation failed\")\n self.logger.debug(\"VPC %s created\" % (vpc.id))\n\n self.cleanup.append(vpc)\n\n # 2) Create network in VPC\n ntwk = Network.create(\n api_client=self.apiclient,\n services=self.services[\"network_1\"],\n accountid=self.account.name,\n domainid=self.domain.id,\n networkofferingid=self.network_offering.id,\n zoneid=self.zone.id,\n vpcid=vpc.id\n )\n\n self.assertIsNotNone(ntwk, \"Network failed to create\")\n self.logger.debug(\"Network %s created in VPC %s\" % (ntwk.id, vpc.id))\n\n self.cleanup.append(ntwk)\n\n # 3) Deploy a vm\n vm = VirtualMachine.create(self.apiclient, services=self.services[\"virtual_machine\"],\n templateid=self.template.id,\n zoneid=self.zone.id,\n accountid=self.account.name,\n domainid=self.domain.id,\n serviceofferingid=self.virtual_machine_offering.id,\n networkids=ntwk.id,\n hypervisor=self.hypervisor\n )\n self.assertIsNotNone(vm, \"VM failed to deploy\")\n self.assertEquals(vm.state, 'Running', \"VM is not running\")\n self.debug(\"VM %s deployed in VPC %s\" % (vm.id, vpc.id))\n\n self.logger.debug(\"Deployed virtual machine: OK\")\n self.cleanup.append(vm)\n\n # 4) Enable VPN for VPC\n src_nat_list = PublicIPAddress.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n listall=True,\n issourcenat=True,\n vpcid=vpc.id\n )\n ip = src_nat_list[0]\n\n self.logger.debug(\"Acquired public ip address: OK\")\n\n vpn = Vpn.create(self.apiclient,\n publicipid=ip.id,\n account=self.account.name,\n domainid=self.account.domainid,\n iprange=self.services[\"vpn\"][\"iprange\"],\n fordisplay=self.services[\"vpn\"][\"fordisplay\"]\n )\n\n self.assertIsNotNone(vpn, \"Failed to create Remote Access VPN\")\n self.logger.debug(\"Created Remote Access VPN: OK\")\n\n vpn_user = None\n # 5) Add VPN user for VPC\n vpn_user = VpnUser.create(self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n username=self.services[\"vpn\"][\"vpn_user\"],\n password=self.services[\"vpn\"][\"vpn_pass\"]\n )\n\n self.assertIsNotNone(vpn_user, \"Failed to create Remote Access VPN User\")\n self.logger.debug(\"Created VPN User: OK\")\n\n # TODO: Add an actual remote vpn connection test from a remote vpc\n\n # 9) Disable VPN for VPC\n vpn.delete(self.apiclient)\n\n self.logger.debug(\"Deleted the Remote 
Access VPN: OK\")", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass", "def __create_virtual_network(self):\n logger.info(\"Creating a virtual network '{}' and a linux bridge '{}'\"\n .format(self.__CONFIG_CONTEXT['virtual_network_name'],\n self.__CONFIG_CONTEXT['linux_bridge_name']))\n subprocess.check_call(['virsh', 'net-define',\n self.__VIRTUAL_NETWORK_CONFIG_FILE])\n subprocess.check_call(['virsh', 'net-start',\n self.__CONFIG_CONTEXT['virtual_network_name']])\n subprocess.check_call(['virsh', 'net-autostart',\n self.__CONFIG_CONTEXT['virtual_network_name']])", "def create_vpn_connection(DryRun=None, Type=None, CustomerGatewayId=None, VpnGatewayId=None, Options=None):\n pass", "def create(cls, api_client, publicipid, account=None, domainid=None,\n projectid=None, networkid=None, vpcid=None, openfirewall=None, iprange=None, fordisplay=False):\n cmd = {'publicipid': publicipid}\n if account:\n cmd['account'] = account\n if domainid:\n cmd['domainid'] = domainid\n if projectid:\n cmd['projectid'] = projectid\n if networkid:\n cmd['networkid'] = networkid\n if vpcid:\n cmd['vpcid'] = vpcid\n if iprange:\n cmd['iprange'] = iprange\n if openfirewall:\n cmd['openfirewall'] = openfirewall\n\n cmd['fordisplay'] = fordisplay\n cmd['fetch_result'] = True\n return Vpn(api_client.createRemoteAccessVpn(**cmd).get('remoteaccessvpn'))", "def create_nat_gw(dmz_id) :\n\t\n\text_ip = client.allocate_address(\n \t#Domain='vpc'|'standard',\n\t #Address='string',\n \t#DryRun=True|False\n\t )\n\text_ip = client.describe_addresses(\n\t\tFilters=[\n \t{\n \t'Name': 'public-ip',\n 'Values': [ext_ip['PublicIp']]\n \t}\n ]\n \t\t)['Addresses'][0] # good part\n\n\tnat_gw = client.create_nat_gateway(\n \tAllocationId=ext_ip['AllocationId'],\n\t SubnetId=dmz_id\n \t)['NatGateway']\n\t\n\treturn ext_ip, nat_gw", "def create_vpc_endpoint(DryRun=None, VpcId=None, ServiceName=None, PolicyDocument=None, RouteTableIds=None, ClientToken=None):\n pass", "def create_VPN(self, public_ip):\n\n self.debug(\"Creating VPN with public IP: %s\" % public_ip.ipaddress.id)\n try:\n # Assign VPN to Public IP\n vpn = Vpn.create(self.apiclient,\n self.public_ip.ipaddress.id,\n account=self.account.name,\n domainid=self.account.domainid)\n\n self.debug(\"Verifying the remote VPN access\")\n vpns = Vpn.list(self.apiclient,\n publicipid=public_ip.ipaddress.id,\n listall=True)\n self.assertEqual(\n isinstance(vpns, list),\n True,\n \"List VPNs shall return a valid response\"\n )\n return vpn\n except Exception as e:\n self.fail(\"Failed to create remote VPN access: %s\" % e)", "def create_vpn_gateway(self,\n vpn_gateway_prototype: 'VPNGatewayPrototype',\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_prototype is None:\n raise ValueError('vpn_gateway_prototype must be provided')\n if isinstance(vpn_gateway_prototype, VPNGatewayPrototype):\n vpn_gateway_prototype = convert_model(vpn_gateway_prototype)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_vpn_gateway')\n headers.update(sdk_headers)\n\n params = 
{\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(vpn_gateway_prototype)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/vpn_gateways'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def _create_virtual_network(self) -> dict:\n name = ''\n while not name:\n name = input(self.format('Virtual network name: '))\n name = name.strip()\n\n print('Creating virtual network...')\n\n try:\n virtual_network = self._run_az([\n 'network', 'vnet', 'create',\n '--name', name,\n '--location', self._selected_resource_group['location'],\n '--resource-group', self._selected_resource_group['name']\n ])\n self._az_virtual_networks.append(virtual_network)\n\n except APIError as e:\n print(self.format_error(str(e)))\n return self._create_virtual_network()\n\n return virtual_network", "def create_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def add_vpc(template, key_pair_name, nat_ip,\n nat_image_id=DEFAULT_NAT_IMAGE_ID,\n nat_instance_type=DEFAULT_NAT_INSTANCE_TYPE):\n vpc_id = \"VPC\"\n vpc = template.add_resource(ec2.VPC(\n vpc_id,\n CidrBlock=\"10.0.0.0/16\",\n Tags=Tags(\n Name=name_tag(vpc_id)\n ),\n ))\n public_subnet = _add_public_subnet(template, vpc)\n nat = _add_nat(template, vpc, public_subnet, nat_image_id, nat_instance_type,\n key_pair_name, nat_ip)\n _add_private_subnet(template, vpc, nat)\n return vpc", "def create_internet_gateway(\n internet_gateway_name=None,\n vpc_id=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"internet_gateway\",\n name=internet_gateway_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.attach_internet_gateway(r[\"id\"], vpc_id)\n log.info(\n \"Attached internet gateway %s to VPC %s\", r[\"id\"], vpc_name or vpc_id\n )\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_network_gateway(self, body=None):\n return self._post(self.network_gateways_path, body=body)", "def create_nat_gateway(SubnetId=None, AllocationId=None, ClientToken=None):\n pass", "def attach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified customer gateway. You must delete the VPN connection before you can delete the customer gateway.
def delete_customer_gateway(DryRun=None, CustomerGatewayId=None): pass
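A minimal sketch of the required ordering, assuming the boto3 EC2 client; both IDs are hypothetical placeholders:

import boto3

ec2 = boto3.client("ec2")

# The VPN connection that uses the customer gateway must be deleted first.
ec2.delete_vpn_connection(VpnConnectionId="vpn-0123456789abcdef0")
ec2.delete_customer_gateway(CustomerGatewayId="cgw-0123456789abcdef0")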
[ "def delete(self, api_client):\n\n cmd = {'id': self.id}\n api_client.deleteVpnCustomerGateway(**cmd)", "def delete_customer_gateway(\n customer_gateway_id=None,\n customer_gateway_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _delete_resource(\n resource=\"customer_gateway\",\n name=customer_gateway_name,\n resource_id=customer_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def DeleteCustomerGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteCustomerGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteCustomerGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_network_gateway(self, gateway_id):\n return self._delete(self.network_gateway_path % gateway_id)", "def delete_nat_gateway(NatGatewayId=None):\n pass", "def delete_customer_gateways():\n client = boto3.client('ec2')\n print('Deleting Customer Gateways')\n cust_resp = client.describe_customer_gateways()\n while True:\n for gateway in cust_resp['CustomerGateways']:\n gw_id = gateway['CustomerGatewayId']\n client.delete_customer_gateway(\n CustomerGatewayId=gw_id\n )\n time.sleep(0.25)\n if 'NextMarker' in cust_resp:\n cust_resp = client.describe_customer_gateways(\n Marker=cust_resp['NextMarker'],\n )\n else:\n break\n while client.describe_customer_gateways()['CustomerGateways']:\n all_deleted = True\n for gateway in client.describe_customer_gateways()['CustomerGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('Customer Gateways deleted')", "def DeleteVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def delete_vpn_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be 
provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpn_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_gateway(self, process_id, gateway_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_gateway_with_http_info(process_id, gateway_id, **kwargs)\n else:\n (data) = self.delete_gateway_with_http_info(process_id, gateway_id, **kwargs)\n return data", "def delete_nat_gateway(\n nat_gateway_id,\n release_eips=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n wait_for_delete=False,\n wait_for_delete_retries=5,\n):\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n conn3.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n # wait for deleting nat gateway to finish prior to attempt to release elastic ips\n if wait_for_delete:\n for retry in range(wait_for_delete_retries, 0, -1):\n if gwinfo and gwinfo[\"State\"] not in [\"deleted\", \"failed\"]:\n time.sleep(\n (2 ** (wait_for_delete_retries - retry))\n + (random.randint(0, 1000) / 1000.0)\n )\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n continue\n break\n\n if release_eips and gwinfo:\n for addr in gwinfo.get(\"NatGatewayAddresses\"):\n conn3.release_address(AllocationId=addr.get(\"AllocationId\"))\n return {\"deleted\": True}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def delGw(interface):\n logging.debugv(\"functions/linux.py->delGw(interface)\", [interface])\n logging.info(\"removing default gateway of device \" + interface)\n cmd = [\"ip\", \"route\", \"del\", \"default\", \"dev\", interface]\n runWrapper(cmd)", "def delete(self, api_client):\n\n cmd = {'publicipid': self.publicipid}\n api_client.deleteRemoteAccessVpn(**cmd)", "def delete_internet_gateway(\n internet_gateway_id=None,\n internet_gateway_name=None,\n detach=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if internet_gateway_name:\n internet_gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n 
region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not internet_gateway_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if detach:\n igw = _get_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if not igw:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_id\n )\n },\n }\n\n if igw.attachments:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.detach_internet_gateway(\n internet_gateway_id, igw.attachments[0].vpc_id\n )\n return _delete_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def delete_vpn_gateway_connection(self,\n vpn_gateway_id: str,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_id is None:\n raise ValueError('vpn_gateway_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpn_gateway_connection')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['vpn_gateway_id', 'id']\n path_param_values = self.encode_path_vars(vpn_gateway_id, id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{vpn_gateway_id}/connections/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_public_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_public_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/public_gateways/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified set of DHCP options. You must disassociate the set of DHCP options before you can delete it. You can disassociate the set of DHCP options by associating either a new set of options or the default set of options with the VPC.
def delete_dhcp_options(DryRun=None, DhcpOptionsId=None): pass
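A minimal sketch, assuming the boto3 EC2 client; the VPC and options-set IDs are hypothetical placeholders. Passing the literal string "default" re-associates the VPC with the default options set, which disassociates the custom set so it can be deleted:

import boto3

ec2 = boto3.client("ec2")

# Swap the VPC back to the default options set, then delete the old set.
ec2.associate_dhcp_options(DhcpOptionsId="default", VpcId="vpc-0123456789abcdef0")
ec2.delete_dhcp_options(DhcpOptionsId="dopt-0123456789abcdef0")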
[ "def delete_dhcp_options(\n dhcp_options_id=None,\n dhcp_options_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _delete_resource(\n resource=\"dhcp_options\",\n name=dhcp_options_name,\n resource_id=dhcp_options_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def clean_dhcp(cls, instances, **kwargs):\n for instance in instances:\n _get_non_mgmt_ethernets(instance).values_list('mac', flat=True)\n for dhcp_entry in DHCPEntry.objects.filter(\n ethernet__base_object=instance, dhcp_expose=True\n ):\n logger.warning('Removing %s DHCP entry', dhcp_entry)\n dhcp_entry.delete()", "def create_dhcp_options(DryRun=None, DhcpConfigurations=None):\n pass", "def describe_dhcp_options(DryRun=None, DhcpOptionsIds=None, Filters=None):\n pass", "def remove_from_dhcp_entries(cls, instances, ipaddress, **kwargs):\n ip = IPAddress.objects.get(pk=ipaddress)\n entry = '{} ({}) / {}'.format(\n ip.address, ip.hostname, ip.ethernet.mac if ip.ethernet else None\n )\n logger.warning('Removing entry from DHCP: %s', entry)\n kwargs['history_kwargs'][instances[0].pk]['DHCP entry'] = entry\n ip.dhcp_expose = False\n ip.save()", "def DeleteAddressSet(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(\n hostname,\n refresh_key,\n authorization_host,\n org_id,\n sddc_id,\n type,\n dhcp_profile_id,\n verify_ssl=True,\n cert=None,\n):\n\n log.info(\"Deleting DHCP profile %s for SDDC %s\", dhcp_profile_id, sddc_id)\n profile_type = vmc_constants.DHCP_CONFIGS.format(type)\n api_url_base = vmc_request.set_base_url(hostname)\n api_url = (\n \"{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/\"\n \"policy/api/v1/infra/{profile_type}/{profile_id}\"\n )\n api_url = api_url.format(\n base_url=api_url_base,\n org_id=org_id,\n sddc_id=sddc_id,\n profile_type=profile_type,\n profile_id=dhcp_profile_id,\n )\n\n return vmc_request.call_api(\n method=vmc_constants.DELETE_REQUEST_METHOD,\n url=api_url,\n refresh_key=refresh_key,\n authorization_host=authorization_host,\n description=\"vmc_dhcp_profiles.delete\",\n responsebody_applicable=False,\n verify_ssl=verify_ssl,\n cert=cert,\n )", "def delete_vpc(DryRun=None, VpcId=None):\n pass", "def associate_dhcp_options(DryRun=None, DhcpOptionsId=None, VpcId=None):\n pass", "def destroy_set(target_set):\n _ipset('destroy', target_set)", "def create_dhcp_options(\n domain_name=None,\n domain_name_servers=None,\n ntp_servers=None,\n netbios_name_servers=None,\n netbios_node_type=None,\n dhcp_options_name=None,\n tags=None,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"dhcp_options\",\n name=dhcp_options_name,\n domain_name=domain_name,\n domain_name_servers=domain_name_servers,\n ntp_servers=ntp_servers,\n netbios_name_servers=netbios_name_servers,\n netbios_node_type=netbios_node_type,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.associate_dhcp_options(r[\"id\"], vpc_id)\n log.info(\"Associated options %s to VPC %s\", r[\"id\"], 
vpc_name or vpc_id)\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def deletevpc(vpc_choices):\n progressbar(\"Deleting VPC\")\n vpcname=vpc_choices['vpc'][0]\n try:\n ec2.delete_vpc(VpcId=str(vpcname))\n print(\"\\n \\n vpc \" +vpcname +\" has been deleted \\n \\n\")\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while deleting vpc: \\n\\n\\n\")\n print(e)", "def killAllDhcp():\n logging.debugv(\"functions/linux.py->killAllDhcp()\", [])\n cmd=[locations.KILLALL, '-q', locations.DHCLIENT]\n runWrapper(cmd, ignoreError=True)\n return True", "def rm_ip_set(target_set, del_ip):\n _ipset('del', target_set, del_ip)", "def delDHCPEntry(net, xml):\n logging.debug(\"Delete the dhcp entry %s.\" % xml)\n return net.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_DELETE, libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST, -1 ,xml,0)", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def delete_zones(self, zone_names, activate, active_zone_set=None):\n active_zoneset_name = None\n zone_list = []\n if not active_zone_set:\n active_zone_set = self.get_active_zone_set()\n active_zoneset_name = active_zone_set[\n zone_constant.ACTIVE_ZONE_CONFIG]\n zone_list = active_zone_set[zone_constant.CFG_ZONES]\n zones = self.patrn.split(''.join(zone_names))\n cmd = None\n try:\n if len(zones) == len(zone_list):\n self.deactivate_zoneset()\n cmd = 'cfgdelete \"%(active_zoneset_name)s\"' \\\n % {'active_zoneset_name': active_zoneset_name}\n # Active zoneset is being deleted, hence reset activate flag\n activate = False\n else:\n cmd = 'cfgremove \"%(active_zoneset_name)s\", \"%(zone_names)s\"' \\\n % {'active_zoneset_name': active_zoneset_name,\n 'zone_names': zone_names\n }\n LOG.debug(\"Delete zones: Config cmd to run: %s\", cmd)\n self.apply_zone_change(cmd.split())\n for zone in zones:\n self._zone_delete(zone)\n if activate:\n self.activate_zoneset(active_zoneset_name)\n else:\n self._cfg_save()\n except Exception as e:\n msg = _(\"Deleting zones failed: (command=%(cmd)s error=%(err)s).\"\n ) % {'cmd': cmd, 'err': six.text_type(e)}\n LOG.error(msg)\n self._cfg_trans_abort()\n raise b_exception.BrocadeZoningCliException(reason=msg)", "def DhcpOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpoptions_3rvy29su3rhy2svzghjce9wdglvbnm import DhcpOptions\n\t\treturn DhcpOptions(self)", "def delete_vpc_endpoint_resources():\n print('Deleting VPC endpoints')\n ec2 = boto3.client('ec2')\n endpoint_ids = []\n 
for endpoint in ec2.describe_vpc_endpoints()['VpcEndpoints']:\n print('Deleting VPC Endpoint - {}'.format(endpoint['ServiceName']))\n endpoint_ids.append(endpoint['VpcEndpointId'])\n\n if endpoint_ids:\n ec2.delete_vpc_endpoints(\n VpcEndpointIds=endpoint_ids\n )\n\n print('Waiting for VPC endpoints to get deleted')\n while ec2.describe_vpc_endpoints()['VpcEndpoints']:\n time.sleep(5)\n\n print('VPC endpoints deleted')\n\n # VPC endpoints connections\n print('Deleting VPC endpoint connections')\n service_ids = []\n for connection in ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n service_id = connection['ServiceId']\n state = connection['VpcEndpointState']\n\n if state in ['PendingAcceptance', 'Pending', 'Available', 'Rejected', 'Failed', 'Expired']:\n print('Deleting VPC Endpoint Service - {}'.format(service_id))\n service_ids.append(service_id)\n\n ec2.reject_vpc_endpoint_connections(\n ServiceId=service_id,\n VpcEndpointIds=[\n connection['VpcEndpointId'],\n ]\n )\n\n if service_ids:\n ec2.delete_vpc_endpoint_service_configurations(\n ServiceIds=service_ids\n )\n\n print('Waiting for VPC endpoint services to be destroyed')\n while ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n time.sleep(5)\n\n print('VPC endpoint connections deleted')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes an egress-only Internet gateway.
def delete_egress_only_internet_gateway(DryRun=None, EgressOnlyInternetGatewayId=None): pass
[ "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def delete_internet_gateways():\n print('Deleting Internet Gateways')\n client = boto3.resource('ec2')\n for igw in client.internet_gateways.all():\n for attachment in igw.attachments:\n if 'State' in attachment and attachment['State'] == 'available':\n vpc_id = attachment['VpcId']\n print('Detaching internet gateway {} from vpc {}'.format(igw.id, vpc_id))\n igw.detach_from_vpc(\n VpcId=vpc_id\n )\n print('Deleting Internet Gateway {}'.format(igw.id))\n igw.delete()\n\n while [igw for igw in client.internet_gateways.all()]:\n time.sleep(5)\n print('Internet Gateways deleted')", "def delete_nat_gateway(NatGatewayId=None):\n pass", "def delete_internet_gateway(\n internet_gateway_id=None,\n internet_gateway_name=None,\n detach=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if internet_gateway_name:\n internet_gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not internet_gateway_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if detach:\n igw = _get_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if not igw:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_id\n )\n },\n }\n\n if igw.attachments:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.detach_internet_gateway(\n internet_gateway_id, igw.attachments[0].vpc_id\n )\n return _delete_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def create_egress_only_internet_gateway(DryRun=None, VpcId=None, ClientToken=None):\n pass", "def delete(self, api_client):\n cmd = {'id': self.id}\n api_client.deleteEgressFirewallRule(**cmd)\n return", "def delete_nat_gateway(\n nat_gateway_id,\n release_eips=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n wait_for_delete=False,\n wait_for_delete_retries=5,\n):\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n conn3.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n # wait for deleting nat gateway to finish prior to attempt to release elastic ips\n if wait_for_delete:\n for retry in range(wait_for_delete_retries, 0, -1):\n if gwinfo and gwinfo[\"State\"] not in [\"deleted\", \"failed\"]:\n time.sleep(\n (2 ** 
(wait_for_delete_retries - retry))\n + (random.randint(0, 1000) / 1000.0)\n )\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n continue\n break\n\n if release_eips and gwinfo:\n for addr in gwinfo.get(\"NatGatewayAddresses\"):\n conn3.release_address(AllocationId=addr.get(\"AllocationId\"))\n return {\"deleted\": True}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_network_gateway(self, gateway_id):\n return self._delete(self.network_gateway_path % gateway_id)", "def delGw(interface):\n logging.debugv(\"functions/linux.py->delGw(interface)\", [interface])\n logging.info(\"removing default gateway of device \" + interface)\n cmd = [\"ip\", \"route\", \"del\", \"default\", \"dev\", interface]\n runWrapper(cmd)", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def delete(self):\r\n if self.pool is not None:\r\n return self.pool.delete_floating_ip(self)\r\n elif self.driver is not None:\r\n return self.driver.ex_delete_floating_ip(self)", "def test_remove_gateway(self):\n pass", "def delete_agent_gateway_port(self, context, **kwargs):\n network_id = kwargs.get('network_id')\n host = kwargs.get('host')\n admin_ctx = neutron_context.get_admin_context()\n self.l3plugin.delete_floatingip_agent_gateway_port(\n admin_ctx, host, network_id)", "def delete_customer_gateway(DryRun=None, CustomerGatewayId=None):\n pass", "def delete_network(request):\n cloud_id = request.matchdict['cloud']\n network_id = request.matchdict['network']\n\n auth_context = auth_context_from_request(request)\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(id=cloud_id, owner=auth_context.owner)\n except Cloud.DoesNotExist:\n raise CloudNotFoundError\n try:\n network = Network.objects.get(id=network_id, cloud=cloud)\n except me.DoesNotExist:\n raise NetworkNotFoundError\n\n methods.delete_network(auth_context.owner, network)\n\n return OK", "def describe_egress_only_internet_gateways(DryRun=None, EgressOnlyInternetGatewayIds=None, MaxResults=None, NextToken=None):\n pass", "def delete_elastic_ips():\n client = boto3.client('ec2')\n print('Deleting Elastic IPs')\n for eip in client.describe_addresses()['Addresses']:\n allocation_id = eip['AllocationId']\n print('Releasing EIP {}'.format(allocation_id))\n client.release_address(\n AllocationId=allocation_id\n )\n\n print('Elastic IPs deleted')", "def ex_delete_floating_ip(self, ip):\r\n resp = self.connection.request('/os-floating-ips/%s' % ip.id,\r\n method='DELETE')\r\n return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n return api_client.deleteNetworkServiceProvider(**cmd)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes one or more flow logs.
def delete_flow_logs(FlowLogIds=None): pass
[ "def do_delete_logs(self, *args):\n print args\n FileSystemCleaner().delete_logs( )", "def delete_steps(self, logs_id):\n self._get('/walking_logs/%s?_method=delete' % logs_id)", "def delete_regular_logs(_args):\n delete_logs(_args, ('affine.log', 'freeform.log', 'segment.log'))", "def clear_logs():\n db = get_db()\n db.execute('delete from logs')\n db.commit()\n\n return redirect(url_for('checklog'))", "def DeleteFlowLog(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteFlowLog\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteFlowLogResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_logs(_args, log_names):\n for log_name in log_names:\n log_path = os.path.join(_args.output_folder, '{}_{}'.format(_args.sample_name, log_name))\n os.remove(log_path)", "def delete_log(self, log_id):\n uri = '/log/logs/%s' % log_id\n return self.delete_resource(uri)", "def delete_log_files(self, logger):\n\n now = time.time()\n for _x in os.listdir(self.log_dir):\n if self.log_name in _x:\n if os.stat(os.path.join(self.log_dir, _x)).st_mtime < now - self.log_days * 86400:\n logger.debug('Removing log file : %s', _x)\n os.remove(self.log_dir + '/' + _x)\n else:\n logger.debug('NOT removing log file : %s', _x)", "def delete_logger(client, args):\n # [START delete]\n logger = client.logger(args.logger_name)\n print('Deleting all logging entries for {}'.format(logger.name))\n logger.delete()\n # [END delete]", "def delete_logs(train_log_path, test_log_path, is_test):\n command = 'rm ' + train_log_path\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n\n if is_test == 0:\n command = 'rm ' + test_log_path\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def delete_error_logs(_args):\n delete_logs(_args, ('affine.err', 'freeform.err', 'segment.err'))", "def delete_log(self, log: WellLog) -> None:\n if type(log) is not WellLog:\n raise TypeError(\"log {} is not of type WellLog!\".format(str(log)))\n\n try:\n self.logs.remove(log)\n except ValueError as e:\n raise ValueError(str(e) + \"\\nWellLog with ID \" + str(log.id) + \" not found in list!\")", "def delete_flow_log_collector(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_flow_log_collector')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/flow_log_collectors/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def clear_logs():\n with cd('%(path)s' % env):\n run('rm ./logs/*.log')", "def delete_log(self, context, log_obj):\n LOG.debug(\"Delete_log %s\", log_obj)\n\n # If we are removing the last log_obj, let's clear log from all acls.\n # This is a simple way of ensuring that no acl logs are left behind!\n 
log_objs = self._get_logs(context)\n if not log_objs or (\n len(log_objs) == 1 and log_objs[0].id == log_obj.id):\n pgs = self._pgs_all()\n with self.ovn_nb.transaction(check_error=True) as ovn_txn:\n self._remove_acls_log(pgs, ovn_txn)\n ovn_txn.add(self.ovn_nb.meter_del(self.meter_name,\n if_exists=True))\n LOG.info(\"All ACL logs cleared after deletion of log_obj %s\",\n log_obj.id)\n return\n\n # Remove log_obj and revisit all remaining ones, since the acls that\n # were serving the removed log_obj may be usable by the remaining\n # log_objs.\n pgs = self._pgs_from_log_obj(context, log_obj)\n with self.ovn_nb.transaction(check_error=True) as ovn_txn:\n self._remove_acls_log(pgs, ovn_txn, utils.ovn_name(log_obj.id))\n\n # TODO(flaviof): We needed to break this second part into a separate\n # transaction because logic that determines the value of the 'freed up'\n # acl rows will not see the modified rows unless it was inside an an\n # idl command.\n with self.ovn_nb.transaction(check_error=True) as ovn_txn:\n self._update_log_objs(context, ovn_txn, [lo for lo in log_objs\n if lo.id != log_obj.id])", "def delete_log(df_del):\n skip = [\"reserved\", \"totals_log\", \"delete_all_log\"]\n tables = [table for table in md.TABLE_NAMES if table not in skip]\n for table in tables:\n for i,row in df_del.iterrows():\n filing_del = row[\"filing_num\"]\n cursor.execute(sql.SQL(\"DELETE FROM {} WHERE filing_num=%s;\").format(sql.Identifier(table)),[str(int(filing_del))])\n conn.commit()\n logger.info(f\"Removed delete_all_log entries for {table}\")\n return", "async def delete_log(bot, id, guild) -> None:\n await bot.mod.delete_one({\"guild_id\": guild.id, \"case_id\": id})\n await update_log_caseids(bot, guild)", "def _stash_log(self) -> None:\n self.log.info(f\"Move source log for {self.__api.upload_id} to\"\n f\" '{self.__api.storage.deleted_logs_path}'.\")\n self.log.info(f\"Delete workspace '{self.__api.upload_id}'.\")\n try:\n self.__api.storage.stash_deleted_log(self,\n self.log.file)\n except Exception as e:\n self.log.info(f'Saving source.log failed: {e}')", "def del_flows(self, datapath):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n msg = parser.OFPFlowMod(datapath=datapath, table_id=ofproto.OFPTT_ALL,\n command=ofproto.OFPFC_DELETE, out_port=ofproto.OFPP_ANY,\n out_group=ofproto.OFPG_ANY)\n datapath.send_msg(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.
def delete_internet_gateway(DryRun=None, InternetGatewayId=None): pass
[ "def delete_internet_gateway(\n internet_gateway_id=None,\n internet_gateway_name=None,\n detach=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if internet_gateway_name:\n internet_gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not internet_gateway_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if detach:\n igw = _get_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if not igw:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_id\n )\n },\n }\n\n if igw.attachments:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.detach_internet_gateway(\n internet_gateway_id, igw.attachments[0].vpc_id\n )\n return _delete_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_network_gateway(self, gateway_id):\n return self._delete(self.network_gateway_path % gateway_id)", "def delete_internet_gateways():\n print('Deleting Internet Gateways')\n client = boto3.resource('ec2')\n for igw in client.internet_gateways.all():\n for attachment in igw.attachments:\n if 'State' in attachment and attachment['State'] == 'available':\n vpc_id = attachment['VpcId']\n print('Detaching internet gateway {} from vpc {}'.format(igw.id, vpc_id))\n igw.detach_from_vpc(\n VpcId=vpc_id\n )\n print('Deleting Internet Gateway {}'.format(igw.id))\n igw.delete()\n\n while [igw for igw in client.internet_gateways.all()]:\n time.sleep(5)\n print('Internet Gateways deleted')", "def delete_nat_gateway(NatGatewayId=None):\n pass", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def delGw(interface):\n logging.debugv(\"functions/linux.py->delGw(interface)\", [interface])\n logging.info(\"removing default gateway of device \" + interface)\n cmd = [\"ip\", \"route\", \"del\", \"default\", \"dev\", interface]\n runWrapper(cmd)", "def delete_nat_gateway(\n nat_gateway_id,\n release_eips=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n wait_for_delete=False,\n wait_for_delete_retries=5,\n):\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n conn3.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n # wait for deleting nat gateway to finish prior to attempt to release elastic ips\n if wait_for_delete:\n for retry in range(wait_for_delete_retries, 0, -1):\n if gwinfo and gwinfo[\"State\"] not in [\"deleted\", \"failed\"]:\n time.sleep(\n (2 ** (wait_for_delete_retries - retry))\n + (random.randint(0, 1000) / 1000.0)\n )\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n continue\n break\n\n if release_eips and gwinfo:\n for addr in gwinfo.get(\"NatGatewayAddresses\"):\n conn3.release_address(AllocationId=addr.get(\"AllocationId\"))\n return {\"deleted\": True}\n except BotoServerError as e:\n return {\"deleted\": False, 
\"error\": __utils__[\"boto.get_error\"](e)}", "def delete_customer_gateway(DryRun=None, CustomerGatewayId=None):\n pass", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n api_client.deleteVpnCustomerGateway(**cmd)", "def delete_customer_gateway(\n customer_gateway_id=None,\n customer_gateway_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _delete_resource(\n resource=\"customer_gateway\",\n name=customer_gateway_name,\n resource_id=customer_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def DeleteVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_vpc(DryRun=None, VpcId=None):\n pass", "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def 
disconnect_network_gateway(self, gateway_id, body=None):\n base_uri = self.network_gateway_path % gateway_id\n return self._put(\"%s/disconnect_network\" % base_uri, body=body)", "def delete_agent_gateway_port(self, context, **kwargs):\n network_id = kwargs.get('network_id')\n host = kwargs.get('host')\n admin_ctx = neutron_context.get_admin_context()\n self.l3plugin.delete_floatingip_agent_gateway_port(\n admin_ctx, host, network_id)", "def detach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass", "def delete(self, api_client):\n\n cmd = {'publicipid': self.publicipid}\n api_client.deleteRemoteAccessVpn(**cmd)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its Elastic IP address, but does not release the address from your account. Deleting a NAT gateway does not delete any NAT gateway routes in your route tables.
def delete_nat_gateway(NatGatewayId=None): pass
[ "def delete_network_gateway(self, gateway_id):\n return self._delete(self.network_gateway_path % gateway_id)", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def delete_nat_gateway(\n nat_gateway_id,\n release_eips=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n wait_for_delete=False,\n wait_for_delete_retries=5,\n):\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n conn3.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n # wait for deleting nat gateway to finish prior to attempt to release elastic ips\n if wait_for_delete:\n for retry in range(wait_for_delete_retries, 0, -1):\n if gwinfo and gwinfo[\"State\"] not in [\"deleted\", \"failed\"]:\n time.sleep(\n (2 ** (wait_for_delete_retries - retry))\n + (random.randint(0, 1000) / 1000.0)\n )\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n continue\n break\n\n if release_eips and gwinfo:\n for addr in gwinfo.get(\"NatGatewayAddresses\"):\n conn3.release_address(AllocationId=addr.get(\"AllocationId\"))\n return {\"deleted\": True}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def delete_internet_gateway(\n internet_gateway_id=None,\n internet_gateway_name=None,\n detach=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if internet_gateway_name:\n internet_gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not internet_gateway_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if detach:\n igw = _get_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if not igw:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_id\n )\n },\n }\n\n if igw.attachments:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.detach_internet_gateway(\n internet_gateway_id, igw.attachments[0].vpc_id\n )\n return _delete_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_customer_gateway(\n customer_gateway_id=None,\n customer_gateway_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _delete_resource(\n 
resource=\"customer_gateway\",\n name=customer_gateway_name,\n resource_id=customer_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def disconnect_network_gateway(self, gateway_id, body=None):\n base_uri = self.network_gateway_path % gateway_id\n return self._put(\"%s/disconnect_network\" % base_uri, body=body)", "def delete_gateway(self, process_id, gateway_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_gateway_with_http_info(process_id, gateway_id, **kwargs)\n else:\n (data) = self.delete_gateway_with_http_info(process_id, gateway_id, **kwargs)\n return data", "def DeleteVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_agent_gateway_port(self, context, **kwargs):\n network_id = kwargs.get('network_id')\n host = kwargs.get('host')\n admin_ctx = neutron_context.get_admin_context()\n self.l3plugin.delete_floatingip_agent_gateway_port(\n admin_ctx, host, network_id)", "def delGw(interface):\n logging.debugv(\"functions/linux.py->delGw(interface)\", [interface])\n logging.info(\"removing default gateway of device \" + interface)\n cmd = [\"ip\", \"route\", \"del\", \"default\", \"dev\", interface]\n runWrapper(cmd)", "def _del_nat_rule(ctx, gateway, rule_type, original_ip, translated_ip):\n any_type = 'any'\n\n ctx.logger.info(\"Delete floating ip NAT rule: original_ip '{0}',\"\n \"translated_ip '{1}', rule type '{2}'\"\n .format(original_ip, translated_ip, rule_type))\n\n gateway.del_nat_rule(\n rule_type, original_ip, any_type, translated_ip, any_type, any_type)", "def delete_customer_gateway(DryRun=None, CustomerGatewayId=None):\n pass", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n api_client.deleteVpnCustomerGateway(**cmd)", "def delete_vpn_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpn_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n 
client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def update_network_gateway(self, gateway_id, body=None):\n return self._put(self.network_gateway_path % gateway_id, body=body)", "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def delete_internet_gateways():\n print('Deleting Internet Gateways')\n client = boto3.resource('ec2')\n for igw in client.internet_gateways.all():\n for attachment in igw.attachments:\n if 'State' in attachment and attachment['State'] == 'available':\n vpc_id = attachment['VpcId']\n print('Detaching internet gateway {} from vpc {}'.format(igw.id, vpc_id))\n igw.detach_from_vpc(\n VpcId=vpc_id\n )\n print('Deleting Internet Gateway {}'.format(igw.id))\n igw.delete()\n\n while [igw for igw in client.internet_gateways.all()]:\n time.sleep(5)\n print('Internet Gateways deleted')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified network ACL. You can't delete the ACL if it's associated with any subnets. You can't delete the default network ACL.
def delete_network_acl(DryRun=None, NetworkAclId=None): pass
[ "def delete_network_acl(\n network_acl_id=None,\n network_acl_name=None,\n disassociate=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if disassociate:\n network_acl = _get_resource(\n \"network_acl\",\n name=network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if network_acl and network_acl.associations:\n subnet_id = network_acl.associations[0].subnet_id\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.disassociate_network_acl(subnet_id)\n except BotoServerError:\n pass\n\n return _delete_resource(\n resource=\"network_acl\",\n name=network_acl_name,\n resource_id=network_acl_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n return api_client.deleteNetworkACLList(**cmd)", "def DeleteNetworkAcl(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteNetworkAcl\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteNetworkAclResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Egress=None):\n pass", "def delete_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n egress=None,\n network_acl_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not _exactly_one((network_acl_name, network_acl_id)):\n raise SaltInvocationError(\n \"One (but not both) of network_acl_id or network_acl_name must be provided.\"\n )\n\n for v in (\"rule_number\", \"egress\"):\n if locals()[v] is None:\n raise SaltInvocationError(\"{} is required.\".format(v))\n\n if network_acl_name:\n network_acl_id = _get_resource_id(\n \"network_acl\",\n network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not network_acl_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"Network ACL {} does not exist.\".format(\n network_acl_name or network_acl_id\n )\n },\n }\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n deleted = conn.delete_network_acl_entry(\n network_acl_id, rule_number, egress=egress\n )\n if deleted:\n log.info(\"Network ACL entry was deleted\")\n else:\n log.warning(\"Network ACL was not deleted\")\n return {\"deleted\": deleted}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def deleteacl(self, mailbox, who):\n return self._simple_command('DELETEACL', mailbox, who)", "def delete_network_acl_rule(self, acl, rule):\n try:\n # Check if network ACL and network ACL rule exist\n acl_info = self.get_network_acl(acl)\n if \"errors\" in acl_info:\n return acl_info\n\n rule_info = self.get_network_acl_rule(acl_info[\"id\"], rule)\n if \"errors\" in rule_info:\n return rule_info\n\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules/{}?version={}\"\n \"&generation={}\".format(acl_info[\"id\"], rule_info[\"id\"],\n self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n data = qw(\"iaas\", \"DELETE\", path, headers())\n\n # Return data\n if data[\"response\"].status != 204:\n return data[\"data\"]\n\n # Return status\n return resource_deleted()\n\n except Exception as error:\n print(\"Error deleting network ACL rule {} for 
network\"\n \"ACL {}. {}\".format(rule, acl, error))\n raise", "def delete_acl(group: str, scope: str, profile: str):\n # Remove the existing acl\n acl_query = 'databricks secrets delete-acl'\n acl_query += f' --profile {profile}'\n acl_query += f' --scope {scope}'\n acl_query += f' --principal {group}'\n\n # Run and enforce success\n logging.warning(f'Removing existing acl to {scope} for {group}')\n sp = subprocess.run(acl_query, capture_output=True)\n sp.check_returncode()", "def delete_network_acl_rule(self,\n network_acl_id: str,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if network_acl_id is None:\n raise ValueError('network_acl_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_network_acl_rule')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['network_acl_id', 'id']\n path_param_values = self.encode_path_vars(network_acl_id, id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/network_acls/{network_acl_id}/rules/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_acls(self, acl_list):\n return self.access_list_manager.delete_object_list(acl_list)", "def delete( # pylint: disable=inconsistent-return-statements\n self,\n resource_group_name: str,\n network_security_perimeter_name: str,\n profile_name: str,\n access_rule_name: str,\n **kwargs: Any\n ) -> None:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version: str = kwargs.pop(\n \"api_version\", _params.pop(\"api-version\", self._api_version or \"2021-02-01-preview\")\n )\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n request = build_nsp_access_rules_delete_request(\n resource_group_name=resource_group_name,\n network_security_perimeter_name=network_security_perimeter_name,\n profile_name=profile_name,\n access_rule_name=access_rule_name,\n subscription_id=self._config.subscription_id,\n api_version=api_version,\n template_url=self.delete.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def do_clear_acls(self, args):\n lb = self.findlb(args.loadbalancer, readonly=False)\n lb_acl = lb.accesslist()\n lb_acl.delete()", "def disassociate_network_acl(\n subnet_id=None,\n vpc_id=None,\n subnet_name=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not 
_exactly_one((subnet_name, subnet_id)):\n raise SaltInvocationError(\n \"One (but not both) of subnet_id or subnet_name must be provided.\"\n )\n\n if all((vpc_name, vpc_id)):\n raise SaltInvocationError(\"Only one of vpc_id or vpc_name may be provided.\")\n try:\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\",\n subnet_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not subnet_id:\n return {\n \"disassociated\": False,\n \"error\": {\n \"message\": \"Subnet {} does not exist.\".format(subnet_name)\n },\n }\n\n if vpc_name or vpc_id:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n association_id = conn.disassociate_network_acl(subnet_id, vpc_id=vpc_id)\n return {\"disassociated\": True, \"association_id\": association_id}\n except BotoServerError as e:\n return {\"disassociated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_network(request):\n cloud_id = request.matchdict['cloud']\n network_id = request.matchdict['network']\n\n auth_context = auth_context_from_request(request)\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(id=cloud_id, owner=auth_context.owner)\n except Cloud.DoesNotExist:\n raise CloudNotFoundError\n try:\n network = Network.objects.get(id=network_id, cloud=cloud)\n except me.DoesNotExist:\n raise NetworkNotFoundError\n\n methods.delete_network(auth_context.owner, network)\n\n return OK", "def network_assoc_delete(context, network_id, session=None):\n # If we weren't given a session, then we need to create a new one\n if not session:\n session = nova_db_sa_api.get_session()\n # Create a Transaction around the delete in the Database\n with session.begin():\n query = model_query(\n context, pvc_models.NetworkAssociationDTO, session=session)\n query = query.filter_by(neutron_net_id=network_id)\n query.delete(synchronize_session=False)", "def delete_ipv4_acl_rule_bulk(self, **kwargs):\n # Validate required and accepted kwargs\n params_validator.validate_params_mlx_delete_ipv4_rule_acl(**kwargs)\n\n acl_name = self.mac.parse_acl_name(**kwargs)\n\n ret = self.get_acl_address_and_acl_type(acl_name)\n acl_type = ret['type']\n address_type = ret['protocol']\n\n if address_type != 'ip':\n raise ValueError(\"IPv4 Rule can not be added to non-ip ACL.\"\n \"ACL {} is of type {}\"\n .format(acl_name, address_type))\n\n # Get already configured seq_ids\n configured_seq_ids = self.get_configured_seq_ids(acl_name,\n address_type)\n seq_range = self.mac.parse_seq_id_by_range(configured_seq_ids,\n **kwargs)\n\n cli_arr = ['ip access-list ' + ' ' + acl_type + ' ' + acl_name]\n\n for seq_id in seq_range:\n cli_arr.append('no sequence ' + str(seq_id))\n\n output = self._callback(cli_arr, handler='cli-set')\n return self._process_cli_output(inspect.stack()[0][3],\n str(cli_arr), output)", "def cli(env, securitygroup_id):\n mgr = SoftLayer.NetworkManager(env.client)\n if not mgr.delete_securitygroup(securitygroup_id):\n raise exceptions.CLIAbort(\"Failed to delete security group\")", "def delete_network(req):\n if not isinstance(req, dict):\n return {\"error\": \"no parameters given to delete. 
Please send the network name to delete\"}, Const.HTTP_BAD_REQUEST\n if not \"name\" in req:\n return {\"error\": \"please send the 'name' of the network to delete\"}, Const.HTTP_BAD_REQUEST\n name = req['name']\n db = YamlDB()\n err, msg = db.delete_network_group(Const.KUBAM_CFG, name)\n if err == 1:\n return {\"error\": msg}, Const.HTTP_BAD_REQUEST\n else:\n return {\"status\": \"Network deleted\"}, Const.HTTP_CREATED", "def delete_ipv6_acl_rule_bulk(self, **kwargs):\n # Validate required and accepted kwargs\n params_validator.validate_params_mlx_delete_ipv6_rule_acl(**kwargs)\n\n acl_name = self.mac.parse_acl_name(**kwargs)\n\n ret = self.get_acl_address_and_acl_type(acl_name)\n address_type = ret['protocol']\n\n if address_type != 'ipv6':\n raise ValueError(\"IPv6 Rule can not be deleted from non-ipv6 ACL.\"\n \"ACL {} is of type {}\"\n .format(acl_name, address_type))\n\n # Get already configured seq_ids\n configured_seq_ids = self.get_configured_seq_ids(acl_name,\n address_type)\n seq_range = self.mac.parse_seq_id_by_range(configured_seq_ids,\n **kwargs)\n\n cli_arr = ['ipv6 access-list ' + acl_name]\n\n for seq_id in seq_range:\n cli_arr.append('no sequence ' + str(seq_id))\n\n output = self._callback(cli_arr, handler='cli-set')\n return self._process_cli_output(inspect.stack()[0][3],\n str(cli_arr), output)", "def subnet_delete(ctx, subnet_id):\n ctx.obj['nc'].delete(\"subnets/%s\" %subnet_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified ingress or egress entry (rule) from the specified network ACL.
def delete_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Egress=None): pass
[ "def delete_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n egress=None,\n network_acl_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not _exactly_one((network_acl_name, network_acl_id)):\n raise SaltInvocationError(\n \"One (but not both) of network_acl_id or network_acl_name must be provided.\"\n )\n\n for v in (\"rule_number\", \"egress\"):\n if locals()[v] is None:\n raise SaltInvocationError(\"{} is required.\".format(v))\n\n if network_acl_name:\n network_acl_id = _get_resource_id(\n \"network_acl\",\n network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not network_acl_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"Network ACL {} does not exist.\".format(\n network_acl_name or network_acl_id\n )\n },\n }\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n deleted = conn.delete_network_acl_entry(\n network_acl_id, rule_number, egress=egress\n )\n if deleted:\n log.info(\"Network ACL entry was deleted\")\n else:\n log.warning(\"Network ACL was not deleted\")\n return {\"deleted\": deleted}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_network_acl_rule(self, acl, rule):\n try:\n # Check if network ACL and network ACL rule exist\n acl_info = self.get_network_acl(acl)\n if \"errors\" in acl_info:\n return acl_info\n\n rule_info = self.get_network_acl_rule(acl_info[\"id\"], rule)\n if \"errors\" in rule_info:\n return rule_info\n\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules/{}?version={}\"\n \"&generation={}\".format(acl_info[\"id\"], rule_info[\"id\"],\n self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n data = qw(\"iaas\", \"DELETE\", path, headers())\n\n # Return data\n if data[\"response\"].status != 204:\n return data[\"data\"]\n\n # Return status\n return resource_deleted()\n\n except Exception as error:\n print(\"Error deleting network ACL rule {} for network\"\n \"ACL {}. 
{}\".format(rule, acl, error))\n raise", "def delete( # pylint: disable=inconsistent-return-statements\n self,\n resource_group_name: str,\n network_security_perimeter_name: str,\n profile_name: str,\n access_rule_name: str,\n **kwargs: Any\n ) -> None:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version: str = kwargs.pop(\n \"api_version\", _params.pop(\"api-version\", self._api_version or \"2021-02-01-preview\")\n )\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n request = build_nsp_access_rules_delete_request(\n resource_group_name=resource_group_name,\n network_security_perimeter_name=network_security_perimeter_name,\n profile_name=profile_name,\n access_rule_name=access_rule_name,\n subscription_id=self._config.subscription_id,\n api_version=api_version,\n template_url=self.delete.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def cli_cosmosdb_mongocluster_firewall_rule_delete(client, resource_group_name, cluster_name, rule_name):\r\n\r\n return client.begin_delete_firewall_rule(resource_group_name, cluster_name, rule_name)", "def delete_network_acl(\n network_acl_id=None,\n network_acl_name=None,\n disassociate=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if disassociate:\n network_acl = _get_resource(\n \"network_acl\",\n name=network_acl_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if network_acl and network_acl.associations:\n subnet_id = network_acl.associations[0].subnet_id\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.disassociate_network_acl(subnet_id)\n except BotoServerError:\n pass\n\n return _delete_resource(\n resource=\"network_acl\",\n name=network_acl_name,\n resource_id=network_acl_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_network_acl_rule(self,\n network_acl_id: str,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if network_acl_id is None:\n raise ValueError('network_acl_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_network_acl_rule')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['network_acl_id', 'id']\n path_param_values = self.encode_path_vars(network_acl_id, id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/network_acls/{network_acl_id}/rules/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n 
url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_command(*, login_manager: LoginManager, endpoint_id, rule_id):\n transfer_client = login_manager.get_transfer_client()\n\n res = transfer_client.delete_endpoint_acl_rule(endpoint_id, rule_id)\n formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key=\"message\")", "def ex_destroy_balancer_access_rule(self, balancer, rule):\r\n accepted = self.ex_destroy_balancer_access_rule_no_poll(balancer, rule)\r\n if not accepted:\r\n msg = 'Delete access rule not accepted'\r\n raise LibcloudError(msg, driver=self)\r\n\r\n return self._get_updated_balancer(balancer)", "def delete(self, api_client):\n cmd = {'id': self.id}\n api_client.deleteEgressFirewallRule(**cmd)\n return", "def delete_dnat_rule(dnat_rule, chain=PREROUTING_DNAT):\n if chain is None:\n chain = PREROUTING_DNAT\n\n return delete_raw_rule(\n 'nat', chain,\n _dnat_rule_format(dnat_rule),\n )", "def DeleteNetworkAcl(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteNetworkAcl\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteNetworkAclResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_security_group_rule(rule):\n return IMPL.delete_security_group_rule(rule)", "def _del_nat_rule(ctx, gateway, rule_type, original_ip, translated_ip):\n any_type = 'any'\n\n ctx.logger.info(\"Delete floating ip NAT rule: original_ip '{0}',\"\n \"translated_ip '{1}', rule type '{2}'\"\n .format(original_ip, translated_ip, rule_type))\n\n gateway.del_nat_rule(\n rule_type, original_ip, any_type, translated_ip, any_type, any_type)", "def delete_rule(rule, chain=None):\n if isinstance(rule, firewall.DNATRule):\n delete_dnat_rule(rule, chain=chain)\n\n elif isinstance(rule, firewall.SNATRule):\n delete_snat_rule(rule, chain=chain)\n\n elif isinstance(rule, firewall.PassThroughRule):\n delete_passthrough_rule(rule, chain=chain)\n\n else:\n raise ValueError(\"Unknown rule type %r\" % (type(rule)))", "def delete_raw_rule(table, chain, rule):\n del_cmd = ['iptables', '-t', table, '-D', chain] + rule.split()\n _LOGGER.info(\"%s\", del_cmd)\n\n try:\n subproc.check_call(del_cmd)\n except subprocess.CalledProcessError as exc:\n if exc.returncode == 1:\n # iptables exit with rc 1 if rule is not found, not fatal when\n # deleting.\n pass\n else:\n raise", "def PBH_RULE_delete(db, table_name, rule_name):\n\n ctx = click.get_current_context()\n\n table_name_validator(ctx, db.cfgdb_pipe, table_name)\n rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)\n\n table = str(PBH_RULE_CDB)\n key = (str(table_name), str(rule_name))\n\n try:\n del_entry(db.cfgdb_pipe, table, key)\n update_pbh_counters(table_name, rule_name)\n except Exception as err:\n exit_with_error(\"Error: {}\".format(err), fg=\"red\")", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n return api_client.deleteNetworkACLList(**cmd)", "def replace_network_acl_entry(\n network_acl_id=None,\n rule_number=None,\n protocol=None,\n rule_action=None,\n cidr_block=None,\n egress=None,\n network_acl_name=None,\n icmp_code=None,\n icmp_type=None,\n port_range_from=None,\n port_range_to=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n kwargs = locals()\n return _create_network_acl_entry(replace=True, 
**kwargs)", "def deleteacl(self, mailbox, who):\n return self._simple_command('DELETEACL', mailbox, who)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.
def delete_placement_group(DryRun=None, GroupName=None): pass
[ "def delete_placement_groups():\n client = boto3.resource('ec2')\n print('Deleting Placement Groups')\n for placement_group in client.placement_groups.all():\n print('Deleting Placement Group {}'.format(placement_group.name))\n placement_group.delete()\n print('Placement Groups deleted')", "def delete_group(self, group):\n path = \"api/groups/{0}\".format(group)\n self._delete(path)", "def delete_group(self, group_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_destroy_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[0]\r\n self.assertTrue(group.destroy())", "def Delete(iam,groupname: str):\n\t\t\t\treturn iam.resource.Group(groupname).delete()", "def _delete_consistencygroup(self, context, group, db):\n name = group['id']\n model_update = {}\n model_update['status'] = group['status']\n\n LOG.debug(_(\"Deleting consistencygroup %(id)s for %(display_name)s\") %\n {'id': name, 'display_name': group['name']})\n\n try:\n group_info = self.vmem_mg.snapshot.get_snapgroup_info(name)\n except vmemclient.core.error.NoMatchingObjectIdError:\n LOG.debug(_(\"Group %(name)s already deleted.\") %\n {'name': name})\n else:\n # Disable timemarks if necessary\n if group_info['timemarkEnabled']:\n LOG.debug(_(\"Deleting policy for consistencygroup %(id)s\") %\n {'id': name})\n\n ans = self.vmem_mg.snapshot.delete_snapgroup_policy(name)\n\n if not ans['success']:\n msg = (_(\"Failed to delete policy for \" +\n \"consistencygroup %(name)s: %(msg)s\") %\n {'name': name, 'msg': ans['msg']})\n raise exception.ViolinBackendErr(message=msg)\n\n # Remove LUNs from the consistency group\n vols = [x['name'] for x in group_info['members']]\n if vols:\n self._remove_from_consistencygroup(name, vols)\n\n # Delete the consistency group\n ans = self.vmem_mg.snapshot.delete_snapgroup(name)\n\n if not ans['success']:\n msg = (_(\"Failed to delete consistencygroup \" +\n \"%(name)s: %(msg)s\") %\n {'name': name, 'msg': ans['msg']})\n raise exception.ViolinBackendErr(message=msg)\n\n # Delete all volumes in this consistency group\n volumes = db.volume_get_all_by_group(context, name)\n for volume in volumes:\n lun_id = volume['id']\n LOG.debug(_(\"Deleting %(name)s volume: %(lun_id)s\") %\n {'name': name, 'lun_id': lun_id})\n try:\n self._delete_lun({'id': lun_id})\n except Exception as e:\n LOG.warn(_(\"Failed to delete volume %(lun_id)s: %(reason)s\") %\n {'lun_id': lun_id, 'reason': str(e)})\n volume['status'] = 'error_deleting'\n else:\n volume['status'] = 'deleted'\n\n return model_update, volumes", "def delete(self, group_name):\n self.request.mongo_connection.shinken.hostgroups.remove(\n {\"hostgroup_name\": group_name}\n )", "def delete_group_tpat(self, group):\n self._delete_group_tpat(group)", "def delete_customer_group(self,\n group_id):\n\n # Prepare query URL\n _url_path = '/v2/customers/groups/{group_id}'\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\n 'group_id': group_id\n })\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n _headers = {\n 'accept': 'application/json'\n }\n\n # Prepare and execute request\n _request = self.config.http_client.delete(_query_url, headers=_headers)\n OAuth2.apply(self.config, _request)\n _response = self.execute_request(_request)\n\n decoded = APIHelper.json_deserialize(_response.text)\n if type(decoded) is dict:\n _errors = decoded.get('errors')\n else:\n _errors 
= None\n _result = ApiResponse(_response, body=decoded, errors=_errors)\n return _result", "def test_destroy_not_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[1]\r\n self.assertTrue(group.destroy())", "def ex_delete_ip_group(self, group_id):\r\n uri = '/shared_ip_groups/%s' % group_id\r\n resp = self.connection.request(uri, method='DELETE')\r\n return resp.status == httplib.NO_CONTENT", "def delete(self):\n url = f'{self._okta.api}/groups/{self.id}'\n response = self._okta.session.delete(url)\n return response.ok", "def delete(self, group_id):\r\n group = self._get_group(group_id)\r\n if (group.kwargs['min_entities'] > 0\r\n or group.kwargs['max_entities'] > 0):\r\n raise Exception(\"Can't delete yet!\")\r\n del self.groups[group_id]", "def destroy(self):\r\n return self.driver.ex_destroy_group(self)", "def delete_device_group(\n cmd,\n app_id: str,\n device_group_id: str,\n token: str,\n api_version=API_VERSION,\n central_dns_suffix=CENTRAL_ENDPOINT,\n) -> dict:\n api_version = API_VERSION\n\n return _utility.make_api_call(\n cmd,\n app_id=app_id,\n method=\"DELETE\",\n url=\"https://{}.{}/{}/{}\".format(app_id, central_dns_suffix, BASE_PATH, device_group_id),\n payload=None,\n token=token,\n api_version=api_version,\n central_dnx_suffix=central_dns_suffix,\n )", "def deleteGroup(id):", "def remove_group():\r\n group_input = input(\"| Enter the name of the Group |\")\r\n adgroup.ADGroup.from_dn(group_input).delete()\r\n return \"| Group Removed |\"", "def delete_node_group(node_group_id):\n\n # FIXME: Support name and id or ?\n data = {'node_group_id': node_group_id}\n return api_submit('/api/node_groups/{0}'.format(node_group_id), data, method='delete')", "def DeleteAddressTemplateGroup(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteAddressTemplateGroup\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteAddressTemplateGroupResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_group(self,\n context: context.RequestContext,\n group: objects.Group) -> None:\n context = context.elevated()\n project_id = group.project_id\n\n if context.project_id != group.project_id:\n project_id = group.project_id\n else:\n project_id = context.project_id\n\n volumes = objects.VolumeList.get_all_by_generic_group(\n context, group.id)\n\n for vol_obj in volumes:\n if vol_obj.attach_status == \"attached\":\n # Volume is still attached, need to detach first\n raise exception.VolumeAttached(volume_id=vol_obj.id)\n self._check_is_our_resource(vol_obj)\n\n self._notify_about_group_usage(\n context, group, \"delete.start\")\n\n volumes_model_update = None\n model_update = None\n try:\n volume_utils.require_driver_initialized(self.driver)\n\n try:\n model_update, volumes_model_update = (\n self.driver.delete_group(context, group, volumes))\n except NotImplementedError:\n if not group_types.is_default_cgsnapshot_type(\n group.group_type_id):\n model_update, volumes_model_update = (\n self._delete_group_generic(context, group, volumes))\n else:\n cg, volumes = self._convert_group_to_cg(\n group, volumes)\n model_update, volumes_model_update = (\n self.driver.delete_consistencygroup(context, cg,\n volumes))\n self._remove_consistencygroup_id_from_volumes(volumes)\n\n if 
volumes_model_update:\n for update in volumes_model_update:\n # If we failed to delete a volume, make sure the\n # status for the group is set to error as well\n if (update['status'] in ['error_deleting', 'error']\n and model_update['status'] not in\n ['error_deleting', 'error']):\n model_update['status'] = update['status']\n self.db.volumes_update(context, volumes_model_update)\n\n if model_update:\n if model_update['status'] in ['error_deleting', 'error']:\n msg = (_('Delete group failed.'))\n LOG.error(msg,\n resource={'type': 'group',\n 'id': group.id})\n raise exception.VolumeDriverException(message=msg)\n else:\n group.update(model_update)\n group.save()\n\n except Exception:\n with excutils.save_and_reraise_exception():\n group.status = fields.GroupStatus.ERROR\n group.save()\n # Update volume status to 'error' if driver returns\n # None for volumes_model_update.\n if not volumes_model_update:\n self._remove_consistencygroup_id_from_volumes(volumes)\n for vol_obj in volumes:\n vol_obj.status = 'error'\n vol_obj.save()\n\n # Get reservations for group\n grpreservations: Optional[list]\n try:\n reserve_opts = {'groups': -1}\n grpreservations = GROUP_QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n grpreservations = None\n LOG.exception(\"Delete group \"\n \"failed to update usages.\",\n resource={'type': 'group',\n 'id': group.id})\n\n for vol in volumes:\n # Get reservations for volume\n reservations: Optional[list]\n try:\n reserve_opts = {'volumes': -1,\n 'gigabytes': -vol.size}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n vol.volume_type_id)\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(\"Delete group \"\n \"failed to update usages.\",\n resource={'type': 'group',\n 'id': group.id})\n\n vol.destroy()\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n\n self.stats['allocated_capacity_gb'] -= vol.size\n\n if grpreservations:\n GROUP_QUOTAS.commit(context, grpreservations,\n project_id=project_id)\n\n group.destroy()\n self._notify_about_group_usage(\n context, group, \"delete.end\")\n self.publish_service_capabilities(context)\n LOG.info(\"Delete group \"\n \"completed successfully.\",\n resource={'type': 'group',\n 'id': group.id})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified route from the specified route table.
def delete_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, DestinationIpv6CidrBlock=None): pass
[ "def delete_route_table(\n route_table_id=None,\n route_table_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n return _delete_resource(\n resource=\"route_table\",\n name=route_table_name,\n resource_id=route_table_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_route(route_table_id, destination_cidr_block):\n ec2 = boto3.client('ec2')\n resp = ec2.delete_route(\n DestinationCidrBlock=destination_cidr_block,\n RouteTableId=route_table_id,\n )\n logger.info(\"Got response to delete_route {} \".format(resp))\n return resp", "def delete(self,\n route_id,\n ):\n return self._invoke('delete',\n {\n 'route_id': route_id,\n })", "def remove_route(self, rte):\n self.lock.acquire()\n try:\n self.table.remove_rte(rte)\n finally:\n self.lock.release()", "def test_delete_route(self):\n self.ht.delete_route(self.test_route, self.method)\n resp = requests.get(self.req)\n msg = 'route %s has not been removed' % self.test_route\n self.assertEqual(resp.status_code, 404, msg)", "def remove_route(self, route_id):\n if not self.has_route(route_id):\n raise RouteIndexError(f'Route with ID `{route_id}` does not exist in the Schedule. '\n \"Cannot remove a Route that isn't present.\")\n route = self.route(route_id)\n route_data = self._graph.graph['routes'][route_id]\n service_id = self._graph.graph['route_to_service_map'][route_id]\n\n for stop in route.reference_nodes():\n self._graph.nodes[stop]['routes'] = self._graph.nodes[stop]['routes'] - {route_id}\n if (not self._graph.nodes[stop]['routes']) or (\n self._graph.nodes[stop]['routes'] & set(self._graph.graph['service_to_route_map'])):\n self._graph.nodes[stop]['services'] = self._graph.nodes[stop]['services'] - {service_id}\n for u, v in route.reference_edges():\n self._graph[u][v]['routes'] = self._graph[u][v]['routes'] - {route_id}\n if (not self._graph[u][v]['routes']) or (\n set(self._graph[u][v]['routes']) & set(self._graph.graph['service_to_route_map'])):\n self._graph[u][v]['services'] = self._graph[u][v]['services'] - {service_id}\n\n self._graph.graph['service_to_route_map'][service_id].remove(route_id)\n del self._graph.graph['route_to_service_map'][route_id]\n del self._graph.graph['routes'][route_id]\n self._graph.graph['change_log'].remove(object_type='route', object_id=route_id, object_attributes=route_data)\n logging.info(f'Removed Route with index `{route_id}`, data={route_data}. 
'\n f'It was linked to Service `{service_id}`.')", "def delete_custom_route(self, purge_routes, vpc_id):\n params = {}\n results = []\n vrouter_table_id = None\n changed = False\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n \n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n if 'route_table_id' in purge_routes:\n if 'next_hop_id' in purge_routes:\n if vrouter_table_id == purge_routes[\"route_table_id\"]: \n self.build_list_params(params, purge_routes[\"route_table_id\"], 'RouteTableId') \n fixed_dest_cidr_block = None\n if 'dest' in purge_routes:\n fixed_dest_cidr_block = purge_routes[\"dest\"]\n if 'destination_cidrblock' in purge_routes:\n fixed_dest_cidr_block = purge_routes[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n \n self.build_list_params(params, purge_routes[\"next_hop_id\"], 'NextHopId')\n\n try:\n results = self.get_status('DeleteRouteEntry', params)\n changed = True\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n changed = False\n results.append({ \"Error Message\": \"RouteTableId or VpcId does not exist\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to delete route entry\"})\n else:\n results.append({\"Error Message\": \"route_table_id is required to delete route entry\"})\n\n return changed, results", "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def delete_route(request):\n routeID = request.POST.get('routeID')\n print(routeID)\n # Check if route exist\n if Routes.objects.filter(routeID=int(routeID)).exists():\n # Delete the routeID entry from the database\n a = Routes.objects.get(routeID=int(routeID))\n a.delete()\n # Check if there are questions with the deleted routeID\n if Questions.objects.filter(routeID=int(routeID)).exists:\n # Delete all the question entry which has the same routeID as the one deleted\n Questions.objects.filter(routeID=int(routeID)).delete()\n return HttpResponse(\"Deleted successfully\")\n else:\n return HttpResponse(\"Not exist\")", "def delete_route_tables():\n client = boto3.resource('ec2')\n print('Deleting Route Tables')\n for route_table in client.route_tables.all():\n for route in route_table.routes:\n if route.origin == 'CreateRoute':\n print('Deleting Route {} in Route Table {}'.format(route.destination_cidr_block,\n route_table.id))\n route.delete()\n main = False\n for rta in route_table.associations:\n if rta.main:\n main = True\n else:\n print('Deleting Route Table Association {}'.format(rta.id))\n rta.delete()\n if not main:\n print('Deleting Route Table {}'.format(route_table.id))\n route_table.delete()\n print('Route Tables deleted')", "def delete_local(route_dist, prefix, route_family=VRF_RF_IPV4):\n try:\n tm = 
CORE_MANAGER.get_core_service().table_manager\n tm.remove_from_vrf(route_dist, prefix, route_family)\n # Send success response to ApgwAgent.\n return [{ROUTE_DISTINGUISHER: route_dist, PREFIX: prefix,\n VRF_RF: route_family}]\n except BgpCoreError as e:\n raise PrefixError(desc=e)", "def delete(self):\n\n uri = \"{base_url}{class_uri}/{asn}\".format(\n base_url=self.session.base_url,\n class_uri=self.base_uri,\n asn=self.asn)\n\n try:\n response = self.session.s.delete(uri,\n verify=False,\n proxies=self.session.proxy)\n\n except Exception as e:\n raise ResponseError(\"DELETE\", e)\n\n if not utils._response_ok(response, \"DELETE\"):\n raise GenericOperationError(response.text, response.status_code)\n\n else:\n logging.info(\"SUCCESS: Delete BGP table entry {} succeeded\".format(\n self.asn))\n\n # Delete back reference from VRF\n for bgp_router in self.__parent_vrf.bgp_routers:\n if bgp_router.asn == self.asn:\n self.__parent_vrf.bgp_routers.remove(bgp_router)\n\n # Delete object attributes\n utils.delete_attrs(self, self.config_attrs)", "def disassociate_route_table(DryRun=None, AssociationId=None):\n pass", "def removeRoute(arg1: 'SoNode', eventout: 'char const *', to: 'SoNode', eventin: 'char const *') -> \"void\":\n return _coin.SoDB_removeRoute(arg1, eventout, to, eventin)", "def delete_resource(self):\r\n results = ResponsesREST.SERVER_ERROR.value\r\n query = \"DELETE FROM Resource WHERE routeSave = %s; \"\r\n param = [self.route_save]\r\n result = self.connect.send_query(query, param)\r\n if result:\r\n results = ResponsesREST.SUCCESSFUL.value\r\n return results", "def delete(mod, id):\n Model = mod_lookup.get(mod, None)\n if not Model:\n return f\"No such route: {mod}\", 404\n Model.query.filter_by(id=id).delete()\n db.session.commit()\n return redirect(url_for('all', mod=mod))", "def unbind(self, uuid):\n try:\n route = Route.objects.get(uuid=uuid)\n except Route.DoesNotExist:\n pass\n else:\n route.delete()", "def del_routes(cli_opts, prefixes): # noqa: B902\n\n fib.FibDelRoutesCmd(cli_opts).run(prefixes)", "def delete_destination(self, trip, destination):\n self.remove(path_destination(trip, destination))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified route table. You must disassociate the route table from any subnets before you can delete it. You can't delete the main route table.
def delete_route_table(DryRun=None, RouteTableId=None): pass
[ "def delete_route_table(\n route_table_id=None,\n route_table_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n return _delete_resource(\n resource=\"route_table\",\n name=route_table_name,\n resource_id=route_table_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, DestinationIpv6CidrBlock=None):\n pass", "def delete_route(route_table_id, destination_cidr_block):\n ec2 = boto3.client('ec2')\n resp = ec2.delete_route(\n DestinationCidrBlock=destination_cidr_block,\n RouteTableId=route_table_id,\n )\n logger.info(\"Got response to delete_route {} \".format(resp))\n return resp", "def delete_route_tables():\n client = boto3.resource('ec2')\n print('Deleting Route Tables')\n for route_table in client.route_tables.all():\n for route in route_table.routes:\n if route.origin == 'CreateRoute':\n print('Deleting Route {} in Route Table {}'.format(route.destination_cidr_block,\n route_table.id))\n route.delete()\n main = False\n for rta in route_table.associations:\n if rta.main:\n main = True\n else:\n print('Deleting Route Table Association {}'.format(rta.id))\n rta.delete()\n if not main:\n print('Deleting Route Table {}'.format(route_table.id))\n route_table.delete()\n print('Route Tables deleted')", "def remove_route(self, rte):\n self.lock.acquire()\n try:\n self.table.remove_rte(rte)\n finally:\n self.lock.release()", "def delete_custom_route(self, purge_routes, vpc_id):\n params = {}\n results = []\n vrouter_table_id = None\n changed = False\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n \n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n if 'route_table_id' in purge_routes:\n if 'next_hop_id' in purge_routes:\n if vrouter_table_id == purge_routes[\"route_table_id\"]: \n self.build_list_params(params, purge_routes[\"route_table_id\"], 'RouteTableId') \n fixed_dest_cidr_block = None\n if 'dest' in purge_routes:\n fixed_dest_cidr_block = purge_routes[\"dest\"]\n if 'destination_cidrblock' in purge_routes:\n fixed_dest_cidr_block = purge_routes[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n \n self.build_list_params(params, purge_routes[\"next_hop_id\"], 'NextHopId')\n\n try:\n results = self.get_status('DeleteRouteEntry', params)\n changed = True\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n changed = False\n results.append({ \"Error Message\": \"RouteTableId or VpcId does not exist\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to delete route entry\"})\n else:\n results.append({\"Error Message\": \"route_table_id is required to delete route entry\"})\n\n return changed, results", "def 
delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def delete(self):\n\n uri = \"{base_url}{class_uri}/{asn}\".format(\n base_url=self.session.base_url,\n class_uri=self.base_uri,\n asn=self.asn)\n\n try:\n response = self.session.s.delete(uri,\n verify=False,\n proxies=self.session.proxy)\n\n except Exception as e:\n raise ResponseError(\"DELETE\", e)\n\n if not utils._response_ok(response, \"DELETE\"):\n raise GenericOperationError(response.text, response.status_code)\n\n else:\n logging.info(\"SUCCESS: Delete BGP table entry {} succeeded\".format(\n self.asn))\n\n # Delete back reference from VRF\n for bgp_router in self.__parent_vrf.bgp_routers:\n if bgp_router.asn == self.asn:\n self.__parent_vrf.bgp_routers.remove(bgp_router)\n\n # Delete object attributes\n utils.delete_attrs(self, self.config_attrs)", "def disassociate_route_table(DryRun=None, AssociationId=None):\n pass", "def DeleteTable(self, table_name):\n pass", "def delete_local(route_dist, prefix, route_family=VRF_RF_IPV4):\n try:\n tm = CORE_MANAGER.get_core_service().table_manager\n tm.remove_from_vrf(route_dist, prefix, route_family)\n # Send success response to ApgwAgent.\n return [{ROUTE_DISTINGUISHER: route_dist, PREFIX: prefix,\n VRF_RF: route_family}]\n except BgpCoreError as e:\n raise PrefixError(desc=e)", "def deletefromtable(self, *args, **kwargs):\n return _regionmanager.regionmanager_deletefromtable(self, *args, **kwargs)", "def del_routes(cli_opts, prefixes): # noqa: B902\n\n fib.FibDelRoutesCmd(cli_opts).run(prefixes)", "def delete(self,\n route_id,\n ):\n return self._invoke('delete',\n {\n 'route_id': route_id,\n })", "def subnet_delete(ctx, subnet_id):\n ctx.obj['nc'].delete(\"subnets/%s\" %subnet_id)", "def delete_geometry_table(self, table):\n\t\tsql = \"DROP TABLE %s\" % (self._quote(table))\n\t\tself._exec_sql_and_commit(sql)", "def PBH_TABLE_delete(db, table_name):\n\n ctx = click.get_current_context()\n\n table_name_validator(ctx, db.cfgdb_pipe, table_name)\n\n table = str(PBH_TABLE_CDB)\n key = str(table_name)\n\n try:\n del_entry(db.cfgdb_pipe, table, key)\n except Exception as err:\n exit_with_error(\"Error: {}\".format(err), fg=\"red\")", "def disassociate_route_table(\n association_id, region=None, key=None, keyid=None, profile=None\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if conn.disassociate_route_table(association_id):\n log.info(\n \"Route table with association id %s has been disassociated.\",\n association_id,\n )\n return {\"disassociated\": True}\n else:\n log.warning(\n \"Route table with association id %s has not been disassociated.\",\n association_id,\n )\n return {\"disassociated\": False}\n except BotoServerError as e:\n return {\"disassociated\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def removeTable(self, table_name):\n pass", "def describe_route_tables(\n route_table_id=None,\n route_table_name=None,\n vpc_id=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((route_table_id, route_table_name, tags, vpc_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: \"\n \"route table id, route table name, vpc_id, or tags.\"\n )\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"Filters\": []}\n\n if route_table_id:\n filter_parameters[\"RouteTableIds\"] = [route_table_id]\n\n if vpc_id:\n filter_parameters[\"Filters\"].append({\"Name\": \"vpc-id\", 
\"Values\": [vpc_id]})\n\n if route_table_name:\n filter_parameters[\"Filters\"].append(\n {\"Name\": \"tag:Name\", \"Values\": [route_table_name]}\n )\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"Filters\"].append(\n {\"Name\": \"tag:{}\".format(tag_name), \"Values\": [tag_value]}\n )\n\n route_tables = conn3.describe_route_tables(**filter_parameters).get(\n \"RouteTables\", []\n )\n\n if not route_tables:\n return []\n\n tables = []\n keys = {\n \"id\": \"RouteTableId\",\n \"vpc_id\": \"VpcId\",\n \"tags\": \"Tags\",\n \"routes\": \"Routes\",\n \"associations\": \"Associations\",\n }\n route_keys = {\n \"destination_cidr_block\": \"DestinationCidrBlock\",\n \"gateway_id\": \"GatewayId\",\n \"instance_id\": \"Instance\",\n \"interface_id\": \"NetworkInterfaceId\",\n \"nat_gateway_id\": \"NatGatewayId\",\n \"vpc_peering_connection_id\": \"VpcPeeringConnectionId\",\n }\n assoc_keys = {\n \"id\": \"RouteTableAssociationId\",\n \"main\": \"Main\",\n \"route_table_id\": \"RouteTableId\",\n \"SubnetId\": \"subnet_id\",\n }\n for item in route_tables:\n route_table = {}\n for outkey, inkey in keys.items():\n if inkey in item:\n if outkey == \"routes\":\n route_table[outkey] = _key_remap(inkey, route_keys, item)\n elif outkey == \"associations\":\n route_table[outkey] = _key_remap(inkey, assoc_keys, item)\n elif outkey == \"tags\":\n route_table[outkey] = {}\n for tagitem in item.get(inkey, []):\n route_table[outkey][tagitem.get(\"Key\")] = tagitem.get(\n \"Value\"\n )\n else:\n route_table[outkey] = item.get(inkey)\n tables.append(route_table)\n return tables\n\n except BotoServerError as e:\n return {\"error\": __utils__[\"boto.get_error\"](e)}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the data feed for Spot instances.
def delete_spot_datafeed_subscription(DryRun=None): pass
[ "def delete_datapoints(self, data):\n return self._post('datapoints/delete', data=data)", "def delete(self):\n self._omnia_client.time_series.delete(self.id)", "def delete(self, commit=True):\n for dmp in self.dmps:\n dmp.datasets.remove(self)\n\n db.session.delete(self)\n\n if commit:\n db.session.commit()", "def delete(self, commit=True):\n for ds in self.datasets:\n ds.dmps.remove(self)\n\n db.session.delete(self)\n\n if commit:\n db.session.commit()", "def deleteData(self):\n\t\tloopdata_keys = LoopData.query().fetch(keys_only = True)\n\t\tloopdata_entities = ndb.get_multi(loopdata_keys)\n\t\tndb.delete_multi([l.key for l in loopdata_entities])", "def delete(self):\n if self.data:\n self.data.delete()\n super(Resource, self).delete()", "def close_spider(self, spider):\n\n with self.index.searcher() as searcher:\n for page in searcher.all_stored_fields():\n if page['url'] not in spider.state['update_list']:\n self.writer.delete_by_term('url', page['url'])\n self.writer.commit()", "def run(self):\n if self.token is not None:\n self.token.deleteFromStore()", "def delete_all():\n all_data_lines = DataLine.objects.all()\n return all_data_lines.delete()", "def delete(self, instance):\n self.descriptor.__delete__(instance)", "def deleteStationFromDirectory(self, items):\n dataID = items[0].text()\n \n index = -1\n for i, station in enumerate(self.datasetDirectory['datasets']):\n print(station)\n if station['PYID'] == dataID:\n index = i\n \n self.datasetDirectory['datasets'].pop(index)\n\n\n return", "def delete(self, *args, **kwargs):\n try:\n self.terminate_task()\n self.periodic_task.delete()\n except:\n pass\n return super(ShoalScrapeTask, self).delete(*args, **kwargs)", "def delete_feed(self, feed: FeedInput) -> None:\n url = _feed_argument(feed)\n self._storage.delete_feed(url)", "def destroy(self):\n try:\n del Thing.ID_dict[self.id]\n except KeyError:\n self.log.error('%s was already removed from Thing.ID_dict' % self)\n if self.location and hasattr(self.location, \"extract\"):\n self.location.extract(self)\n self.location = None\n if self in Thing.game.heartbeat_users:\n Thing.game.deregister_heartbeat(self)", "def delete(self):\n raise NotImplementedError('delete graph snapshots not implemented')", "def delete(self, data):\n raise NotImplementedError()", "def delete_ds_callback(self, widget):\n\n if self._debug:\n print(\"delete_ds_callback was called with widget {}\".format(widget))\n\n path, focus = self._datasets_tv.get_cursor()\n del_iter = self._datasets_ls.get_iter(path)\n\n self._datasets[path[0]].close()\n self._datasets.pop(path[0])\n\n self._datasets_ls.remove(del_iter)\n\n return 0", "def delete_all(self):\n to_delete = list(self.instances.keys())\n if len(to_delete) > 0: # Only delete stuff if there's stuff to\n # delete.\n self.delete(to_delete)", "def delete(self):\n if self._store:\n self._store.delete(self.key)", "def do_destroy(self, *args):\n args = [e for e in args[0].split(' ')]\n if args[0] == '':\n print('** class name missing **')\n return\n if args[0] not in self.class_l:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print('** instance id missing **')\n return\n\n storage.reload()\n objs_dict = storage.all()\n\n if objs_dict is None:\n print(\"** no instance found **\")\n return\n key = \"{}.{}\".format(args[0], args[1])\n if key in objs_dict.keys():\n del objs_dict[key]\n storage.save()\n else:\n print(\"** no instance found **\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified set of tags from the specified set of resources. This call is designed to follow a DescribeTags request. For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.
def delete_tags(DryRun=None, Resources=None, Tags=None): pass
[ "def remove(self, *resources):\n self.doapi_manager.request(self.url + '/resources', method='DELETE',\n data={\"resources\": _to_taggable(resources)})", "def delete_tags(self, req, resource, tags=None):\n provider = self._get_provider(resource.realm)\n if tags is None:\n provider.remove_resource_tags(req, resource)\n else:\n tags = set(tags)\n current_tags = provider.get_resource_tags(req, resource)\n current_tags.remove(tags)\n provider.set_resource_tags(req, resource, tags)", "def bulk_delete(self, resources):\n\n batch = http.BatchHttpRequest()\n for resource in resources:\n resource.gce_project = self\n batch.add(self._delete_request(resource), callback=self._batch_response)\n\n try:\n self._run_request(batch)\n except error.GceError:\n raise\n except error.GceTokenError:\n raise", "def remove(self, *tags):\n with self._treant._write:\n # remove redundant tags from given list if present\n tags = set([str(tag) for tag in tags])\n for tag in tags:\n # remove tag; if not present, continue anyway\n try:\n self._treant._state['tags'].remove(tag)\n except ValueError:\n pass", "def ex_tag_resources(self, resources, tag):\r\n\r\n resources = tag.resources[:]\r\n\r\n for resource in resources:\r\n if not hasattr(resource, 'id'):\r\n raise ValueError('Resource doesn\\'t have id attribute')\r\n\r\n resources.append(resource.id)\r\n\r\n resources = list(set(resources))\r\n\r\n data = {\r\n 'name': tag.name,\r\n 'resources': resources\r\n }\r\n\r\n action = '/tags/%s/' % (tag.id)\r\n response = self.connection.request(action=action, method='PUT',\r\n data=data).object\r\n tag = self._to_tag(data=response)\r\n return tag", "def remove_resource_tags(req, resource):", "def remove_tags(self, *tags):\n\n try:\n tag_list = self.data[\"tags\"]\n except KeyError:\n return\n\n self.data[\"tags\"] = [t for t in tag_list if t not in tags]", "def _release_resources(self, resources):\n self.logger.info(\"Releasing %r\", resources)\n release_requests = [res.name\n for res in resources if res.DATA_CLASS is not None]\n\n for resource in resources[:]:\n if resource in self.locked_resources:\n self.locked_resources.remove(resource)\n\n if resource in self.unused_resources:\n self.unused_resources.remove(resource)\n\n if len(release_requests) > 0:\n request_data = ReleaseResourcesParamsModel({\n \"resources\": release_requests,\n \"token\": self.token\n })\n response = self.requester.request(ReleaseResources,\n data=request_data,\n method=\"post\")\n\n if isinstance(response, FailureResponseModel):\n raise ResourceReleaseError(response.errors)", "def _delete_resources(\n self,\n db: DBInterface,\n db_session: Session,\n namespace: str,\n deleted_resources: List[Dict],\n label_selector: str = None,\n force: bool = False,\n grace_period: int = config.runtime_resources_deletion_grace_period,\n ):\n pass", "def delete_tag(session,taglist):\r\n for t in taglist:\r\n session.query(Tag.name==t).delete()\r\n session.commit()", "def _release_resources(self, resources):\n self.logger.info(\"Releasing %r\", resources)\n release_requests = [{\"name\": res.data.name, \"dirty\": res.data.dirty}\n for res in resources]\n request = messages.ReleaseResources(requests=release_requests)\n self._request(request)\n\n for resource in resources:\n if resource in self.locked_resources:\n self.locked_resources.remove(resource)", "def delete_resources(self):\n logger.info(\"Deleting resources as a sanity functional validation\")\n\n for pod_obj in self.pod_objs:\n pod_obj.delete()\n for pod_obj in self.pod_objs:\n 
pod_obj.ocp.wait_for_delete(pod_obj.name)\n for pvc_obj in self.pvc_objs:\n pvc_obj.delete()\n for pvc_obj in self.pvc_objs:\n pvc_obj.ocp.wait_for_delete(pvc_obj.name)", "def delete(self, resource, keys):\n i = 0\n keyN = len(keys)\n while i < keyN:\n resource.attrs.__delitem__(keys[i])\n i = i+1\n print('Done deleting.')", "def deltags( self, tags ) :\n return self.client.tagticket( self.project, self, deltags=tags )", "def test_delete_on_background_response_descriptor_tag_sets_tag_set_tag_set_resource_spaces(self):\n pass", "def unlink(self, tag, glob=None, resources=None):\n query = Q(project__in=self.projects) if self.projects else Q()\n if glob is not None:\n resources = list(self.find(glob, include=tag))\n self.tag_manager.filter(query).get(slug=tag).resources.remove(*resources)\n return resources\n if resources is not None:\n _resources = self.resource_manager.none()\n for resource in resources:\n _resources |= self.resource_manager.filter(\n project=resource[\"project\"],\n path=resource[\"path\"])\n self.tag_manager.filter(query).get(slug=tag).resources.remove(*list(_resources))", "def delete(self, *devices):\n for d in devices:\n d.delete()", "def delete_unused(self, tags=None):\n tags_ids = [x.id for x in tags] if tags else None\n tags = self.all() if tags_ids is None else self.filter(id__in=tags_ids)\n tags.filter(items__isnull=True).delete()", "def deltags( self, tags ) :\n return self.client.tagwiki( self.project, self, deltags=tags )", "def test_delete_on_background_response_descriptor_tag_sets_tag_set_tag_set_resource(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.
def delete_vpc(DryRun=None, VpcId=None): pass
[ "def delete(\n vpc_id=None,\n name=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.delete: name parameter is deprecated use vpc_name instead.\"\n )\n vpc_name = name\n\n if not _exactly_one((vpc_name, vpc_id)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_name or vpc_id must be provided.\"\n )\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not vpc_id:\n vpc_id = _get_id(\n vpc_name=vpc_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_id:\n return {\n \"deleted\": False,\n \"error\": {\"message\": \"VPC {} not found\".format(vpc_name)},\n }\n\n if conn.delete_vpc(vpc_id):\n log.info(\"VPC %s was deleted.\", vpc_id)\n if vpc_name:\n _cache_id(\n vpc_name,\n resource_id=vpc_id,\n invalidate=True,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"deleted\": True}\n else:\n log.warning(\"VPC %s was not deleted.\", vpc_id)\n return {\"deleted\": False}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def deletevpc(vpc_choices):\n progressbar(\"Deleting VPC\")\n vpcname=vpc_choices['vpc'][0]\n try:\n ec2.delete_vpc(VpcId=str(vpcname))\n print(\"\\n \\n vpc \" +vpcname +\" has been deleted \\n \\n\")\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while deleting vpc: \\n\\n\\n\")\n print(e)", "def delete_vpc_endpoint_resources():\n print('Deleting VPC endpoints')\n ec2 = boto3.client('ec2')\n endpoint_ids = []\n for endpoint in ec2.describe_vpc_endpoints()['VpcEndpoints']:\n print('Deleting VPC Endpoint - {}'.format(endpoint['ServiceName']))\n endpoint_ids.append(endpoint['VpcEndpointId'])\n\n if endpoint_ids:\n ec2.delete_vpc_endpoints(\n VpcEndpointIds=endpoint_ids\n )\n\n print('Waiting for VPC endpoints to get deleted')\n while ec2.describe_vpc_endpoints()['VpcEndpoints']:\n time.sleep(5)\n\n print('VPC endpoints deleted')\n\n # VPC endpoints connections\n print('Deleting VPC endpoint connections')\n service_ids = []\n for connection in ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n service_id = connection['ServiceId']\n state = connection['VpcEndpointState']\n\n if state in ['PendingAcceptance', 'Pending', 'Available', 'Rejected', 'Failed', 'Expired']:\n print('Deleting VPC Endpoint Service - {}'.format(service_id))\n service_ids.append(service_id)\n\n ec2.reject_vpc_endpoint_connections(\n ServiceId=service_id,\n VpcEndpointIds=[\n connection['VpcEndpointId'],\n ]\n )\n\n if service_ids:\n ec2.delete_vpc_endpoint_service_configurations(\n ServiceIds=service_ids\n )\n\n print('Waiting for VPC endpoint services to be destroyed')\n while ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n time.sleep(5)\n\n print('VPC endpoint connections deleted')", "def delete_pvc(self, pvc: PVC):\n logger.info(f\"Delete pvc {pvc.name}\")\n ocp = OCP()\n for pvc_capacity_deployment in self:\n if pvc_capacity_deployment.pvc_obj == pvc:\n\n pv_obj = pvc.backed_pv_obj\n # pvc will stack in terminating state because it is mounted\n pvc.delete(wait=False, force=True)\n # remove finalizers from the pvc to be able to delete mounted pvc\n params = '{\"metadata\": {\"finalizers\":null}}'\n try:\n ocp.exec_oc_cmd(\n f\"patch pvc {pvc.name} -p '{params}' -n {pvc.namespace}\"\n )\n logger.info(\n \"sleep 2 min to allow pvc to be deleted after patching finalizers\"\n )\n 
time.sleep(60 * 2)\n except CommandFailed as ex:\n if \"not found\" in str(ex):\n logger.info(f\"pvc '{pvc.name}' already deleted\")\n\n pv_names = get_pv_names()\n if pv_obj.name in pv_names:\n pv_status = get_pv_status(pv_obj.get())\n logger.info(\n f\"PVC deletion did not delete PV {pv_obj.name} on cluster. PV status {pv_status}\"\n )\n pv_obj.delete(wait=False, force=True)\n wait_for_pv_delete([pv_obj])\n else:\n logger.info(f\"PV {pv_obj.name} is already deleted\")\n\n self._delete_pvc_capacity_deployment_from_list(\n pvc_capacity_deployment.deployment.name\n )\n break\n else:\n raise ValueError(f\"PVC with name {pvc.name} not found.\")", "def delete_vpc_peering_connection(\n conn_id=None,\n conn_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n dry_run=False,\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n if conn_name:\n conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)\n if not conn_id:\n raise SaltInvocationError(\n \"Couldn't resolve VPC peering connection {} to an ID\".format(conn_name)\n )\n try:\n log.debug(\"Trying to delete vpc peering connection\")\n conn.delete_vpc_peering_connection(\n DryRun=dry_run, VpcPeeringConnectionId=conn_id\n )\n return {\"msg\": \"VPC peering connection deleted.\"}\n except botocore.exceptions.ClientError as err:\n e = __utils__[\"boto.get_error\"](err)\n log.error(\"Failed to delete VPC peering %s: %s\", conn_name or conn_id, e)\n return {\"error\": e}", "def delete_pvc(pvc, namespace):\n pods = common_utils.get_pods_using_pvc(pvc, namespace)\n\n # Filter non viewer pods from viewer pods\n viewer_pods, non_viewer_pods = [], []\n for p in pods:\n viewer_pods.append(p) if viewer_utils.is_viewer_pod(\n p) else non_viewer_pods.append(p)\n\n # If any non viewer pod is using the PVC, raise an exception\n if non_viewer_pods:\n pod_names = [p.metadata.name for p in non_viewer_pods]\n raise exceptions.Conflict(\"Cannot delete PVC '%s' because it is being\"\n \" used by pods: %s\" % (pvc, pod_names))\n\n # For each associated viewer pod delete its parent\n for viewer_pod in viewer_pods:\n viewer = viewer_utils.get_owning_viewer(viewer_pod)\n if not viewer:\n logging.warn(\n \"Viewer pod %s/%s is missing the label value %s \"\n \"required to identify its parent\",\n namespace,\n viewer_pod.metadata.name,\n viewer_utils.VIEWER_LABEL,\n )\n delete_viewer(viewer, namespace)\n\n log.info(\"Deleting PVC %s/%s...\", namespace, pvc)\n api.delete_pvc(pvc, namespace)\n log.info(\"Successfully deleted PVC %s/%s\", namespace, pvc)\n\n return api.success_response(\"message\",\n \"PVC %s successfully deleted.\" % pvc)", "def delete_pvc(self, pvc_name, namespace=DEFAULT_NAMESPACE):\n\n cmd = \"%s -n %s\" % (KUBECTL_DELETE_PVC % (pvc_name, self.context), namespace)\n result = self.nuvoloso_helper.run_check_output(cmd)\n return result", "def delete_vpc_route(self,\n vpc_id: str,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if vpc_id is None:\n raise ValueError('vpc_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpc_route')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n 
path_param_keys = ['vpc_id', 'id']\n path_param_values = self.encode_path_vars(vpc_id, id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpcs/{vpc_id}/routes/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def describe(\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc_id = _find_vpcs(\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as err:\n boto_err = __utils__[\"boto.get_error\"](err)\n if boto_err.get(\"aws\", {}).get(\"code\") == \"InvalidVpcID.NotFound\":\n # VPC was not found: handle the error and return None.\n return {\"vpc\": None}\n return {\"error\": boto_err}\n\n if not vpc_id:\n return {\"vpc\": None}\n\n filter_parameters = {\"vpc_ids\": vpc_id}\n\n try:\n vpcs = conn.get_all_vpcs(**filter_parameters)\n except BotoServerError as err:\n return {\"error\": __utils__[\"boto.get_error\"](err)}\n\n if vpcs:\n vpc = vpcs[0] # Found!\n log.debug(\"Found VPC: %s\", vpc.id)\n\n keys = (\n \"id\",\n \"cidr_block\",\n \"is_default\",\n \"state\",\n \"tags\",\n \"dhcp_options_id\",\n \"instance_tenancy\",\n )\n _r = {k: getattr(vpc, k) for k in keys}\n _r.update({\"region\": getattr(vpc, \"region\").name})\n return {\"vpc\": _r}\n else:\n return {\"vpc\": None}", "def delete_default_vpc(client, account_id, dry_run=False):\n # Check and remove default VPC\n default_vpc_id = None\n\n # Retrying the describe_vpcs call. Sometimes the VPC service is not ready when\n # you have just created a new account.\n max_retry_seconds = 180\n while True:\n try:\n vpc_response = client.describe_vpcs()\n break\n except Exception as e:\n logger.warning(f'Could not retrieve VPCs: {e}. 
Sleeping for 1 second before trying again.')\n max_retry_seconds - 2\n sleep(2)\n if max_retry_seconds <= 0:\n raise Exception(\"Could not describe VPCs within retry limit.\")\n\n for vpc in vpc_response[\"Vpcs\"]:\n if vpc[\"IsDefault\"] is True:\n default_vpc_id = vpc[\"VpcId\"]\n break\n\n if default_vpc_id is None:\n logging.info(f\"No default VPC found in account {account_id}\")\n return\n\n logging.info(f\"Found default VPC Id {default_vpc_id}\")\n subnet_response = client.describe_subnets()\n default_subnets = [\n subnet\n for subnet in subnet_response[\"Subnets\"]\n if subnet[\"VpcId\"] == default_vpc_id\n ]\n\n logging.info(f\"Deleting default {len(default_subnets )} subnets\")\n subnet_delete_response = [\n client.delete_subnet(SubnetId=subnet[\"SubnetId\"], DryRun=dry_run)\n for subnet in default_subnets\n ]\n\n igw_response = client.describe_internet_gateways()\n try:\n default_igw = [\n igw[\"InternetGatewayId\"]\n for igw in igw_response[\"InternetGateways\"]\n for attachment in igw[\"Attachments\"]\n if attachment[\"VpcId\"] == default_vpc_id\n ][0]\n except IndexError:\n default_igw = None\n\n if default_igw:\n logging.info(f\"Detaching Internet Gateway {default_igw}\")\n detach_default_igw_response = client.detach_internet_gateway(\n InternetGatewayId=default_igw, VpcId=default_vpc_id, DryRun=dry_run\n )\n\n logging.info(f\"Deleting Internet Gateway {default_igw}\")\n delete_internet_gateway_response = client.delete_internet_gateway(\n InternetGatewayId=default_igw\n )\n\n sleep(10) # It takes a bit of time for the dependencies to clear\n logging.info(f\"Deleting Default VPC {default_vpc_id}\")\n delete_vpc_response = client.delete_vpc(VpcId=default_vpc_id, DryRun=dry_run)\n\n return delete_vpc_response", "def delete(\n hostname,\n refresh_key,\n authorization_host,\n org_id,\n sddc_id,\n type,\n dhcp_profile_id,\n verify_ssl=True,\n cert=None,\n):\n\n log.info(\"Deleting DHCP profile %s for SDDC %s\", dhcp_profile_id, sddc_id)\n profile_type = vmc_constants.DHCP_CONFIGS.format(type)\n api_url_base = vmc_request.set_base_url(hostname)\n api_url = (\n \"{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/\"\n \"policy/api/v1/infra/{profile_type}/{profile_id}\"\n )\n api_url = api_url.format(\n base_url=api_url_base,\n org_id=org_id,\n sddc_id=sddc_id,\n profile_type=profile_type,\n profile_id=dhcp_profile_id,\n )\n\n return vmc_request.call_api(\n method=vmc_constants.DELETE_REQUEST_METHOD,\n url=api_url,\n refresh_key=refresh_key,\n authorization_host=authorization_host,\n description=\"vmc_dhcp_profiles.delete\",\n responsebody_applicable=False,\n verify_ssl=verify_ssl,\n cert=cert,\n )", "def describe_vpcs(\n vpc_id=None,\n name=None,\n cidr=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n keys = (\n \"id\",\n \"cidr_block\",\n \"is_default\",\n \"state\",\n \"tags\",\n \"dhcp_options_id\",\n \"instance_tenancy\",\n )\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"filters\": {}}\n\n if vpc_id:\n filter_parameters[\"vpc_ids\"] = [vpc_id]\n\n if cidr:\n filter_parameters[\"filters\"][\"cidr\"] = cidr\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n vpcs = conn.get_all_vpcs(**filter_parameters)\n\n if vpcs:\n ret = []\n for vpc in vpcs:\n _r = {k: getattr(vpc, k) for k in keys}\n _r.update({\"region\": getattr(vpc, 
\"region\").name})\n ret.append(_r)\n return {\"vpcs\": ret}\n else:\n return {\"vpcs\": []}\n\n except BotoServerError as e:\n return {\"error\": __utils__[\"boto.get_error\"](e)}", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def delete(self):\n client = BotoClientProxy(\"cloudformation\", self.region)\n client.delete_stack(StackName=self.stack_id)", "def delete_virtualip(self, vip):\r\n return vip.delete()", "def delete_dhcp_options(\n dhcp_options_id=None,\n dhcp_options_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _delete_resource(\n resource=\"dhcp_options\",\n name=dhcp_options_name,\n resource_id=dhcp_options_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def describe_vpcs(DryRun=None, VpcIds=None, Filters=None):\n pass", "def delete(self, params=None):\n self.logger.debug('Deleting %s with parameters: %s'\n % (self.type_name, params))\n self.client.delete_bucket_policy(**params)", "def _find_vpcs(\n vpc_id=None,\n vpc_name=None,\n cidr=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if all((vpc_id, vpc_name)):\n raise SaltInvocationError(\"Only one of vpc_name or vpc_id may be provided.\")\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"filters\": {}}\n\n if vpc_id:\n filter_parameters[\"vpc_ids\"] = [vpc_id]\n\n if cidr:\n filter_parameters[\"filters\"][\"cidr\"] = cidr\n\n if vpc_name:\n filter_parameters[\"filters\"][\"tag:Name\"] = vpc_name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n vpcs = conn.get_all_vpcs(**filter_parameters)\n log.debug(\n \"The filters criteria %s matched the following VPCs:%s\", filter_parameters, vpcs\n )\n\n if vpcs:\n if not any((vpc_id, vpc_name, cidr, tags)):\n return [vpc.id for vpc in vpcs if vpc.is_default]\n else:\n return [vpc.id for vpc in vpcs]\n else:\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint.
def delete_vpc_endpoints(DryRun=None, VpcEndpointIds=None): pass
[ "def delete_vpc_endpoint_resources():\n print('Deleting VPC endpoints')\n ec2 = boto3.client('ec2')\n endpoint_ids = []\n for endpoint in ec2.describe_vpc_endpoints()['VpcEndpoints']:\n print('Deleting VPC Endpoint - {}'.format(endpoint['ServiceName']))\n endpoint_ids.append(endpoint['VpcEndpointId'])\n\n if endpoint_ids:\n ec2.delete_vpc_endpoints(\n VpcEndpointIds=endpoint_ids\n )\n\n print('Waiting for VPC endpoints to get deleted')\n while ec2.describe_vpc_endpoints()['VpcEndpoints']:\n time.sleep(5)\n\n print('VPC endpoints deleted')\n\n # VPC endpoints connections\n print('Deleting VPC endpoint connections')\n service_ids = []\n for connection in ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n service_id = connection['ServiceId']\n state = connection['VpcEndpointState']\n\n if state in ['PendingAcceptance', 'Pending', 'Available', 'Rejected', 'Failed', 'Expired']:\n print('Deleting VPC Endpoint Service - {}'.format(service_id))\n service_ids.append(service_id)\n\n ec2.reject_vpc_endpoint_connections(\n ServiceId=service_id,\n VpcEndpointIds=[\n connection['VpcEndpointId'],\n ]\n )\n\n if service_ids:\n ec2.delete_vpc_endpoint_service_configurations(\n ServiceIds=service_ids\n )\n\n print('Waiting for VPC endpoint services to be destroyed')\n while ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n time.sleep(5)\n\n print('VPC endpoint connections deleted')", "def delete_gwlbe(gwlbe_ids):\n logging.info(\"Deleting VPC Endpoint Service:\")\n try:\n response = ec2.delete_vpc_endpoints(\n VpcEndpointIds=gwlbe_ids\n )\n return response\n except ClientError as e:\n logging.error(e)\n return None", "def delete_elastic_ips():\n client = boto3.client('ec2')\n print('Deleting Elastic IPs')\n for eip in client.describe_addresses()['Addresses']:\n allocation_id = eip['AllocationId']\n print('Releasing EIP {}'.format(allocation_id))\n client.release_address(\n AllocationId=allocation_id\n )\n\n print('Elastic IPs deleted')", "def deletevpc(vpc_choices):\n progressbar(\"Deleting VPC\")\n vpcname=vpc_choices['vpc'][0]\n try:\n ec2.delete_vpc(VpcId=str(vpcname))\n print(\"\\n \\n vpc \" +vpcname +\" has been deleted \\n \\n\")\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while deleting vpc: \\n\\n\\n\")\n print(e)", "def test_endpoints_remove(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.endpoints), 9)\n svc.endpoints = filter(lambda r: r.name not in [\"zenhub\", \"mariadb\"], svc.endpoints)\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.endpoints), 7)\n for ep in svc.endpoints:\n if ep.name in [\"zenhub\", \"mariadb\"]:\n raise ValueError(\"Error removing endpoint.\")", "def delete_vpc(DryRun=None, VpcId=None):\n pass", "def modify_vpc_endpoint(DryRun=None, VpcEndpointId=None, ResetPolicy=None, PolicyDocument=None, AddRouteTableIds=None, RemoveRouteTableIds=None):\n pass", "def describe_vpc_endpoint_services(DryRun=None, MaxResults=None, NextToken=None):\n pass", "def handle_endpoint_delete(self, msg):\n logger.debug(\"deleting %s [0x%06x %s]\", msg.fabric, msg.vnid, msg.addr)\n # remove from local caches\n cache = msg.wf.cache\n key = cache.get_key_str(addr=msg.addr, vnid=msg.vnid)\n cache.rapid_cache.remove(key)\n # delete from db\n endpoint = eptEndpoint.load(fabric=msg.fabric, vnid=msg.vnid, 
addr=msg.addr)\n if endpoint.exists():\n endpoint.remove()\n else:\n logger.debug(\"endpoint not found in db, no delete occurring\")", "def del_endpoint_whitelist(user: User, endpoint_id, function_id):\n\n app.logger.info(\n f\"Deleting function {function_id} from endpoint {endpoint_id} whitelist by \"\n f\"user: {user.username}\"\n )\n\n return delete_ep_whitelist(user, endpoint_id, function_id)", "def delete_core_v1_collection_namespaced_endpoints(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_core_v1_collection_namespaced_endpoints_with_http_info(namespace, **kwargs)\n else:\n (data) = self.delete_core_v1_collection_namespaced_endpoints_with_http_info(namespace, **kwargs)\n return data", "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def delete_policy_association_for_endpoint(self, context,\n policy_id, endpoint_id):\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_endpoint(endpoint_id)\n self.endpoint_policy_api.delete_policy_association(\n policy_id, endpoint_id=endpoint_id)", "def clear_endpoint_record(project: str, endpoint_id: str):\n\n _verify_endpoint(project, endpoint_id)\n\n logger.info(\"Clearing model endpoint table\", endpoint_id=endpoint_id)\n get_v3io_client().kv.delete(\n container=config.model_endpoint_monitoring.container,\n table_path=ENDPOINTS_TABLE_PATH,\n key=endpoint_id,\n )\n logger.info(\"Model endpoint table deleted\", endpoint_id=endpoint_id)\n\n return Response(status_code=HTTPStatus.NO_CONTENT.value)", "def delete(\n vpc_id=None,\n name=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.delete: name parameter is deprecated use vpc_name instead.\"\n )\n vpc_name = name\n\n if not _exactly_one((vpc_name, vpc_id)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_name or vpc_id must be provided.\"\n )\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not vpc_id:\n vpc_id = _get_id(\n vpc_name=vpc_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_id:\n return {\n \"deleted\": False,\n \"error\": {\"message\": \"VPC {} not found\".format(vpc_name)},\n }\n\n if conn.delete_vpc(vpc_id):\n log.info(\"VPC %s was deleted.\", vpc_id)\n if vpc_name:\n _cache_id(\n vpc_name,\n resource_id=vpc_id,\n invalidate=True,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"deleted\": True}\n else:\n log.warning(\"VPC %s was not deleted.\", vpc_id)\n return {\"deleted\": False}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def vpc_stack_with_endpoints(region, request, key_name):\n\n logging.info(\"Creating VPC stack with endpoints\")\n credential = request.config.getoption(\"credential\")\n 
stack_factory = CfnStacksFactory(request.config.getoption(\"credential\"))\n\n def _create_stack(request, template, region, default_az_id, az_ids, stack_factory):\n # TODO: be able to reuse an existing VPC endpoint stack\n stack = CfnVpcStack(\n name=generate_stack_name(\"integ-tests-vpc-endpoints\", request.config.getoption(\"stackname_suffix\")),\n region=region,\n template=template.to_json(),\n default_az_id=default_az_id,\n az_ids=az_ids,\n )\n stack_factory.create_stack(stack)\n return stack\n\n # tests with VPC endpoints are not using multi-AZ\n default_az_id, default_az_name, _ = get_az_setup_for_region(region, credential)\n\n bastion_subnet = SubnetConfig(\n name=subnet_name(visibility=\"Public\", az_id=default_az_id),\n cidr=CIDR_FOR_PUBLIC_SUBNETS[0],\n map_public_ip_on_launch=True,\n has_nat_gateway=True,\n availability_zone=default_az_name,\n default_gateway=Gateways.INTERNET_GATEWAY,\n )\n\n no_internet_subnet = SubnetConfig(\n name=subnet_name(visibility=\"Private\", flavor=\"NoInternet\"),\n cidr=CIDR_FOR_PRIVATE_SUBNETS[0],\n map_public_ip_on_launch=False,\n has_nat_gateway=False,\n availability_zone=default_az_name,\n default_gateway=Gateways.NONE,\n )\n\n vpc_config = VPCConfig(\n cidr=\"192.168.0.0/17\",\n additional_cidr_blocks=[\"192.168.128.0/17\"],\n subnets=[\n bastion_subnet,\n no_internet_subnet,\n ],\n )\n\n with aws_credential_provider(region, credential):\n bastion_image_id = retrieve_latest_ami(region, \"alinux2\")\n\n template = NetworkTemplateBuilder(\n vpc_configuration=vpc_config,\n default_availability_zone=default_az_name,\n create_vpc_endpoints=True,\n bastion_key_name=key_name,\n bastion_image_id=bastion_image_id,\n region=region,\n ).build()\n\n yield _create_stack(request, template, region, default_az_id, [default_az_id], stack_factory)\n\n if not request.config.getoption(\"no_delete\"):\n stack_factory.delete_all_stacks()\n else:\n logging.warning(\"Skipping deletion of CFN VPC endpoints stack because --no-delete option is set\")", "def clear_endpoint_record(access_key: str, project: str, endpoint_id: str):\n verify_endpoint(project, endpoint_id)\n\n logger.info(\"Clearing model endpoint table\", endpoint_id=endpoint_id)\n client = get_v3io_client(endpoint=config.v3io_api)\n client.kv.delete(\n container=config.model_endpoint_monitoring.container,\n table_path=f\"{project}/{ENDPOINTS_TABLE_PATH}\",\n key=endpoint_id,\n access_key=access_key,\n )\n\n logger.info(\"Model endpoint table deleted\", endpoint_id=endpoint_id)", "def delete_internet_gateways():\n print('Deleting Internet Gateways')\n client = boto3.resource('ec2')\n for igw in client.internet_gateways.all():\n for attachment in igw.attachments:\n if 'State' in attachment and attachment['State'] == 'available':\n vpc_id = attachment['VpcId']\n print('Detaching internet gateway {} from vpc {}'.format(igw.id, vpc_id))\n igw.detach_from_vpc(\n VpcId=vpc_id\n )\n print('Deleting Internet Gateway {}'.format(igw.id))\n igw.delete()\n\n while [igw for igw in client.internet_gateways.all()]:\n time.sleep(5)\n print('Internet Gateways deleted')", "def test_delete(self):\n # delete the test endpoint\n path = self.bc.qjoin_path(\"endpoint\", self.test_ep_id)\n del_res = self.bc.delete(path)\n # validate results\n self.assertEqual(del_res[\"DATA_TYPE\"], \"result\")\n self.assertEqual(del_res[\"code\"], \"Deleted\")\n self.assertEqual(del_res[\"message\"],\n \"Endpoint deleted successfully\")\n # stop tracking asset for cleanup\n for cleanup in self.asset_cleanup:\n if \"name\" in cleanup and 
cleanup[\"name\"] == \"test_ep\":\n self.asset_cleanup.remove(cleanup)\n break\n\n # attempt to delete the test endpoint again\n with self.assertRaises(GlobusAPIError) as apiErr:\n self.bc.delete(path)\n self.assertEqual(apiErr.exception.http_status, 404)\n self.assertEqual(apiErr.exception.code, \"EndpointNotFound\")\n\n # send nonsense delete\n with self.assertRaises(GlobusAPIError) as apiErr:\n self.bc.delete(\"nonsense_path\")\n self.assertEqual(apiErr.exception.http_status, 404)\n self.assertEqual(apiErr.exception.code, \"ClientError.NotFound\")\n\n # send delete to endpoint w/o id (post resource)\n with self.assertRaises(GlobusAPIError) as apiErr:\n self.bc.delete(\"endpoint\")\n self.assertEqual(apiErr.exception.http_status, 405)\n self.assertEqual(apiErr.exception.code, \"ClientError.BadMethod\")", "def delete(self, endpoint):\n if self.host not in endpoint:\n endpoint = self.host + str(endpoint)\n response = self.session.delete(endpoint)\n response.raise_for_status()\n\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the peer VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state.
def delete_vpc_peering_connection(DryRun=None, VpcPeeringConnectionId=None): pass
[ "def delete_vpc_peering_connection(\n conn_id=None,\n conn_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n dry_run=False,\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n if conn_name:\n conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)\n if not conn_id:\n raise SaltInvocationError(\n \"Couldn't resolve VPC peering connection {} to an ID\".format(conn_name)\n )\n try:\n log.debug(\"Trying to delete vpc peering connection\")\n conn.delete_vpc_peering_connection(\n DryRun=dry_run, VpcPeeringConnectionId=conn_id\n )\n return {\"msg\": \"VPC peering connection deleted.\"}\n except botocore.exceptions.ClientError as err:\n e = __utils__[\"boto.get_error\"](err)\n log.error(\"Failed to delete VPC peering %s: %s\", conn_name or conn_id, e)\n return {\"error\": e}", "def delete_vpc(DryRun=None, VpcId=None):\n pass", "def request_vpc_peering_connection(\n requester_vpc_id=None,\n requester_vpc_name=None,\n peer_vpc_id=None,\n peer_vpc_name=None,\n name=None,\n peer_owner_id=None,\n peer_region=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n dry_run=False,\n):\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if name and _vpc_peering_conn_id_for_name(name, conn):\n raise SaltInvocationError(\n \"A VPC peering connection with this name already \"\n \"exists! Please specify a different name.\"\n )\n\n if not _exactly_one((requester_vpc_id, requester_vpc_name)):\n raise SaltInvocationError(\n \"Exactly one of requester_vpc_id or requester_vpc_name is required\"\n )\n if not _exactly_one((peer_vpc_id, peer_vpc_name)):\n raise SaltInvocationError(\n \"Exactly one of peer_vpc_id or peer_vpc_name is required.\"\n )\n\n if requester_vpc_name:\n requester_vpc_id = _get_id(\n vpc_name=requester_vpc_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not requester_vpc_id:\n return {\n \"error\": \"Could not resolve VPC name {} to an ID\".format(\n requester_vpc_name\n )\n }\n if peer_vpc_name:\n peer_vpc_id = _get_id(\n vpc_name=peer_vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not peer_vpc_id:\n return {\n \"error\": \"Could not resolve VPC name {} to an ID\".format(peer_vpc_name)\n }\n\n peering_params = {\n \"VpcId\": requester_vpc_id,\n \"PeerVpcId\": peer_vpc_id,\n \"DryRun\": dry_run,\n }\n\n if peer_owner_id:\n peering_params.update({\"PeerOwnerId\": peer_owner_id})\n if peer_region:\n peering_params.update({\"PeerRegion\": peer_region})\n\n try:\n log.debug(\"Trying to request vpc peering connection\")\n if not peer_owner_id:\n vpc_peering = conn.create_vpc_peering_connection(**peering_params)\n else:\n vpc_peering = conn.create_vpc_peering_connection(**peering_params)\n peering = vpc_peering.get(\"VpcPeeringConnection\", {})\n peering_conn_id = peering.get(\"VpcPeeringConnectionId\", \"ERROR\")\n msg = \"VPC peering {} requested.\".format(peering_conn_id)\n log.debug(msg)\n\n if name:\n log.debug(\"Adding name tag to vpc peering connection\")\n conn.create_tags(\n Resources=[peering_conn_id], Tags=[{\"Key\": \"Name\", \"Value\": name}]\n )\n log.debug(\"Applied name tag to vpc peering connection\")\n msg += \" With name {}.\".format(name)\n\n return {\"msg\": msg}\n except botocore.exceptions.ClientError as err:\n log.error(\"Got an error while trying to request vpc peering\")\n return {\"error\": 
__utils__[\"boto.get_error\"](err)}", "def delete_vpn_connection(DryRun=None, VpnConnectionId=None):\n pass", "def deletevpc(vpc_choices):\n progressbar(\"Deleting VPC\")\n vpcname=vpc_choices['vpc'][0]\n try:\n ec2.delete_vpc(VpcId=str(vpcname))\n print(\"\\n \\n vpc \" +vpcname +\" has been deleted \\n \\n\")\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while deleting vpc: \\n\\n\\n\")\n print(e)", "def delete_peer(self, name, peer_type=\"REPLICATION\"):\n params = self._get_peer_type_param(peer_type)\n return self._delete(\"peers/\" + name, ApiCmPeer, params=params, api_version=3)", "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def accept_vpc_peering_connection( # pylint: disable=too-many-arguments\n conn_id=\"\", name=\"\", region=None, key=None, keyid=None, profile=None, dry_run=False\n):\n if not _exactly_one((conn_id, name)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_peering_connection_id or name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n\n if name:\n conn_id = _vpc_peering_conn_id_for_name(name, conn)\n if not conn_id:\n raise SaltInvocationError(\n \"No ID found for this \"\n \"VPC peering connection! ({}) \"\n \"Please make sure this VPC peering \"\n \"connection exists \"\n \"or invoke this function with \"\n \"a VPC peering connection \"\n \"ID\".format(name)\n )\n try:\n log.debug(\"Trying to accept vpc peering connection\")\n conn.accept_vpc_peering_connection(\n DryRun=dry_run, VpcPeeringConnectionId=conn_id\n )\n return {\"msg\": \"VPC peering connection accepted.\"}\n except botocore.exceptions.ClientError as err:\n log.error(\"Got an error while trying to accept vpc peering\")\n return {\"error\": __utils__[\"boto.get_error\"](err)}", "def delete_vpc_endpoint_resources():\n print('Deleting VPC endpoints')\n ec2 = boto3.client('ec2')\n endpoint_ids = []\n for endpoint in ec2.describe_vpc_endpoints()['VpcEndpoints']:\n print('Deleting VPC Endpoint - {}'.format(endpoint['ServiceName']))\n endpoint_ids.append(endpoint['VpcEndpointId'])\n\n if endpoint_ids:\n ec2.delete_vpc_endpoints(\n VpcEndpointIds=endpoint_ids\n )\n\n print('Waiting for VPC endpoints to get deleted')\n while ec2.describe_vpc_endpoints()['VpcEndpoints']:\n time.sleep(5)\n\n print('VPC endpoints deleted')\n\n # VPC endpoints connections\n print('Deleting VPC endpoint connections')\n service_ids = []\n for connection in ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n service_id = connection['ServiceId']\n state = connection['VpcEndpointState']\n\n if state in ['PendingAcceptance', 'Pending', 'Available', 'Rejected', 'Failed', 'Expired']:\n print('Deleting VPC Endpoint Service - {}'.format(service_id))\n service_ids.append(service_id)\n\n ec2.reject_vpc_endpoint_connections(\n ServiceId=service_id,\n VpcEndpointIds=[\n connection['VpcEndpointId'],\n ]\n )\n\n if service_ids:\n ec2.delete_vpc_endpoint_service_configurations(\n ServiceIds=service_ids\n )\n\n print('Waiting for VPC endpoint services to be destroyed')\n while ec2.describe_vpc_endpoint_connections()['VpcEndpointConnections']:\n time.sleep(5)\n\n print('VPC endpoint connections deleted')", "def delete_connection(self, name=None, reason=None):\n path = self.end_point + \"connections/\" + name\n self._send_data(path, request_type='DELETE')", "def peering_connection_pending_from_vpc(\n conn_id=None,\n conn_name=None,\n vpc_id=None,\n vpc_name=None,\n region=None,\n 
key=None,\n keyid=None,\n profile=None,\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n if not _exactly_one((vpc_id, vpc_name)):\n raise SaltInvocationError(\"Exactly one of vpc_id or vpc_name must be provided.\")\n\n if vpc_name:\n vpc_id = check_vpc(\n vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not vpc_id:\n log.warning(\"Could not resolve VPC name %s to an ID\", vpc_name)\n return False\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n filters = [\n {\"Name\": \"requester-vpc-info.vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"status-code\", \"Values\": [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]},\n ]\n if conn_id:\n filters += [{\"Name\": \"vpc-peering-connection-id\", \"Values\": [conn_id]}]\n else:\n filters += [{\"Name\": \"tag:Name\", \"Values\": [conn_name]}]\n\n vpcs = conn.describe_vpc_peering_connections(Filters=filters).get(\n \"VpcPeeringConnections\", []\n )\n\n if not vpcs:\n return False\n elif len(vpcs) > 1:\n raise SaltInvocationError(\n \"Found more than one ID for the VPC peering \"\n \"connection ({}). Please call this function \"\n \"with an ID instead.\".format(conn_id or conn_name)\n )\n else:\n status = vpcs[0][\"Status\"][\"Code\"]\n\n return bool(status == PENDING_ACCEPTANCE)", "def delete(\n vpc_id=None,\n name=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.delete: name parameter is deprecated use vpc_name instead.\"\n )\n vpc_name = name\n\n if not _exactly_one((vpc_name, vpc_id)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_name or vpc_id must be provided.\"\n )\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not vpc_id:\n vpc_id = _get_id(\n vpc_name=vpc_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_id:\n return {\n \"deleted\": False,\n \"error\": {\"message\": \"VPC {} not found\".format(vpc_name)},\n }\n\n if conn.delete_vpc(vpc_id):\n log.info(\"VPC %s was deleted.\", vpc_id)\n if vpc_name:\n _cache_id(\n vpc_name,\n resource_id=vpc_id,\n invalidate=True,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"deleted\": True}\n else:\n log.warning(\"VPC %s was not deleted.\", vpc_id)\n return {\"deleted\": False}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_bgp_peer(self, peer, ignore_missing=True):\n self._delete(_bgp_peer.BgpPeer, peer, ignore_missing=ignore_missing)", "def describe_vpc_peering_connections(DryRun=None, VpcPeeringConnectionIds=None, Filters=None):\n pass", "def test_delete_peer(self):\n pass", "def clean_conn_peer(self):\n # Remove closed connection\n for connection in connections:\n if '[closed]' in str(connection):\n # connections.remove(connection)\n\n # Remove peer\n remove_peer_ip = '@{}'.format(connection[1][0])\n remove_peer_port = '/{}'.format(connection[1][1])\n for peer in peers_online:\n if str(remove_peer_ip) and str(remove_peer_port) in str(peer):\n peers_online.remove(peer)\n print('Peer disconnected: {}'.format(peer))\n time.sleep(0.8)\n\n # TASK 3: Broadcast peers\n # Send updated peers list to all peers\n self.broadcast_peers()", "def describe_vpc_peering_connection(\n name, region=None, key=None, keyid=None, profile=None\n):\n conn = _get_conn3(region=region, key=key, 
keyid=keyid, profile=profile)\n return {\"VPC-Peerings\": _get_peering_connection_ids(name, conn)}", "def remove_vpn_gateway_connection_peer_cidr(self,\n vpn_gateway_id: str,\n id: str,\n cidr_prefix: str,\n prefix_length: str,\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_id is None:\n raise ValueError('vpn_gateway_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n if cidr_prefix is None:\n raise ValueError('cidr_prefix must be provided')\n if prefix_length is None:\n raise ValueError('prefix_length must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='remove_vpn_gateway_connection_peer_cidr')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['vpn_gateway_id', 'id', 'cidr_prefix', 'prefix_length']\n path_param_values = self.encode_path_vars(vpn_gateway_id, id, cidr_prefix, prefix_length)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{vpn_gateway_id}/connections/{id}/peer_cidrs/{cidr_prefix}/{prefix_length}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def remove_peer(self, peer_id):\n if peer_id in self.peers:\n del self.peers[peer_id]", "def ValidateClearVpcConnector(service, args):\n if (service is None or\n not flags.FlagIsExplicitlySet(args, 'clear_vpc_connector') or\n not args.clear_vpc_connector):\n return\n\n if flags.FlagIsExplicitlySet(args, 'vpc_egress'):\n egress = args.vpc_egress\n elif container_resource.EGRESS_SETTINGS_ANNOTATION in service.template_annotations:\n egress = service.template_annotations[\n container_resource.EGRESS_SETTINGS_ANNOTATION]\n else:\n # --vpc-egress flag not specified and egress settings not set on service.\n return\n\n if (egress != container_resource.EGRESS_SETTINGS_ALL and\n egress != container_resource.EGRESS_SETTINGS_ALL_TRAFFIC):\n return\n\n if console_io.CanPrompt():\n console_io.PromptContinue(\n message='Removing the VPC connector from this service will clear the '\n 'VPC egress setting and route outbound traffic to the public internet.',\n default=False,\n cancel_on_no=True)\n else:\n raise exceptions.ConfigurationError(\n 'Cannot remove VPC connector with VPC egress set to \"{}\". Set'\n ' `--vpc-egress=private-ranges-only` or run this command '\n 'interactively and provide confirmation to continue.'.format(egress))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified VPN connection. If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.
def delete_vpn_connection(DryRun=None, VpnConnectionId=None): pass
[ "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def delete_vpn_gateway_connection(self,\n vpn_gateway_id: str,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_id is None:\n raise ValueError('vpn_gateway_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpn_gateway_connection')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['vpn_gateway_id', 'id']\n path_param_values = self.encode_path_vars(vpn_gateway_id, id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{vpn_gateway_id}/connections/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_vpc(DryRun=None, VpcId=None):\n pass", "def delete_vpc_peering_connection(\n conn_id=None,\n conn_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n dry_run=False,\n):\n if not _exactly_one((conn_id, conn_name)):\n raise SaltInvocationError(\n \"Exactly one of conn_id or conn_name must be provided.\"\n )\n\n conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n if conn_name:\n conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)\n if not conn_id:\n raise SaltInvocationError(\n \"Couldn't resolve VPC peering connection {} to an ID\".format(conn_name)\n )\n try:\n log.debug(\"Trying to delete vpc peering connection\")\n conn.delete_vpc_peering_connection(\n DryRun=dry_run, VpcPeeringConnectionId=conn_id\n )\n return {\"msg\": \"VPC peering connection deleted.\"}\n except botocore.exceptions.ClientError as err:\n e = __utils__[\"boto.get_error\"](err)\n log.error(\"Failed to delete VPC peering %s: %s\", conn_name or conn_id, e)\n return {\"error\": e}", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n api_client.deleteVpnCustomerGateway(**cmd)", "def DeleteVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n 
all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def delete(self, api_client):\n\n cmd = {'publicipid': self.publicipid}\n api_client.deleteRemoteAccessVpn(**cmd)", "def detach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass", "def delete_vpn_ipsec_site_connection(\n self, ipsec_site_connection, ignore_missing=True\n ):\n self._delete(\n _ipsec_site_connection.VpnIPSecSiteConnection,\n ipsec_site_connection,\n ignore_missing=ignore_missing,\n )", "def delete_pvc(self, pvc_name, namespace=DEFAULT_NAMESPACE):\n\n cmd = \"%s -n %s\" % (KUBECTL_DELETE_PVC % (pvc_name, self.context), namespace)\n result = self.nuvoloso_helper.run_check_output(cmd)\n return result", "def delete_nat_gateway(NatGatewayId=None):\n pass", "def delvpn():\n error = None\n if 'name' not in request.args:\n return 'Name of VPN not found in query. Desired url format: '+gethostname()+':5000/delvpn?name=&ltname&gt'\n \n else:\n cur = g.db.execute('SELECT name FROM vpns WHERE name == ?', [request.args['name']])\n if len(cur.fetchall()) == 0:\n return 'No such vpn: '+request.args['name']\n \n g.db.execute('DELETE FROM vpns WHERE name == ?', [request.args['name']])\n g.db.commit()\n return 'Deleted vpn: '+request.args['name']", "def remove_vpn_gateway_connection_peer_cidr(self,\n vpn_gateway_id: str,\n id: str,\n cidr_prefix: str,\n prefix_length: str,\n **kwargs\n ) -> DetailedResponse:\n\n if vpn_gateway_id is None:\n raise ValueError('vpn_gateway_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n if cidr_prefix is None:\n raise ValueError('cidr_prefix must be provided')\n if prefix_length is None:\n raise ValueError('prefix_length must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='remove_vpn_gateway_connection_peer_cidr')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['vpn_gateway_id', 'id', 'cidr_prefix', 'prefix_length']\n path_param_values = self.encode_path_vars(vpn_gateway_id, id, cidr_prefix, prefix_length)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{vpn_gateway_id}/connections/{id}/peer_cidrs/{cidr_prefix}/{prefix_length}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def create_vpn_connection(DryRun=None, Type=None, CustomerGatewayId=None, VpnGatewayId=None, Options=None):\n pass", "def delete_vpn_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpn_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{id}'.format(**path_param_dict)\n request = 
self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_vnet_connection_slot(\n self, resource_group_name, name, vnet_name, slot, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.delete_vnet_connection_slot.metadata['url']\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+[^\\.]$'),\n 'name': self._serialize.url(\"name\", name, 'str'),\n 'vnetName': self._serialize.url(\"vnet_name\", vnet_name, 'str'),\n 'slot': self._serialize.url(\"slot\", slot, 'str'),\n 'subscriptionId': self._serialize.url(\"self.config.subscription_id\", self.config.subscription_id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.delete(url, query_parameters, header_parameters)\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200, 404]:\n exp = CloudError(response)\n exp.request_id = response.headers.get('x-ms-request-id')\n raise exp\n\n if raw:\n client_raw_response = ClientRawResponse(None, response)\n return client_raw_response", "def delete_customer_gateway(DryRun=None, CustomerGatewayId=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.
def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None): pass
[ "def delete(self, api_client):\n\n cmd = {'id': self.id}\n api_client.deleteVpnCustomerGateway(**cmd)", "def delete_nat_gateway(NatGatewayId=None):\n pass", "async def delete_static_tunnel(self, id):\n if id not in self._static_tunnels:\n raise NETunnelServerNotFound(f'No static tunnel by id `{id}` on `{self.name}`')\n static_tunnel = self._static_tunnels.pop(id)\n await static_tunnel.stop()", "def delete_vpn_gateway(DryRun=None, VpnGatewayId=None):\n pass", "def DelAllRouterStatic(self):\n req = self.ApiGet('cmdb/router/static/')\n data = json.loads(req.text)\n for y in range(0, len(data['results'])):\n route_id = data['results'][y]['seq-num']\n return_code = self.DelRouterStaticID(route_id)\n print('del route id:', route_id, '(', return_code, ')')\n if return_code != 200: return return_code\n return 200", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def delete_route(DryRun=None, RouteTableId=None, DestinationCidrBlock=None, DestinationIpv6CidrBlock=None):\n pass", "def DeleteVpnGatewayRoutes(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGatewayRoutes\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayRoutesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "async def delete_static_tunnels(self):\n while self._static_tunnels:\n _, static_tunnel = self._static_tunnels.popitem()\n await static_tunnel.stop()", "def delete(self, api_client):\n\n cmd = {'publicipid': self.publicipid}\n api_client.deleteRemoteAccessVpn(**cmd)", "def delete_custom_route(self, purge_routes, vpc_id):\n params = {}\n results = []\n vrouter_table_id = None\n changed = False\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n \n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = 
str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n if 'route_table_id' in purge_routes:\n if 'next_hop_id' in purge_routes:\n if vrouter_table_id == purge_routes[\"route_table_id\"]: \n self.build_list_params(params, purge_routes[\"route_table_id\"], 'RouteTableId') \n fixed_dest_cidr_block = None\n if 'dest' in purge_routes:\n fixed_dest_cidr_block = purge_routes[\"dest\"]\n if 'destination_cidrblock' in purge_routes:\n fixed_dest_cidr_block = purge_routes[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n \n self.build_list_params(params, purge_routes[\"next_hop_id\"], 'NextHopId')\n\n try:\n results = self.get_status('DeleteRouteEntry', params)\n changed = True\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n changed = False\n results.append({ \"Error Message\": \"RouteTableId or VpcId does not exist\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to delete route entry\"})\n else:\n results.append({\"Error Message\": \"route_table_id is required to delete route entry\"})\n\n return changed, results", "def _destroy_tunnel(self):\n\n _destroy_interface(\"mip1\")\n\n #if self._gateway is not None:\n # # Recreating original default routing\n # _add_route(dst=\"default\", gw=self._gateway)\n # self._gateway = None\n\n # Recreating default routing\n #for ifname in self._interfaces.keys():\n # if is_address_reachable(self._interfaces[ifname]):\n # logging.info(\"Setting default route for %s interface.\", ifname)\n # _add_route(dst=\"default\", gw=self._interfaces[ifname])\n # break\n\n # Deleting static route to home agent\n #_del_route(self.home_agent+\"/32\")", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def _del_nat_rule(ctx, gateway, rule_type, original_ip, translated_ip):\n any_type = 'any'\n\n ctx.logger.info(\"Delete floating ip NAT rule: original_ip '{0}',\"\n \"translated_ip '{1}', rule type '{2}'\"\n .format(original_ip, translated_ip, rule_type))\n\n gateway.del_nat_rule(\n rule_type, original_ip, any_type, translated_ip, any_type, any_type)", "def delete_vpn_connection(DryRun=None, VpnConnectionId=None):\n pass", "def delete_customer_gateway(DryRun=None, CustomerGatewayId=None):\n pass", "def DeleteVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_vpn_ipsec_site_connection(\n self, ipsec_site_connection, 
ignore_missing=True\n ):\n self._delete(\n _ipsec_site_connection.VpnIPSecSiteConnection,\n ipsec_site_connection,\n ignore_missing=ignore_missing,\n )", "def remove_static_ipv4_route(self, remove_route):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified virtual private gateway. We recommend that before you delete a virtual private gateway, you detach it from the VPC and delete the VPN connection. Note that you don't need to delete the virtual private gateway if you plan to delete and recreate the VPN connection between your VPC and your network.
def delete_vpn_gateway(DryRun=None, VpnGatewayId=None): pass
[ "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def DeleteVpnGateway(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGateway\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete(self, api_client):\n\n cmd = {'id': self.id}\n api_client.deleteVpnCustomerGateway(**cmd)", "def delete_nat_gateway(NatGatewayId=None):\n pass", "def detach_vpn_gateway(DryRun=None, VpnGatewayId=None, VpcId=None):\n pass", "def delete_network_gateway(self, gateway_id):\n return self._delete(self.network_gateway_path % gateway_id)", "def delete_vpn_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_vpn_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/vpn_gateways/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete(self, api_client):\n\n cmd = {'publicipid': self.publicipid}\n api_client.deleteRemoteAccessVpn(**cmd)", "def delete_customer_gateway(DryRun=None, CustomerGatewayId=None):\n pass", "def delete_vpc(DryRun=None, VpcId=None):\n pass", "def delete_vpn_connection_route(VpnConnectionId=None, DestinationCidrBlock=None):\n pass", "def delete_internet_gateway(\n internet_gateway_id=None,\n internet_gateway_name=None,\n detach=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if internet_gateway_name:\n internet_gateway_id = _get_resource_id(\n \"internet_gateway\",\n internet_gateway_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not internet_gateway_id:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_name\n )\n },\n }\n\n if detach:\n igw = 
_get_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n\n if not igw:\n return {\n \"deleted\": False,\n \"error\": {\n \"message\": \"internet gateway {} does not exist.\".format(\n internet_gateway_id\n )\n },\n }\n\n if igw.attachments:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.detach_internet_gateway(\n internet_gateway_id, igw.attachments[0].vpc_id\n )\n return _delete_resource(\n \"internet_gateway\",\n resource_id=internet_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def DeleteVpnGatewayRoutes(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteVpnGatewayRoutes\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteVpnGatewayRoutesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def delete_public_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_public_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/public_gateways/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delGw(interface):\n logging.debugv(\"functions/linux.py->delGw(interface)\", [interface])\n logging.info(\"removing default gateway of device \" + interface)\n cmd = [\"ip\", \"route\", \"del\", \"default\", \"dev\", interface]\n runWrapper(cmd)", "def delete_customer_gateway(\n customer_gateway_id=None,\n customer_gateway_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _delete_resource(\n resource=\"customer_gateway\",\n name=customer_gateway_name,\n resource_id=customer_gateway_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_nat_gateway(\n nat_gateway_id,\n release_eips=False,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n wait_for_delete=False,\n wait_for_delete_retries=5,\n):\n\n try:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n 
gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n conn3.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n # wait for deleting nat gateway to finish prior to attempt to release elastic ips\n if wait_for_delete:\n for retry in range(wait_for_delete_retries, 0, -1):\n if gwinfo and gwinfo[\"State\"] not in [\"deleted\", \"failed\"]:\n time.sleep(\n (2 ** (wait_for_delete_retries - retry))\n + (random.randint(0, 1000) / 1000.0)\n )\n gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])\n if gwinfo:\n gwinfo = gwinfo.get(\"NatGateways\", [None])[0]\n continue\n break\n\n if release_eips and gwinfo:\n for addr in gwinfo.get(\"NatGatewayAddresses\"):\n conn3.release_address(AllocationId=addr.get(\"AllocationId\"))\n return {\"deleted\": True}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def delete_virtualip(self, vip):\r\n return vip.delete()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your Elastic IP addresses. An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.
def describe_addresses(DryRun=None, PublicIps=None, Filters=None, AllocationIds=None): pass
[ "def echo_ip():\n ec2conn = connect_to_region('us-west-2',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n reservations = ec2conn.get_all_instances()\n #print reservations.AWS_INSTANCE_ID\n\n instances = [i for r in reservations for i in r.instances]\n for i in instances:\n if i.id == AWS_INSTANCE_ID:\n pprint(i.ip_address)", "def elastic_ip(self) -> str:\n return pulumi.get(self, \"elastic_ip\")", "def test_ip_addresses_list(self):\n pass", "def ip_addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"ip_addresses\")", "def describe_addresses(self, xml_bytes):\n results = []\n root = XML(xml_bytes)\n for address_data in root.find(\"addressesSet\"):\n address = address_data.findtext(\"publicIp\")\n instance_id = address_data.findtext(\"instanceId\")\n results.append((address, instance_id))\n return results", "def delete_elastic_ips():\n client = boto3.client('ec2')\n print('Deleting Elastic IPs')\n for eip in client.describe_addresses()['Addresses']:\n allocation_id = eip['AllocationId']\n print('Releasing EIP {}'.format(allocation_id))\n client.release_address(\n AllocationId=allocation_id\n )\n\n print('Elastic IPs deleted')", "def test_ip_addresses_read(self):\n pass", "def _getAddresses(self, *ues):\n return [self._s1_util.get_ip(ue.ue_id) for ue in ues]", "def test_ip_addresses_create(self):\n pass", "def add_ipadress(\n self, value, tags=[], context={}, description=\"\", source=\"API\", **kwargs\n ):\n return self.__observable_add(\n value,\n type_obs=\"Ip\",\n tags=tags,\n context=context,\n description=description,\n source=source,\n **kwargs\n )", "def get_instance_ips(self, vm):\n \n return [vm._instance.private_ip_address, vm._instance.ip_address]", "def getPublicAddress(self):\n reservations = self.list_instances()\n instances = reservations[\"Reservations\"][0][\"Instances\"]\n \n ip_address = None\n for instance in instances:\n if instance[\"PublicIpAddress\"] != \"\":\n ip_address = instance[\"PublicIpAddress\"]\n break\n return ip_address", "def show_ip(): #TODO\n pass", "def test_ip_addresses_update(self):\n pass", "def ElasticIps(self, zone = None):\n if zone is None:\n zone = self.zone\n self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name)\n self.tester.sleep(10)\n for instance in self.reservation.instances:\n address = self.tester.allocate_address()\n self.assertTrue(address,'Unable to allocate address')\n self.assertTrue(self.tester.associate_address(instance, address))\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.disassociate()\n self.tester.sleep(30)\n instance.update()\n self.assertTrue( self.tester.ping(instance.public_dns_name), \"Could not ping instance with new IP\")\n address.release()\n return self.reservation", "def defineMyIpAddress(address) :\n print(\"not yet implemented\")", "def get_ip_addresses(self, task):\n return []", "def get_all_ec2_public_ip_addresses(auto_scale_group_resource_id):\n\n group = get_auto_scaling_group(auto_scale_group_resource_id)\n public_ip_addresses = []\n\n if group:\n for asg_instance in group.get('Instances'):\n instance_id = asg_instance.get('InstanceId')\n ip_address = get_public_ip_address(instance_id)\n\n public_ip_addresses.append(ip_address)\n\n return public_ip_addresses", "def addresses(ip, community, ci):\n # The table of addressing information relevant to this entity's IPv4 addresses.\n 
ipAdEntAddr = \"1.3.6.1.2.1.4.20.1.1\"\n ipAdEntNetMask = \"1.3.6.1.2.1.4.20.1.3\"\n\n ret = get_bulk(ip, ipAdEntAddr, community)\n if ret != None:\n for r in ret:\n for _, val in r:\n ip = val.prettyPrint()\n ci.add_ipv4_address(ip)\n # discovery_info.add_ip(ip)\n\n ret = get_bulk(ip, ipAdEntNetMask, community)\n if ret != None:\n for r in ret:\n for name, val in r:\n ip = name.prettyPrint()[len(\"SNMPv2-SMI::mib-2.4.20.1.3.\"):]\n mask = val.prettyPrint()\n discovery_info.add_to_network(ip, mask)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone. For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.
def describe_availability_zones(DryRun=None, ZoneNames=None, Filters=None): pass
[ "def availability_zone_list(request):\n az_manager = moganclient(request).availability_zone\n return az_manager.list()", "def get_availability_zones(region, credential):\n return [az.get(\"ZoneName\") for az in describe_availability_zones(region, credential)]", "def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"availability_zones\")", "def describe_availability_zones(region, credential):\n with aws_credential_provider(region, credential):\n client = boto3.client(\"ec2\", region_name=region)\n return client.describe_availability_zones(\n Filters=[\n {\"Name\": \"region-name\", \"Values\": [str(region)]},\n {\"Name\": \"zone-type\", \"Values\": [\"availability-zone\"]},\n ]\n ).get(\"AvailabilityZones\")", "def test_azure_service_api_availability_zone_get(self):\n pass", "def ex_list_zones(self):\r\n list_zones = []\r\n request = '/zones'\r\n response = self.connection.request(request, method='GET').object\r\n list_zones = [self._to_zone(z) for z in response['items']]\r\n return list_zones", "def get_availability_zones_for(region: str) -> List[str]:\n check_aws_region_for_invalid_characters(region)\n ec2 = boto3.client('ec2', region_name=region)\n try:\n response = ec2.describe_availability_zones(Filters=[{'Name':'region-name', 'Values': [region]}])\n return [r['ZoneName'] for r in response['AvailabilityZones']]\n except ClientError as err:\n logging.debug(err)\n return []", "def get_zone_names_list(self):\n\n\t\treturn [zone['description'] for zone in self.compute.zones().list(project=self.project).execute()['items']]", "def test_zone_list_function():\n response = zone.list()\n assert response.success\n\n payload = response.payload\n assert payload['url'] == 'https://api.cloudns.net/dns/list-zones.json'\n assert payload['params']['page'] == 1\n assert payload['params']['rows-per-page'] == 10\n assert payload['params']['search'] == ''\n assert payload['params']['group-id'] == ''", "def ListRegionFunc(self):\n return self.api.addresses.list", "def describe_availability_zones(self, xml_bytes):\n results = []\n root = XML(xml_bytes)\n for zone_data in root.find(\"availabilityZoneInfo\"):\n zone_name = zone_data.findtext(\"zoneName\")\n zone_state = zone_data.findtext(\"zoneState\")\n results.append(model.AvailabilityZone(zone_name, zone_state))\n return results", "def _get_openstack_availablity_zones(self):\n try:\n openstack_availability_zone = self.nova.availability_zones.list()\n openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone\n if zone.zoneName != 'internal']\n return openstack_availability_zone\n except Exception:\n return None", "def _get_list_zone_object(self):\n return self.rad_connection.list_objects(zonemgr.Zone())", "def print_zones(*args, **kwargs):\n # List the zones\n zones = api_call('/zones').json()\n for zone in zones:\n print(bcolors.HEADER + \"== Zone {zone[name]} [{zone[uuid]}] ==\".format(zone=zone) + bcolors.ENDC)\n\n # List associated domains\n domains = api_call(zone['domains_href']).json()\n if domains:\n print(\n \"{bcolors.MINOR}\\tDomain(s) associated with this zone:{bcolors.ENDC} {domains}\"\n .format(bcolors=bcolors, domains=', '.join([domain['fqdn'] for domain in domains]))\n )\n else:\n print(\"\\tNo domain associated with this zone.\")\n\n # Retrieve the records for this zone\n records = api_call(zone['zone_records_href']).json()\n print(\"\\t{count} {bcolors.MINOR}records in this zone:{bcolors.ENDC}\\n\".format(count=len(records), bcolors=bcolors))\n # 
Print each record\n for record in records:\n print_record(record)\n print()", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return super(Zone, cls).list(api_client.listZones(**cmd).get('zone'))", "def test_list_available_regions(self):\n subscription_client = mock.MagicMock()\n subscription_id = \"subscription ID\"\n\n result = self.subscription_service.list_available_regions(subscription_client=subscription_client,\n subscription_id=subscription_id)\n\n self.assertIsInstance(result, list)\n subscription_client.subscriptions.list_locations.assert_called_once_with(subscription_id)", "def test_vmware_service_resources_availability_zones_get(self):\n pass", "def process_zones(region_name, region_resource, zone_list):\n\n for zone in zone_list['AvailabilityZones']:\n\n spinner.update()\n\n process_instances(region_name, zone['ZoneName'], region_resource.instances.all())", "def _fetch_all_zones(self):\n query = tables.zones.select()\n return self.storage.session.execute(query).fetchall()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your bundling tasks.
def describe_bundle_tasks(DryRun=None, BundleIds=None, Filters=None): pass
[ "def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")", "def __get_tasks_info(self, **kwargs):\n self.logger.debug(\"__get_task_info function was called\")\n\n blendname = kwargs[\"blend\"]\n tasksprefix = kwargs[\"tasksprefix\"]\n release = kwargs[\"release\"]\n\n blends_dependencies = {}\n\n query=\"\"\"\n SELECT task, description, section as \"Section\", enhances as \"Enhances\", leaf as \"Leaf\",\n metapackage, test_always_lang, long_description\n FROM blends_tasks\n WHERE blend='{0}'\n \"\"\".format(blendname)\n\n for result in self.__execute_query(query):\n #result row indexes: task(0), title(1), metapackage(2), description(3), long_description(4)\n task = result[0]\n\n blends_dependencies[task] = {}\n blends_dependencies[task]['haspackages'] = 0\n\n self.logger.debug(\"Reading info about task: {0}\".format(task))\n\n #we want desc[1:] we dont want the 0 index which contains the task name\n #column[0] contains the column name(taken from cursor description)\n for i, column in enumerate(self.__get_cursor_description()[1:]):\n #results[i+1] cause we start from index 1 (desc[1:]) and not from 0\n blends_dependencies[task][column[0]] = result[i+1]\n\n #the proposed priority is extra for all Blends\n blends_dependencies[task][\"Priority\"] = \"extra\"\n\n #also initialize empty lists for the following keys:\n for key in [\"Depends\", \"Recommends\", \"Suggests\", \"Ignore\", \"Avoid\"]:\n blends_dependencies[task][key] = []\n \n \n return blends_dependencies", "def describe_conversion_tasks(DryRun=None, ConversionTaskIds=None):\n pass", "def gen_task_build_bundles(**kw):\n\n def build_bundle(output, inputs):\n env = webassets.Environment(\n 
os.path.join(kw['output_folder'], os.path.dirname(output)),\n os.path.dirname(output))\n bundle = webassets.Bundle(*inputs,\n output=os.path.basename(output))\n env.register(output, bundle)\n # This generates the file\n env[output].urls()\n\n flag = False\n for name, files in kw['theme_bundles'].items():\n output_path = os.path.join(kw['output_folder'], name)\n dname = os.path.dirname(name)\n file_dep = [os.path.join('output', dname, fname)\n for fname in files]\n task = {\n 'task_dep': ['copy_assets', 'copy_files'],\n 'file_dep': file_dep,\n 'name': name,\n 'actions': [(build_bundle, (name, files))],\n 'targets': [os.path.join(kw['output_folder'], name)],\n 'basename': 'build_bundles',\n 'uptodate': [config_changed(kw)]\n }\n flag = True\n yield utils.apply_filters(task, kw['filters'])\n if flag == False: # No page rendered, yield a dummy task\n yield {\n 'basename': 'build_bundles',\n 'name': 'None',\n 'uptodate': [True],\n 'actions': [],\n }", "def task_description(task):\r\n name = task.__name__ if hasattr(task, '__name__') else None\r\n if isinstance(task, types.MethodType):\r\n if name is not None and hasattr(task, '__self__'):\r\n return '%s from %s' % (name, task.__self__)\r\n elif isinstance(task, types.FunctionType):\r\n if name is not None:\r\n return str(name)\r\n return repr(task)", "def gradle_task_options(self) -> tuple[str, ...]:\n if self.context.fail_fast:\n return (\"--fail-fast\",)\n\n return ()", "def describe_export_tasks(ExportTaskIds=None):\n pass", "def _make_run_description(args):\n raise NotImplementedError", "def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))", "def describe_import_image_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def show(args):\n taskSettings = getTaskSettings(args)\n print(\"Task in folder `%s`:\" % args.taskpath)\n\n if 'correctSolutions' in taskSettings:\n if len(taskSettings['correctSolutions']) > 0:\n print(\"%d correctSolutions defined:\" % len(taskSettings['correctSolutions']))\n for cs in taskSettings['correctSolutions']:\n print(\" `%s`, language '%s'\" % (cs['path'], cs['language']), end=\"\")\n if 'grade' in cs:\n print(\", expected grade %d\" % cs['grade'])\n else:\n print(\"\")\n else:\n print(\"No correctSolutions defined.\")\n taskSettings.pop('correctSolutions')\n\n for comp in ['generator', 'sanitizer', 'checker']:\n if comp in taskSettings:\n print(\"%s: `%s`\" % (comp, taskSettings[comp]), end=\"\")\n if \"%sDeps\" % comp in taskSettings:\n print(\"with dependencies:\")\n for dep in taskSettings[\"%sDeps\" % comp]:\n print(\" %s\" % dep)\n taskSettings.pop(\"%sDeps\" % comp)\n else:\n print()\n taskSettings.pop(comp)\n\n if len(taskSettings.keys()) > 0:\n for k in taskSettings.keys():\n print(\"%s: %s\" % (k, taskSettings[k]))\n\n return 0", "def _print_tasks(env, tasks, mark_active=False):\n\n if env.task.active and mark_active:\n active_task = env.task.name\n else:\n active_task = None\n\n for task, options, blocks in tasks:\n # print heading\n invalid = False\n\n if task == active_task:\n method = 'success'\n else:\n if options is None and blocks is None:\n method = 'error'\n invalid = True\n\n else:\n method = 'write'\n\n opts = list(options or [])\n blks = list(blocks or [])\n\n write = getattr(env.io, method)\n write('~' * 80)\n write(' ' + task)\n write('~' * 80)\n env.io.write('')\n\n # non-block options\n if opts:\n for opt, values in opts:\n env.io.write(' {0}: {1}'.format(opt,\n ', 
'.join(str(v) for v in values)))\n env.io.write('')\n\n # block options\n if blks:\n had_options = False\n\n for block, options in blks:\n if options:\n had_options = True\n env.io.write(' {{ {0} }}'.format(block))\n\n for opt, values in options:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n if not had_options:\n blks = None\n\n if not opts and not blks:\n if invalid:\n env.io.write(' Invalid task.')\n else:\n env.io.write(' Empty task.')\n env.io.write('')", "def _describe_receipts(self, tasks):\n \n builder = []\n append = builder.append\n \n append(\"Tasks run by command {s.name}:\".format(s=self))\n for task in tasks:\n result = \"Error\" if task.receipt.error else task.receipt.task_dir\n append(\"\\t{receipt.name:<30} {result}\".format(receipt=task.receipt, \n result=result))\n return \"\\n\".join(builder)", "def tasks(**_):\n for task in filter(bool, get_all_tasks()):\n print(task)", "def get_task_desc(self):\n return self.task_desc", "def execute(self, env, args):\n\n tasks = env.task.get_list_info()\n if not tasks:\n env.io.write(\"No tasks found.\")\n\n else:\n if args.verbose:\n _print_tasks(env, tasks, mark_active=True)\n\n else:\n if env.task.active:\n active_task = env.task.name\n else:\n active_task = None\n\n for task, options, blocks in tasks:\n if task == active_task:\n env.io.success(task + ' *')\n else:\n if options is None and blocks is None:\n env.io.error(task + ' ~')\n else:\n env.io.write(task)", "def complete(\n ctx: typer.Context,\n tasks: list[str],\n project: Annotated[Optional[str], typer.Option(\"--list\")] = None,\n) -> None:\n project = project_set(project, ctx.obj[\"project\"])\n\n for t in sorted(tasks, reverse=True):\n task = TaskItem(title=\"complete_task\", parent=project, index=int(t))\n task.complete()\n\n print(\":white_check_mark: Task(s) completed.\")", "def main():\n task_init(authorization_action='runinveniogc',\n authorization_msg=\"InvenioGC Task Submission\",\n help_specific_usage=\" -l, --logs\\t\\tClean old logs.\\n\" \\\n \" -p, --tempfiles\\t\\tClean old temporary files.\\n\" \\\n \" -g, --guests\\t\\tClean expired guest user related information. [default action]\\n\" \\\n \" -b, --bibxxx\\t\\tClean unreferenced bibliographic values in bibXXx tables.\\n\" \\\n \" -c, --cache\\t\\tClean cache by removing old files.\\n\" \\\n \" -d, --documents\\tClean deleted documents and revisions older than %s days.\\n\" \\\n \" -T, --tasks\\t\\tClean the BibSched queue removing/archiving old DONE tasks.\\n\" \\\n \" -a, --all\\t\\tClean all of the above (but do not run check/optimise table options below).\\n\" \\\n \" -k, --check-tables\\tCheck DB tables to discover potential problems.\\n\" \\\n \" -o, --optimise-tables\\tOptimise DB tables to increase performance.\\n\" % CFG_DELETED_BIBDOC_MAXLIFE,\n version=__revision__,\n specific_params=(\"lpgbdacTko\", [\"logs\", \"tempfiles\", \"guests\", \"bibxxx\", \"documents\", \"all\", \"cache\", \"tasks\", \"check-tables\", \"optimise-tables\"]),\n task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,\n task_submit_check_options_fnc=task_submit_check_options,\n task_run_fnc=task_run_core)", "def do(task, subtasks):\n raise Exception('TODO IMPLEMENT ME !')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.
def describe_classic_link_instances(DryRun=None, InstanceIds=None, Filters=None, NextToken=None, MaxResults=None): pass
[ "def DescribeClassicLinkInstances(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeClassicLinkInstances\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeClassicLinkInstancesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def describe_vpc_classic_link(DryRun=None, VpcIds=None, Filters=None):\n pass", "def _show_instances(self):\n conn = ec2.connect_to_region(\n self.availability_zone,\n aws_access_key_id=self.access_key_id,\n aws_secret_access_key=self.secret_access_key,\n )\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n print reservation\n for instance in reservation.instances:\n print instance\n print '- AMI ID:', instance.image_id\n print '- Instance Type:', instance.instance_type\n print '- Availability Zone:', instance.placement", "def describe_vpc_classic_link_dns_support(VpcIds=None, MaxResults=None, NextToken=None):\n pass", "def describe_ec2_instances(ec2, ec2_filter):\r\n tmp_instances = []\r\n instances = []\r\n resp = ec2.describe_instances(Filters=ec2_filter)\r\n for res in resp['Reservations']:\r\n tmp_instances.extend(res['Instances'])\r\n while 'NextToken' in resp:\r\n resp = ec2.describe_instances(Filters=ec2_filter,\r\n NextToken=resp['NextToken'])\r\n for res in resp['Reservations']:\r\n tmp_instances.extend(res['Instances'])\r\n\r\n for inst in tmp_instances:\r\n instances.append({'InstanceId': inst['InstanceId'],\r\n 'State': inst['State'],\r\n 'BlockDeviceMappings': inst['BlockDeviceMappings'],\r\n 'AttemptCount': 0,\r\n 'Tags': inst['Tags']})\r\n return instances", "def vpc_classic_link_id(self) -> str:\n warnings.warn(\"\"\"With the retirement of EC2-Classic the vpc_classic_link_id attribute has been deprecated and will be removed in a future version.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"vpc_classic_link_id is deprecated: With the retirement of EC2-Classic the vpc_classic_link_id attribute has been deprecated and will be removed in a future version.\"\"\")\n\n return pulumi.get(self, \"vpc_classic_link_id\")", "def attach_classic_link_vpc(DryRun=None, InstanceId=None, VpcId=None, Groups=None):\n pass", "def describe(self):\n print(Controller().describe_instances())", "def list(self):\n dbaas = common.get_client()\n try:\n for instance in dbaas.instances.list():\n _pretty_print(instance._info)\n except Exception:\n print sys.exc_info()[1]", "def do_printInstances(self,args):\n parser = CommandArgumentParser(\"printInstances\")\n parser.add_argument(dest='filters',nargs='*',default=[\"*\"],help='Filter instances');\n parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses');\n parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags');\n parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details');\n parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');\n parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones');\n args = vars(parser.parse_args(args))\n \n client = AwsConnectionFactory.getEc2Client()\n\n filters = args['filters']\n addresses = args['addresses']\n tags = args['tags']\n details = args['details']\n 
availabilityZones = args['availabilityZones']\n needDescription = addresses or tags or details\n\n if args['refresh']:\n self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup])\n \n # print \"AutoScaling Group:{}\".format(self.scalingGroup)\n print \"=== Instances ===\"\n instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']\n\n instances = filter( lambda x: fnmatches(x['InstanceId'],filters),instances)\n if availabilityZones:\n instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances)\n \n index = 0\n for instance in instances:\n instance['index'] = index\n print \"* {0:3d} {1} {2} {3}\".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId'])\n description = None\n if needDescription:\n description = client.describe_instances(InstanceIds=[instance['InstanceId']])\n if addresses:\n networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces']\n number = 0\n print \" Network Interfaces:\"\n for interface in networkInterfaces:\n print \" * {0:3d} {1}\".format(number, interface['PrivateIpAddress'])\n number +=1\n if tags:\n tags = description['Reservations'][0]['Instances'][0]['Tags']\n print \" Tags:\"\n for tag in tags:\n print \" * {0} {1}\".format(tag['Key'],tag['Value'])\n if details:\n pprint(description)\n \n index += 1", "def calculate_ec2_ris(session, results, min_ri_days=30):\n ec2_conn = session.client('ec2')\n\n # check to see if account is VPC-only (affects reserved instance reporting)\n account_is_vpc_only = (\n [{'AttributeValue': 'VPC'}] == ec2_conn.describe_account_attributes(\n AttributeNames=['supported-platforms'])['AccountAttributes'][0]\n ['AttributeValues'])\n\n paginator = ec2_conn.get_paginator('describe_instances')\n page_iterator = paginator.paginate(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n\n # Loop through running EC2 instances and record their AZ, type, and\n # Instance ID or Name Tag if it exists.\n for page in page_iterator:\n for reservation in page['Reservations']:\n for instance in reservation['Instances']:\n # Ignore spot instances\n if 'SpotInstanceRequestId' not in instance:\n az = instance['Placement']['AvailabilityZone']\n instance_type = instance['InstanceType']\n # Check for 'skip reservation' tag and name tag\n found_skip_tag = False\n instance_name = None\n if 'Tags' in instance:\n for tag in instance['Tags']:\n if tag['Key'] == 'NoReservation' and len(\n tag['Value']) > 0 and tag[\n 'Value'].lower() == 'true':\n found_skip_tag = True\n if tag['Key'] == 'Name' and len(tag['Value']) > 0:\n instance_name = tag['Value']\n\n # If skip tag is not found, increment running instances\n # count and add instance name/ID\n if not found_skip_tag:\n # not in vpc\n if not instance.get('VpcId'):\n results['ec2_classic_running_instances'][(\n instance_type,\n az)] = results[\n 'ec2_classic_running_instances'].get(\n (instance_type, az), 0) + 1\n instance_ids[(instance_type, az)].append(\n instance['InstanceId'] if not instance_name\n else instance_name)\n else:\n # inside vpc\n results['ec2_vpc_running_instances'][\n (instance_type,\n az)] = results[\n 'ec2_vpc_running_instances'].get(\n (instance_type, az), 0) + 1\n instance_ids[(instance_type, az)].append(\n instance['InstanceId'] if not instance_name\n else instance_name)\n\n # Loop through active EC2 RIs and record their AZ and type.\n for reserved_instance in ec2_conn.describe_reserved_instances(\n 
Filters=[{'Name': 'state', 'Values': ['active']}])[\n 'ReservedInstances']:\n # Detect if an EC2 RI is a regional benefit RI or not\n if reserved_instance['Scope'] == 'Availability Zone':\n az = reserved_instance['AvailabilityZone']\n else:\n az = 'All'\n\n ri_expiry = calc_expiry_time(expiry=reserved_instance['End'])\n if int(ri_expiry) < int(min_ri_days):\n print \"%s\\t%s\\tExpires in [%s] days\\tDISCARD\" % (\n reserved_instance['InstanceType'],\n reserved_instance['InstanceCount'],\n ri_expiry\n )\n continue\n else:\n print \"%s\\t%s\\tExpires in [%s] days\\tKEEP\" % (\n reserved_instance['InstanceType'],\n reserved_instance['InstanceCount'],\n ri_expiry\n )\n\n instance_type = reserved_instance['InstanceType']\n # check if VPC/Classic reserved instance\n if account_is_vpc_only or 'VPC' in reserved_instance.get(\n 'ProductDescription'):\n results['ec2_vpc_reserved_instances'][(\n instance_type, az)] = results[\n 'ec2_vpc_reserved_instances'].get(\n (instance_type, az), 0) + reserved_instance['InstanceCount']\n else:\n results['ec2_classic_reserved_instances'][(\n instance_type, az)] = results[\n 'ec2_classic_reserved_instances'].get(\n (instance_type, az), 0) + reserved_instance['InstanceCount']\n\n reserve_expiry[(instance_type, az)].append(ri_expiry)\n\n return results", "def list(ctx):\r\n config = ctx.obj['config']\r\n config.validate()\r\n host = config.get_active_host()\r\n instances = host.get_instances()\r\n logger.info(\"Instances on: %s\", host.name)\r\n outputters.table([x.dump() for x in instances])", "def ec2s(session, region_name):\n ec2 = session.resource('ec2', region_name)\n instances = ec2.instances.all()\n return instances", "def getReservedInstances(verbose):\n lres = {}\n jResp = EC2C.describe_reserved_instances()\n for reserved in jResp['ReservedInstances']:\n if reserved['State'] == 'active':\n if verbose:\n lres[reserved['InstanceType']] = str(reserved['Start'])+\";\"+\\\n str(reserved['End'])+\";\"+\\\n str(reserved['InstanceCount'])+\";\"+\\\n reserved['ProductDescription']+\";\"+\\\n str(reserved['UsagePrice'])\n else:\n if re.search(\"win\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"windows\"\n elif re.search(\"red hat\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"redhat\"\n elif re.search(\"suse\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"suse\"\n else:\n os = \"linux\"\n lres[reserved['InstanceType']+\";\"+os] = str(reserved['InstanceCount'])\n return lres", "def echo_ip():\n ec2conn = connect_to_region('us-west-2',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n reservations = ec2conn.get_all_instances()\n #print reservations.AWS_INSTANCE_ID\n\n instances = [i for r in reservations for i in r.instances]\n for i in instances:\n if i.id == AWS_INSTANCE_ID:\n pprint(i.ip_address)", "def list_instances(ec2):\n\n reservations = ec2.describe_instances(\n Filters=[\n {'Name': 'tag-key', 'Values': ['backup', 'Backup']},\n ]\n )['Reservations']\n\n instances = sum(\n [\n [i for i in r['Instances']]\n for r in reservations\n ], [])\n\n return instances", "def showinstances():\n username, conn = _getbotoconn(auth_user)\n\n print \"all instances running under the %s account\" % username\n\n num_running = 0\n reservations = conn.get_all_instances()\n for reservation in reservations:\n num_running += _print_reservation(reservation)\n\n return num_running", "def ec2_list_instances(tag_key, tag_value):\n instance_list = []\n ec2 = boto3.client(\"ec2\")\n paginator = 
ec2.get_paginator(\"describe_instances\")\n page_iterator = paginator.paginate(\n Filters=[\n {\"Name\": \"tag:\" + tag_key, \"Values\": [tag_value]},\n {\n \"Name\": \"instance-state-name\",\n \"Values\": [\"pending\", \"running\", \"stopping\", \"stopped\"],\n },\n ]\n )\n\n for page in page_iterator:\n for reservation in page[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n instance_list.append(instance[\"InstanceId\"])\n return instance_list", "def _linkInstances(self):\n for (app, insts) in self.instances.items():\n edges = list(itertools.combinations(insts, 2))\n for edge in edges:\n self.edges.add(edge)\n self.weights[edge] = 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your conversion tasks. For more information, see the VM Import/Export User Guide. For information about the import manifest referenced by this API action, see VM Import Manifest.
def describe_conversion_tasks(DryRun=None, ConversionTaskIds=None): pass
[ "def describe_export_tasks(ExportTaskIds=None):\n pass", "def describe_import_image_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def plain_exporter(tasks: List[utils.Task], output: IO):\n tasks = utils.sort_tasks(tasks)\n for task in tasks:\n print(\"***\", file=output)\n print(\"task\", utils.task_id_str(task.task_id), file=output)\n print(\"label\", task.label, file=output)\n print(\"status\", str(task.status), file=output)\n if task.priority is not None:\n print(\n \"priority\",\n task.priority if utils.is_valued(task.priority) else \"none\",\n file=output,\n )\n if task.deadline is not None:\n print(\n \"deadline\",\n task.deadline.isoformat() if utils.is_valued(task.deadline) else \"none\",\n file=output,\n )\n if task.depends:\n print(\n \"depends\",\n \" \".join(utils.task_id_str(dep) for dep in sorted(task.depends)),\n file=output,\n )\n print(\"***\", file=output)\n print(task.content, end=\"\", file=output)", "def describe_import_snapshot_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def get_task_desc(self):\n return self.task_desc", "def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")", "def __tasks__(self):\n\n if self.project_level or self.pipeline_type.lower() == \"ss2\":\n return []\n\n workflow_metadata = self.__metadata__()\n return format_map.get_workflow_tasks(workflow_metadata)", "def describe_bundle_tasks(DryRun=None, BundleIds=None, Filters=None):\n pass", "def task_fields(task):\n name_field = [sg.Input(task.name, size=(18, 1))]\n other_values = (\n task.get_current_progress(),\n task.get_next_due_date(),\n task.assignee,\n 
task.length,\n \", \".join(task.linked_creatures),\n \", \".join(task.linked_plants),\n task.status.get(),\n )\n other_fields = [summary_field_format(value) for value in other_values]\n return name_field + other_fields", "def task_description(task):\r\n name = task.__name__ if hasattr(task, '__name__') else None\r\n if isinstance(task, types.MethodType):\r\n if name is not None and hasattr(task, '__self__'):\r\n return '%s from %s' % (name, task.__self__)\r\n elif isinstance(task, types.FunctionType):\r\n if name is not None:\r\n return str(name)\r\n return repr(task)", "def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"set_datetime\",\n \"harvest_notes\",\n \"s3_to_server_collection\",\n \"s3_to_server_service\",\n \"reload_electronic_notes\",\n \"slack_post_success\"\n ])", "def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))", "def convertTaskDefault(self):\n if not self.good:\n return\n\n procins = self.procins\n verbose = self.verbose\n tfPath = self.tfPath\n xmlPath = self.xmlPath\n\n if verbose == 1:\n console(f\"XML to TF converting: {ux(xmlPath)} => {ux(tfPath)}\")\n if verbose >= 0:\n console(\n f\"Processing instructions are {'treated' if procins else 'ignored'}\"\n )\n\n slotType = CHAR\n otext = {\n \"fmt:text-orig-full\": \"{ch}\",\n \"sectionFeatures\": \"folder,file\",\n \"sectionTypes\": \"folder,file\",\n }\n intFeatures = {\"empty\"}\n featureMeta = dict(\n str=dict(description=\"the text of a word\"),\n after=dict(description=\"the text after a word till the next word\"),\n empty=dict(description=\"whether a slot has been inserted in an empty element\"),\n )\n\n featureMeta[\"ch\"] = dict(description=\"the unicode character of a slot\")\n featureMeta[FOLDER] = dict(description=f\"name of source {FOLDER}\")\n featureMeta[FILE] = dict(description=f\"name of source {FILE}\")\n\n self.intFeatures = intFeatures\n self.featureMeta = featureMeta\n\n tfVersion = self.tfVersion\n xmlVersion = self.xmlVersion\n generic = self.generic\n generic[\"sourceFormat\"] = \"XML\"\n generic[\"version\"] = tfVersion\n generic[\"xmlVersion\"] = xmlVersion\n\n initTree(tfPath, fresh=True, gentle=True)\n\n cv = self.getConverter()\n\n self.good = cv.walk(\n getDirector(self),\n slotType,\n otext=otext,\n generic=generic,\n intFeatures=intFeatures,\n featureMeta=featureMeta,\n generateTf=True,\n )", "def test_mapping_task_classes():\n list_output('mapping', 'normal', COMMANDS_HEADER + \"\"\":\\n\n mapping_task\"\"\")", "def build_tasks(args):\n\n _embedder = build_embedder(args)\n\n if \"tasks\" in args:\n _strtasks = args[\"tasks\"]\n else:\n raise ValueError(\"Task(s) must be defined as arguments\")\n\n _tasks = []\n\n for t in _strtasks:\n if t == \"trofi\":\n _tasks.append(tasks.EdgeProbingTask(\"trofi\", 1, embedder=_embedder))\n elif t == \"dpr\":\n _tasks.append(tasks.EdgeProbingTask(\"dpr\", 1, embedder=_embedder))\n elif t == \"metonymy\":\n _tasks.append(tasks.EdgeProbingTask(\"metonymy\", 1, embedder=_embedder))\n elif t == \"rel\":\n _tasks.append(tasks.EdgeProbingTask(\"rel\", 19, embedder=_embedder))\n else:\n raise Exception(\"Task not recognized: \\\"{t}\\\"\")\n\n return _tasks, _embedder", "def _print_tasks(env, tasks, mark_active=False):\n\n if env.task.active and mark_active:\n active_task = env.task.name\n else:\n active_task = None\n\n for task, options, blocks in tasks:\n # print heading\n invalid = False\n\n if task == active_task:\n method = 'success'\n else:\n if options is None 
and blocks is None:\n method = 'error'\n invalid = True\n\n else:\n method = 'write'\n\n opts = list(options or [])\n blks = list(blocks or [])\n\n write = getattr(env.io, method)\n write('~' * 80)\n write(' ' + task)\n write('~' * 80)\n env.io.write('')\n\n # non-block options\n if opts:\n for opt, values in opts:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n # block options\n if blks:\n had_options = False\n\n for block, options in blks:\n if options:\n had_options = True\n env.io.write(' {{ {0} }}'.format(block))\n\n for opt, values in options:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n if not had_options:\n blks = None\n\n if not opts and not blks:\n if invalid:\n env.io.write(' Invalid task.')\n else:\n env.io.write(' Empty task.')\n env.io.write('')", "def tutorial_taskflow_api_etl():\n\n @task()\n def extract():\n data_string = '{\"1001\": 301.27, \"1002\": 433.21, \"1003\": 502.22}'\n order_data_dict = json.loads(data_string)\n return order_data_dict\n\n # Tasks may infer that they return multiple outputs by passing a dict\n # As we don't want the following task to return multiple outputs\n # we set the flag to `False`\n\n @task(multiple_outputs=False)\n def transform(order_data_dict: dict):\n total_order_value = 0\n for value in order_data_dict.values():\n total_order_value += value\n return {\"total_order_value\": total_order_value}\n\n @task()\n def load(total_order_value: float):\n print(\"Total order value is: %.2f\" % total_order_value)\n\n # Here we define the main flow of the DAG. We invoke the previous tasks in\n # the logical order that we want the DAG to execute. The dependencies\n # between tasks and the data exchanged are all handled by Airflow.\n # This is because each of the tasks may\n # run in different workers on different nodes on the network/cluster.\n order_data = extract()\n order_summary = transform(order_data)\n load(order_summary[\"total_order_value\"])", "def convert_new_things_lib(self, tasks):\n for task in tasks:\n task[\"context\"] = (\n task.get(\"project_title\")\n or task.get(\"area_title\")\n or task.get(\"heading_title\")\n )\n task[\"context_uuid\"] = (\n task.get(\"project\") or task.get(\"area\") or task.get(\"heading\")\n )\n task[\"due\"] = task.get(\"deadline\")\n task[\"started\"] = task.get(\"start_date\")\n task[\"size\"] = things.projects(\n task[\"uuid\"], count_only=True, filepath=self.database\n )\n tasks.sort(key=lambda task: task[\"title\"] or \"\", reverse=False)\n tasks = self.anonymize_tasks(tasks)\n return tasks", "def show(args):\n taskSettings = getTaskSettings(args)\n print(\"Task in folder `%s`:\" % args.taskpath)\n\n if 'correctSolutions' in taskSettings:\n if len(taskSettings['correctSolutions']) > 0:\n print(\"%d correctSolutions defined:\" % len(taskSettings['correctSolutions']))\n for cs in taskSettings['correctSolutions']:\n print(\" `%s`, language '%s'\" % (cs['path'], cs['language']), end=\"\")\n if 'grade' in cs:\n print(\", expected grade %d\" % cs['grade'])\n else:\n print(\"\")\n else:\n print(\"No correctSolutions defined.\")\n taskSettings.pop('correctSolutions')\n\n for comp in ['generator', 'sanitizer', 'checker']:\n if comp in taskSettings:\n print(\"%s: `%s`\" % (comp, taskSettings[comp]), end=\"\")\n if \"%sDeps\" % comp in taskSettings:\n print(\"with dependencies:\")\n for dep in taskSettings[\"%sDeps\" % comp]:\n print(\" %s\" % dep)\n taskSettings.pop(\"%sDeps\" % comp)\n else:\n print()\n 
taskSettings.pop(comp)\n\n if len(taskSettings.keys()) > 0:\n for k in taskSettings.keys():\n print(\"%s: %s\" % (k, taskSettings[k]))\n\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your VPN customer gateways. For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.
def describe_customer_gateways(DryRun=None, CustomerGatewayIds=None, Filters=None): pass
[ "def describe_vpn_gateways(DryRun=None, VpnGatewayIds=None, Filters=None):\n pass", "def describe_internet_gateways(DryRun=None, InternetGatewayIds=None, Filters=None):\n pass", "def delete_virtual_gateways():\n client = boto3.client('ec2')\n print('Deleting VPN Gateways')\n gw_resp = client.describe_vpn_gateways()\n while True:\n for gateway in gw_resp['VpnGateways']:\n gw_id = gateway['VpnGatewayId']\n gw_attachments = gateway['VpcAttachments']\n for attachment in gw_attachments:\n if attachment['State'] == 'attached':\n vpc_id = attachment['VpcId']\n print('Detaching virtual gateway {} from vpc {}'.format(gw_id, vpc_id))\n client.detach_vpn_gateway(\n VpcId=vpc_id,\n VpnGatewayId=gw_id\n )\n print('Deleting VPN gateway {}'.format(gw_id))\n client.delete_vpn_gateway(\n VpnGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_vpn_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_vpn_gateways()['VpnGateways']:\n all_deleted = True\n for gateway in client.describe_vpn_gateways()['VpnGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('VPN Gateways deleted')", "def listVpnGateway(cls, api_client, **kwargs):\n cmd = {}\n cmd.update(kwargs)\n return super(Vpn, cls).list(api_client.listVpnGateways(**cmd))", "def describe_nat_gateways(NatGatewayIds=None, Filters=None, MaxResults=None, NextToken=None):\n pass", "def create_vpn_connection(DryRun=None, Type=None, CustomerGatewayId=None, VpnGatewayId=None, Options=None):\n pass", "def describe_nat_gateways(\n nat_gateway_id=None,\n subnet_id=None,\n subnet_name=None,\n vpc_id=None,\n vpc_name=None,\n states=(\"pending\", \"available\"),\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _find_nat_gateways(\n nat_gateway_id=nat_gateway_id,\n subnet_id=subnet_id,\n subnet_name=subnet_name,\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n states=states,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def update(self, api_client, services, name, gateway, cidrlist):\n\n cmd = {'id': self.id, 'name': name, 'gateway': gateway, 'cidrlist': cidrlist}\n if \"ipsecpsk\" in services:\n cmd['ipsecpsk'] = services[\"ipsecpsk\"]\n if \"ikepolicy\" in services:\n cmd['ikepolicy'] = services[\"ikepolicy\"]\n if \"ikelifetime\" in services:\n cmd['ikelifetime'] = services[\"ikelifetime\"]\n if \"esppolicy\" in services:\n cmd['esppolicy'] = services[\"esppolicy\"]\n if \"esplifetime\" in services:\n cmd['esplifetime'] = services[\"esplifetime\"]\n if \"dpd\" in services:\n cmd['dpd'] = services[\"dpd\"]\n if \"forceencap\" in services:\n cmd['forceencap'] = services[\"forceencap\"]\n return api_client.updateVpnCustomerGateway(**cmd)", "def delete_customer_gateways():\n client = boto3.client('ec2')\n print('Deleting Customer Gateways')\n cust_resp = client.describe_customer_gateways()\n while True:\n for gateway in cust_resp['CustomerGateways']:\n gw_id = gateway['CustomerGatewayId']\n client.delete_customer_gateway(\n CustomerGatewayId=gw_id\n )\n time.sleep(0.25)\n if 'NextMarker' in cust_resp:\n cust_resp = client.describe_customer_gateways(\n Marker=cust_resp['NextMarker'],\n )\n else:\n break\n while client.describe_customer_gateways()['CustomerGateways']:\n all_deleted = True\n for gateway in client.describe_customer_gateways()['CustomerGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n print('Customer 
Gateways deleted')", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n # cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return super(VpnCustomerGateway, cls).list(api_client.listVpnCustomerGateways(**cmd).get('vpncustomergateway'))", "def DescribeVpnGateways(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeVpnGateways\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeVpnGatewaysResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def describe_vpn_connections(DryRun=None, VpnConnectionIds=None, Filters=None):\n pass", "def create_vpn_gateway(DryRun=None, Type=None, AvailabilityZone=None):\n pass", "def _find_nat_gateways(\n nat_gateway_id=None,\n subnet_id=None,\n subnet_name=None,\n vpc_id=None,\n vpc_name=None,\n states=(\"pending\", \"available\"),\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((nat_gateway_id, subnet_id, subnet_name, vpc_id, vpc_name)):\n raise SaltInvocationError(\n \"At least one of the following must be \"\n \"provided: nat_gateway_id, subnet_id, \"\n \"subnet_name, vpc_id, or vpc_name.\"\n )\n filter_parameters = {\"Filter\": []}\n\n if nat_gateway_id:\n filter_parameters[\"NatGatewayIds\"] = [nat_gateway_id]\n\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return False\n\n if subnet_id:\n filter_parameters[\"Filter\"].append({\"Name\": \"subnet-id\", \"Values\": [subnet_id]})\n\n if vpc_name:\n vpc_id = _get_resource_id(\n \"vpc\", vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not vpc_id:\n return False\n\n if vpc_id:\n filter_parameters[\"Filter\"].append({\"Name\": \"vpc-id\", \"Values\": [vpc_id]})\n\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n nat_gateways = []\n for ret in __utils__[\"boto3.paged_call\"](\n conn3.describe_nat_gateways,\n marker_flag=\"NextToken\",\n marker_arg=\"NextToken\",\n **filter_parameters\n ):\n for gw in ret.get(\"NatGateways\", []):\n if gw.get(\"State\") in states:\n nat_gateways.append(gw)\n log.debug(\n \"The filters criteria %s matched the following nat gateways: %s\",\n filter_parameters,\n nat_gateways,\n )\n\n if nat_gateways:\n return nat_gateways\n else:\n return False", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def neigh_options(config):\r\n\r\n next_hop = [\"Yes\" for k in dict.fromkeys(config) if k == \"next-hop-self\"]\r\n if not next_hop:\r\n next_hop = [\"No\"]\r\n\r\n reflector = [\"Yes\" for k in dict.fromkeys(config) if k == \"route-reflector-client\"]\r\n if not reflector:\r\n reflector = 
[\"No\"]\r\n\r\n soft_reconfig = [v for k, v in config.items() if k == \"soft-reconfiguration\"]\r\n if not soft_reconfig:\r\n soft_reconfig = [\"No\"]\r\n\r\n activate = [\"Yes\" for k in dict.fromkeys(config) if k == \"activate\"]\r\n if not reflector:\r\n activate = [\"No\"]\r\n\r\n return next_hop, reflector, soft_reconfig, activate", "async def test_multiple_gateways(caplog):\n async with Context() as context:\n await Py4JComponent(gateways={\n 'java1': {},\n 'java2': {}\n }).start(context)\n assert isinstance(context.java1, JavaGateway)\n assert isinstance(context.java2, JavaGateway)\n\n records = [record for record in caplog.records if record.name == 'asphalt.py4j.component']\n records.sort(key=lambda r: r.message)\n assert len(records) == 4\n assert records[0].message.startswith(\"Configured Py4J gateway \"\n \"(java1 / ctx.java1; address=127.0.0.1, port=\")\n assert records[1].message.startswith(\"Configured Py4J gateway \"\n \"(java2 / ctx.java2; address=127.0.0.1, port=\")\n assert records[2].message == 'Py4J gateway (java1) shut down'\n assert records[3].message == 'Py4J gateway (java2) shut down'", "def DescribeCustomerGateways(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeCustomerGateways\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeCustomerGatewaysResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def describe_vpc_peering_connections(DryRun=None, VpcPeeringConnectionIds=None, Filters=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your DHCP options sets. For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
def describe_dhcp_options(DryRun=None, DhcpOptionsIds=None, Filters=None): pass
[ "def create_dhcp_options(DryRun=None, DhcpConfigurations=None):\n pass", "def configure_dhcp():\n dhcp_config = {}\n dhcp_config_content = \"\"\"\nddns-update-style none;\ndefault-lease-time 600;\nmax-lease-time 7200;\noption domain-name-servers 84.200.69.80, 84.200.70.40;\noption domain-name \"pikube.local\";\nauthorative;\nlog-facility local7;\n\nsubnet 10.12.29.0 netmask 255.255.255.0 {\n range 10.12.29.10 10.12.29.100;\n}\n\"\"\"\n\n dhcp_config['path'] = r'/etc/dhcp/dhcpd.conf'\n dhcp_config['encoding'] = \"b64\"\n dhcp_config['content'] = base64.b64encode(\n bytes(dhcp_config_content, \"utf-8\"))\n return dhcp_config", "def DhcpOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dhcpoptions_3rvy29su3rhy2svzghjce9wdglvbnm import DhcpOptions\n\t\treturn DhcpOptions(self)", "def associate_dhcp_options(DryRun=None, DhcpOptionsId=None, VpcId=None):\n pass", "def _PopulateFromDhcpOptions(self, host, client):\n for option in client.OptionList.values():\n # DHCP Options tags\n if option.Tag == dhcp.HN:\n host['HostName'] = option.Value\n elif option.Tag == dhcp.CL:\n host['ClientID'] = option.Value\n elif option.Tag == dhcp.UC:\n host['UserClassID'] = option.Value\n elif option.Tag == dhcp.VC:\n host['VendorClassID'] = option.Value", "def create_dhcp_options(\n domain_name=None,\n domain_name_servers=None,\n ntp_servers=None,\n netbios_name_servers=None,\n netbios_node_type=None,\n dhcp_options_name=None,\n tags=None,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n if vpc_id or vpc_name:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {\n \"created\": False,\n \"error\": {\n \"message\": \"VPC {} does not exist.\".format(vpc_name or vpc_id)\n },\n }\n\n r = _create_resource(\n \"dhcp_options\",\n name=dhcp_options_name,\n domain_name=domain_name,\n domain_name_servers=domain_name_servers,\n ntp_servers=ntp_servers,\n netbios_name_servers=netbios_name_servers,\n netbios_node_type=netbios_node_type,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if r.get(\"created\") and vpc_id:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n conn.associate_dhcp_options(r[\"id\"], vpc_id)\n log.info(\"Associated options %s to VPC %s\", r[\"id\"], vpc_name or vpc_id)\n return r\n except BotoServerError as e:\n return {\"created\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def getOptionDescriptions(self) -> List[unicode]:\n ...", "def optionDisplay():\n print(\"Options :\")\n print(\"[-All] : to start all the Microservices listed below\")\n print(\"[-B] : to start the Bluetooth Low Energy MicroService\")\n print(\"[-C] : to start the Cloud MicroService\")\n print(\"[-G] : to start the GPS MicroService\")\n print(\"[-h] : to display the usage\")\n print(\"[-S] : to start the Sensor Data gathering MicroService\")\n print(\"[-t] : to start the traffic MicroService\")\n print(\"[-T] : to start the Translator MicroService this takes some times\")\n print(\"[-W] : to start the Weather MicroService\")", "def showOptions ( w, what ):\n print \"@@@ options for %s:\" % what\n id = w.winfo_id()\n path = w.winfo_pathname(id)\n print ( \"@@@ name=<%s> .winfo_name=<%s> .winfo_pathname=<%s>\" %\n (str(w), w.winfo_name(), path) )\n print \"@@@ nametowidget(path)=\", w.nametowidget(path)\n#### #--This commented-out part shows the 5- and 2-tuples, but all\n#### #--the user probably cares about is the value.\n#### D = 
w.config()\n#### keyList = D.keys()\n#### keyList.sort()\n#### for key in keyList:\n#### print \"@@@ [%s]: <%s>\" % (key, D[key]) \n print \"@@@ \",\n for optName in w.keys():\n print \"%s=<%s>\" % (optName, `w.cget(optName)`),\n print", "def options(self, section: str) -> List[str]:", "def set_dhcp_server(self, config):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM, 2)\n if not self.s.is_element_present(self.info['loc_cfg_system_dhcps_fieldset']):\n raise Exception('The DHCP Server configurate field is invisible')\n\n else:\n enable_checkbox = self.info['loc_cfg_system_dhcps_enable_checkbox']\n enable_server = False if not config.has_key('enable') else config['enable']\n\n if not enable_server:\n # Uncheck the enable DHCPs server checkbox\n if self.s.is_checked(enable_checkbox):\n self.s.click_and_wait(enable_checkbox)\n\n # Click apply button\n self.s.click_and_wait(self.info['loc_cfg_system_dhcps_apply_button'], 3)\n\n return\n\n # Check the enable DHCPs server checkbox\n if not self.s.is_checked(enable_checkbox):\n self.s.click_and_wait(enable_checkbox)\n\n # Set starting ip value\n if config.has_key('start_ip'):\n self.s.type_text(self.info['loc_cfg_system_dhcps_starting_ip_textbox'], config['start_ip'])\n\n # Set ip range value\n if config.has_key('number_ip'):\n self.s.type_text(self.info['loc_cfg_system_dhcps_number_ip_textbox'], str(config['number_ip']))\n\n # Set lease time value\n if config.has_key('leasetime'):\n self.s.select_option(self.info['loc_cfg_system_dhcps_leasetime_options'], config['leasetime'])\n\n # Click 'Cancel' on the confirmation dialog to ZD do nothing if not ZD will auto correct the setting value.\n self.s.choose_cancel_on_next_confirmation()\n # Click apply button\n self.s.click_and_wait(self.info['loc_cfg_system_dhcps_apply_button'], 3)\n msg = ''\n # The ZD will be genarate an alert or an confirm dialog if there are any invalid or wrong setting value is setted.\n # Get any exist alert message\n if self.s.is_alert_present(5):\n msg = self.s.get_alert()\n\n # Get any confirmation message\n elif self.s.is_confirmation_present(5):\n msg = self.s.get_confirmation()\n\n if msg:\n raise Exception(msg)", "def delete_dhcp_options(DryRun=None, DhcpOptionsId=None):\n pass", "def test_get_options_expirations(self):\n pass", "def _vmware_dhcp_ip_config(self, args: parser_extensions.Namespace):\n kwargs = {\n 'enabled': flags.Get(args, 'enable_dhcp'),\n }\n if flags.IsSet(kwargs):\n return messages.VmwareDhcpIpConfig(**kwargs)\n return None", "def enable_dhcp(self, ip_host_num):\n return [\"ip-host %s dhcp-enable true ping-response true traceroute-response true\" % ip_host_num]", "def init_set(new_set, **set_options):\n _ipset('create', new_set, 'hash:ip',\n # Below expands to a list of k, v, one after the other\n *[str(i) for item in set_options.items() for i in item])\n flush_set(new_set)", "def test_get_options(self):\n pass", "def option_maker(self):\n pass", "def get_dhcp_server_info(self):\n server_info = {}\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM, 2)\n if not self.s.is_visible(self.info['loc_cfg_system_dhcps_fieldset']):\n logging.info('The DHCP Server configuration field is invisible')\n\n else:\n server_info['enable'] = self.s.is_checked(self.info['loc_cfg_system_dhcps_enable_checkbox'])\n if server_info['enable']:\n server_info['start_ip'] = self.s.get_value(self.info['loc_cfg_system_dhcps_starting_ip_textbox'])\n server_info['number_ip'] = self.s.get_value(self.info['loc_cfg_system_dhcps_number_ip_textbox'])\n 
server_info['leasetime'] = self.s.get_selected_label(self.info['loc_cfg_system_dhcps_leasetime_options'])\n\n return server_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your egress-only Internet gateways.
def describe_egress_only_internet_gateways(DryRun=None, EgressOnlyInternetGatewayIds=None, MaxResults=None, NextToken=None): pass
[ "def describe_internet_gateways(DryRun=None, InternetGatewayIds=None, Filters=None):\n pass", "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "def create_egress_only_internet_gateway(DryRun=None, VpcId=None, ClientToken=None):\n pass", "def describe_vpn_gateways(DryRun=None, VpnGatewayIds=None, Filters=None):\n pass", "def describe_customer_gateways(DryRun=None, CustomerGatewayIds=None, Filters=None):\n pass", "def delete_internet_gateways():\n print('Deleting Internet Gateways')\n client = boto3.resource('ec2')\n for igw in client.internet_gateways.all():\n for attachment in igw.attachments:\n if 'State' in attachment and attachment['State'] == 'available':\n vpc_id = attachment['VpcId']\n print('Detaching internet gateway {} from vpc {}'.format(igw.id, vpc_id))\n igw.detach_from_vpc(\n VpcId=vpc_id\n )\n print('Deleting Internet Gateway {}'.format(igw.id))\n igw.delete()\n\n while [igw for igw in client.internet_gateways.all()]:\n time.sleep(5)\n print('Internet Gateways deleted')", "def describe_nat_gateways(NatGatewayIds=None, Filters=None, MaxResults=None, NextToken=None):\n pass", "def describe_addresses(DryRun=None, PublicIps=None, Filters=None, AllocationIds=None):\n pass", "def egress(self) -> 'outputs.EgressResponse':\n return pulumi.get(self, \"egress\")", "def AdvertiseEvpnRoutesForOtherVtep(self):\n return self._get_attribute('advertiseEvpnRoutesForOtherVtep')", "def egress_configuration(self) -> Optional['outputs.ServiceNetworkConfigurationEgressConfiguration']:\n return pulumi.get(self, \"egress_configuration\")", "def describe_nat_gateways(\n nat_gateway_id=None,\n subnet_id=None,\n subnet_name=None,\n vpc_id=None,\n vpc_name=None,\n states=(\"pending\", \"available\"),\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _find_nat_gateways(\n nat_gateway_id=nat_gateway_id,\n subnet_id=subnet_id,\n subnet_name=subnet_name,\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n states=states,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def describe_network_interfaces(DryRun=None, NetworkInterfaceIds=None, Filters=None):\n pass", "def allowInternetConnection(network, bridge):\n\n cmds = []\n cmds.append('ip -4 route add dev {} {} proto static'.format(bridge, network))\n cmds.append(\n 'iptables -A FORWARD -o {} -t filter -m comment --comment \"generated for Distrinet Admin Network\" -j ACCEPT'.format(\n bridge))\n cmds.append(\n 'iptables -A FORWARD -i {} -t filter -m comment --comment \"generated for Distrinet Admin Network\" -j ACCEPT'.format(\n bridge))\n cmds.append(\n 'iptables -A POSTROUTING -t nat -m comment --comment \"generated for Distrinet Admin Network\" -s {} ! 
-d {} -j MASQUERADE'.format(\n network, network))\n cmds.append('sysctl -w net.ipv4.ip_forward=1')\n return cmds", "def rest_api_gateways(self):\n return self._rest_api_gateways", "def EthernetOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ethernetoptions_9su3rhy2svzxrozxjuzxrpchrpb25z import EthernetOptions\n\t\treturn EthernetOptions(self)", "def describe_vpc_endpoint_services(DryRun=None, MaxResults=None, NextToken=None):\n pass", "def AddEgressSettingsFlag(parser):\n parser.add_argument(\n '--vpc-egress',\n help=(\n 'The outbound traffic to send through the VPC connector'\n ' for this resource. This resource must have a VPC connector to set'\n ' VPC egress.'\n ),\n choices={\n container_resource.EGRESS_SETTINGS_PRIVATE_RANGES_ONLY: (\n 'Default option. Sends outbound traffic to private IP addresses '\n 'defined by RFC1918 through the VPC connector.'\n ),\n container_resource.EGRESS_SETTINGS_ALL_TRAFFIC: (\n 'Sends all outbound traffic through the VPC connector.'\n ),\n container_resource.EGRESS_SETTINGS_ALL: (\n '(DEPRECATED) Sends all outbound traffic through the VPC '\n \"connector. Provides the same functionality as '{all_traffic}'.\"\n \" Prefer to use '{all_traffic}' instead.\".format(\n all_traffic=container_resource.EGRESS_SETTINGS_ALL_TRAFFIC\n )\n ),\n },\n )", "def service_resource(self):\n\n return self.gce_project.service.networks()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your export tasks.
def describe_export_tasks(ExportTaskIds=None): pass
[ "def plain_exporter(tasks: List[utils.Task], output: IO):\n tasks = utils.sort_tasks(tasks)\n for task in tasks:\n print(\"***\", file=output)\n print(\"task\", utils.task_id_str(task.task_id), file=output)\n print(\"label\", task.label, file=output)\n print(\"status\", str(task.status), file=output)\n if task.priority is not None:\n print(\n \"priority\",\n task.priority if utils.is_valued(task.priority) else \"none\",\n file=output,\n )\n if task.deadline is not None:\n print(\n \"deadline\",\n task.deadline.isoformat() if utils.is_valued(task.deadline) else \"none\",\n file=output,\n )\n if task.depends:\n print(\n \"depends\",\n \" \".join(utils.task_id_str(dep) for dep in sorted(task.depends)),\n file=output,\n )\n print(\"***\", file=output)\n print(task.content, end=\"\", file=output)", "def describe_conversion_tasks(DryRun=None, ConversionTaskIds=None):\n pass", "def describe_import_snapshot_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def describe_import_image_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def export_task(self, img, cont):\r\n return self._tasks_manager.create(\"export\", img=img, cont=cont)", "def describe_bundle_tasks(DryRun=None, BundleIds=None, Filters=None):\n pass", "def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")", "def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))", "def tasks(**_):\n for task in filter(bool, get_all_tasks()):\n print(task)", "def task_description(task):\r\n name = task.__name__ if hasattr(task, '__name__') else None\r\n if 
isinstance(task, types.MethodType):\r\n if name is not None and hasattr(task, '__self__'):\r\n return '%s from %s' % (name, task.__self__)\r\n elif isinstance(task, types.FunctionType):\r\n if name is not None:\r\n return str(name)\r\n return repr(task)", "def _print_tasks(env, tasks, mark_active=False):\n\n if env.task.active and mark_active:\n active_task = env.task.name\n else:\n active_task = None\n\n for task, options, blocks in tasks:\n # print heading\n invalid = False\n\n if task == active_task:\n method = 'success'\n else:\n if options is None and blocks is None:\n method = 'error'\n invalid = True\n\n else:\n method = 'write'\n\n opts = list(options or [])\n blks = list(blocks or [])\n\n write = getattr(env.io, method)\n write('~' * 80)\n write(' ' + task)\n write('~' * 80)\n env.io.write('')\n\n # non-block options\n if opts:\n for opt, values in opts:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n # block options\n if blks:\n had_options = False\n\n for block, options in blks:\n if options:\n had_options = True\n env.io.write(' {{ {0} }}'.format(block))\n\n for opt, values in options:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n if not had_options:\n blks = None\n\n if not opts and not blks:\n if invalid:\n env.io.write(' Invalid task.')\n else:\n env.io.write(' Empty task.')\n env.io.write('')", "def task_report(self, topics):\n topic_action_target = {\n \"size\": (self.write_size, self.target_size_path),\n \"mtime\": (self.write_mtime, self.target_mtime_path),\n \"alldata\": (self.write_alldata, self.target_alldata_path),\n }\n return {\n \"name\": self.source.name,\n \"file_dep\": [self.source],\n \"actions\": [self.ensure_target_dir] +\n [topic_action_target[topic][0] for topic in topics],\n \"targets\": [topic_action_target[topic][1] for topic in topics],\n \"clean\": [clean_targets, self.remove_target_dir],\n }", "def writetasks(self,tasks,\n do_more='no',minid=1,do_skipids='no'):\n fn= 'tasks_skipid_%s_more_%s_minid_%s.txt' % \\\n (do_skipids,do_more,str(minid))\n writelist(tasks, fn)", "def get_task_desc(self):\n return self.task_desc", "def task_fields(task):\n name_field = [sg.Input(task.name, size=(18, 1))]\n other_values = (\n task.get_current_progress(),\n task.get_next_due_date(),\n task.assignee,\n task.length,\n \", \".join(task.linked_creatures),\n \", \".join(task.linked_plants),\n task.status.get(),\n )\n other_fields = [summary_field_format(value) for value in other_values]\n return name_field + other_fields", "def writeTasksToFile(tasks, outputFile):\n with open(outputFile, \"w\") as file:\n for task in tasks:\n file.write(\"{} {} {} {}\\n\".format(task[0], task[1], task[2], task[3]))", "def create_instance_export_task(Description=None, InstanceId=None, TargetEnvironment=None, ExportToS3Task=None):\n pass", "def _describe_receipts(self, tasks):\n \n builder = []\n append = builder.append\n \n append(\"Tasks run by command {s.name}:\".format(s=self))\n for task in tasks:\n result = \"Error\" if task.receipt.error else task.receipt.task_dir\n append(\"\\t{receipt.name:<30} {result}\".format(receipt=task.receipt, \n result=result))\n return \"\\n\".join(builder)", "def export(cls, all_stages=True):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more flow logs. To view the information in your flow logs (the log streams for the network interfaces), you must use the CloudWatch Logs console or the CloudWatch Logs API.
def describe_flow_logs(FlowLogIds=None, Filters=None, NextToken=None, MaxResults=None): pass
[ "def DescribeFlowLogs(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeFlowLogs\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeFlowLogsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def flow_action_log(\n action_id: str = typer.Argument(...),\n flow_id: str = typer.Option(\n ...,\n help=\"The ID for the Flow which triggered the Action.\",\n prompt=True,\n ),\n flow_scope: str = typer.Option(\n None,\n help=\"The scope this Flow uses to authenticate requests.\",\n callback=url_validator_callback,\n ),\n reverse: bool = typer.Option(\n # Defaulting to any boolean value will reverse output - so we use None\n None,\n \"--reverse\",\n help=\"Display logs starting from most recent and proceeding in reverse chronological order\",\n ),\n limit: int = typer.Option(\n None,\n help=\"Set a maximum number of events from the log to return\",\n min=0,\n max=100,\n ),\n marker: str = typer.Option(\n None,\n \"--marker\",\n \"-m\",\n help=\"A pagination token for iterating through returned data.\",\n ),\n per_page: int = typer.Option(\n None,\n \"--per-page\",\n \"-p\",\n help=\"The page size to return. Only valid when used without providing a marker.\",\n min=1,\n max=50,\n ),\n output_format: FlowDisplayFormat = typer.Option(\n FlowDisplayFormat.json,\n \"--format\",\n \"-f\",\n help=\"Output display format.\",\n case_sensitive=False,\n show_default=True,\n ),\n flows_endpoint: str = typer.Option(\n PROD_FLOWS_BASE_URL,\n hidden=True,\n callback=flows_endpoint_envvar_callback,\n ),\n verbose: bool = verbosity_option,\n):\n fc = create_flows_client(CLIENT_ID, flows_endpoint)\n resp = fc.flow_action_log(\n flow_id, flow_scope, action_id, limit, reverse, marker, per_page\n )\n\n if verbose:\n display_http_details(resp)\n\n if output_format in (FlowDisplayFormat.json, FlowDisplayFormat.yaml):\n _format_and_display_flow(resp, output_format, verbose)\n elif output_format in (FlowDisplayFormat.graphviz, FlowDisplayFormat.image):\n flow_def_resp = fc.get_flow(flow_id)\n flow_def = flow_def_resp.data[\"definition\"]\n colors = state_colors_for_log(resp.data[\"entries\"])\n graphviz_out = graphviz_format(flow_def, colors)\n\n if output_format == FlowDisplayFormat.graphviz:\n typer.echo(graphviz_out.source)\n else:\n graphviz_out.render(\"flows-output/graph\", view=True, cleanup=True)", "def DescribeFlowLog(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeFlowLog\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeFlowLogResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_flow_logs(ResourceIds=None, ResourceType=None, TrafficType=None, LogGroupName=None, DeliverLogsPermissionArn=None, ClientToken=None):\n pass", "def print_log_overview(self):\n if self.src_msg and self.src_events:\n print \"*** Information extract from Source log file:\"\n print \"\\t%d events and %d log messages:\" % (len(self.src_events),\n len(self.src_msg))\n print \"\\tsimulation start: %s\" % self.src_simulation_start\n print \"\\tsimulation end: %s\" % self.src_simulation_end\n print \"\\tsimulation 
duration: %s\" % self.src_simulation_duration\n print \"\\tno bootstrap events: %d\" % len(self.src_bootstrap_events)\n print \"\\tno simulation events: %d\" % len(self.src_simulation_events)\n if self.dst_msg and self.dst_events:\n print \"*** Information extract from Destimnation log file:\"\n print \"\\t%d events and %d log messages.\" % (len(self.dst_events),\n len(self.dst_msg))\n print \"\\tsimulation start: %s\" % self.dst_simulation_start\n print \"\\tsimulation end: %s\" % self.dst_simulation_end\n print \"\\tsimulation duration: %s\" % self.dst_simulation_duration", "def _log_in_tensorboard(self, steps: int, logs: List[LogData]):\n\n for log in logs:\n if log.type == \"scalar\":\n self._summary_writer.add_scalar(log.name, log.value, steps)\n if log.type == \"image\":\n self._summary_writer.add_image(log.name, log.value, steps)", "def _print_flow_status(self, flow):\n # Attribute print\n self._print_flow_status_attribute(flow.get(\"attr\"))\n\n # Patterns print\n self._print_flow_status_patterns(flow.get(\"patterns\"))\n\n # Actions print\n self._print_flow_status_actions(flow.get(\"actions\"))", "def _print_flow_list(self, flow_list):\n print(\"ID Group Prio Attr Rule\")\n\n for flow in flow_list:\n print_data = {}\n\n try:\n print_data[\"id\"] = str(flow.get(\"rule_id\")).ljust(7)\n\n attr = flow.get(\"attr\")\n if attr is None:\n continue\n\n print_data[\"group\"] = str(attr.get(\"group\")).ljust(7)\n print_data[\"prio\"] = str(attr.get(\"priority\")).ljust(7)\n\n ingress = \"i\" if attr.get(\"ingress\") == 1 else \"-\"\n egress = \"e\" if attr.get(\"egress\") == 1 else \"-\"\n transfer = \"t\" if attr.get(\"transfer\") == 1 else \"-\"\n print_data[\"attr\"] = \"{0}{1}{2}\".format(\n ingress, egress, transfer).ljust(7)\n\n patterns = flow.get(\"patterns\")\n if patterns is None:\n continue\n\n print_data[\"rule\"] = \"\"\n for ptn in patterns:\n print_data[\"rule\"] += \"{0} \".format(\n ptn.get(\"type\").upper())\n print_data[\"rule\"] += \"=> \"\n\n actions = flow.get(\"actions\")\n if actions is None:\n continue\n\n for act in actions:\n print_data[\"rule\"] += \"{0} \".format(\n act.get(\"type\").upper())\n\n print(\"{id} {group} {prio} {attr} {rule}\".format(**print_data))\n\n except Exception as _:\n continue", "def __str__(self):\n\n def logger_desc(log):\n rv = []\n for attr in ['template']:\n rv.append(\"{}: {}\".format(attr, getattr(log, attr)))\n for handler in log.handlers:\n try:\n filename = handler.fh.name\n rv.append(\"{}: {}\".format('filename', filename))\n except:\n rv.append(\"no filename associated with handler\")\n return '\\n\\t'.join(rv)\n if self.logs is None or not len(self.logs):\n return 'No logs configured'\n log_descriptions = ['logs:']\n for name, log in self.logs.items():\n log_descriptions.append(\"{}:\\n\\t{}\".format(name, logger_desc(log)))\n return '\\n'.join(log_descriptions)", "def list_logs(self, **filters):\n uri = '/log/logs'\n return self.list_resources(uri, **filters)", "def show_log(self, log_id, **fields):\n uri = '/log/logs/%s' % log_id\n return self.show_resource(uri, **fields)", "def test_get_webhook_log_summaries(self):\n pass", "def get_dataflow_overview(log_directory, scripts):\n\n if not os.path.isdir(log_directory):\n raise NotADirectoryError(f\"{log_directory} is not a valid directory\")\n\n possible_scripts = [\"rsync_to_nas\", \"convert_and_restructure\", \"rsync_to_campus\", \"convert_on_campus\",\n \"distribute_borealis_data\"]\n\n if isinstance(scripts, str):\n scripts = [scripts]\n\n for s in scripts:\n if s not in 
possible_scripts:\n raise ValueError(f\"{s} not a valid script name\")\n\n # Search for each data flow script's logfile, and parse it for overview information\n summary_data = {}\n\n # print(scripts)\n for script in scripts:\n summary_data[script] = {}\n\n # Find all log files for script\n logs = []\n pattern = f'*{script}*.log'\n for directory, _, _, in os.walk(log_directory):\n logs.extend(glob(os.path.join(directory, pattern)))\n\n # Get the latest logfile to get most up-to-date summary info\n latest_log = max(logs, key=os.path.getmtime)\n\n # Get the last entered log entry\n latest_entry = []\n with open(latest_log) as f:\n for line in f:\n latest_entry.append(line.strip())\n if line.startswith(\"########\"):\n latest_entry = []\n\n # Iterate through latest entry and fill out summary dictionary\n transferring_files = ['array', 'dmap'] # Filetypes sent back to campus\n converting_on_campus = True # If convert_on_campus is converting for this site\n\n for index, line in enumerate(latest_entry):\n\n # Get hostname executing script\n if line.startswith(\"Executing\"):\n # Ex): \"Executing /home/radar/data_flow/borealis/rsync_to_nas on sasborealis\"\n summary_data[script]['host'] = line.split()[-1]\n # Get last execution time\n date_string = latest_entry[index + 1].split()[0:2]\n date_string = ' '.join(date_string)\n dt_format = \"%Y%m%d %H:%M:%S\"\n dt = datetime.strptime(date_string, dt_format)\n date_string = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n summary_data[script]['last_executed'] = date_string\n\n # Get git repo info\n if line.startswith((\"data_flow\", \"pyDARNio\")):\n git_repo = line.split()[0].split(':')[0] # data_flow or pyDARNio, trim off ':'\n git_info = [line.split()[-7][:-1], ' '.join(line.split()[-3:-1])]\n summary_data[script][f'{git_repo}_branch'] = git_info\n\n # Summary entries unique to transfer scripts\n if script in ['rsync_to_nas', 'rsync_to_campus', 'distribute_borealis_data']:\n if line.startswith((\"Transferring from:\", \"Distributing from:\")):\n summary_data[script]['source'] = line.split()[-1]\n\n # Summary entries unique to conversion scripts\n if script in ['convert_and_restructure', 'convert_on_campus']:\n if line.startswith(\"Conversion\"):\n # Ex): \"Conversion directory: /borealis_nfs/borealis_data\"\n summary_data[script]['data_directory'] = line.split()[-1]\n\n # Summary entries unique to rsync_to_nas\n if script == 'rsync_to_nas':\n if line.startswith(\"Transferring to\"):\n # Ex): \"Transferring to NAS: /borealis_nfs/borealis_data/daily/\"\n summary_data[script]['destination'] = line.split()[2][:-1] # Trim ':' off end of string\n\n # Summary entries unique to rsync_to_campus\n if script == 'rsync_to_campus':\n if line.startswith(\"Transferring to:\"):\n # Ex) \"Transferring to: mrcopy@128.233.224.39:/sddata/sas_data/\"\n full_destination = line.split()[-1].split(':')\n dest_addr = full_destination[0] # Ex) mrcopy@128.233.224.39\n dest_ip = dest_addr.split(\"@\")[1] # Ex) 128.233.224.39\n\n summary_data[script]['destination'] = gethostbyaddr(dest_ip)[0].split('.')[0] # Convert IP to domain name\n # summary_data[script]['destination_directory'] = full_destination[1] # Ex) /sddata/sas_data/\n\n if line.startswith(\"Not transferring any\"):\n if 'dmap' in line:\n transferring_files.remove('dmap')\n if 'array' in line:\n transferring_files.remove('array')\n\n if script == 'convert_on_campus':\n if line.startswith(\"Not converting\"):\n # Ex) \"Not converting files for sas.\"\n converting_on_campus = False\n\n if script == 'rsync_to_campus':\n 
summary_data[script]['transfer_filetype'] = transferring_files\n\n if script == 'convert_on_campus':\n summary_data[script]['is_converting'] = converting_on_campus\n\n return summary_data", "def CreateFlowLog(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateFlowLog\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateFlowLogResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def _print_flow_status_actions(self, actions):\n act_type_indent = 2\n act_fields_indent = 4\n\n try:\n print(\"Actions:\")\n\n for act in actions:\n # Type print\n self._print_key_value(act.get(\"type\"), None, act_type_indent)\n\n # Conf print\n conf = act.get(\"conf\")\n if conf is not None:\n self._print_action_conf(conf)\n\n except Exception as _:\n print(\"Error: `actions` structure of json received \"\n \"from spp-ctl is invalid\")\n return", "def delete_flow_logs(FlowLogIds=None):\n pass", "def test_get_action_step_logs(self):\n action_id, step_id = self._get_action_step_id()\n response = self.shipyard_log_retrieval_client. \\\n get_action_step_logs(action_id, step_id)\n self.assertEqual(response.response['status'], '200')\n self.assertTrue(len(response.data) > 0, 'No actions step log available')", "def print_activity_run_details(activity_run):\r\n print(\"\\n\\tActivity run details\\n\")\r\n print(\"\\tActivity run status: {}\".format(activity_run.status))\r\n if activity_run.status == 'Succeeded':\r\n print(\"\\tNumber of bytes read: {}\".format(activity_run.output['dataRead']))\r\n print(\"\\tNumber of bytes written: {}\".format(activity_run.output['dataWritten']))\r\n print(\"\\tCopy duration: {}\".format(activity_run.output['copyDuration']))\r\n else:\r\n print(\"\\tErrors: {}\".format(activity_run.error['message']))", "def print_activity_run_details(activity_run):\n now = datetime.utcnow()\n\n print(f\"{now} - Activity run status: {activity_run.status}\")\n if activity_run.status in ['Succeeded', 'InProgress']:\n print(f\"{now} - activity_run: {activity_run}\")\n else:\n print(f\"{now} - Errors: {activity_run.error}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes the Dedicated Host Reservations that are available to purchase. The results describe all the Dedicated Host Reservation offerings, including offerings that may not match the instance family and region of your Dedicated Hosts. When purchasing an offering, ensure that the instance family and region of the offering match those of the Dedicated Host(s) it will be associated with. For an overview of supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.
def describe_host_reservation_offerings(OfferingId=None, MinDuration=None, MaxDuration=None, Filters=None, MaxResults=None, NextToken=None): pass
[ "def describe_reserved_instances_offerings(DryRun=None, ReservedInstancesOfferingIds=None, InstanceType=None, AvailabilityZone=None, ProductDescription=None, Filters=None, InstanceTenancy=None, OfferingType=None, NextToken=None, MaxResults=None, IncludeMarketplace=None, MinDuration=None, MaxDuration=None, MaxInstanceCount=None, OfferingClass=None):\n pass", "def describe_host_reservations(HostReservationIdSet=None, Filters=None, MaxResults=None, NextToken=None):\n pass", "def describe_reserved_instances(DryRun=None, ReservedInstancesIds=None, Filters=None, OfferingType=None, OfferingClass=None):\n pass", "def getReservedInstances(verbose):\n lres = {}\n jResp = EC2C.describe_reserved_instances()\n for reserved in jResp['ReservedInstances']:\n if reserved['State'] == 'active':\n if verbose:\n lres[reserved['InstanceType']] = str(reserved['Start'])+\";\"+\\\n str(reserved['End'])+\";\"+\\\n str(reserved['InstanceCount'])+\";\"+\\\n reserved['ProductDescription']+\";\"+\\\n str(reserved['UsagePrice'])\n else:\n if re.search(\"win\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"windows\"\n elif re.search(\"red hat\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"redhat\"\n elif re.search(\"suse\", reserved['ProductDescription'], re.IGNORECASE):\n os = \"suse\"\n else:\n os = \"linux\"\n lres[reserved['InstanceType']+\";\"+os] = str(reserved['InstanceCount'])\n return lres", "def getReservations():\n from commands import getstatusoutput\n cmd = \"scontrol -o show reservation\"\n\n output = filter(None, getstatusoutput(cmd)[1].split(\"\\n\"))\n\n return [Slurm.Reservation(each) for each in output]", "def compute_host_available(request, start_date, end_date):\n def check_host_unavailable(reservation):\n lease_start = _parse_api_datestr(reservation['start_date'])\n lease_end = _parse_api_datestr(reservation['end_date'])\n\n if (lease_start > start_date and lease_start < end_date):\n return True\n elif (lease_end > start_date and lease_end < end_date):\n return True\n elif (lease_start < start_date and lease_end > end_date):\n return True\n else:\n return False\n\n available_hosts = [\n h for h in host_allocations_list(request)\n if (not any([check_host_unavailable(r) for r in h.reservations]) or\n not h.reservations)]\n\n return len(available_hosts)", "def optimizeReservation(verbose,region):\n print(\"WARNING: As it's not possible to get OS through AWS API, All \"\\\n \"Linux are reported as Linux (no RedHat, Suse, etc)\\n\"\\\n \"This issue will be address in a future update\\n\\n\")\n shouldReserved = {}\n dreserved = getReservedInstances(False)\n dinstances = listInstances(False)\n dflavors = getInstanceTypes(region)\n count_by_type_os = countInstanceByTypeByOS(False, dinstances)\n resp = \"\"\n for typos, nb in count_by_type_os.items():\n if typos in dreserved:\n if int(count_by_type_os[typos]) - int(dreserved[typos]) >= 0:\n count_by_type_os[typos] = int(count_by_type_os[typos]) - int(dreserved[typos])\n resp += \"Reservation fully used for \"+typos+\"\\n\"\n else:\n print(\"Reservation not fully used for \"+typos+\": \"+dreserved[typos]+\"reserved but only \"+count_by_type_os[typos]+\" instances\")\n for typos, nb in dreserved.items():\n if typos not in count_by_type_os:\n resp += \"Reservation is not used for \"+typos+\"\\n\"\n #Provide tips for better reservations\n #Begin by removing instances that have reservation\n for instanceId in list(dinstances):\n if dinstances[instanceId]['flavor'] in dreserved:\n if int(dreserved[dinstances[instanceId]['flavor']]) > 0:\n 
dreserved[dinstances[instanceId]['flavor']] -= 1\n del dinstances[instanceId]\n today = datetime.datetime.now(datetime.timezone.utc)\n months6 = today-datetime.timedelta(days=180)\n for k, v in dinstances.items():\n if v['LaunchTime'] < months6:\n try:\n shouldReserved[v['flavor']+\";\"+v['platform']] += 1\n except:\n shouldReserved[v['flavor']+\";\"+v['platform']] = 1\n resp += \"\\nBased on instances older than 6 months, you should buy following reservations:\\n\"\n saveno, savepa = 0, 0\n for k, v in shouldReserved.items():\n resp += k+\":\"+str(v)+\"\\n\"\n saveno += (float(dflavors[k]['ondemand']) - float(dflavors[k]['reserved1yno'])) * v\n savepa += (float(dflavors[k]['ondemand']) - float(dflavors[k]['reserved1ypa'])) * v\n resp += \"You can save up to \"+str(saveno)+\"$/hour with no upfront reservation\\n\"\n resp += \"You can save up to \"+str(savepa)+\"$/hour with partial upfront reservation\\n\"\n if verbose:\n resp += \"\\nInstances below doesn't have reservation:\\n\"\n for k, v in count_by_type_os.items():\n resp += k+\":\"+str(v)+\"\\n\"\n return saveno, resp", "def get_host_reservation_purchase_preview(OfferingId=None, HostIdSet=None):\n pass", "def purchase_reserved_instances_offering(DryRun=None, ReservedInstancesOfferingId=None, InstanceCount=None, LimitPrice=None):\n pass", "def purchase_host_reservation(OfferingId=None, HostIdSet=None, LimitPrice=None, CurrencyCode=None, ClientToken=None):\n pass", "def get_listing():\n\n ec2 = boto3.client('ec2')\n listing = []\n\n try:\n full_listing = ec2.describe_instances(\n Filters=[\n {\n 'Name': 'instance-state-name',\n 'Values': [ 'running' ]\n }\n ],\n MaxResults=1000)\n except Exception as e:\n print(e)\n sys.exit(1)\n\n for reservation in full_listing['Reservations']:\n for instance in reservation['Instances']:\n listing.append(instance)\n\n return listing", "def _show_instances(self):\n conn = ec2.connect_to_region(\n self.availability_zone,\n aws_access_key_id=self.access_key_id,\n aws_secret_access_key=self.secret_access_key,\n )\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n print reservation\n for instance in reservation.instances:\n print instance\n print '- AMI ID:', instance.image_id\n print '- Instance Type:', instance.instance_type\n print '- Availability Zone:', instance.placement", "def attemptPurchases(order):\n print(\"\\n\")\n # here we sort out the availability zones\n hasOrdersAssigned = True\n\n for az in order.AvailabilityZones:\n if az.ordered is None:\n az.ordered = 0\n if az.Number is None:\n hasOrdersAssigned = False\n\n if hasOrdersAssigned == False:\n remainder = int(order.Number) % len(order.AvailabilityZones)\n eachOrderGets = int((int(order.Number) - remainder) /\n len(order.AvailabilityZones))\n # here we assign all the orders\n for az in order.AvailabilityZones:\n az.Number = eachOrderGets\n if remainder != 0:\n az.Number += 1\n remainder -= 1\n\n # this client can be used for all the az's\n print(order.Region)\n client = boto3.client('ec2', region_name=order.Region,aws_access_key_id=order.aws_access_key_id,aws_secret_access_key=order.aws_secret_access_key)\n for az in order.AvailabilityZones:\n\n # for each AZ we're buying from\n kwargs = order.getKwargs(az.Name)\n response = client.describe_reserved_instances_offerings(**kwargs)\n ReservedInstancesOfferings = response[\"ReservedInstancesOfferings\"]\n\n # we search for all instance types, not just fixed or hourly, then sort when we recieve results\n # do the sorting of the reserved instances by price, 
cheapest first\n allOfferings = []\n\n # get all the offerings objects\n for instanceOffering in ReservedInstancesOfferings:\n # isFixed and isHourly completely filter out or in whether or not those instance types get included\n # if both are true, then all types of instances get included regardless of payment type\n\n # for limits, 0 means no limit, everything else abides by the limit\n\n iOffering = getInstanceOffering(instanceOffering)\n fixedPrice = iOffering.FixedPrice\n recurringAmount = iOffering.RecurringAmount\n fixedPriceExists = False\n recurringAmountExists = False\n\n if fixedPrice is not None and fixedPrice != 0:\n fixedPriceExists = True\n if recurringAmount is not None and recurringAmount != 0:\n recurringAmountExists = True\n\n MaxFixedPrice = 0\n if order.MaxFixedPrice is not None:\n MaxFixedPrice = order.MaxFixedPrice\n\n MaxRecurringPrice = 0\n if order.MaxHourlyPrice is not None:\n MaxRecurringPrice = order.MaxHourlyPrice\n\n if order.isFixedPrice == True and order.isHourlyPrice == True:\n # either hourly or fixed or both\n if fixedPriceExists and recurringAmountExists:\n if (MaxFixedPrice == 0 or iOffering.FixedPrice <= MaxFixedPrice) and (MaxRecurringPrice == 0 or iOffering.RecurringAmount <= MaxRecurringPrice):\n allOfferings.append(iOffering)\n elif fixedPriceExists:\n if MaxFixedPrice == 0 or iOffering.FixedPrice <= MaxFixedPrice:\n allOfferings.append(iOffering)\n elif recurringAmountExists:\n if MaxRecurringPrice == 0 or iOffering.RecurringAmount <= MaxRecurringPrice:\n allOfferings.append(iOffering)\n\n elif order.isFixedPrice == True:\n # only fixed price servers\n if fixedPriceExists and recurringAmountExists == False:\n if MaxFixedPrice == 0 or iOffering.FixedPrice <= MaxFixedPrice:\n allOfferings.append(iOffering)\n\n elif order.isHourlyPrice == True:\n # only hourly servers\n if recurringAmountExists and fixedPriceExists == False:\n if MaxRecurringPrice == 0 or iOffering.RecurringAmount <= MaxRecurringPrice:\n allOfferings.append(iOffering)\n\n # sort into cost effectiveness, and these all have the correct AZ\n allOfferings.sort(key=lambda x: x.EffectiveHourlyRate)\n\n # print(order.Number)\n if order.Number is not None and order.Number > 0:\n if order.ordered is None:\n # brand new order bring it up to speed\n order.ordered = 0\n\n if az.ordered >= az.Number:\n print(\"AZ\", az.Name, \"has already been fulfilled with\",\n az.ordered, \"instances\")\n # buy until finished\n purchasedJustNow = 0\n previouslyPurchased = az.ordered\n for instanceOffering in allOfferings:\n # instanceOffering.print()\n # also we might want to write to the file, like keep it open, and update it for each order bought\n # something might go wrong\n # print(instanceOffering, \"\\n\")\n if order.ordered < order.Number and az.ordered < az.Number:\n # do purchase\n order.ordered += 1\n az.ordered += 1\n purchasedJustNow += 1\n instance = allOfferings.pop(0)\n kwargs = instance.getKwargs(order.DryRun)\n response = None\n try:\n response = client.purchase_reserved_instances_offering(\n **kwargs)\n print(response)\n except:\n pass\n print(\"Just Purchased:\")\n instanceOffering.print()\n order.PurchasedInstances.append(instanceOffering)\n\n if order.ordered >= order.Number or az.ordered >= az.Number:\n break\n\n print(purchasedJustNow,\n \"Reserved Instances were just purchased for:\", az.Name)\n print(previouslyPurchased, \"instances had been purchased previously\")\n if az.ordered >= az.Number:\n print(\"Purchased all\", az.ordered,\n \"Reserved Instances for:\", az.Name, \"\\n\")\n 
else:\n print(\"Still need\", int(az.Number - az.ordered), \"instances for availability zone:\",\n az.Name, \", will attempt to purchase the rest during the next run\", \"\\n\")\n\n if order.ordered >= order.Number:\n print(\"Purchased all\", order.ordered,\n \"Reserved Instances for this order\\n\\n\")\n else:\n print(\"Could only purchase\", order.ordered,\n \"Reserved Instances for this order, will attempt to purchase the rest at a later date.\\n\\n\")\n return", "def describe_addresses(DryRun=None, PublicIps=None, Filters=None, AllocationIds=None):\n pass", "def _print_reservation(reservation):\n num_running = 0\n for inst in reservation.instances:\n if inst.state != u'running':\n continue\n print \"ID: %s\" % inst.id\n print \"state: %s\" % inst.state\n print \"IP: %s\" % inst.ip_address\n print \"private IP: %s\" % inst.private_ip_address\n print \"DNS: %s\" % inst.public_dns_name\n print \"private DNS: %s\" % inst.private_dns_name\n print \"architecture: %s\" % inst.architecture\n print \"image ID: %s\" % inst.image_id\n print \"class: %s\" % inst.instance_class\n print \"type: %s\" % inst.instance_type\n print \"key_name: %s\" % inst.key_name\n print \"launch time: %s\" % inst.launch_time\n print \"\"\n num_running += 1\n\n return num_running", "def create_reserved_instances_listing(ReservedInstancesId=None, InstanceCount=None, PriceSchedules=None, ClientToken=None):\n pass", "def describe_instances(self, xml_bytes):\n root = XML(xml_bytes)\n results = []\n # May be a more elegant way to do this:\n for reservation_data in root.find(\"reservationSet\"):\n # Create a reservation object with the parsed data.\n reservation = model.Reservation(\n reservation_id=reservation_data.findtext(\"reservationId\"),\n owner_id=reservation_data.findtext(\"ownerId\"))\n # Get the list of instances.\n instances = self.instances_set(\n reservation_data, reservation)\n results.extend(instances)\n return results", "def describe_availability_zones(DryRun=None, ZoneNames=None, Filters=None):\n pass", "def resource_availability():\n return dict(nodes_free=randrange(1, 500))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes the Dedicated Host Reservations that are associated with Dedicated Hosts in your account.
def describe_host_reservations(HostReservationIdSet=None, Filters=None, MaxResults=None, NextToken=None): pass
[ "def describe_host_reservation_offerings(OfferingId=None, MinDuration=None, MaxDuration=None, Filters=None, MaxResults=None, NextToken=None):\n pass", "def describe_addresses(DryRun=None, PublicIps=None, Filters=None, AllocationIds=None):\n pass", "def describe_reserved_instances(DryRun=None, ReservedInstancesIds=None, Filters=None, OfferingType=None, OfferingClass=None):\n pass", "def _allocate_addresses_for_host(self, context, host):\n mgmt_ip = host.mgmt_ip\n mgmt_interfaces = self.iinterfaces_get_by_ihost_nettype(\n context, host.uuid, constants.NETWORK_TYPE_MGMT\n )\n mgmt_interface_id = None\n if mgmt_interfaces:\n mgmt_interface_id = mgmt_interfaces[0]['id']\n hostname = host.hostname\n address_name = cutils.format_address_name(hostname,\n constants.NETWORK_TYPE_MGMT)\n # if ihost has mgmt_ip, make sure address in address table\n if mgmt_ip:\n self._create_or_update_address(context, hostname, mgmt_ip,\n constants.NETWORK_TYPE_MGMT,\n mgmt_interface_id)\n # if ihost has no management IP, check for static mgmt IP\n if not mgmt_ip:\n mgmt_ip = self._lookup_static_ip_address(\n hostname, constants.NETWORK_TYPE_MGMT\n )\n if mgmt_ip:\n host.mgmt_ip = mgmt_ip\n self.update_ihost(context, host)\n # if no static address, then allocate one\n if not mgmt_ip:\n mgmt_pool = self.dbapi.network_get_by_type(\n constants.NETWORK_TYPE_MGMT\n ).pool_uuid\n\n mgmt_ip = self._allocate_pool_address(mgmt_interface_id, mgmt_pool,\n address_name).address\n if mgmt_ip:\n host.mgmt_ip = mgmt_ip\n self.update_ihost(context, host)\n\n self._generate_dnsmasq_hosts_file(existing_host=host)\n self._allocate_cluster_host_address_for_host(host)", "def getReservations():\n from commands import getstatusoutput\n cmd = \"scontrol -o show reservation\"\n\n output = filter(None, getstatusoutput(cmd)[1].split(\"\\n\"))\n\n return [Slurm.Reservation(each) for each in output]", "def compute_host_available(request, start_date, end_date):\n def check_host_unavailable(reservation):\n lease_start = _parse_api_datestr(reservation['start_date'])\n lease_end = _parse_api_datestr(reservation['end_date'])\n\n if (lease_start > start_date and lease_start < end_date):\n return True\n elif (lease_end > start_date and lease_end < end_date):\n return True\n elif (lease_start < start_date and lease_end > end_date):\n return True\n else:\n return False\n\n available_hosts = [\n h for h in host_allocations_list(request)\n if (not any([check_host_unavailable(r) for r in h.reservations]) or\n not h.reservations)]\n\n return len(available_hosts)", "def get_hostnames(resource_desc):\n ram = ResourceAllocationManager.get_instance()\n with ResourceAllocationManager._lock:\n return ram._get_hostnames(resource_desc)", "def _show_instances(self):\n conn = ec2.connect_to_region(\n self.availability_zone,\n aws_access_key_id=self.access_key_id,\n aws_secret_access_key=self.secret_access_key,\n )\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n print reservation\n for instance in reservation.instances:\n print instance\n print '- AMI ID:', instance.image_id\n print '- Instance Type:', instance.instance_type\n print '- Availability Zone:', instance.placement", "def list(ctx):\r\n config = ctx.obj['config']\r\n config.validate()\r\n host = config.get_active_host()\r\n instances = host.get_instances()\r\n logger.info(\"Instances on: %s\", host.name)\r\n outputters.table([x.dump() for x in instances])", "def get_hosts(self):\n def dto(h):\n \"\"\"Convert a dict to a new host object\n \"\"\"\n r = host()\n for k, v in 
h.items(): setattr(r, k, v)\n return r\n #\n r = self.get_table(\"host\", columns=\"name, data\",\n fetchall=True, current=False,)\n return {x[0]: dto(x[1]) for x in r} if r else {}", "def _print_reservation(reservation):\n num_running = 0\n for inst in reservation.instances:\n if inst.state != u'running':\n continue\n print \"ID: %s\" % inst.id\n print \"state: %s\" % inst.state\n print \"IP: %s\" % inst.ip_address\n print \"private IP: %s\" % inst.private_ip_address\n print \"DNS: %s\" % inst.public_dns_name\n print \"private DNS: %s\" % inst.private_dns_name\n print \"architecture: %s\" % inst.architecture\n print \"image ID: %s\" % inst.image_id\n print \"class: %s\" % inst.instance_class\n print \"type: %s\" % inst.instance_type\n print \"key_name: %s\" % inst.key_name\n print \"launch time: %s\" % inst.launch_time\n print \"\"\n num_running += 1\n\n return num_running", "def get_hosts_info():\n response = {\n \"hosts\": []\n }\n\n scope_hosts_response = fetch_topology_hosts()\n for node_id, node in scope_hosts_response.items():\n if not node.get(\"id\"):\n continue\n host = _parse_host(node[\"id\"])\n public_ip_address = \"\"\n local_networks = []\n interface_ips = {} # list of all interface ips, along with subnet masks\n probe_id = \"\"\n cloud_metadata = {}\n os_type = \"\"\n kubernetes_cluster_name = \"\"\n\n for meta in node.get(\"metadata\", []):\n if not meta.get(\"value\"):\n continue\n if meta.get(\"id\") == \"local_networks\":\n local_networks = meta.get(\"value\").split(\",\")\n elif meta.get(\"id\") == 'kubernetes_cluster_name':\n kubernetes_cluster_name = meta.get(\"value\", \"\")\n elif meta.get(\"id\") == \"probeId\":\n probe_id = meta.get(\"value\")\n elif meta.get(\"id\") == \"interface_ips\":\n try:\n interface_ips = json.loads(meta.get(\"value\"))\n except:\n pass\n elif meta.get(\"id\") == \"cloud_metadata\":\n try:\n cloud_metadata = json.loads(meta.get(\"value\"))\n except:\n pass\n elif meta.get(\"id\") == \"os\":\n os_type = meta.get(\"value\")\n\n if not host:\n \"\"\"\n This mostly happens when the node is either in-theinternet or out-theinternet.\n \"\"\"\n continue\n if cloud_metadata:\n public_ip_address = cloud_metadata.get(\"public_ip\", None)\n\n response[\"hosts\"].append({\n \"hostname\": host,\n \"public_ip_address\": public_ip_address,\n \"local_networks\": _parse_local_networks(local_networks),\n \"probe_id\": probe_id,\n \"interface_ips\": interface_ips,\n \"cloud_metadata\": cloud_metadata,\n \"os\": os_type,\n \"kubernetes_cluster_name\": kubernetes_cluster_name\n })\n\n return response", "def describe_reserved_instances_offerings(DryRun=None, ReservedInstancesOfferingIds=None, InstanceType=None, AvailabilityZone=None, ProductDescription=None, Filters=None, InstanceTenancy=None, OfferingType=None, NextToken=None, MaxResults=None, IncludeMarketplace=None, MinDuration=None, MaxDuration=None, MaxInstanceCount=None, OfferingClass=None):\n pass", "def get_host_list(self):", "def describe_dhcp_options(DryRun=None, DhcpOptionsIds=None, Filters=None):\n pass", "def host_allocations_list(request):\n request_manager = blazarclient(request).host.request_manager\n resp, body = request_manager.get('/os-hosts/allocations')\n allocations = body['allocations']\n return [Allocation(a) for a in allocations]", "def _GetHostList(self):\n hosts = dict()\n self._GetHostsFromArpTable(hosts=hosts)\n self._GetHostsFromIp6Neigh(hosts=hosts)\n self._GetHostsFromBridges(hosts=hosts)\n self._GetHostsFromEthernets(hosts=hosts)\n 
self._GetHostsFromWifiAssociatedDevices(hosts=hosts)\n self._GetHostsFromMocaAssociatedDevices(hosts=hosts)\n self._GetHostsFromDhcpServers(hosts=hosts)\n self._PopulateDhcpTaxonomy(hosts=hosts)\n self._PopulateDiscoveredHostnames(hosts=hosts)\n self._PopulateWifiTaxonomy(hosts=hosts)\n host_list = dict()\n for idx, host in enumerate(hosts.values(), start=1):\n host_list[str(idx)] = Host(**host)\n return host_list", "def list_dedicated_hosts(self,\n *,\n dedicated_host_group_id: str = None,\n start: str = None,\n limit: int = None,\n resource_group_id: str = None,\n zone_name: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_dedicated_hosts')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation,\n 'dedicated_host_group.id': dedicated_host_group_id,\n 'start': start,\n 'limit': limit,\n 'resource_group.id': resource_group_id,\n 'zone.name': zone_name\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/dedicated_hosts'\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def from_dict(cls, _dict: Dict) -> 'DedicatedHost':\n args = {}\n if 'available_memory' in _dict:\n args['available_memory'] = _dict.get('available_memory')\n else:\n raise ValueError('Required property \\'available_memory\\' not present in DedicatedHost JSON')\n if 'available_vcpu' in _dict:\n args['available_vcpu'] = VCPU.from_dict(_dict.get('available_vcpu'))\n else:\n raise ValueError('Required property \\'available_vcpu\\' not present in DedicatedHost JSON')\n if 'created_at' in _dict:\n args['created_at'] = string_to_datetime(_dict.get('created_at'))\n else:\n raise ValueError('Required property \\'created_at\\' not present in DedicatedHost JSON')\n if 'crn' in _dict:\n args['crn'] = _dict.get('crn')\n else:\n raise ValueError('Required property \\'crn\\' not present in DedicatedHost JSON')\n if 'disks' in _dict:\n args['disks'] = [DedicatedHostDisk.from_dict(x) for x in _dict.get('disks')]\n else:\n raise ValueError('Required property \\'disks\\' not present in DedicatedHost JSON')\n if 'group' in _dict:\n args['group'] = DedicatedHostGroupReference.from_dict(_dict.get('group'))\n else:\n raise ValueError('Required property \\'group\\' not present in DedicatedHost JSON')\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n else:\n raise ValueError('Required property \\'href\\' not present in DedicatedHost JSON')\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in DedicatedHost JSON')\n if 'instance_placement_enabled' in _dict:\n args['instance_placement_enabled'] = _dict.get('instance_placement_enabled')\n else:\n raise ValueError('Required property \\'instance_placement_enabled\\' not present in DedicatedHost JSON')\n if 'instances' in _dict:\n args['instances'] = [InstanceReference.from_dict(x) for x in _dict.get('instances')]\n else:\n raise ValueError('Required property \\'instances\\' not present in DedicatedHost JSON')\n if 'lifecycle_state' in _dict:\n args['lifecycle_state'] = _dict.get('lifecycle_state')\n else:\n raise ValueError('Required property \\'lifecycle_state\\' not present in DedicatedHost JSON')\n if 'memory' in _dict:\n args['memory'] = _dict.get('memory')\n else:\n raise 
ValueError('Required property \\'memory\\' not present in DedicatedHost JSON')\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n else:\n raise ValueError('Required property \\'name\\' not present in DedicatedHost JSON')\n if 'profile' in _dict:\n args['profile'] = DedicatedHostProfileReference.from_dict(_dict.get('profile'))\n else:\n raise ValueError('Required property \\'profile\\' not present in DedicatedHost JSON')\n if 'provisionable' in _dict:\n args['provisionable'] = _dict.get('provisionable')\n else:\n raise ValueError('Required property \\'provisionable\\' not present in DedicatedHost JSON')\n if 'resource_group' in _dict:\n args['resource_group'] = ResourceGroupReference.from_dict(_dict.get('resource_group'))\n else:\n raise ValueError('Required property \\'resource_group\\' not present in DedicatedHost JSON')\n if 'resource_type' in _dict:\n args['resource_type'] = _dict.get('resource_type')\n else:\n raise ValueError('Required property \\'resource_type\\' not present in DedicatedHost JSON')\n if 'socket_count' in _dict:\n args['socket_count'] = _dict.get('socket_count')\n else:\n raise ValueError('Required property \\'socket_count\\' not present in DedicatedHost JSON')\n if 'state' in _dict:\n args['state'] = _dict.get('state')\n else:\n raise ValueError('Required property \\'state\\' not present in DedicatedHost JSON')\n if 'supported_instance_profiles' in _dict:\n args['supported_instance_profiles'] = [InstanceProfileReference.from_dict(x) for x in _dict.get('supported_instance_profiles')]\n else:\n raise ValueError('Required property \\'supported_instance_profiles\\' not present in DedicatedHost JSON')\n if 'vcpu' in _dict:\n args['vcpu'] = VCPU.from_dict(_dict.get('vcpu'))\n else:\n raise ValueError('Required property \\'vcpu\\' not present in DedicatedHost JSON')\n if 'zone' in _dict:\n args['zone'] = ZoneReference.from_dict(_dict.get('zone'))\n else:\n raise ValueError('Required property \\'zone\\' not present in DedicatedHost JSON')\n return cls(**args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes your IAM instance profile associations.
def describe_iam_instance_profile_associations(AssociationIds=None, Filters=None, MaxResults=None, NextToken=None): pass
[ "def profiles(self) -> Sequence[str]:\n return pulumi.get(self, \"profiles\")", "def __str__(self):\n return \"Association %s \" % str(self._tuple)", "def print_profiles(profiles):\n\n print \"Available profiles for the pods are the following:\"\n\n for profile in profiles:\n print \" %s\" % (profile)", "def profile_list():\n conf = api.Config()\n\n for profile in conf.profile_sections():\n data = conf._profile_general(profile)\n\n try:\n _print_profile(profile, data)\n except KeyError:\n print(\n log.format(\n f\"Invalid or incomplete profile '{profile}'\",\n color=\"red\",\n bold=False,\n )\n )", "def get_profilearn(self):\n try:\n response = self.client.get_instance_profile(InstanceProfileName=self.ProfileName)\n self.ProfileArn=response[\"InstanceProfile\"][\"Arn\"]\n except ClientError:\n self.ProfileArn=\"\"\n return self.ProfileArn", "async def profiles(self, ctx):\n if ctx.invoked_subcommand is None:\n await self.show(ctx)", "def ptableinstructor(self):\n pt = PrettyTable(field_names = ['CWID','Name','Dept','Course','Students'])\n for i in self.instructordict.values(): # i is an instance of class Instructor\n for line in i.instructordetails():\n pt.add_row(line)\n print(pt)", "def cli(ctx, **kwds):\n profile_names = profiles.list_profiles(ctx, **kwds)\n print(profile_names)", "def profile(ctx):\n if ctx.invoked_subcommand is None:\n config = ctx.obj.configuration\n\n default = config.default_profile_name()\n names = config.profiles()\n for profile_name in names:\n profile = config.profile(profile_name)\n if profile_name == default:\n click.echo(\"Profile: %s (default)\" % profile_name)\n else:\n click.echo(\"Profile: %s\" % profile_name)\n click.echo(\"User: %s\" % profile['user'])\n click.echo(\"URL: %s\" % profile['url'])\n click.echo()", "def list_instance_profiles(self,\n **kwargs\n ) -> DetailedResponse:\n\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_instance_profiles')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/instance/profiles'\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def iam_instance_profile(self) -> pulumi.Output[Optional['outputs.LaunchTemplateIamInstanceProfile']]:\n return pulumi.get(self, \"iam_instance_profile\")", "def print_ins(self)-> None:\n pt:PrettyTable = PrettyTable(field_names=[\"CWID\",\"Name\",\"Dept\",\"Course\",\"Students\"])\n for ins in self.instdict.values():\n a = ins.getInstructorDetails()\n if a[len(a)-1] == \"NA\":\n pt.add_row(a)\n else:\n for course, noOfStudents in a[len(a)-1].items():\n pt.add_row([a[0],a[1],a[2],course,noOfStudents])\n \n print(\"Instructor Summary\")\n print(pt)", "def print_profile_information(config: Config):\n try:\n profile: Any = config.profile\n profile_info = config.section_items(profile)\n except NoSectionError:\n profile_info = config.defaults().items()\n\n click.echo(\"\\nBonsai configuration file(s) found at {}\".format(config.file_paths))\n click.echo(\"\\nProfile Information\")\n click.echo(\"--------------------\")\n if profile_info:\n for key, val in profile_info:\n click.echo(key + \": \" + str(val))\n else:\n click.echo(\"No profiles found please run 'bonsai configure'.\")", "def print_pod_profiles(pods, remote):\n for pod in 
pods:\n profile = remote.get_system(pod)['profile']\n print \"%s: %s\" % (pod, profile)", "def __init__(self,\n profiles: List['InstanceProfile']) -> None:\n self.profiles = profiles", "def profiles():\n profs = UserProfile.query.order_by(UserProfile.lastname).all()\n return render_template('profiles.html', users=profs)", "def list(self, architecture):\n return self._list(\"/archs/%s/profiles\" % architecture.id,\n \"profiles\")", "def profile(self):\n return self.profile_url.format", "def iam_instance_profile(self) -> Optional[pulumi.Input['LaunchTemplateIamInstanceProfileArgs']]:\n return pulumi.get(self, \"iam_instance_profile\")", "def _init_instance_profile(self):\n iam_client = self._session.client('iam')\n\n # Create instance profile\n instance_profile_name = 'AccelizeLoadFPGA'\n with _ExceptionHandler.catch(filter_error_codes='EntityAlreadyExists'):\n iam_client.create_instance_profile(\n InstanceProfileName=instance_profile_name)\n\n _get_logger().info(\n _utl.gen_msg('created_object', 'instance profile',\n instance_profile_name))\n\n _time.sleep(5)\n\n # Attach role to instance profile\n with _ExceptionHandler.catch(filter_error_codes='LimitExceeded'):\n iam_client.add_role_to_instance_profile(\n InstanceProfileName=instance_profile_name, RoleName=self._role)\n\n _get_logger().info(\n _utl.gen_msg('attached_to', 'role', self._role,\n 'instance profile', instance_profile_name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes the ID format settings for your resources on a per-region basis, for example, to view which resource types are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types.
def describe_id_format(Resource=None): pass
[ "def listMetadataFormats(identifier=None):", "def listFormats(self, type='255', returnFormat='None'):\n \n pass", "def get_formats(cls):\n return RegionsRegistry.get_formats(cls)", "async def list_formats(info_dict: dict) -> str:\n formats = info_dict.get('formats', [info_dict])\n table = [[\n f['format_id'], f['ext'],\n youtube_dl.YoutubeDL.format_resolution(f)\n ] for f in formats\n if f.get('preference') is None or f['preference'] >= -1000]\n if len(formats) > 1:\n table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'\n\n header_line = ['format code', 'extension', 'resolution']\n fmtStr = (\n '`Available formats for %s:`\\n`%s`' %\n (info_dict['title'], youtube_dl.render_table(header_line, table)))\n return fmtStr", "def listFormats():\n return None", "def getFormatIds(self):\n res = {}\n for subproc in self.parsers[0].p[\"subprocessors\"]:\n subproc_instance = self.getBean(subproc)\n try:\n for formatId in subproc_instance.p[\"matchDocuments\"]:\n try:\n res[formatId].append(subproc)\n except KeyError as e:\n res[formatId] = [subproc, ]\n except KeyError as e:\n self._L.info(\"No formatIds for subprocessor: %s\", subproc)\n return res", "def listMetadataFormats(self, identifier=None):\n if identifier is not None:\n q = cqlparse('rec.identifier exact \"%s\"' % (identifier))\n try:\n rs = self.db.search(session, q)\n except SRWDiagnostics.Diagnostic16:\n raise ConfigFileException('Index map for rec.identifier required in protocolMap: %s' % self.db.get_path(session, 'protocolMap').id)\n \n if not len(rs) or len(rs) > 1:\n raise IdDoesNotExistError('%s records exist for identifier: %s' % (len(rs), identifier))\n # all records should be available in the same formats in a Cheshire3 database\n mfs = []\n for prefix, ns in self.protocolMap.recordNamespaces.iteritems():\n mfs.append((prefix, self.protocolMap.schemaLocations[ns], ns))\n \n if not len(mfs):\n raise NoMetadataFormatsError()\n return mfs", "def fileformats():\n\n # Filter parsing using request headers.\n headers = _parse_filter_headers(request)\n format_filter = headers.get(FILE_FORMAT_HEADER, None)\n guid_filter = headers.get(GUID_HEADER, None)\n\n offset, limit = _parse_offset_limit(request)\n before_date, after_date = _parse_filter_dates(request)\n\n versions = fpr_format_versions.query.filter(\n fpr_format_versions.last_modified.between(after_date, before_date)\n ).all()\n\n response = {}\n response[\"fileFormats\"] = []\n\n for version in versions:\n if version.pronom_id:\n if format_filter != []:\n if version.pronom_id not in format_filter:\n continue\n if version.pronom_id is \"\":\n continue\n else:\n if format_filter != []:\n if slugify(version.description) not in format_filter:\n continue\n if version.pronom_id:\n if guid_filter != []:\n if version.pronom_id not in guid_filter:\n continue\n if version.pronom_id is \"\":\n continue\n else:\n if guid_filter != []:\n if slugify(version.description) not in guid_filter:\n continue\n\n format = fpr_formats.query.get(version.format)\n group = fpr_format_groups.query.get(format.group)\n if version.pronom_id:\n if version.pronom_id[:3] == \"arc\":\n namespace = \"https://archivematica.org\"\n else:\n namespace = \"http://www.nationalarchives.uk.gov\"\n id = {\n \"guid\": version.uuid,\n \"name\": version.pronom_id,\n \"namespace\": namespace,\n }\n identifier = {\n \"identifier\": version.pronom_id,\n \"identifierType\": \"PUID\",\n }\n else:\n id = {\n \"guid\": version.uuid,\n \"name\": slugify(version.description),\n \"namespace\": \"https://archivematica.org\",\n }\n 
identifier = {\n \"identifier\": slugify(version.description),\n \"identifierType\": \"Archivematica description\",\n }\n if version.version == \"\":\n updatedVersion = None\n else:\n updatedVersion = version.version\n\n newFormat = {\n \"name\": version.description,\n \"localLastModifiedDate\": str(version.last_modified),\n \"version\": updatedVersion,\n \"id\": id,\n \"identifiers\": [identifier],\n \"types\": [group.description],\n }\n\n response[\"fileFormats\"].append(newFormat)\n\n response[\"fileFormats\"] = response[\"fileFormats\"][offset:limit]\n\n return jsonify(response)", "def get_format_choices():\n return [\n (f.CONTENT_TYPE, f().get_title())\n for f in DEFAULT_FORMATS\n if f().can_import()\n ]", "def output_formats(**kwargs):\n formats = []\n query = dict(TASK_KWARGS)\n task_q = get_selection_query(**kwargs)\n ds_q = {'has_parent': {'type': 'task', 'query': task_q}}\n agg = {'formats': {'terms': {'field': 'data_format', 'size': 500}}}\n agg['formats']['terms']['exclude'] = ['DAOD', 'DRAW']\n query['body'] = {'query': ds_q, 'aggs': agg}\n query['doc_type'] = 'output_dataset'\n query['size'] = 0\n r = client().search(**query)\n return [bucket['key'] for bucket in\n r['aggregations']['formats']['buckets']]", "def getFormatOptions(self):\n try:\n activeDocument = self.psApp.Application.ActiveDocument\n bitDepth = activeDocument.bitsPerChannel\n except:\n return []\n\n if bitDepth == 8:\n return self.exportFormats8Bit\n if bitDepth == 16:\n return self.exportFormats16Bit\n if bitDepth == 32:\n return self.exportFormats32Bit", "def getFormat(formatId):\n return None", "def resource_types_show(self,resource_type):\n \n path=\"/resource_types/%s\" %resource_type\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Openstack heat resource types show: %s' % \\\n truncate(res))\n return res[0]", "def set_format(self, format_type):\n for each_format_type in FormatType:\n if each_format_type.name == format_type:\n self._report_extension = each_format_type.name\n self._request_json['taskInfo']['subTasks'][0]['options']['adminOpts']\\\n ['reportOption']['commonOpt']['outputFormat']['outputType'] = \\\n str(each_format_type.value)\n return\n raise Exception(\"Invalid format type,format should be one among the type in FormatType\")", "def get_specific_resource_type(self, resource_type):\n return resource_type['specific'].title()", "def _get_response_formats(self):\n # self.responses will never be None\n response_formats = set()\n\n for code, response in self.responses.items():\n for _format in response.formats:\n response_formats.add(_format)\n\n return sorted(list(response_formats))", "def getDocFormats(conn=None):\n\n query = cdrdb.Query(\"format\", \"name\").order(\"name\")\n if conn is None:\n rows = query.execute().fetchall()\n else:\n cursor = conn.cursor()\n rows = query.execute(cursor).fetchall()\n cursor.close()\n return [row.name for row in rows]", "def list_output_formats():\n format_string = \"{:<20}{}\"\n print(format_string.format(\"Format\", \"Description\"))\n print('-' * 80)\n for plugin in get_output_plugins():\n print(format_string.format(plugin.format_name, plugin.format_description))", "def _get_int_format(self):\r\n return self._int_format", "def describe_identity_id_format(Resource=None, PrincipalArn=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes the ID format settings for resources for the specified IAM user, IAM role, or root user. For example, you can view the resource types that are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.
def describe_identity_id_format(Resource=None, PrincipalArn=None): pass
[ "def listMetadataFormats(identifier=None):", "def modify_identity_id_format(Resource=None, UseLongIds=None, PrincipalArn=None):\n pass", "def listFormats(self, type='255', returnFormat='None'):\n \n pass", "def resource_types_show(self,resource_type):\n \n path=\"/resource_types/%s\" %resource_type\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Openstack heat resource types show: %s' % \\\n truncate(res))\n return res[0]", "def _id_format(resp):\r\n if 'StackId' in resp:\r\n identity = identifier.HeatIdentifier(**resp['StackId'])\r\n resp['StackId'] = identity.arn()\r\n if 'EventId' in resp:\r\n identity = identifier.EventIdentifier(**resp['EventId'])\r\n resp['EventId'] = identity.event_id\r\n return resp", "def format_stack_resource(r):\r\n keymap = {\r\n engine_api.RES_DESCRIPTION: 'Description',\r\n engine_api.RES_NAME: 'LogicalResourceId',\r\n engine_api.RES_PHYSICAL_ID: 'PhysicalResourceId',\r\n engine_api.RES_STATUS_DATA: 'ResourceStatusReason',\r\n engine_api.RES_TYPE: 'ResourceType',\r\n engine_api.RES_STACK_ID: 'StackId',\r\n engine_api.RES_STACK_NAME: 'StackName',\r\n engine_api.RES_UPDATED_TIME: 'Timestamp',\r\n }\r\n\r\n result = api_utils.reformat_dict_keys(keymap, r)\r\n\r\n result['ResourceStatus'] = self._resource_status(r)\r\n\r\n return self._id_format(result)", "async def list_formats(info_dict: dict) -> str:\n formats = info_dict.get('formats', [info_dict])\n table = [[\n f['format_id'], f['ext'],\n youtube_dl.YoutubeDL.format_resolution(f)\n ] for f in formats\n if f.get('preference') is None or f['preference'] >= -1000]\n if len(formats) > 1:\n table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'\n\n header_line = ['format code', 'extension', 'resolution']\n fmtStr = (\n '`Available formats for %s:`\\n`%s`' %\n (info_dict['title'], youtube_dl.render_table(header_line, table)))\n return fmtStr", "def resource_settings(self) -> 'outputs.ResourceSettingsResponse':\n return pulumi.get(self, \"resource_settings\")", "def listFormats():\n return None", "def GetDetailedHelpForSetIamPolicy(collection, example_id):\n return {\n 'brief': 'Set IAM policy for a {0}.'.format(collection),\n 'DESCRIPTION': '{description}',\n 'EXAMPLES': \"\"\"\\\n The following command will read an IAM policy defined in a JSON file\n 'policy.json' and set it for a {0} with identifier '{1}'\n\n $ {{command}} {1} policy.json\n\n See https://cloud.google.com/iam/docs/managing-policies for details\n of the policy file format and contents.\n \"\"\".format(collection, example_id)\n }", "def get_specific_resource_type(self, resource_type):\n return resource_type['specific'].title()", "def test_get_metadata_resources_types_schema(self):\n body = self.schemas_client.show_schema(\"metadefs/resource_types\")\n self.assertEqual(\"resource_type_associations\", body['name'])", "def transform(cls, clients, resource_config):\n resource_id = resource_config.get(\"id\")\n if not resource_id:\n cinderclient = clients.cinder()\n resource_id = _id_from_name(resource_config=resource_config,\n resources=cinderclient.\n volume_types.list(),\n typename=\"volume_type\")\n return resource_id", "def get_conf(self, resource_types = [\"Domain\",\"SecurityPolicy\",\"Group\",\"Rule\"]):\n filter = \"?filter=Type-\" + \"|\".join(resource_types)\n uri = self.url + filter\n res = get( uri,\n verify = self.certificate_validation,\n auth = HTTPBasicAuth(self.username, self.password)\n )\n return res.content.decode()", "def resource_types(self):\n return [v['name'] for 
k, v in self.mapping\n if 'resource' in v['attr'].split()]", "def resource_type(self):\n\n values = set()\n\n if 'resources' in self.description:\n for resource in self.description['resources']:\n if not resource:\n return\n\n ambiguities = (\n resource.get('path') and resource.get('url'),\n not resource.get('path') and not resource.get('url')\n )\n\n if any(ambiguities):\n return\n\n value = 'path' if resource.get('path') else 'url'\n values.add(value)\n\n if len(values) == 1:\n return values.pop()", "def resource_map(self, resource):\n resp = self.get_resource(resource)\n return {x[\"id\"]: x[\"name\"] for x in resp.json()}", "def _get_parsed_resource_ids(resource_ids):\n if not resource_ids:\n return None\n\n for rid in resource_ids:\n if not is_valid_resource_id(rid):\n raise CLIError('az resource: error: argument --ids: invalid ResourceId value: \\'%s\\'' % rid)\n\n return ({'resource_id': rid} for rid in resource_ids)", "def note_setdata_resource_conversion(resource, id, class_):\n note = Note()\n data = {\n 'resource': resource,\n 'resource_id': id\n }\n _data = note.format_data_set(data)\n assert isinstance(_data['resource'], class_)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes the specified attribute of the specified AMI. You can specify only one attribute at a time.
def describe_image_attribute(DryRun=None, ImageId=None, Attribute=None): pass
[ "def describe_network_interface_attribute(DryRun=None, NetworkInterfaceId=None, Attribute=None):\n pass", "def describe_attr_value(attr, die, section_offset):\r\n descr_func = _ATTR_DESCRIPTION_MAP[attr.form]\r\n val_description = descr_func(attr, die, section_offset)\r\n\r\n # For some attributes we can display further information\r\n extra_info_func = _EXTRA_INFO_DESCRIPTION_MAP[attr.name]\r\n extra_info = extra_info_func(attr, die, section_offset)\r\n return str(val_description) + '\\t' + extra_info", "def attributeInfo(multi=bool, inherited=bool, bool=bool, internal=bool, type=\"string\", hidden=bool, enumerated=bool, allAttributes=bool, logicalAnd=bool, writable=bool, userInterface=bool, leaf=bool, short=bool):\n pass", "def do_attr(self, args): # noqa: C901\n\n if not self.current:\n print('There are no resources in use. Use the command \"open\".')\n return\n\n args = args.strip()\n\n if not args:\n self.print_attribute_list()\n return\n\n args = args.split(\" \")\n\n if len(args) > 2:\n print(\n \"Invalid syntax, use `attr <name>` to get; or `attr <name> <value>` to set\"\n )\n return\n\n if len(args) == 1:\n # Get a given attribute\n attr_name = args[0]\n if attr_name.startswith(\"VI_\"):\n try:\n print(\n self.current.get_visa_attribute(getattr(constants, attr_name))\n )\n except Exception as e:\n print(e)\n else:\n try:\n print(getattr(self.current, attr_name))\n except Exception as e:\n print(e)\n return\n\n # Set the specified attribute value\n attr_name, attr_state = args[0], args[1]\n if attr_name.startswith(\"VI_\"):\n try:\n attributeId = getattr(constants, attr_name)\n attr = attributes.AttributesByID[attributeId]\n datatype = attr.visa_type\n retcode = None\n if datatype == \"ViBoolean\":\n if attr_state == \"True\":\n attr_state = True\n elif attr_state == \"False\":\n attr_state = False\n else:\n retcode = (\n constants.StatusCode.error_nonsupported_attribute_state\n )\n elif datatype in [\n \"ViUInt8\",\n \"ViUInt16\",\n \"ViUInt32\",\n \"ViInt8\",\n \"ViInt16\",\n \"ViInt32\",\n ]:\n try:\n attr_state = int(attr_state)\n except ValueError:\n retcode = (\n constants.StatusCode.error_nonsupported_attribute_state\n )\n if not retcode:\n retcode = self.current.set_visa_attribute(attributeId, attr_state)\n if retcode:\n print(\"Error {}\".format(str(retcode)))\n else:\n print(\"Done\")\n except Exception as e:\n print(e)\n else:\n print(\"Setting Resource Attributes by python name is not yet supported.\")\n return", "def describe_volume_attribute(DryRun=None, VolumeId=None, Attribute=None):\n pass", "def describe_vpc_attribute(DryRun=None, VpcId=None, Attribute=None):\n pass", "def describe_snapshot_attribute(DryRun=None, SnapshotId=None, Attribute=None):\n pass", "def info(self, attribute):\n return self.call('catalog_product_attribute.info', [attribute])", "def showattribute(self, vname=None, device=None):\n if device is None:\n device = sys.stdout\n if vname is None:\n vname = self.default_variable_name\n device.write(\"Attributes of \")\n device.write(vname)\n device.write(\" in file \")\n device.write(self.id)\n device.write(\":\\n\")\n device.write(str(self.listattribute(vname)))\n device.write(\"\\n\")", "def generateAttribute(self,xmiAttribute):\n # generate attribute dictionary\n name = xmiAttribute.getName()\n doc = xmiAttribute.getDocumentation()\n type = xmiAttribute.getType()\n attributeTemplateDict = dict(\n name = name,\n doc = doc,\n type = type\n )\n\n # render method\n attributeTemplate= self.env.get_template('attribute.jinja2')\n return 
attributeTemplate.render(attributeTemplateDict)", "def set_attribute(self,att,val):\r\n self.attributes[att] = val", "def validate_attr(self, arg):\n args = arg.split(' ')\n if len(args) < 3:\n print(HBNBCommand.ERROR_ATTR)\n return False\n attribute = args[2]\n return attribute", "def modify_image_attribute(DryRun=None, ImageId=None, Attribute=None, OperationType=None, UserIds=None, UserGroups=None, ProductCodes=None, Value=None, LaunchPermission=None, Description=None):\n pass", "def attribute_info(self, attribute_info):\n self._attribute_info = attribute_info", "def __init__(self, ami_name=None, ami_id=None, description=None, state=None, tags=None, architecture=None):\n self.openapi_types = {\n \"ami_name\": str,\n \"ami_id\": str,\n \"description\": str,\n \"state\": Ec2AmiState,\n \"tags\": List[Tag],\n \"architecture\": str,\n }\n\n self.attribute_map = {\n \"ami_name\": \"amiName\",\n \"ami_id\": \"amiId\",\n \"description\": \"description\",\n \"state\": \"state\",\n \"tags\": \"tags\",\n \"architecture\": \"architecture\",\n }\n\n self._ami_name = ami_name\n self._ami_id = ami_id\n self._description = description\n self._state = state\n self._tags = tags\n self._architecture = architecture", "def getAmi(verbose,amiId):\n dami = {}\n jResp = EC2C.describe_images(ImageIds=[amiId])\n if len(jResp['Images']) > 0:\n if 'Platform' in jResp['Images'][0]:\n platform = jResp['Images'][0]['Platform']\n else:\n platform = \"\"\n if verbose:\n dami[amiId] = jResp['Images'][0]['Name']+\";\"+\\\n platform+\";\"+\\\n jResp['Images'][0]['Architecture']+\";\"+\\\n jResp['Images'][0]['ImageType']+\";\"+\\\n jResp['Images'][0]['VirtualizationType']\n else:\n dami[amiId] = jResp['Images'][0]['Name']+\";\"+\\\n platform\n else:\n dami[amiId] = \"Unknown;Unknown\"\n return dami", "def Attributes(self) -> _n_5_t_17:", "def options(self, attribute, store_view=None):\n return self.call('catalog_product_attribute.options',\n [attribute, store_view])", "def print_attribute_list(self):\n p = prettytable.PrettyTable((\"VISA name\", \"Constant\", \"Python name\", \"val\"))\n for attr in getattr(self.current, \"visa_attributes_classes\", ()):\n try:\n val = self.current.get_visa_attribute(attr.attribute_id)\n except VisaIOError as e:\n val = e.abbreviation\n except Exception as e:\n val = str(e)\n if len(val) > 10:\n val = val[:10] + \"...\"\n p.add_row((attr.visa_name, attr.attribute_id, attr.py_name, val))\n\n print(p.get_string(sortby=\"VISA name\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. Images available to you include public images, private images that you own, and private images owned by other AWS accounts but for which you have explicit launch permissions.
def describe_images(DryRun=None, ImageIds=None, Owners=None, ExecutableUsers=None, Filters=None): pass
[ "def get_amis():\n print(\"looking for images that fit {}\".format(os.environ[\"CREATE_AMI_NAME\"]))\n images = EC2.describe_images(\n Owners=[\"self\"],\n Filters=[\n {\"Name\": \"name\", \"Values\": [\"{}*\".format(os.environ[\"CREATE_AMI_NAME\"])]}\n ],\n )\n sorted_images = sorted(images[\"Images\"], key=lambda x: x[\"CreationDate\"])\n print(\"There are {} images\".format(len(sorted_images)))\n return sorted_images", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['global-jenkins-server*']\n }]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def avail_images(call=None):\n vm_ = get_configured_provider()\n return {\"Profiles\": [profile for profile in vm_[\"profiles\"]]}", "def showimages():\n images = settings.get_all_images()\n for image in images:\n _print_image(settings.get_image(image))", "def test_list_images(self):\n with self.override_role():\n self.image_client.list_images()['images']", "def docker_images_list(self):\n images = Images.objects()\n if len(images) == 0:\n print(\"No images exist\")\n return\n\n for image in images:\n print(image.imageName)", "def getAmi(verbose,amiId):\n dami = {}\n jResp = EC2C.describe_images(ImageIds=[amiId])\n if len(jResp['Images']) > 0:\n if 'Platform' in jResp['Images'][0]:\n platform = jResp['Images'][0]['Platform']\n else:\n platform = \"\"\n if verbose:\n dami[amiId] = jResp['Images'][0]['Name']+\";\"+\\\n platform+\";\"+\\\n jResp['Images'][0]['Architecture']+\";\"+\\\n jResp['Images'][0]['ImageType']+\";\"+\\\n jResp['Images'][0]['VirtualizationType']\n else:\n dami[amiId] = jResp['Images'][0]['Name']+\";\"+\\\n platform\n else:\n dami[amiId] = \"Unknown;Unknown\"\n return dami", "def get_images(self, analyses):\n raise NotImplementedError(\"Getting images is not yet supported.\")", "def create_ana_images(self):\n log.debug(\"start\")\n os.chdir(self._p_analysis_tmp)\n exif_attributes=self._exif_attributes\n exif_attributes=\" \".join([\"-\"+a for a in exif_attributes])\n\n # quiet option suppreses regular output\n cmd_exif=ImageAnalyzer.CMD_EXIFTOOL_JSON.replace(\"_EXIF_\",self._exiftool)\n cmd_exif=cmd_exif.replace(\"ATT\",exif_attributes)\n\n cmd_out = None\n runner = Runner()\n ret_code=runner.run_cmd(cmd_exif)\n if ret_code == 0:\n cmd_out=runner.get_output()\n files_metadata={}\n\n try:\n files_metadata=json.loads(cmd_out)\n except JSONDecodeError as e:\n err_details={\"msg\":e.msg,\"col\":str(e.colno),\"line\":str(e.lineno)}\n log.error(\"JSON Decode Error: %(msg)s error occured in output at column %(col)s, line %(line)s\",err_details)\n\n for file_metadata in files_metadata:\n\n filename=Path(file_metadata[\"SourceFile\"])\n filename=filename.stem+\"_ana\"+filename.suffix\n file_metadata[\"TargetFile\"]=os.path.join(self._p_analysis,filename)\n file_metadata[\"FocusBox\"]=ImageAnalyzer.get_focus_box(file_metadata)\n file_metadata[\"Description\"]=ImageAnalyzer.create_analysis_text(file_metadata)\n # convert to a os magick command\n draw_config=self._magick_box_config.copy()\n try:\n draw_config[\"_FILE_IN_\"]=file_metadata[\"SourceFile\"]\n draw_config[\"_FILE_OUT_\"]=file_metadata[\"TargetFile\"]\n draw_config[\"_TEXT_\"]=file_metadata[\"Description\"]\n draw_config[\"_X0_\"]=str(file_metadata[\"FocusBox\"][0][0])\n draw_config[\"_Y0_\"]=str(file_metadata[\"FocusBox\"][0][1])\n draw_config[\"_X1_\"]=str(file_metadata[\"FocusBox\"][2][0])\n 
draw_config[\"_Y1_\"]=str(file_metadata[\"FocusBox\"][2][1])\n except TypeError as e:\n log.error(\"not all metadata found to create focus box (%s)\",e)\n continue\n # replace template\n cmd_magick=ImageAnalyzer.CMD_MAGICK_DRAW_FOCUS_BOX\n for k,v in draw_config.items():\n cmd_magick=cmd_magick.replace(k,v)\n file_metadata[\"CmdMagick\"]=cmd_magick\n\n # writing files with focus box and meta data\n runner = Runner()\n for file_metadata in files_metadata:\n cmd=file_metadata.get(\"CmdMagick\")\n\n if not cmd:\n continue\n ret_code=runner.run_cmd(cmd)\n if ret_code == 0:\n log.info(\"Writing file %s\",file_metadata['TargetFile'])\n cmd_out=runner.get_output()\n else:\n log.error(\"Error writing file %s\",file_metadata['TargetFile'])\n\n return files_metadata", "def list(self):\n with self.alternate_service_type('image', allowed_types=('image',)):\n return self._list('/v2/images', 'images')", "def showImageDetails():\r\n id = getImageId()\r\n try:\r\n image = imageManager.find(id)\r\n except CloudServersFault, cf:\r\n if cf.code == 404:\r\n print \"Server not found\"\r\n return\r\n print \"Image: \", id\r\n pprint(image)", "def describe_image_attribute(DryRun=None, ImageId=None, Attribute=None):\n pass", "def get_annot_images(ibs, aid_list):\n gid_list = ibs.get_annot_gids(aid_list)\n image_list = ibs.get_images(gid_list)\n return image_list", "def get_images(self):\n pass", "def images(profile, region, json_output):\n if profile != None:\n session = boto3.Session(profile_name=profile)\n else:\n session = boto3.Session()\n if region != None:\n client = session.client('ec2', region_name=region)\n else:\n client = session.client('ec2')\n image_owner = ['aws-marketplace']\n image_filter = [\n {\n 'Name':'name',\n 'Values':[\n '*vsrx3*'\n ]\n }\n ]\n response = client.describe_images(Owners = image_owner, Filters = image_filter)\n if 'Images' not in response:\n sys.exit('error get vSRX images from AWS marketplace')\n images = response['Images']\n images = sorted(images, key=lambda k: k['CreationDate'])\n if json_output == True:\n print json.dumps(images, indent = 4)\n else:\n table = []\n table.append(['IMAGE_ID', 'DESCRIPTION', 'CREATION_DATE', 'ARCHITECTURE'])\n for image in images:\n creation_datetime = parse(image['CreationDate'])\n table.append([image['ImageId'], image['Description'], creation_datetime, image['Architecture']])\n print(tabulate(table, headers=\"firstrow\", tablefmt=\"grid\"))", "def print_amis(self):\n\n if self.args.show_all:\n for ami in self.amis:\n if self.args.verbose:\n print(\"id: {}, name: {}, create_date: {}\".format(ami['ImageId'], ami['Name'], ami['CreationDate']))\n else:\n print(ami['ImageId'])\n else:\n\n if self.args.verbose:\n print(\"id: {}, name: {}, create_date: {}\".format(self.amis[-1]['ImageId'], self.amis[-1]['Name'],\n self.amis[-1]['CreationDate']))\n else:\n print(self.amis[-1]['ImageId'])", "def get_image_info(self):\n # pylint: disable=unused-variable\n rc, out, err = self.module.run_command(\n [self.module_params['executable'], b'image', b'inspect', self.module_params['image']])\n return json.loads(out)[0] if rc == 0 else {}", "def list_images(self, **args):\n\n return self._list(Image, **args)", "def docker_images(repotag_name=None):\n fmt_str = \"\"\"table {{.Repository}}:{{.Tag}}|{{.ID}}|\\\n {{.CreatedAt}}|{{.CreatedSince}}|{{.Digest}}|{{.Size}}\"\"\"\n cmd_lst = ['sudo', '-n', DOCKER_COMMAND, \"images\", \"--format\", fmt_str]\n if repotag_name is not None:\n cmd_lst.append(repotag_name)\n return 
DockerHandler._run_shell_command_to_dict(cmd_lst, splitchar=\"|\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays details about import virtual machine or import snapshot tasks that have already been created.
def describe_import_image_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None): pass
[ "def describe_import_snapshot_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))", "def show_task(path, final_only):\n\n if final_only:\n stats = get_exif_status_final_only(path)\n else:\n stats = get_exif_status(path)\n\n # print('show_task() ' + str(stats))\n name_col_len = 1\n # Column length for image name\n for each_stat in stats:\n if len(each_stat['image']) > name_col_len:\n name_col_len = len(each_stat['image'])\n\n for each_stat in stats:\n # print('show_task() ' + str(each_stat))\n if each_stat['jpg']:\n jpg = 'j'\n else:\n jpg = '-'\n\n if each_stat['final']:\n final = 'f'\n else:\n final = '-'\n\n if each_stat['raw']:\n raw = 'r'\n else:\n raw = '-'\n\n if each_stat['title']:\n title_flag = 't'\n title = each_stat['title']\n else:\n title_flag = '-'\n title = '-'\n\n if each_stat['description']:\n description_flag = 'd'\n description = each_stat['description']\n else:\n description_flag = '-'\n description = '-'\n\n if each_stat['location'] is None:\n location_flag = '-'\n else:\n location_flag = 'g'\n\n # print('show_task() ' + str(location_flag))\n\n formatting = '{}{}{}{}{}{} {:<' + str(name_col_len) + '} {} / {}'\n if final_only is False or (final_only is True and final == 'f'):\n print(formatting.format(jpg, raw, final, title_flag, description_flag, location_flag,\n each_stat['image'], title, description))", "def show(username):\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n info = vmware.show_jumpbox(username)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n else:\n logger.info('Task complete')\n resp['content'] = info\n return resp", "def info(ctx, show_all):\r\n ts, tier = _get_config_and_tier(ctx.obj.tier_name)\r\n\r\n #hd = ['template_name', '']\r\n #print \"template code\", [t().template_name for t in templater.export]\r\n\r\n _list_stacks(tier, show_all)", "def display_task(task):\n log = getLogger()\n return check_task(task, log)", "def show_data(self):\n for i, task in enumerate(self.todo_tasks):\n print(f\"{i + 1}° - \", end=\"\")\n task.show_task()", "def importa_voti(self, event):\n self.Disable()\n ViewImportVoti(parent=self, title='Importa Voti')", "def membership_import_preview(request, id):\n if not request.user.profile.is_superuser:\n raise Http403\n\n memport = get_object_or_404(MembershipImport, pk=id)\n\n if request.method == 'POST':\n form = ImportMapForm(request.POST, memport=memport)\n\n if form.is_valid():\n #show the user a preview based on the mapping\n cleaned_data = form.cleaned_data\n file_path = memport.get_file().file.name\n #file_path = os.path.join(settings.MEDIA_ROOT, memport.get_file().file.name)\n memberships, stats = parse_mems_from_csv(\n file_path,\n cleaned_data,\n membership_import=memport\n )\n\n EventLog.objects.log()\n\n # return the form to use it for the confirm view\n template_name = 'memberships/import-preview.html'\n return render_to_response(template_name, {\n 'memberships': memberships,\n 'stats': stats,\n 'memport': memport,\n 'form': form,\n 'datetime': datetime,\n }, context_instance=RequestContext(request))\n\n else:\n form = ImportMapForm(memport=memport)\n\n template_name = 'memberships/import-map-fields.html'\n return render_to_response(template_name, {\n 'form': form,\n 'memport': memport,\n 'datetime': datetime,\n }, 
context_instance=RequestContext(request))", "def show_info(args):\n # Enforce anonymization, when file_io upload is tuned on.\n anonymizer = PiiAnonymizer() if args.anonymize or args.file_io else NullAnonymizer()\n info = AirflowInfo(anonymizer)\n if args.file_io:\n _send_report_to_fileio(info.render_text())\n else:\n info.show()", "def import_task(self, img, cont, img_format=None, img_name=None):\r\n return self._tasks_manager.create(\"import\", img=img, cont=cont,\r\n img_format=img_format, img_name=img_name)", "def describe_export_tasks(ExportTaskIds=None):\n pass", "def populate_task_details(workflow_stats, workflow_info):\n\ttotal_tasks = 0\n\ttotal_tasks = workflow_stats.get_total_tasks_status()\n\tworkflow_info.total_tasks = total_tasks", "def read_tasks_and_print():\n if not _TASK_FILE.is_file():\n print(\"No existing tasks! Try to add some.\")\n return\n \n with open(_TASK_FILE) as f:\n task_dict = json.load(f)\n todo_list = task_dict['todo']\n review_list = task_dict['review']\n\n if 'daily' not in task_dict:\n task_dict['daily'] = []\n daily_list = task_dict['daily']\n\n cprint(\"Daily Tasks:\", 'grey', 'on_yellow', end='\\n')\n _print_daily_task_list(daily_list)\n\n cprint(\"Todos:\", 'grey', 'on_green', end='\\n')\n _print_list(todo_list)\n \n cprint(\"Reviews:\", 'white', 'on_blue', end='\\n')\n _print_list(review_list)", "def info():\n return render_template(\n os.path.join(os.path.dirname(__file__), 'templates/instance_info.html'),\n concurrents=concurrents,\n current_requests=current_requests,\n os=os,\n runtime=os.getenv('GAE_RUNTIME'),\n )", "def migrate(ctx, interactive, sync, map_project, map_tag):\n\n if sync:\n ctx.invoke(synchronize)\n\n tasks = todoist.items.all()\n io.important(f'Starting migration of {len(tasks)} tasks...')\n for idx, task in enumerate(tasks):\n data = {}\n tid = data['tid'] = task['id']\n name = data['name'] = task['content']\n\n # Log message and check if exists\n io.important(f'Task {idx + 1} of {len(tasks)}: {name}')\n if check_task_exists(tid):\n io.info(f'Already exists (todoist_id={tid})')\n continue\n\n # Project\n p = todoist.projects.get_by_id(task['project_id'])\n project_hierarchy = [p]\n while p['parent_id']:\n p = todoist.projects.get_by_id(p['parent_id'])\n project_hierarchy.insert(0, p)\n\n project_name = '.'.join(p['name'] for p in project_hierarchy)\n project_name = utils.try_map(\n map_project,\n project_name\n )\n data['project'] = utils.maybe_quote_ws(project_name)\n\n # Priority\n data['priority'] = utils.parse_priority(task['priority'])\n\n # Tags\n data['tags'] = [\n utils.try_map(map_tag, todoist.labels.get_by_id(l_id)['name'])\n for l_id in task['labels']\n ]\n\n # Dates\n data['entry'] = utils.parse_date(task['date_added'])\n data['due'] = utils.parse_due(utils.try_get_model_prop(task, 'due'))\n data['recur'] = parse_recur_or_prompt(utils.try_get_model_prop(task, 'due'))\n\n if not interactive:\n add_task(**data)\n else:\n add_task_interactive(**data)", "def display_project():\n\n return render_template(\"project_info.html\")", "def __load_smart_task_source_files_screen(self, smart_task_name, source_type, invisible, printer_obj=None):\n self.fc.flow_home_load_smart_task_screen(create_acc=False, printer_obj=printer_obj)\n self.fc.flow_smart_task_load_smart_task_create_screen(smart_task_name)\n self.smart_tasks.add_smart_task_for_email(to_email=self.email_address)\n self.smart_tasks.select_save_btn()\n try:\n self.smart_tasks.dismiss_smart_task_created_popup()\n self.smart_tasks.select_smart_task(smart_task_name)\n except 
TimeoutException:\n self.smart_tasks.select_btn_on_saved_screen(is_checked=False, btn_name=self.smart_tasks.START_THIS_SMART_TASK_BTN)\n self.smart_tasks.select_smart_task_source_type(source_type, invisible=invisible)", "def membership_import_confirm(request, id):\n if not request.user.profile.is_superuser:\n raise Http403\n\n memport = get_object_or_404(MembershipImport, pk=id)\n\n if request.method == \"POST\":\n form = ImportMapForm(request.POST, memport=memport)\n\n if form.is_valid():\n cleaned_data = form.cleaned_data\n\n EventLog.objects.log()\n\n if not settings.CELERY_IS_ACTIVE:\n result = ImportMembershipsTask()\n memberships, stats = result.run(memport, cleaned_data)\n return render_to_response('memberships/import-confirm.html', {\n 'memberships': memberships,\n 'stats': stats,\n 'datetime': datetime,\n }, context_instance=RequestContext(request))\n else:\n result = ImportMembershipsTask.delay(memport, cleaned_data)\n\n return redirect('membership_import_status', result.task_id)\n else:\n return redirect('membership_import_preview', memport.id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes your import snapshot tasks.
def describe_import_snapshot_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None): pass
[ "def describe_import_image_tasks(DryRun=None, ImportTaskIds=None, NextToken=None, MaxResults=None, Filters=None):\n pass", "def describe_export_tasks(ExportTaskIds=None):\n pass", "def import_task(self, img, cont, img_format=None, img_name=None):\r\n return self._tasks_manager.create(\"import\", img=img, cont=cont,\r\n img_format=img_format, img_name=img_name)", "def describe_conversion_tasks(DryRun=None, ConversionTaskIds=None):\n pass", "def describe_bundle_tasks(DryRun=None, BundleIds=None, Filters=None):\n pass", "def importMetadataTasks(self):\n for t in range(len(self.tasks)):\n self.importMetadata(self.metadataFiles[t], self.tasks[t], self.taskCallback[t])", "def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))", "def migrate(ctx, interactive, sync, map_project, map_tag):\n\n if sync:\n ctx.invoke(synchronize)\n\n tasks = todoist.items.all()\n io.important(f'Starting migration of {len(tasks)} tasks...')\n for idx, task in enumerate(tasks):\n data = {}\n tid = data['tid'] = task['id']\n name = data['name'] = task['content']\n\n # Log message and check if exists\n io.important(f'Task {idx + 1} of {len(tasks)}: {name}')\n if check_task_exists(tid):\n io.info(f'Already exists (todoist_id={tid})')\n continue\n\n # Project\n p = todoist.projects.get_by_id(task['project_id'])\n project_hierarchy = [p]\n while p['parent_id']:\n p = todoist.projects.get_by_id(p['parent_id'])\n project_hierarchy.insert(0, p)\n\n project_name = '.'.join(p['name'] for p in project_hierarchy)\n project_name = utils.try_map(\n map_project,\n project_name\n )\n data['project'] = utils.maybe_quote_ws(project_name)\n\n # Priority\n data['priority'] = utils.parse_priority(task['priority'])\n\n # Tags\n data['tags'] = [\n utils.try_map(map_tag, todoist.labels.get_by_id(l_id)['name'])\n for l_id in task['labels']\n ]\n\n # Dates\n data['entry'] = utils.parse_date(task['date_added'])\n data['due'] = utils.parse_due(utils.try_get_model_prop(task, 'due'))\n data['recur'] = parse_recur_or_prompt(utils.try_get_model_prop(task, 'due'))\n\n if not interactive:\n add_task(**data)\n else:\n add_task_interactive(**data)", "def startup_tasks(self) -> None:\n pass", "def _make_run_description(args):\n raise NotImplementedError", "def task_description(task):\r\n name = task.__name__ if hasattr(task, '__name__') else None\r\n if isinstance(task, types.MethodType):\r\n if name is not None and hasattr(task, '__self__'):\r\n return '%s from %s' % (name, task.__self__)\r\n elif isinstance(task, types.FunctionType):\r\n if name is not None:\r\n return str(name)\r\n return repr(task)", "def export_task(self, img, cont):\r\n return self._tasks_manager.create(\"export\", img=img, cont=cont)", "def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The 
metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")", "def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"set_datetime\",\n \"harvest_notes\",\n \"s3_to_server_collection\",\n \"s3_to_server_service\",\n \"reload_electronic_notes\",\n \"slack_post_success\"\n ])", "def import_snapshot(DryRun=None, Description=None, DiskContainer=None, ClientData=None, ClientToken=None, RoleName=None):\n pass", "def task_fields(task):\n name_field = [sg.Input(task.name, size=(18, 1))]\n other_values = (\n task.get_current_progress(),\n task.get_next_due_date(),\n task.assignee,\n task.length,\n \", \".join(task.linked_creatures),\n \", \".join(task.linked_plants),\n task.status.get(),\n )\n other_fields = [summary_field_format(value) for value in other_values]\n return name_field + other_fields", "def group_task(self):\n return {\n 'basename': self.name,\n 'name': None,\n 'doc': first_line(self.__doc__),\n }", "def summary(self):\n return self.instance.get_task_summary(self.name)", "def test_import_description():\n cwd = os.getcwd()\n test_image_1 = os.path.join(cwd, TEST_IMAGE_1)\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\n \"--verbose\",\n \"--clear-metadata\",\n \"--description\",\n \"{exiftool:XMP:Description|upper}\",\n test_image_1,\n ],\n terminal_width=TERMINAL_WIDTH,\n )\n\n assert result.exit_code == 0\n\n import_data = parse_import_output(result.output)\n file_1 = pathlib.Path(test_image_1).name\n uuid_1 = import_data[file_1]\n photo_1 = Photo(uuid_1)\n\n assert photo_1.filename == file_1\n assert photo_1.description == TEST_DATA[TEST_IMAGE_1][\"description\"].upper()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your Internet gateways.
def describe_internet_gateways(DryRun=None, InternetGatewayIds=None, Filters=None): pass
[ "def describe_vpn_gateways(DryRun=None, VpnGatewayIds=None, Filters=None):\n pass", "def describe_customer_gateways(DryRun=None, CustomerGatewayIds=None, Filters=None):\n pass", "def describe_nat_gateways(NatGatewayIds=None, Filters=None, MaxResults=None, NextToken=None):\n pass", "def describe_egress_only_internet_gateways(DryRun=None, EgressOnlyInternetGatewayIds=None, MaxResults=None, NextToken=None):\n pass", "def describe_nat_gateways(\n nat_gateway_id=None,\n subnet_id=None,\n subnet_name=None,\n vpc_id=None,\n vpc_name=None,\n states=(\"pending\", \"available\"),\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _find_nat_gateways(\n nat_gateway_id=nat_gateway_id,\n subnet_id=subnet_id,\n subnet_name=subnet_name,\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n states=states,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def delete_internet_gateways():\n print('Deleting Internet Gateways')\n client = boto3.resource('ec2')\n for igw in client.internet_gateways.all():\n for attachment in igw.attachments:\n if 'State' in attachment and attachment['State'] == 'available':\n vpc_id = attachment['VpcId']\n print('Detaching internet gateway {} from vpc {}'.format(igw.id, vpc_id))\n igw.detach_from_vpc(\n VpcId=vpc_id\n )\n print('Deleting Internet Gateway {}'.format(igw.id))\n igw.delete()\n\n while [igw for igw in client.internet_gateways.all()]:\n time.sleep(5)\n print('Internet Gateways deleted')", "def delete_egress_only_internet_gateways():\n client = boto3.client('ec2')\n print('Deleting Egress Only Internet Gateways')\n gw_resp = client.describe_egress_only_internet_gateways()\n while True:\n for gateway in gw_resp['EgressOnlyInternetGateways']:\n gw_id = gateway['EgressOnlyInternetGatewayId']\n client.delete_egress_only_internet_gateway(\n EgressOnlyInternetGatewayId=gw_id\n )\n if 'NextMarker' in gw_resp:\n gw_resp = client.describe_egress_only_internet_gateways(\n Marker=gw_resp['NextMarker'],\n )\n else:\n break\n while client.describe_egress_only_internet_gateways()['EgressOnlyInternetGateways']:\n time.sleep(5)\n print('Egress Only Internet Gateways deleted')", "async def test_multiple_gateways(caplog):\n async with Context() as context:\n await Py4JComponent(gateways={\n 'java1': {},\n 'java2': {}\n }).start(context)\n assert isinstance(context.java1, JavaGateway)\n assert isinstance(context.java2, JavaGateway)\n\n records = [record for record in caplog.records if record.name == 'asphalt.py4j.component']\n records.sort(key=lambda r: r.message)\n assert len(records) == 4\n assert records[0].message.startswith(\"Configured Py4J gateway \"\n \"(java1 / ctx.java1; address=127.0.0.1, port=\")\n assert records[1].message.startswith(\"Configured Py4J gateway \"\n \"(java2 / ctx.java2; address=127.0.0.1, port=\")\n assert records[2].message == 'Py4J gateway (java1) shut down'\n assert records[3].message == 'Py4J gateway (java2) shut down'", "def gateway_by_type(self, type=None, on_network=None): # @ReservedAssignment\n gateways = route_level(self, 'gateway')\n if not type:\n for gw in gateways:\n yield gw\n else:\n for node in gateways:\n #TODO: Change to type == node.related_element_type when\n # only supporting SMC >= 6.4\n if type == node.routing_node_element.typeof:\n # If the parent is level interface, this is a tunnel interface\n # where the gateway is bound to interface versus network\n parent = node._parent\n if parent.level == 'interface':\n interface = parent\n network = None\n else:\n network = parent\n interface = network._parent\n \n 
if on_network is not None:\n if network and network.ip == on_network:\n yield (interface, network, node)\n else:\n yield (interface, network, node)", "def gateway(self):\n return self.get_ip('gateway') or str(self.ip_network[1])", "def netctl(self):\n config = list()\n config.append('Description=\"%s network\"' % self.name)\n config.append('Interface=%s' % self.name)\n config.append('Connection=ethernet')\n if self.ipv4_address:\n config.append('IP=static')\n config.append(\"Address=('%s')\" % self.ipv4_address.with_prefixlen)\n if self.ipv4_gateway:\n config.append(\"Gateway='%s'\" % str(self.ipv4_gateway))\n else:\n config.append('IP=no')\n\n if self.ipv6_address:\n config.append('IP6=static')\n config.append(\"Address6=('%s')\" % self.ipv6_address.with_prefixlen)\n if self.ipv6_gateway:\n config.append(\"Gateway6='%s'\" % str(self.ipv6_gateway))\n else:\n config.append('IP6=no')\n\n if self.dns:\n dns = []\n for server in self.dns:\n dns.append(\"'%s'\" % str(server))\n config.append('DNS=(%s)' % \" \".join(dns))\n return config", "def rest_api_gateways(self):\n return self._rest_api_gateways", "def DescribeNatGateways(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeNatGateways\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeNatGatewaysResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def gateway(self) -> pulumi.Output['outputs.AppConnectionGateway']:\n return pulumi.get(self, \"gateway\")", "def gateway(self) -> Optional[pulumi.Input['AppConnectionGatewayArgs']]:\n return pulumi.get(self, \"gateway\")", "async def setup_gateways(hass, config):\n conf = config[DOMAIN]\n gateways = {}\n\n for index, gateway_conf in enumerate(conf[CONF_GATEWAYS]):\n persistence_file = gateway_conf.get(\n CONF_PERSISTENCE_FILE,\n hass.config.path('mysensors{}.pickle'.format(index + 1)))\n ready_gateway = await _get_gateway(\n hass, config, gateway_conf, persistence_file)\n if ready_gateway is not None:\n gateways[id(ready_gateway)] = ready_gateway\n\n return gateways", "def gateway_name(self) -> str:\n return self.gateway.name", "def get_gateway(self, gw_name):\n\t\treturn self.get_feature_source(\"gateway\", gw_name)", "def gate_names(self):\n return self.gg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your key pairs. For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide .
def describe_key_pairs(DryRun=None, KeyNames=None, Filters=None): pass
[ "def ex_describe_all_keypairs(self):\r\n names = [key_pair.name for key_pair in self.list_key_pairs()]\r\n return names", "def getkeypairs(show):\n keypairlist=[]\n \n try:\n keypairs=ec2.describe_key_pairs()\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while getting keypair data: \\n\\n\\n\")\n print(e)\n for keypair in keypairs['KeyPairs']:\n name=keypair['KeyName']\n \n if show:\n print(\"name: \"+name)\n keypairlist.append({ \"name\":name})\n return keypairlist", "def ex_describe_keypair(self, name):\r\n\r\n params = {\r\n 'Action': 'DescribeKeyPairs',\r\n 'KeyName.1': name\r\n }\r\n\r\n response = self.connection.request(self.path, params=params).object\r\n key_name = findattr(element=response, xpath='keySet/item/keyName',\r\n namespace=NAMESPACE)\r\n fingerprint = findattr(element=response,\r\n xpath='keySet/item/keyFingerprint',\r\n namespace=NAMESPACE).strip()\r\n return {\r\n 'keyName': key_name,\r\n 'keyFingerprint': fingerprint\r\n }", "def show_keypair(k5token, keypair_name, project_id, region):\n\n try:\n\n serverURL = 'https://compute.' + region + \\\n '.cloud.global.fujitsu.com/v2/' + project_id + '/os-keypairs/' + keypair_name\n response = requests.get(serverURL,\n headers={\n 'X-Auth-Token': k5token,\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'})\n return response\n except:\n return (\"\\nUnexpected error:\", sys.exc_info())", "def info(self):\n\n print(\"KEY INFO\")\n print(\" Network %s\" % self.network.name)\n print(\" Compressed %s\" % self.compressed)\n if self.secret:\n print(\"SECRET EXPONENT\")\n print(\" Private Key (hex) %s\" % self.private_hex)\n print(\" Private Key (long) %s\" % self.secret)\n if isinstance(self, HDKey):\n print(\" Private Key (wif) %s\" % self.wif_key())\n else:\n print(\" Private Key (wif) %s\" % self.wif())\n else:\n print(\"PUBLIC KEY ONLY, NO SECRET EXPONENT\")\n print(\"PUBLIC KEY\")\n print(\" Public Key (hex) %s\" % self.public_hex)\n print(\" Public Key uncompr. (hex) %s\" % self.public_uncompressed_hex)\n print(\" Public Key Hash160 %s\" % self.hash160.hex())\n print(\" Address (b58) %s\" % self.address())\n point_x, point_y = self.public_point()\n print(\" Point x %s\" % point_x)\n print(\" Point y %s\" % point_y)", "def get_key_pairs():\n return _get_json(\"json/nova/key_pairs_json.py\")", "def print_key_pairs(v, title=\"Parameters\", print_function=None):\n items = v.items() if type(v) is dict else v\n print_function(\"=\" * 40)\n print_function(title)\n print_function(\"=\" * 40)\n for key,value in items:\n print_function(\"{:<15}: {:<10}\".format(key, value if value is not None else \"None\"))\n print_function(\"-\" * 40)", "def list(self, limit):\n try:\n for kp in self.ec2_resource.key_pairs.limit(limit):\n print(f\"Found {kp.key_type} key {kp.name} with fingerprint:\")\n print(f\"\\t{kp.key_fingerprint}\")\n except ClientError as err:\n logger.error(\n \"Couldn't list key pairs. 
Here's why: %s: %s\",\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise", "def __str__ (self):\n return \"%s - %s\" % (self.key, self.desc)", "def get_key_pairs():\n client = boto3.client('ec2', AVAILABILITY_ZONE)\n keypairs = client.describe_key_pairs()\n keypairs = [ kepair for kepair in keypairs['KeyPairs']]\n keynames = [ kepair['KeyName'] for kepair in keypairs]\n return keynames", "def describe_keypairs(self, xml_bytes):\n results = []\n root = XML(xml_bytes)\n keypairs = root.find(\"keySet\")\n if keypairs is None:\n return results\n for keypair_data in keypairs:\n key_name = keypair_data.findtext(\"keyName\")\n key_fingerprint = keypair_data.findtext(\"keyFingerprint\")\n results.append(model.Keypair(key_name, key_fingerprint))\n return results", "def list_key_pairs(self):\r\n raise NotImplementedError(\r\n 'list_key_pairs not implemented for this driver')", "def example_info(key: str) -> None:\n if key not in __TRACKMAP:\n raise ParameterError(f\"Unknown example key: {key}\")\n\n license_file = __GOODBOY.fetch(__TRACKMAP[key][\"path\"] + \".txt\")\n\n with open(license_file, \"r\") as fdesc:\n print(f\"{key:10s}\\t{__TRACKMAP[key]['desc']:s}\")\n print(\"-\" * 68)\n for line in fdesc:\n print(line)", "def meta_key_map(self):\n meta_keys = list(self.meta.keys())\n meta_cards = [str(self.meta[key]['card']) for key in meta_keys]\n nk = max(12, max([len(key) for key in meta_keys]))\n nc = max(11, max([len(card) for card in meta_cards]))\n print('')\n print('{0} {1}'.format('Metadata Key'.center(nk), 'Header Card'.center(nc)))\n print('-'*nk + ' ' + '-'*nc)\n for key, card in zip(meta_keys, meta_cards):\n print('{0} {1}'.format(key.rjust(nk), card.rjust(nc)))\n print('')", "def show_dictionary():", "def test_vmware_service_resources_keypairs_get(self):\n pass", "def __str__(self):\n s_list = list()\n StringFormat.line(s_list)\n s_list.append(\"HSS private key\")\n StringFormat.format_hex(s_list, \"levels\", u32str(self.levels))\n for prv in self.pvt_keys:\n s_list.append(str(prv))\n StringFormat.line(s_list)\n return \"\\n\".join(s_list)", "def info(ctx: CLIContext, name: str) -> None:\n with Session() as session:\n try:\n rp = session.KeypairResourcePolicy(session.config.access_key)\n item = rp.info(name)\n ctx.output.print_item(item, _default_detail_fields)\n except Exception as e:\n ctx.output.print_error(e)\n sys.exit(1)", "def key_pair_name(self) -> Optional[str]:\n return pulumi.get(self, \"key_pair_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your NAT gateways.
def describe_nat_gateways(NatGatewayIds=None, Filters=None, MaxResults=None, NextToken=None): pass
[ "def describe_internet_gateways(DryRun=None, InternetGatewayIds=None, Filters=None):\n pass", "def describe_vpn_gateways(DryRun=None, VpnGatewayIds=None, Filters=None):\n pass", "def describe_nat_gateways(\n nat_gateway_id=None,\n subnet_id=None,\n subnet_name=None,\n vpc_id=None,\n vpc_name=None,\n states=(\"pending\", \"available\"),\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n return _find_nat_gateways(\n nat_gateway_id=nat_gateway_id,\n subnet_id=subnet_id,\n subnet_name=subnet_name,\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n states=states,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )", "def describe_customer_gateways(DryRun=None, CustomerGatewayIds=None, Filters=None):\n pass", "def delete_nat_gateways():\n print('Deleting NAT gateways')\n ec2 = boto3.client('ec2')\n for page in ec2.get_paginator('describe_nat_gateways').paginate():\n for nat_gateway in page['NatGateways']:\n nat_gateway_id = nat_gateway['NatGatewayId']\n print('Deleting Nat Gateway - {}'.format(nat_gateway_id))\n ec2.delete_nat_gateway(\n NatGatewayId=nat_gateway_id\n )\n\n while ec2.describe_nat_gateways()['NatGateways']:\n all_deleted = True\n for gateway in ec2.describe_nat_gateways()['NatGateways']:\n if gateway['State'] != 'deleted':\n all_deleted = False\n break\n if all_deleted:\n break\n else:\n time.sleep(5)\n\n print('NAT gateways deleted')", "def _find_nat_gateways(\n nat_gateway_id=None,\n subnet_id=None,\n subnet_name=None,\n vpc_id=None,\n vpc_name=None,\n states=(\"pending\", \"available\"),\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((nat_gateway_id, subnet_id, subnet_name, vpc_id, vpc_name)):\n raise SaltInvocationError(\n \"At least one of the following must be \"\n \"provided: nat_gateway_id, subnet_id, \"\n \"subnet_name, vpc_id, or vpc_name.\"\n )\n filter_parameters = {\"Filter\": []}\n\n if nat_gateway_id:\n filter_parameters[\"NatGatewayIds\"] = [nat_gateway_id]\n\n if subnet_name:\n subnet_id = _get_resource_id(\n \"subnet\", subnet_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not subnet_id:\n return False\n\n if subnet_id:\n filter_parameters[\"Filter\"].append({\"Name\": \"subnet-id\", \"Values\": [subnet_id]})\n\n if vpc_name:\n vpc_id = _get_resource_id(\n \"vpc\", vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if not vpc_id:\n return False\n\n if vpc_id:\n filter_parameters[\"Filter\"].append({\"Name\": \"vpc-id\", \"Values\": [vpc_id]})\n\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n nat_gateways = []\n for ret in __utils__[\"boto3.paged_call\"](\n conn3.describe_nat_gateways,\n marker_flag=\"NextToken\",\n marker_arg=\"NextToken\",\n **filter_parameters\n ):\n for gw in ret.get(\"NatGateways\", []):\n if gw.get(\"State\") in states:\n nat_gateways.append(gw)\n log.debug(\n \"The filters criteria %s matched the following nat gateways: %s\",\n filter_parameters,\n nat_gateways,\n )\n\n if nat_gateways:\n return nat_gateways\n else:\n return False", "def DescribeNatGateways(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeNatGateways\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeNatGatewaysResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def gateway(self):\n return 
self.get_ip('gateway') or str(self.ip_network[1])", "def netctl(self):\n config = list()\n config.append('Description=\"%s network\"' % self.name)\n config.append('Interface=%s' % self.name)\n config.append('Connection=ethernet')\n if self.ipv4_address:\n config.append('IP=static')\n config.append(\"Address=('%s')\" % self.ipv4_address.with_prefixlen)\n if self.ipv4_gateway:\n config.append(\"Gateway='%s'\" % str(self.ipv4_gateway))\n else:\n config.append('IP=no')\n\n if self.ipv6_address:\n config.append('IP6=static')\n config.append(\"Address6=('%s')\" % self.ipv6_address.with_prefixlen)\n if self.ipv6_gateway:\n config.append(\"Gateway6='%s'\" % str(self.ipv6_gateway))\n else:\n config.append('IP6=no')\n\n if self.dns:\n dns = []\n for server in self.dns:\n dns.append(\"'%s'\" % str(server))\n config.append('DNS=(%s)' % \" \".join(dns))\n return config", "def add_natgw(self, idx: int, nat_eips: Ref = None):\n if nat_eips:\n eip = Select(idx, nat_eips)\n else:\n self.nat_eip = self.t.add_resource(EIP(\n f'NatEip{self.idx}',\n Domain='vpc',\n ))\n eip = GetAtt(self.nat_eip, 'AllocationId')\n\n self.natgw = self.t.add_resource(NatGateway(\n f'NatGw{self.idx}',\n AllocationId=eip,\n SubnetId=Ref(self.subnet),\n ))\n\n self.t.add_output(Output(\n f'NatEip{self.idx}',\n Value=eip,\n Description=f'Nat Gateway Elastic IP for {self.az}',\n ))", "def neigh_options(config):\r\n\r\n next_hop = [\"Yes\" for k in dict.fromkeys(config) if k == \"next-hop-self\"]\r\n if not next_hop:\r\n next_hop = [\"No\"]\r\n\r\n reflector = [\"Yes\" for k in dict.fromkeys(config) if k == \"route-reflector-client\"]\r\n if not reflector:\r\n reflector = [\"No\"]\r\n\r\n soft_reconfig = [v for k, v in config.items() if k == \"soft-reconfiguration\"]\r\n if not soft_reconfig:\r\n soft_reconfig = [\"No\"]\r\n\r\n activate = [\"Yes\" for k in dict.fromkeys(config) if k == \"activate\"]\r\n if not reflector:\r\n activate = [\"No\"]\r\n\r\n return next_hop, reflector, soft_reconfig, activate", "def listVpnGateway(cls, api_client, **kwargs):\n cmd = {}\n cmd.update(kwargs)\n return super(Vpn, cls).list(api_client.listVpnGateways(**cmd))", "def gateway_by_type(self, type=None, on_network=None): # @ReservedAssignment\n gateways = route_level(self, 'gateway')\n if not type:\n for gw in gateways:\n yield gw\n else:\n for node in gateways:\n #TODO: Change to type == node.related_element_type when\n # only supporting SMC >= 6.4\n if type == node.routing_node_element.typeof:\n # If the parent is level interface, this is a tunnel interface\n # where the gateway is bound to interface versus network\n parent = node._parent\n if parent.level == 'interface':\n interface = parent\n network = None\n else:\n network = parent\n interface = network._parent\n \n if on_network is not None:\n if network and network.ip == on_network:\n yield (interface, network, node)\n else:\n yield (interface, network, node)", "def on_show_routes(self, widget):\n g = NetworkGraph.NetworkGraph().graph\n for node in g.nodes():\n # print node\n if isinstance(node, Firewall):\n print node.route_list\n else:\n pass # print node.to_string()\n\n print 'end nodes\\n'\n for edge in g.edges(data=True):\n # print edge\n\n firewall, ip = (edge[0], edge[1]) if isinstance(edge[0], Firewall) and isinstance(edge[1], Ip)\\\n else (edge[1], edge[0])\n route_list = firewall.route_list\n iface = self.get_iface_from_ip(firewall, ip)\n routes = []\n for route in route_list:\n if route.iface == iface:\n routes.append(route)\n output = {}\n for route in routes:\n if 
route.gw_ip.to_string() in [key for key in output.keys()]:\n output[route.gw_ip.to_string()].append(route.net_ip_dst.to_string() +\n '/' + str(fromDotted2Dec(route.net_mask.to_string())))\n else:\n output[route.gw_ip.to_string()] = []\n tmp = route.net_ip_dst.to_string()\n tmp2 = route.net_mask.to_string()\n if tmp == \"0.0.0.0 / 0\":\n tmp = \"0.0.0.0\"\n if tmp2 == \"0.0.0.0 / 0\":\n tmp2 = \"0.0.0.0\"\n output[route.gw_ip.to_string()].append(tmp\n + '/' + str(fromDotted2Dec(tmp2)))\n print len(output), output\n if len(output) > 0:\n data = Route_info(output, iface)\n edge[2]['object'].remove()\n NetworkGraph.NetworkGraph()._add_route_info(firewall, data, iface, edge)\n NetworkGraph.NetworkGraph.multidigraph = nx.MultiGraph()\n Gtk_Main.Gtk_Main().lateral_pane.focus_firewall()\n Gtk_Main.Gtk_Main().draw()", "def describe_egress_only_internet_gateways(DryRun=None, EgressOnlyInternetGatewayIds=None, MaxResults=None, NextToken=None):\n pass", "def getGatewayAppliances(self, namePrefix=None):\n filter = None\n if namePrefix:\n filter = {\n 'name': { 'operation': '^='+namePrefix}\n }\n\n gateways = self.client['SoftLayer_Account'].getNetworkGateways(filter=filter,mask=NetworkGateway.MASK)\n result = []\n if gateways:\n for gatewayData in gateways:\n gateway = NetworkGateway(gatewayData, self)\n if namePrefix is None or gateway.name.startswith(namePrefix):\n result.append(gateway)\n return result if len(result) > 0 else None", "def DescribeVpnGateways(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeVpnGateways\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeVpnGatewaysResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def describe_vpn_connections(DryRun=None, VpnConnectionIds=None, Filters=None):\n pass", "def create_network_gateway(self, body=None):\n return self._post(self.network_gateways_path, body=body)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your network ACLs. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide .
def describe_network_acls(DryRun=None, NetworkAclIds=None, Filters=None): pass
[ "def get_network_acls(self):\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls?version={}&generation={}\".format(\n self.cfg[\"version\"], self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching network ACLs. {}\".format(error))\n raise", "def create_nacls(self) -> None:\n selection_sagsnl = _ec2.SubnetSelection(subnet_group_name=SwiftComponents.SAGSNL)\n selection_amh = _ec2.SubnetSelection(subnet_group_name=SwiftComponents.AMH)\n\n self.create_nacl(cid=SwiftComponents.SAGSNL + \"NACL\", name=SwiftComponents.SAGSNL + \"NACL\",\n description=\"NACL for SAGSNL Subnet\",\n subnet_selection=selection_sagsnl)\n self.create_nacl(cid=SwiftComponents.AMH + \"NACL\", name=SwiftComponents.AMH + \"NACL\",\n description=\"NACL For AMMH Subnet\",\n subnet_selection=selection_amh)\n\n self.add_nacl_entry(cid=SwiftComponents.SAGSNL + \"NACL\",\n nacl_id=\"SAGSNLNACLEntry1\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.EGRESS)\n self.add_nacl_entry(cid=SwiftComponents.SAGSNL + \"NACL\",\n nacl_id=\"SAGSNLNACLEntry2\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.INGRESS)\n\n self.add_nacl_entry(cid=SwiftComponents.AMH + \"NACL\",\n nacl_id=\"AMHNACLEntry1\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.EGRESS)\n self.add_nacl_entry(cid=SwiftComponents.AMH + \"NACL\",\n nacl_id=\"AMHNACLEntry2\",\n cidr=_ec2.AclCidr.any_ipv4(),\n rule_number=100,\n traffic=_ec2.AclTraffic.all_traffic(),\n direction=_ec2.TrafficDirection.INGRESS)", "def __str__(self):\n sb = '\\nACL [ ' + str(self._acl_handle) + ' ]\\n'\n return sb", "def setAccessControlList(acl):", "def get_network_acl_by_id(self, id):\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}?version={}&generation={}\".format(\n id, self.cfg[\"version\"], self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching network ACL with ID {}. 
{}\".format(\n id, error))\n raise", "def create_network_acl_entry(DryRun=None, NetworkAclId=None, RuleNumber=None, Protocol=None, RuleAction=None, Egress=None, CidrBlock=None, Ipv6CidrBlock=None, IcmpTypeCode=None, PortRange=None):\n pass", "def get_acl_info(self, acl_name):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_CONTROLS, 3)\n\n total_acls = self._get_total_number(self.info['loc_cfg_total_acls_span'], \"Access Controls\")\n max_acls_row = int(self.info['const_cfg_max_acl_rows'])\n traverse_row = 1\n i = 0\n acl_info = {}\n\n if total_acls == u'0':\n logging.info(\"There's no acl rules in the Access Controls table\")\n return {}\n\n while i < int(total_acls):\n find_acl_name = self.info['loc_cfg_acl_name_cell']\n find_acl_name = find_acl_name.replace('$_$', str(traverse_row))\n get_acl_name = self.s.get_text(find_acl_name)\n\n if get_acl_name == acl_name:\n acl_edit = self.info['loc_cfg_acl_edit_span']\n acl_edit = acl_edit.replace('$_$', str(i))\n self.s.click_and_wait(acl_edit)\n\n acl_info['acl_name'] = acl_name\n if self.s.is_checked(self.info['loc_cfg_acl_allowall_radio']):\n acl_info['policy'] = 'allow-all'\n\n else:\n acl_info['policy'] = 'deny-all'\n\n acl_info['mac_entries'] = self.s.get_text(self.info['loc_cfg_acl_mac_table']).split('delete')[:-1]\n break\n\n if traverse_row == max_acls_row:\n traverse_row = 0\n self.s.click_and_wait(self.info['loc_cfg_acl_next_image'])\n\n traverse_row += 1\n i += 1\n time.sleep(1)\n\n return acl_info", "def get_network_acl(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='get_network_acl')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/network_acls/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return super(NetworkACLList, cls).list(api_client.listNetworkACLLists(**cmd)['networkacllist'])", "def test_visibility_acl_combinations(self):\n\n # check that INST, GROUP, & USER ACE's convert\n self.assertEqual(\n convert_acl('acl-overrule', ['001705', 'UIS', 'mjd66', 'aj333']),\n 'GROUP_001705,INST_UIS,USER_mjd66,USER_aj333'\n )\n\n # check that 'cam' is overridden by ACL\n self.assertEqual(\n convert_acl('cam', ['001705', 'UIS']),\n 'GROUP_001705,INST_UIS'\n )\n\n # check that 'cam' is converted when no ACL\n self.assertEqual(\n convert_acl('cam', ['']),\n 'CAM'\n )\n\n # check that 'cam-overrule' is converted\n self.assertEqual(\n convert_acl('cam-overrule', ['']),\n 'CAM'\n )\n\n # check that 'cam-overrule' is converted regardless of ACL\n self.assertEqual(\n convert_acl('cam-overrule', ['si202', 'jrn30']),\n 'CAM,USER_si202,USER_jrn30'\n )\n\n # check that 'world' is overridden by ACL\n self.assertEqual(\n convert_acl('world', ['101128', 'jew46', 'mec22']),\n 'GROUP_101128,USER_jew46,USER_mec22'\n )\n\n # check that 'world' is 
converted when no ACL\n self.assertEqual(\n convert_acl('world', ['']),\n 'WORLD'\n )\n\n # check that 'world-overrule' is converted\n self.assertEqual(\n convert_acl('world-overrule', ['']),\n 'WORLD'\n )\n\n # check that 'world-overrule' is converted regardless of ACL\n self.assertEqual(convert_acl(\n 'world-overrule', ['jar35', 'lmd11', 'hs243']),\n 'WORLD,USER_jar35,USER_lmd11,USER_hs243'\n )", "def emit_acl(fp: io.IOBase, aclname: str, acllist: str) -> None:\n lines = textwrap.wrap(acllist)\n fp.write(str.format('acl \"{0}\" {{', aclname)) # double open brace for str.format\n for itm in lines:\n fp.write(\"\\n \" + itm)\n fp.write(\";\\n};\\n\\n\")", "def get_acl_type(self):\n return self.acl_type", "def generate_acl(self, name=None, data=None, metadata=None,\n json_string=None, uge_version=None,\n add_required_data=True):\n return self.access_list_manager.generate_object(\n name=name, data=data, metadata=metadata,\n json_string=json_string, uge_version=uge_version,\n add_required_data=add_required_data)", "def acl(self):\n # type: () -> list[AclEntry]\n return self._acl", "def edit_acl_rule(self, old_acl_name, new_acl_name = \"\", is_added_mac = False, mac_list = [],\n is_modified_policy = False, new_policy = False, old_mac_addr = \"\"):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_CONTROLS, 5)\n\n ##zj 20140410 fixed ZF-8015\n if self.s.is_element_present(self.info['loc_cfg_acl_icon_expand']):\n pass\n elif self.s.is_element_present(self.info['loc_cfg_acl_icon_collapse']): \n self.s.click_and_wait(self.info['loc_cfg_acl_icon_collapse']) \n ##zj 20140410 fixed ZF-8015 \n\n #cwang@2010-9-30, checking element first, for scaling test.\n try:\n self._fill_search_txt(self.info['loc_cfg_l2_acl_search_textbox'], old_acl_name, is_refresh = False)\n except Exception, e:\n logging.debug(e.message)\n self._fill_search_txt(self.info['loc_cfg_l2_acl_search_textbox'], old_acl_name, is_refresh = True)\n\n try:\n\n if not self._wait_for_element(self.info['loc_cfg_total_acls_span'], is_refresh = True):\n raise Exception('Element [%s] not found' % self.info['loc_cfg_total_acls_span'])\n\n total_acls = self._get_total_number(self.info['loc_cfg_total_acls_span'], \"Access Controls\")\n max_acls_row = int(self.info['const_cfg_max_acl_rows'])\n traverse_row = 1\n i = 0\n\n if total_acls == u'0':\n logging.info(\"There's no acl rules in the Access Controls table\")\n return\n\n while i < int(total_acls):\n find_acl_name = self.info['loc_cfg_acl_name_cell']\n find_acl_name = find_acl_name.replace('$_$', str(traverse_row))\n get_acl_name = self.s.get_text(find_acl_name)\n\n if get_acl_name == old_acl_name:\n acl_edit = self.info['loc_cfg_acl_edit_span']\n acl_edit = acl_edit.replace('$_$', str(i))\n self.s.click_and_wait(acl_edit)\n\n if new_acl_name:\n self.s.type_text(self.info['loc_cfg_acl_name_textbox'], new_acl_name)\n if is_added_mac:\n if len(mac_list) == 1:\n self._delete_mac_addr_in_acl(old_mac_addr)\n self.s.type_text(self.info['loc_cfg_acl_mac_textbox'], mac_list[0])\n self.s.click_and_wait(self.info['loc_cfg_acl_createnew_station_button'])\n self.s.get_alert(self.info['loc_cfg_acl_cancel_button'])\n\n else:\n self._delete_all_mac_addrs_in_acl()\n for mac in mac_list:\n self.s.type_text(self.info['loc_cfg_acl_mac_textbox'], mac)\n self.s.click_and_wait(self.info['loc_cfg_acl_createnew_station_button'])\n self.s.get_alert(self.info['loc_cfg_acl_cancel_button'])\n time.sleep(1)\n\n if is_modified_policy:\n if new_policy:\n self.s.click_and_wait(self.info['loc_cfg_acl_allowall_radio'])\n 
else:\n self.s.click_and_wait(self.info['loc_cfg_acl_denyall_radio'])\n\n self.s.click_and_wait(self.info['loc_cfg_acl_ok_button'])\n self.s.get_alert(self.info['loc_cfg_acl_cancel_button'])\n\n return\n\n\n if traverse_row == max_acls_row:\n traverse_row = 0\n self.s.click_and_wait(self.info['loc_cfg_acl_next_image'])\n traverse_row += 1\n i += 1\n time.sleep(1)\n\n logging.info(\"No ACL rule named %s existed in the ACL table\" % old_acl_name)\n\n finally:\n self._fill_search_txt(self.info['loc_cfg_l2_acl_search_textbox'], '')", "def get_network_acl_rules(self, acl):\n by_name = self.get_network_acl_rules_by_name(acl)\n if \"errors\" in by_name:\n for key_name in by_name[\"errors\"]:\n if key_name[\"code\"] == \"not_found\":\n by_id = self.get_network_acl_rules_by_id(acl)\n if \"errors\" in by_id:\n return by_id\n return by_id\n else:\n return by_name\n else:\n return by_name", "def _get_acl_rules(self, address_type, acl_type, acl_name, seq_range):\n rules_list = []\n\n if address_type == 'mac':\n cmd = acl_template.show_l2_access_list\n elif address_type == 'ip':\n cmd = acl_template.show_ip_access_list\n elif address_type == 'ipv6':\n cmd = acl_template.show_ipv6_access_list\n else:\n raise ValueError('{} not supported'.format(address_type))\n\n t = jinja2.Template(cmd)\n config = t.render(acl_name_str=acl_name)\n config = ' '.join(config.split())\n\n output = self._callback(config, handler='cli-get')\n\n # Check if there is any error\n self._process_cli_output(inspect.stack()[0][3], config, output)\n\n if address_type == 'mac':\n rules_list = self._parse_l2_rule(output, seq_range)\n elif address_type == 'ip':\n if acl_type == 'standard':\n rules_list = self._parse_std_ip_rule(output, seq_range)\n elif acl_type == 'extended':\n rules_list = self._parse_ext_ip_rule(output, seq_range)\n elif address_type == 'ipv6':\n rules_list = self._parse_ext_ipv6_rule(output, seq_range)\n\n return rules_list", "def AccessListAdd(self, acl):\n self.send_AccessListAdd(acl)\n return self.recv_AccessListAdd()", "def get_all_acl_names(self):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_CONTROLS, 3)\n\n total_acls = self._get_total_number(self.info['loc_cfg_total_acls_span'], \"Access Controls\")\n max_acls_row = int(self.info['const_cfg_max_acl_rows'])\n traverse_row = 1\n i = 0\n total_entries = []\n\n if total_acls == u'0':\n logging.info(\"There's no ACL rules in the Access Controls table\")\n return []\n\n while i < int(total_acls):\n find_acl_name = self.info['loc_cfg_acl_name_cell']\n find_acl_name = find_acl_name.replace('$_$', str(traverse_row))\n get_acl_name = self.s.get_text(find_acl_name)\n total_entries.append(get_acl_name)\n\n if traverse_row == max_acls_row:\n traverse_row = 0\n self.s.click_and_wait(self.info['loc_cfg_acl_next_image'])\n traverse_row += 1\n i += 1\n time.sleep(1)\n\n return total_entries" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes a network interface attribute. You can specify only one attribute at a time.
def describe_network_interface_attribute(DryRun=None, NetworkInterfaceId=None, Attribute=None): pass
[ "def modify_network_interface_attribute(DryRun=None, NetworkInterfaceId=None, Description=None, SourceDestCheck=None, Groups=None, Attachment=None):\n pass", "def interface_description(self, intconf):\n if not intconf['enabled']:\n return \"DISABLED\"\n\n if intconf['nb_int_desc']:\n # Custom description from Netbox descrtiption field\n return intconf['nb_int_desc']\n\n if intconf['circuit_id']:\n # Link connects to a third party circuit\n cct_desc = f\"{intconf['circuit_id']} {intconf.get('circuit_desc', '')}\".strip()\n if intconf['wmf_z_end']:\n # Typically transport circuit\n return f\"{intconf['link_type']}: {intconf['z_dev']}:{intconf['z_int']} ({intconf['provider']}, \" \\\n f\"{cct_desc}) {{#{intconf['cable_label']}}}\"\n # Typically transit circuit\n return f\"{intconf['link_type']}: {intconf['provider']} ({cct_desc}) {{#{intconf['cable_label']}}}\"\n\n if intconf['z_dev']:\n # Direct link between two WMF devices\n if intconf['link_type']:\n # Typically 'core' link between two network devices\n return f\"{intconf['link_type']}: {intconf['z_dev']}:{intconf['z_int']} {{#{intconf['cable_label']}}}\"\n if intconf['cable_label']:\n # Typically server connection\n return f\"{intconf['z_dev']} {{#{intconf['cable_label']}}}\"\n return f\"{intconf['z_dev']}\"\n\n return ''", "def describe_image_attribute(DryRun=None, ImageId=None, Attribute=None):\n pass", "def attributeInfo(multi=bool, inherited=bool, bool=bool, internal=bool, type=\"string\", hidden=bool, enumerated=bool, allAttributes=bool, logicalAnd=bool, writable=bool, userInterface=bool, leaf=bool, short=bool):\n pass", "def showattribute(self, vname=None, device=None):\n if device is None:\n device = sys.stdout\n if vname is None:\n vname = self.default_variable_name\n device.write(\"Attributes of \")\n device.write(vname)\n device.write(\" in file \")\n device.write(self.id)\n device.write(\":\\n\")\n device.write(str(self.listattribute(vname)))\n device.write(\"\\n\")", "def describe_attr_value(attr, die, section_offset):\r\n descr_func = _ATTR_DESCRIPTION_MAP[attr.form]\r\n val_description = descr_func(attr, die, section_offset)\r\n\r\n # For some attributes we can display further information\r\n extra_info_func = _EXTRA_INFO_DESCRIPTION_MAP[attr.name]\r\n extra_info = extra_info_func(attr, die, section_offset)\r\n return str(val_description) + '\\t' + extra_info", "def do_attr(self, args): # noqa: C901\n\n if not self.current:\n print('There are no resources in use. 
Use the command \"open\".')\n return\n\n args = args.strip()\n\n if not args:\n self.print_attribute_list()\n return\n\n args = args.split(\" \")\n\n if len(args) > 2:\n print(\n \"Invalid syntax, use `attr <name>` to get; or `attr <name> <value>` to set\"\n )\n return\n\n if len(args) == 1:\n # Get a given attribute\n attr_name = args[0]\n if attr_name.startswith(\"VI_\"):\n try:\n print(\n self.current.get_visa_attribute(getattr(constants, attr_name))\n )\n except Exception as e:\n print(e)\n else:\n try:\n print(getattr(self.current, attr_name))\n except Exception as e:\n print(e)\n return\n\n # Set the specified attribute value\n attr_name, attr_state = args[0], args[1]\n if attr_name.startswith(\"VI_\"):\n try:\n attributeId = getattr(constants, attr_name)\n attr = attributes.AttributesByID[attributeId]\n datatype = attr.visa_type\n retcode = None\n if datatype == \"ViBoolean\":\n if attr_state == \"True\":\n attr_state = True\n elif attr_state == \"False\":\n attr_state = False\n else:\n retcode = (\n constants.StatusCode.error_nonsupported_attribute_state\n )\n elif datatype in [\n \"ViUInt8\",\n \"ViUInt16\",\n \"ViUInt32\",\n \"ViInt8\",\n \"ViInt16\",\n \"ViInt32\",\n ]:\n try:\n attr_state = int(attr_state)\n except ValueError:\n retcode = (\n constants.StatusCode.error_nonsupported_attribute_state\n )\n if not retcode:\n retcode = self.current.set_visa_attribute(attributeId, attr_state)\n if retcode:\n print(\"Error {}\".format(str(retcode)))\n else:\n print(\"Done\")\n except Exception as e:\n print(e)\n else:\n print(\"Setting Resource Attributes by python name is not yet supported.\")\n return", "def network_interface(ifname, sysifdir=\"/sys/class/net/\"):\n # only physical and virtual devices are in /sys/class/net/, no aliases\n ifs = [name for name in os.listdir(sysifdir)]\n if ifname not in ifs:\n err = ((\"'%s' isn't a network interface; \"\n \"you probably meant one of: %s\") %\n (ifname, \" \".join(sorted(ifs))))\n raise argparse.ArgumentTypeError(err)\n return ifname\n\n\n # NOTE: If RG is set to use VLAN ID 0, no VLAN is needed to bypass. If RG\n # is set to use a nonzero VLAN ID, then a VLAN subinterface with that VLAN\n # ID must be created on IF_WAN.\n #\n # Debian autoconfigures VLANs using /etc/network/if-pre-up.d/vlan, which\n # pads VLAN IDs in the resulting interface name by default (\"auto eth0.0\"\n # results in a VLAN named eth0.0000, for example) when deriving raw device\n # name, VLAN ID, and name padding arguments for vconfig from things named\n # <thing>.<digits> it finds in /etc/network/interfaces.\n #\n # If this is not desired, a workaround is to edit vlan and add a special\n # case exactly matching the desired VLAN interface name, as in the example\n # below for an interface named eth0.0.\n #\n # case \"$IFACE\" in\n # [ ... ]\n # # for eap_proxy: special case to create eth0.0 properly\n # eth0.0)\n # vconfig set_name_type DEV_PLUS_VID_NO_PAD\n # VLANID=0\n # IF_VLAN_RAW_DEVICE=eth0\n # ;;\n # [ ... 
]", "def netctl(self):\n config = list()\n config.append('Description=\"%s network\"' % self.name)\n config.append('Interface=%s' % self.name)\n config.append('Connection=ethernet')\n if self.ipv4_address:\n config.append('IP=static')\n config.append(\"Address=('%s')\" % self.ipv4_address.with_prefixlen)\n if self.ipv4_gateway:\n config.append(\"Gateway='%s'\" % str(self.ipv4_gateway))\n else:\n config.append('IP=no')\n\n if self.ipv6_address:\n config.append('IP6=static')\n config.append(\"Address6=('%s')\" % self.ipv6_address.with_prefixlen)\n if self.ipv6_gateway:\n config.append(\"Gateway6='%s'\" % str(self.ipv6_gateway))\n else:\n config.append('IP6=no')\n\n if self.dns:\n dns = []\n for server in self.dns:\n dns.append(\"'%s'\" % str(server))\n config.append('DNS=(%s)' % \" \".join(dns))\n return config", "def set_interface(self, interface: str):\n self.di = interface", "def __str__(self):\n return \"@attribute %s numeric\" % self.name", "def set_attribute(self,att,val):\r\n self.attributes[att] = val", "def describe_volume_attribute(DryRun=None, VolumeId=None, Attribute=None):\n pass", "def get_network_interface_name(self):\n # type: () -> str\n string_len = c_uint(100)\n network_name_str = create_string_buffer(string_len.value)\n err = lib.ulDevGetConfigStr(self.__handle, UlInfoItem.NET_IFC_STR,\n 0, network_name_str, byref(string_len))\n if err != 0:\n raise ULException(err)\n return network_name_str.value.decode('utf-8')", "def validate_attr(self, arg):\n args = arg.split(' ')\n if len(args) < 3:\n print(HBNBCommand.ERROR_ATTR)\n return False\n attribute = args[2]\n return attribute", "def update_nic_interface_names(node):\n for ifc in node[u\"interfaces\"].values():\n if_pci = ifc[u\"pci_address\"].replace(u\".\", u\":\").split(u\":\")\n loc = f\"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/\" \\\n f\"{int(if_pci[3], 16):x}\"\n if ifc[u\"model\"] == u\"Intel-XL710\":\n ifc[u\"name\"] = f\"FortyGigabitEthernet{loc}\"\n elif ifc[u\"model\"] == u\"Intel-X710\":\n ifc[u\"name\"] = f\"TenGigabitEthernet{loc}\"\n elif ifc[u\"model\"] == u\"Intel-X520-DA2\":\n ifc[u\"name\"] = f\"TenGigabitEthernet{loc}\"\n elif ifc[u\"model\"] == u\"Cisco-VIC-1385\":\n ifc[u\"name\"] = f\"FortyGigabitEthernet{loc}\"\n elif ifc[u\"model\"] == u\"Cisco-VIC-1227\":\n ifc[u\"name\"] = f\"TenGigabitEthernet{loc}\"\n else:\n ifc[u\"name\"] = f\"UnknownEthernet{loc}\"", "def describe_vpc_attribute(DryRun=None, VpcId=None, Attribute=None):\n pass", "def ModifyNetworkInterfaceAttribute(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyNetworkInterfaceAttribute\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyNetworkInterfaceAttributeResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def generate_network_interface_name(vr, i):\n # type: (VmResource, int) -> str\n return '{}-ni{}'.format(vr.hostname_prefix, str(i).zfill(3))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your network interfaces.
def describe_network_interfaces(DryRun=None, NetworkInterfaceIds=None, Filters=None): pass
[ "def describe_network_interface_attribute(DryRun=None, NetworkInterfaceId=None, Attribute=None):\n pass", "def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointAccessVpcEndpointNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"network_interfaces\")", "def network_interfaces(self):\n out = []\n for adapter in getattr(self, 'network_adapters', '').split('|'):\n parts = adapter.split(',')\n if len(parts) == 2:\n out.append(Sensor.NetworkAdapter._make([':'.join(a+b for a, b in zip(parts[1][::2], parts[1][1::2])),\n parts[0]]))\n return out", "def _get_all_interfaces_names(self): \n \n listOfInterfaceName = []\n \n cmd = 'ip link show | awk \\'/eth[0-9]/ {print $0}\\''\n stdout, stderr, rc = _exec_command(cmd)\n \n if rc !=0 or stderr !='':\n raise AssertionError('*ERROR* cmd=%s, rc=%s, %s %s' %(cmd,rc,stdout,stderr)) \n \n listOfContent = stdout.split('\\n')\n for content in listOfContent:\n subcontent = content.split(':')\n if (len(subcontent) > 2):\n listOfInterfaceName.append(subcontent[1].lstrip())\n \n return listOfInterfaceName", "def enumerate_interfaces():\n # XXX : perhaps use iwconfig, which seems faster than airmon-ng for listing\n cmd = \"fakeroot airmon-ng\"\n output = subprocess.check_output(cmd, shell=True)\n interfaces = interfaces_from_airmon_ng(output)\n return interfaces", "def network_interfaces(self):\n ret = self._get_attr(\"networkInterfaces\")\n return [IHostNetworkInterface(a) for a in ret]", "def host_nics(self, session):\n url = utils.urljoin(\n self.base_path, self.id, 'host_info', 'network_interfaces')\n resp = session.get(url, endpoint_filter=self.service).json()\n return resp['info']", "def DescribeNetworkInterfaces(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeNetworkInterfaces\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeNetworkInterfacesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def enumerate_networks(interface):\n if not interface:\n return None\n interface = str(interface)\n cmd = \"iwlist %s scan\" % interface\n try:\n output = subprocess.check_output(cmd, shell=True)\n except subprocess.CalledProcessError:\n print \"Oops: Something is wrong with the network interface.\"\n print \"To fix it, try running this command:\"\n print (\"sudo ifconfig %s down && sudo ifconfig %s up\" %\n (interface, interface))\n return []\n result = output.split('\\n', 1)[0].replace(' ','')\n if result == (\"%sScancompleted:\" % interface):\n return networks_from_iwlist(output)\n elif result == (\"%sNoscanresults\" % interface):\n return []\n print \"Oops: Something unexpected happened when running this command:\"\n print \" \" + cmd\n print \" the output of which was:\"\n print \"************************************\"\n print output\n print \"************************************\"\n return []", "def get_interface_names():\n sys_path = \"/sys/class/net\"\n ifaces = {}\n for iname in os.listdir(sys_path):\n mac = read_file(os.path.join(sys_path, iname, \"address\"))\n mac = mac.strip().lower()\n ifaces[mac] = iname\n return ifaces", "def get_interfaces_states(interfaces=None):\n\n states = {}\n links = _ipr.get_links()\n for link in links:\n ifname = link.get_attr(\"IFLA_IFNAME\")\n if interfaces is None or ifname in interfaces:\n ip_list = 
_ipr.get_addr(family=socket.AF_INET, label=ifname)\n if len(ip_list) > 0:\n state = \"UP\"\n else:\n state = \"DOWN\"\n states[ifname] = state\n return states", "def get_interfaces(args):\n\n dev = Device(host=args['hostname'], user=args['user'],\n ssh_private_key_file=args['sshkey'],\n ssh_config=args['sshconfig'])\n\n try:\n dev.open()\n except:\n print('Unexpected error with NETCONF connection. Try `ssh ' +\n args['hostname'] + '`')\n exit(3)\n\n interfaces = PhyPortTable(dev).get()\n dev.close()\n\n return interfaces", "def interface(self):\n return self.broker.interface(**{\"IfAddrID\": self.IfAddrID})", "def GetInterfaces(cls):\n return [NetworkInterface(name) for name in os.listdir(cls._SYSFS_NET)]", "def list_nic_interfaces(self):\n return self._nic_mgmt.list_nic_interfaces()", "def network_interface(ifname, sysifdir=\"/sys/class/net/\"):\n # only physical and virtual devices are in /sys/class/net/, no aliases\n ifs = [name for name in os.listdir(sysifdir)]\n if ifname not in ifs:\n err = ((\"'%s' isn't a network interface; \"\n \"you probably meant one of: %s\") %\n (ifname, \" \".join(sorted(ifs))))\n raise argparse.ArgumentTypeError(err)\n return ifname\n\n\n # NOTE: If RG is set to use VLAN ID 0, no VLAN is needed to bypass. If RG\n # is set to use a nonzero VLAN ID, then a VLAN subinterface with that VLAN\n # ID must be created on IF_WAN.\n #\n # Debian autoconfigures VLANs using /etc/network/if-pre-up.d/vlan, which\n # pads VLAN IDs in the resulting interface name by default (\"auto eth0.0\"\n # results in a VLAN named eth0.0000, for example) when deriving raw device\n # name, VLAN ID, and name padding arguments for vconfig from things named\n # <thing>.<digits> it finds in /etc/network/interfaces.\n #\n # If this is not desired, a workaround is to edit vlan and add a special\n # case exactly matching the desired VLAN interface name, as in the example\n # below for an interface named eth0.0.\n #\n # case \"$IFACE\" in\n # [ ... ]\n # # for eap_proxy: special case to create eth0.0 properly\n # eth0.0)\n # vconfig set_name_type DEV_PLUS_VID_NO_PAD\n # VLANID=0\n # IF_VLAN_RAW_DEVICE=eth0\n # ;;\n # [ ... 
]", "def get_interfaces_ip(self):\n # Disable Pageing of the device\n self.disable_pageing()\n \n out_curr_config = self._send_command('display current-configuration')\n ipv4table = re.findall(r'^interface\\s+([A-Za-z0-9-/]{1,40})\\n.*\\s+ip\\s+address\\s+(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s+(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\n',out_curr_config,re.M)\n # TODO: get device with v6 and update above struct\n # ipv6table = re.findall(r'',out_curr_config,re.M)\n output_ipv4table = []\n iface = {}\n iface['ipv4'] = {}\n iface['ipv6'] = {}\n for rec in ipv4table:\n interface,ip,mask = rec\n norm_int = self.normalize_port_name(interface)\n iinterfaces = { norm_int : {'ipv4': {ip: { 'prefix_len': mask}}}}\n output_ipv4table.append(iinterfaces)\n\n return output_ipv4table", "def netctl(self):\n config = list()\n config.append('Description=\"%s network\"' % self.name)\n config.append('Interface=%s' % self.name)\n config.append('Connection=ethernet')\n if self.ipv4_address:\n config.append('IP=static')\n config.append(\"Address=('%s')\" % self.ipv4_address.with_prefixlen)\n if self.ipv4_gateway:\n config.append(\"Gateway='%s'\" % str(self.ipv4_gateway))\n else:\n config.append('IP=no')\n\n if self.ipv6_address:\n config.append('IP6=static')\n config.append(\"Address6=('%s')\" % self.ipv6_address.with_prefixlen)\n if self.ipv6_gateway:\n config.append(\"Gateway6='%s'\" % str(self.ipv6_gateway))\n else:\n config.append('IP6=no')\n\n if self.dns:\n dns = []\n for server in self.dns:\n dns.append(\"'%s'\" % str(server))\n config.append('DNS=(%s)' % \" \".join(dns))\n return config", "def foundInterfaces(self, interfaces):\n self.interfaceList.clear()\n self.available_interfaces = interfaces\n for i in interfaces:\n if (len(i[1]) > 0):\n self.interfaceList.addItem(\"%s - %s\" % (i[0], i[1]))\n else:\n self.interfaceList.addItem(i[0])\n if len(interfaces) > 0:\n self.interfaceList.setCurrentRow(0)\n self.connectButton.setEnabled(True)\n self.cancelButton.setEnabled(True)\n self.scanButton.setEnabled(True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes one or more of your placement groups. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.
def describe_placement_groups(DryRun=None, GroupNames=None, Filters=None): pass
[ "def delete_placement_groups():\n client = boto3.resource('ec2')\n print('Deleting Placement Groups')\n for placement_group in client.placement_groups.all():\n print('Deleting Placement Group {}'.format(placement_group.name))\n placement_group.delete()\n print('Placement Groups deleted')", "def create_placement_group(DryRun=None, GroupName=None, Strategy=None):\n pass", "def placement_group_id(self) -> Optional[str]:\n return pulumi.get(self, \"placement_group_id\")", "def test_list_eip_groups_with_detailed_options(self):\n print((self.client.list_eip_groups(id=EIP_GRP_ID, name=EIP_GRP_NAME,\n status=EIP_GROUP_STATUS,\n marker=MARKER,\n max_keys=MAX_KEYS)))", "def group_names_for_display(self):\n return self.demographic_group_name, \"non-\" + self.demographic_group_name", "def groups_display(self) -> str:\n return \", \".join([\n taxonomy.definitions.GROUPS[group]['name']\n for group in self.submission_groups\n ])", "def test_list_eip_groups(self):\n print((self.client.list_eip_groups(max_keys=1)))", "def configure_groups():\n from collections import OrderedDict \n\n order = 0.0\n group_config = OrderedDict()\n\n group_config['H5F'] = {}\n group_config['H5D'] = {}\n group_config['MPIIO'] = {}\n group_config['DXT_MPIIO'] = {}\n group_config['STDIO'] = {}\n group_config['POSIX'] = {}\n group_config['DXT_POSIX'] = {}\n group_config['LUSTRE'] = {}\n\n # apply order\n for k,v in group_config.items():\n v['order'] = order\n order += 1.0\n\n return group_config", "def print_groups(self):\n\n text = ''\n\n # print out a starting message, and print headers.\n # print('printing groups')\n text += self.print_header()\n\n # print out the row numbers and the contents of the\n # rows, with the values in m represented by groups\n # and wall characters.\n for j in range(0, self.height):\n\n text += '{}| '.format(j%10)\n # print('{}|'.format(j%10), end=' ')\n \n for i in range(0, self.width):\n text += '{} '.format(self.group_map[j][i])\n # print(self.group_map[j][i], end=' ')\n\n text += '\\n'\n # print()\n\n # print the ending message, then check the map.\n # print('end of groups\\n')\n self.assert_array_size('print_groups', self.group_map)\n \n return text", "def GetPlacementGroupSpecClass(cloud):\n return spec.GetSpecClass(BasePlacementGroupSpec, CLOUD=cloud)", "def cmd_groups(self):\r\n return dict({i.name: i.info() for i in self.groups})", "def instance_group_string(self):\n\n model_config = self.get_config()\n\n # TODO change when remote mode is fixed\n # Set default count/kind\n count = 1\n if cuda.is_available():\n kind = 'GPU'\n else:\n kind = 'CPU'\n\n if 'instance_group' in model_config:\n instance_group_list = model_config['instance_group']\n group_str_list = []\n for group in instance_group_list:\n group_kind, group_count = kind, count\n # Update with instance group values\n if 'kind' in group:\n group_kind = group['kind'].split('_')[1]\n if 'count' in group:\n group_count = group['count']\n group_str_list.append(f\"{group_count}/{group_kind}\")\n return ','.join(group_str_list)\n return f\"{count}/{kind}\"", "def get_group(self): # real signature unknown; restored from __doc__\n return \"\"", "def placement_layer(self):\n return self.pinlist[0].GetGroup().GetPlacementLayer().GetName()", "def test_destroy_not_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[1]\r\n self.assertTrue(group.destroy())", "def __configGroups(self):\n \n if self.niveau == 1 :\n self.__groups.append(Group(1, Constants.DescPortionGrp1, 
Constants.NbPortionGrp1))\n        elif self.niveau == 2 :\n            self.__groups.append(Group(2, Constants.DescPortionGrp2, Constants.NbPortionGrp2))\n            self.__groups.append(Group(3, Constants.DescPortionGrp3, Constants.NbPortionGrp3))\n        elif self.niveau == 3 :\n            self.__groups.append(Group(4, Constants.DescPortionGrp4, Constants.NbPortionGrp4))\n            self.__groups.append(Group(5, Constants.DescPortionGrp5, Constants.NbPortionGrp5))\n        elif self.niveau == 4 :\n            self.__groups.append(Group(6, Constants.DescPortionGrp6, Constants.NbPortionGrp6))\n            self.__groups.append(Group(7, Constants.DescPortionGrp7, Constants.NbPortionGrp7))\n        else:\n            raise MyException(\"__configGroups() : wrong group level, choose from {1, 2, 3, 4}\")", "def test_get_eip_group(self):\n        print((self.client.get_eip_group(id=EIP_GRP_ID)))", "def test_create_eip_group_with_name(self):\n        name = 'test_eip_group'\n        self.client.create_eip_group(eip_count=2,\n                                     bandwidth_in_mbps=10,\n                                     name=name, config=None)", "def GetPlacementGroupClass(cloud):\n  return resource.GetResourceClass(BasePlacementGroup,\n                                   CLOUD=cloud)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }