Columns: focal_method (string, 13 to 60.9k chars) · test_case (string, 25 to 109k chars)
public static List<?> toList(Object value) {
    return convert(List.class, value);
}
@Test
public void toListTest() {
    final List<String> list = Arrays.asList("1", "2");
    final String str = Convert.toStr(list);
    final List<String> list2 = Convert.toList(String.class, str);
    assertEquals("1", list2.get(0));
    assertEquals("2", list2.get(1));

    final List<Integer> list3 = Convert.toList(Integer.class, str);
    assertEquals(1, list3.get(0).intValue());
    assertEquals(2, list3.get(1).intValue());
}
public boolean onSameSegment(LogOffsetMetadata that) {
    if (messageOffsetOnly() || that.messageOffsetOnly())
        return false;

    return this.segmentBaseOffset == that.segmentBaseOffset;
}
@Test
void testOnSameSegment() {
    LogOffsetMetadata metadata1 = new LogOffsetMetadata(1L, 0L, 1);
    LogOffsetMetadata metadata2 = new LogOffsetMetadata(5L, 4L, 2);
    LogOffsetMetadata metadata3 = new LogOffsetMetadata(10L, 4L, 200);
    assertFalse(metadata1.onSameSegment(metadata2));
    assertTrue(metadata2.onSameSegment(metadata3));

    LogOffsetMetadata metadata4 = new LogOffsetMetadata(50);
    LogOffsetMetadata metadata5 = new LogOffsetMetadata(100);
    assertFalse(metadata4.onSameSegment(metadata5));
}
public static String getUserName() {
    return Optional.ofNullable(USER_THREAD_LOCAL.get()).map(User::getUsername).orElse("");
}
@Test
public void testGetUserName() {
    UserContext.setUserInfo(USERNAME, USER_ROLE);
    String userName = UserContext.getUserName();
    Assert.isTrue(USERNAME.equals(userName));
}
public static String initEndpoint(final NacosClientProperties properties) {
    if (properties == null) {
        return "";
    }
    // Whether to enable domain name resolution rules
    String isUseEndpointRuleParsing = properties.getProperty(PropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE,
            properties.getProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE,
                    String.valueOf(ParamUtil.USE_ENDPOINT_PARSING_RULE_DEFAULT_VALUE)));
    boolean isUseEndpointParsingRule = Boolean.parseBoolean(isUseEndpointRuleParsing);
    String endpointUrl;
    if (isUseEndpointParsingRule) {
        // Get the set domain name information
        endpointUrl = ParamUtil.parsingEndpointRule(properties.getProperty(PropertyKeyConst.ENDPOINT));
        if (StringUtils.isBlank(endpointUrl)) {
            return "";
        }
    } else {
        endpointUrl = properties.getProperty(PropertyKeyConst.ENDPOINT);
    }
    if (StringUtils.isBlank(endpointUrl)) {
        return "";
    }
    String endpointPort = TemplateUtils.stringEmptyAndThenExecute(
            properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_ENDPOINT_PORT),
            () -> properties.getProperty(PropertyKeyConst.ENDPOINT_PORT));
    endpointPort = TemplateUtils.stringEmptyAndThenExecute(endpointPort, () -> DEFAULT_END_POINT_PORT);
    return endpointUrl + ":" + endpointPort;
}
@Test
void testInitEndpointFromPropertiesWithCloudParsing() {
    System.setProperty(SystemPropertyKeyConst.IS_USE_ENDPOINT_PARSING_RULE, "true");
    final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
    String endpoint = "1.1.1.1";
    String endpointPort = "1234";
    properties.setProperty(PropertyKeyConst.ENDPOINT, endpoint);
    properties.setProperty(PropertyKeyConst.ENDPOINT_PORT, endpointPort);
    String actual = InitUtils.initEndpoint(properties);
    assertEquals(endpoint + ":" + endpointPort, actual);
}
@ApiOperation(value = "Get a historic process instance", tags = { "History Process" }, nickname = "getHistoricProcessInstance")
@ApiResponses(value = {
        @ApiResponse(code = 200, message = "Indicates that the historic process instances could be found."),
        @ApiResponse(code = 404, message = "Indicates that the historic process instances could not be found.") })
@GetMapping(value = "/history/historic-process-instances/{processInstanceId}", produces = "application/json")
public HistoricProcessInstanceResponse getProcessInstance(@ApiParam(name = "processInstanceId") @PathVariable String processInstanceId) {
    HistoricProcessInstanceResponse processInstanceResponse = restResponseFactory
            .createHistoricProcessInstanceResponse(getHistoricProcessInstanceFromRequest(processInstanceId));
    ProcessDefinition processDefinition = repositoryService.createProcessDefinitionQuery()
            .processDefinitionId(processInstanceResponse.getProcessDefinitionId()).singleResult();
    if (processDefinition != null) {
        processInstanceResponse.setProcessDefinitionName(processDefinition.getName());
        processInstanceResponse.setProcessDefinitionDescription(processDefinition.getDescription());
    }
    return processInstanceResponse;
}
@Test
@Deployment(resources = { "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml" })
public void testGetProcessInstance() throws Exception {
    ProcessInstance processInstance = runtimeService.createProcessInstanceBuilder()
            .processDefinitionKey("oneTaskProcess")
            .businessKey("myBusinessKey")
            .callbackId("testCallbackId")
            .callbackType("testCallbackType")
            .referenceId("testReferenceId")
            .referenceType("testReferenceType")
            .stageInstanceId("testStageInstanceId")
            .start();
    runtimeService.updateBusinessStatus(processInstance.getId(), "myBusinessStatus");

    CloseableHttpResponse response = executeRequest(
            new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_HISTORIC_PROCESS_INSTANCE, processInstance.getId())),
            HttpStatus.SC_OK);
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(HttpStatus.SC_OK);
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    assertThat(responseNode).isNotNull();
    assertThatJson(responseNode)
            .when(Option.IGNORING_EXTRA_FIELDS)
            .isEqualTo("{"
                    + "id: '" + processInstance.getId() + "',"
                    + "businessKey: 'myBusinessKey',"
                    + "businessStatus: 'myBusinessStatus',"
                    + "callbackId: 'testCallbackId',"
                    + "callbackType: 'testCallbackType',"
                    + "referenceId: 'testReferenceId',"
                    + "referenceType: 'testReferenceType',"
                    + "propagatedStageInstanceId: 'testStageInstanceId'"
                    + "}");

    Task task = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult();
    assertThat(task).isNotNull();
    taskService.complete(task.getId());

    response = executeRequest(
            new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_HISTORIC_PROCESS_INSTANCE, processInstance.getId())),
            HttpStatus.SC_NO_CONTENT);
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(HttpStatus.SC_NO_CONTENT);
    closeResponse(response);
}
@VisibleForTesting
protected String buildShortMessage(Map<String, Object> fields) {
    final StringBuilder shortMessage = new StringBuilder();
    shortMessage.append("JSON API poll result: ");
    if (!flatten) {
        shortMessage.append(jsonPath.getPath());
    }
    shortMessage.append(" -> ");
    if (fields.toString().length() > 50) {
        shortMessage.append(fields.toString().substring(0, 50)).append("[...]");
    } else {
        shortMessage.append(fields.toString());
    }
    return shortMessage.toString();
}
@Test
public void testBuildShortMessageThatGetsCutFullJson() throws Exception {
    Map<String, Object> fields = Maps.newLinkedHashMap();
    fields.put("baz", 9001);
    fields.put("foo", "bargggdzrtdfgfdgldfsjgkfdlgjdflkjglfdjgljslfperitperoujglkdnfkndsbafdofhasdpfoöadjsFOO");

    JsonPathCodec selector = new JsonPathCodec(configOf(CK_PATH, "$.download_count", CK_FLATTEN, true),
            objectMapperProvider.get(), messageFactory);

    assertThat(selector.buildShortMessage(fields))
            .isEqualTo("JSON API poll result: -> {baz=9001, foo=bargggdzrtdfgfdgldfsjgkfdlgjdflkjgl[...]");
}
@Override
public boolean enableSendingOldValues(final boolean forceMaterialization) {
    if (queryableName != null) {
        sendOldValues = true;
        return true;
    }
    if (parent.enableSendingOldValues(forceMaterialization)) {
        sendOldValues = true;
    }
    return sendOldValues;
}
@Test
public void shouldSetSendOldValuesOnParentIfNotMaterialized() {
    when(parent.enableSendingOldValues(true)).thenReturn(true);

    new KTableTransformValues<>(parent, new NoOpValueTransformerWithKeySupplier<>(), null)
        .enableSendingOldValues(true);
}
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) {
    boolean exactPathMatch = true;

    if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) {
        // This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI
        return false;
    }

    // Paths ending with '*' are Vert.x wildcard routes so match on the path prefix
    if (hostPath.endsWith("*")) {
        exactPathMatch = false;
        hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
    }

    String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/");
    String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/");
    String[] hostPathElements = normalizedHostPath.split("/");
    String[] targetPathElements = normalizedTargetPath.split("/");

    if (exactPathMatch && hostPathElements.length != targetPathElements.length) {
        return false;
    }

    if (exactPathMatch) {
        return normalizedHostPath.equals(normalizedTargetPath);
    } else {
        return normalizedTargetPath.startsWith(normalizedHostPath);
    }
}
@Test
void webSocketHostEmptyPathNotMatches() {
    String hostPath = "";
    String targetPath = "";
    assertFalse(VertxWebsocketHelper.webSocketHostPathMatches(hostPath, targetPath));
}
public static CoordinatorRecord newConsumerGroupSubscriptionMetadataTombstoneRecord(
    String groupId
) {
    return new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupPartitionMetadataKey()
                .setGroupId(groupId),
            (short) 4
        ),
        null // Tombstone.
    );
}
@Test
public void testNewConsumerGroupSubscriptionMetadataTombstoneRecord() {
    CoordinatorRecord expectedRecord = new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupPartitionMetadataKey()
                .setGroupId("group-id"),
            (short) 4
        ),
        null);

    assertEquals(expectedRecord, newConsumerGroupSubscriptionMetadataTombstoneRecord("group-id"));
}
@Override
public int compare( Object data1, Object data2 ) throws KettleValueException {
    boolean n1 = isNull( data1 );
    boolean n2 = isNull( data2 );

    if ( n1 && !n2 ) {
        if ( isSortedDescending() ) { // BACKLOG-14028
            return 1;
        } else {
            return -1;
        }
    }
    if ( !n1 && n2 ) {
        if ( isSortedDescending() ) {
            return -1;
        } else {
            return 1;
        }
    }
    if ( n1 && n2 ) {
        return 0;
    }

    int cmp = 0;
    // If a comparator is not provided, default to the type comparisons
    if ( comparator == null ) {
        cmp = typeCompare( data1, data2 );
    } else {
        cmp = comparator.compare( data1, data2 );
    }
    if ( isSortedDescending() ) {
        return -cmp;
    } else {
        return cmp;
    }
}
@Test
public void testCompareIntegerToDouble() throws KettleValueException {
    ValueMetaBase intMeta = new ValueMetaBase( "int", ValueMetaInterface.TYPE_INTEGER );
    Long int1 = 2L;
    ValueMetaBase numberMeta = new ValueMetaBase( "number", ValueMetaInterface.TYPE_NUMBER );
    Double double2 = 1.5D;
    assertEquals( 1, intMeta.compare( int1, numberMeta, double2 ) );
}
@SuppressWarnings({
    "nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
public static TableReference parseTableSpec(String tableSpec) {
    Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
    if (!match.matches()) {
        throw new IllegalArgumentException(
            String.format(
                "Table specification [%s] is not in one of the expected formats ("
                    + " [project_id]:[dataset_id].[table_id],"
                    + " [project_id].[dataset_id].[table_id],"
                    + " [dataset_id].[table_id])",
                tableSpec));
    }
    TableReference ref = new TableReference();
    ref.setProjectId(match.group("PROJECT"));
    return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
@Test
public void testTableParsingError_slash() {
    thrown.expect(IllegalArgumentException.class);
    BigQueryHelpers.parseTableSpec("a\\b12345:c.d");
}
@ShellMethod(key = "cleans show", value = "Show the cleans")
public String showCleans(
        @ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit,
        @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
        @ShellOption(value = {"--startTs"}, help = "start time for cleans, default: now - 10 days", defaultValue = ShellOption.NULL) String startTs,
        @ShellOption(value = {"--endTs"}, help = "end time for clean, default: upto latest", defaultValue = ShellOption.NULL) String endTs,
        @ShellOption(value = {"--includeArchivedTimeline"}, help = "Include archived commits as well", defaultValue = "false") final boolean includeArchivedTimeline,
        @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
        @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly)
        throws IOException {
    HoodieDefaultTimeline activeTimeline = CLIUtils.getTimelineInRange(startTs, endTs, includeArchivedTimeline);
    HoodieTimeline timeline = activeTimeline.getCleanerTimeline().filterCompletedInstants();
    List<HoodieInstant> cleans = timeline.getReverseOrderedInstants().collect(Collectors.toList());
    List<Comparable[]> rows = new ArrayList<>();
    for (HoodieInstant clean : cleans) {
        HoodieCleanMetadata cleanMetadata =
                TimelineMetadataUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(clean).get());
        rows.add(new Comparable[] {clean.getTimestamp(), cleanMetadata.getEarliestCommitToRetain(),
                cleanMetadata.getTotalFilesDeleted(), cleanMetadata.getTimeTakenInMillis()});
    }

    TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_CLEAN_TIME)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_EARLIEST_COMMAND_RETAINED)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_DELETED)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_TIME_TAKEN);
    return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, rows);
}
@Test
public void testShowCleans() throws Exception {
    // Check properties file exists.
    assertNotNull(propsFilePath, "Not found properties file");

    // First, run clean
    SparkMain.clean(jsc(), HoodieCLI.basePath, propsFilePath.getPath(), new ArrayList<>());
    assertEquals(1, metaClient.getActiveTimeline().reload().getCleanerTimeline().countInstants(),
            "Loaded 1 clean and the count should match");

    Object result = shell.evaluate(() -> "cleans show");
    assertTrue(ShellEvaluationResultUtil.isSuccess(result));

    HoodieInstant clean = metaClient.getActiveTimeline().reload().getCleanerTimeline().getInstantsAsStream().findFirst().orElse(null);
    assertNotNull(clean);

    TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_CLEAN_TIME)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_EARLIEST_COMMAND_RETAINED)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_DELETED)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_TIME_TAKEN);
    List<Comparable[]> rows = new ArrayList<>();

    // EarliestCommandRetained should be 102, since hoodie.clean.commits.retained=2
    // Total Time Taken need read from metadata
    rows.add(new Comparable[] {clean.getTimestamp(), "102", "2", getLatestCleanTimeTakenInMillis().toString()});

    String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
    expected = removeNonWordAndStripSpace(expected);
    String got = removeNonWordAndStripSpace(result.toString());
    assertEquals(expected, got);
}
public synchronized Topology addSource(final String name, final String... topics) {
    internalTopologyBuilder.addSource(null, name, null, null, null, topics);
    return this;
}
@Test
public void shouldNotAllowNullNameWhenAddingSourceWithTopic() {
    assertThrows(NullPointerException.class, () -> topology.addSource((String) null, "topic"));
}
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    ValueParser parser = new ValueParser(new Parser(value));
    return parser.parse(false);
}
@Test
public void shouldParseQuotedTimeStringAsTimeInMap() throws Exception {
    String keyStr = "k1";
    String timeStr = "14:34:54.346Z";
    String mapStr = "{\"" + keyStr + "\":\"" + timeStr + "\"}";
    SchemaAndValue result = Values.parseString(mapStr);
    assertEquals(Type.MAP, result.schema().type());
    Schema keySchema = result.schema().keySchema();
    Schema valueSchema = result.schema().valueSchema();
    assertEquals(Type.STRING, keySchema.type());
    assertEquals(Type.INT32, valueSchema.type());
    assertEquals(Time.LOGICAL_NAME, valueSchema.name());
    java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
    assertEquals(Collections.singletonMap(keyStr, expected), result.value());
}
public SmppMessage createSmppMessage(CamelContext camelContext, AlertNotification alertNotification) {
    SmppMessage smppMessage = new SmppMessage(camelContext, alertNotification, configuration);

    smppMessage.setHeader(SmppConstants.MESSAGE_TYPE, SmppMessageType.AlertNotification.toString());
    smppMessage.setHeader(SmppConstants.SEQUENCE_NUMBER, alertNotification.getSequenceNumber());
    smppMessage.setHeader(SmppConstants.COMMAND_ID, alertNotification.getCommandId());
    smppMessage.setHeader(SmppConstants.COMMAND_STATUS, alertNotification.getCommandStatus());
    smppMessage.setHeader(SmppConstants.SOURCE_ADDR, alertNotification.getSourceAddr());
    smppMessage.setHeader(SmppConstants.SOURCE_ADDR_NPI, alertNotification.getSourceAddrNpi());
    smppMessage.setHeader(SmppConstants.SOURCE_ADDR_TON, alertNotification.getSourceAddrTon());
    smppMessage.setHeader(SmppConstants.ESME_ADDR, alertNotification.getEsmeAddr());
    smppMessage.setHeader(SmppConstants.ESME_ADDR_NPI, alertNotification.getEsmeAddrNpi());
    smppMessage.setHeader(SmppConstants.ESME_ADDR_TON, alertNotification.getEsmeAddrTon());

    return smppMessage;
}
@Test
public void createSmppMessageFromDeliveryReceiptWithPayloadInOptionalParameterShouldReturnASmppMessage() {
    DeliverSm deliverSm = new DeliverSm();
    deliverSm.setSmscDeliveryReceipt();
    deliverSm.setOptionalParameters(new OctetString(
            OptionalParameter.Tag.MESSAGE_PAYLOAD,
            "id:2 sub:001 dlvrd:001 submit date:0908312310 done date:0908312311 stat:DELIVRD err:xxx Text:Hello SMPP world!"));

    try {
        SmppMessage smppMessage = binding.createSmppMessage(camelContext, deliverSm);

        assertEquals("Hello SMPP world!", smppMessage.getBody());
        assertEquals(10, smppMessage.getHeaders().size());
        assertEquals("2", smppMessage.getHeader(SmppConstants.ID));
        assertEquals(1, smppMessage.getHeader(SmppConstants.DELIVERED));
        assertEquals("xxx", smppMessage.getHeader(SmppConstants.ERROR));
        assertEquals(1, smppMessage.getHeader(SmppConstants.SUBMITTED));
        assertEquals(DeliveryReceiptState.DELIVRD, smppMessage.getHeader(SmppConstants.FINAL_STATUS));
        assertEquals(SmppMessageType.DeliveryReceipt.toString(), smppMessage.getHeader(SmppConstants.MESSAGE_TYPE));
    } catch (Exception e) {
        fail("Should not throw exception while creating smppMessage.");
    }
}
public static <T extends Throwable> void checkContains(final Collection<?> values, final Object element,
                                                       final Supplier<T> exceptionSupplierIfUnexpected) throws T {
    if (!values.contains(element)) {
        throw exceptionSupplierIfUnexpected.get();
    }
}
@Test
void assertCheckContainsToNotThrowException() {
    assertDoesNotThrow(() -> ShardingSpherePreconditions.checkContains(Collections.singleton("foo"), "foo", SQLException::new));
}
public static Class<?> forName(String name) throws ClassNotFoundException {
    return forName(name, getClassLoader());
}
@Test
void testForName3() throws Exception {
    ClassLoader classLoader = Mockito.mock(ClassLoader.class);
    ClassUtils.forName("a.b.c.D", classLoader);
    verify(classLoader).loadClass("a.b.c.D");
}
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return delegate.find(file, listener);
    }
    if(cache.isValid(file.getParent())) {
        final AttributedList<Path> list = cache.get(file.getParent());
        final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(sensitivity, file));
        if(null != found) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Return cached attributes %s for %s", found.attributes(), file));
            }
            return found.attributes();
        }
        if(log.isDebugEnabled()) {
            log.debug(String.format("Cached directory listing does not contain %s", file));
        }
        throw new NotfoundException(file.getAbsolute());
    }
    final CachingListProgressListener caching = new CachingListProgressListener(cache);
    try {
        final PathAttributes attr = delegate.find(file, new ProxyListProgressListener(listener, caching));
        caching.cache();
        return attr;
    }
    catch(NotfoundException e) {
        caching.cache();
        throw e;
    }
}
@Test
public void testFindErrorWhilePagingDirectoryListing() throws Exception {
    final PathCache cache = new PathCache(1);
    final Path directory = new Path("/", EnumSet.of(Path.Type.directory));
    final Path file = new Path(directory, "f", EnumSet.of(Path.Type.file));
    final CachingAttributesFinderFeature feature = new CachingAttributesFinderFeature(Protocol.Case.sensitive, cache,
        new DefaultAttributesFinderFeature(new NullSession(new Host(new TestProtocol())) {
            @Override
            public AttributedList<Path> list(final Path folder, final ListProgressListener listener) throws BackgroundException {
                final AttributedList<Path> list = new AttributedList<>(Collections.singletonList(
                    new Path(folder, "t", EnumSet.of(Path.Type.file))));
                listener.chunk(folder, list);
                throw new ConnectionTimeoutException(folder.getAbsolute());
            }
        }));
    try {
        feature.find(file, new DisabledListProgressListener());
        fail();
    }
    catch(ConnectionTimeoutException e) {
        //
    }
    assertFalse(cache.isCached(directory));
}
public void deleteFavorite(long favoriteId) {
    Favorite favorite = favoriteRepository.findById(favoriteId).orElse(null);

    checkUserOperatePermission(favorite);

    favoriteRepository.delete(favorite);
}
@Test(expected = BadRequestException.class)
@Sql(scripts = "/sql/favorites/favorites.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testDeleteFavoriteFail() {
    long anotherPersonFavoriteId = 23L;

    favoriteService.deleteFavorite(anotherPersonFavoriteId);

    Assert.assertNull(favoriteRepository.findById(anotherPersonFavoriteId).orElse(null));
}
@Override
public JCTypeApply inline(Inliner inliner) throws CouldNotResolveImportException {
    return inliner
        .maker()
        .TypeApply(getType().inline(inliner), inliner.<JCExpression>inlineList(getTypeArguments()));
}
@Test
public void inline() {
    ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
    assertInlines(
        "List<String>",
        UTypeApply.create(
            UClassIdent.create("java.util.List"),
            UClassIdent.create("java.lang.String")));
}
@Override
public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels(
    RemoveFromClusterNodeLabelsRequest request) throws YarnException, IOException {
    // parameter verification.
    if (request == null) {
        routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
        RouterServerUtil.logAndThrowException("Missing RemoveFromClusterNodeLabels request.", null);
    }

    String subClusterId = request.getSubClusterId();
    if (StringUtils.isBlank(subClusterId)) {
        routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
        RouterServerUtil.logAndThrowException("Missing RemoveFromClusterNodeLabels SubClusterId.", null);
    }

    try {
        long startTime = clock.getTime();
        RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
            new Class[]{RemoveFromClusterNodeLabelsRequest.class}, new Object[]{request});
        Collection<RemoveFromClusterNodeLabelsResponse> refreshNodesResourcesResps =
            remoteMethod.invokeConcurrent(this, RemoveFromClusterNodeLabelsResponse.class, subClusterId);
        if (CollectionUtils.isNotEmpty(refreshNodesResourcesResps)) {
            long stopTime = clock.getTime();
            routerMetrics.succeededRemoveFromClusterNodeLabelsRetrieved(stopTime - startTime);
            return RemoveFromClusterNodeLabelsResponse.newInstance();
        }
    } catch (YarnException e) {
        routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
        RouterServerUtil.logAndThrowException(e,
            "Unable to removeFromClusterNodeLabels due to exception. " + e.getMessage());
    }

    routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
    throw new YarnException("Unable to removeFromClusterNodeLabels.");
}
@Test
public void testRemoveFromClusterNodeLabelsEmptyRequest() throws Exception {
    // null request1.
    LambdaTestUtils.intercept(YarnException.class,
        "Missing RemoveFromClusterNodeLabels request.",
        () -> interceptor.removeFromClusterNodeLabels(null));

    // null request2.
    RemoveFromClusterNodeLabelsRequest request =
        RemoveFromClusterNodeLabelsRequest.newInstance(null, null);
    LambdaTestUtils.intercept(YarnException.class,
        "Missing RemoveFromClusterNodeLabels SubClusterId.",
        () -> interceptor.removeFromClusterNodeLabels(request));
}
@Deprecated
public final boolean recycle(T o, Handle<T> handle) {
    if (handle == NOOP_HANDLE) {
        return false;
    }
    handle.recycle(o);
    return true;
}
@Test
public void testRecycle() {
    Recycler<HandledObject> recycler = newRecycler(1024);
    HandledObject object = recycler.get();
    object.recycle();
    HandledObject object2 = recycler.get();
    assertSame(object, object2);
    object2.recycle();
}
@Override
public synchronized void write(int b) throws IOException {
    mUfsOutStream.write(b);
    mBytesWritten++;
}
@Test
public void writeIncreasingByteArrayOffsetLen() throws IOException, AlluxioException {
    AlluxioURI ufsPath = getUfsPath();
    try (FileOutStream outStream = mFileSystem.createFile(ufsPath)) {
        outStream.write(BufferUtils.getIncreasingByteArray(CHUNK_SIZE), 0, CHUNK_SIZE);
    }
    verifyIncreasingBytesWritten(ufsPath, CHUNK_SIZE);
}
public boolean match(int left, int right) {
    return left == right;
}
@Test
public void longShouldEqual() {
    long a = 21474836478L;
    long b = 21474836478L;
    boolean match = new NumberMatch().match(a, b);
    assertTrue(match);

    a = -21474836478L;
    b = -21474836479L;
    match = new NumberMatch().match(a, b);
    assertFalse(match);

    Long c = -123L;
    Long d = -123L;
    match = new NumberMatch().match(c, d);
    assertTrue(match);

    c = -21474836478L;
    d = -21474836479L;
    match = new NumberMatch().match(c, d);
    assertFalse(match);
}
FileContext getLocalFileContext(Configuration conf) {
    try {
        return FileContext.getLocalFSFileContext(conf);
    } catch (IOException e) {
        throw new YarnRuntimeException("Failed to access local fs");
    }
}
@Test
public void testLocalizationInit() throws Exception {
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(new Configuration());

    ContainerExecutor exec = mock(ContainerExecutor.class);
    DeletionService delService = spy(new DeletionService(exec));
    delService.init(conf);
    delService.start();

    List<Path> localDirs = new ArrayList<Path>();
    String[] sDirs = new String[4];
    for (int i = 0; i < 4; ++i) {
        localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
        sDirs[i] = localDirs.get(i).toString();
    }
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);

    LocalDirsHandlerService diskhandler = new LocalDirsHandlerService();
    diskhandler.init(conf);

    ResourceLocalizationService locService = spy(new ResourceLocalizationService(dispatcher, exec, delService,
            diskhandler, nmContext, metrics));
    doReturn(lfs)
        .when(locService).getLocalFileContext(isA(Configuration.class));
    try {
        dispatcher.start();

        // initialize ResourceLocalizationService
        locService.init(conf);

        final FsPermission defaultPerm = new FsPermission((short)0755);

        // verify directory creation
        for (Path p : localDirs) {
            p = new Path((new URI(p.toString())).getPath());
            Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
            verify(spylfs).mkdir(eq(usercache), eq(defaultPerm), eq(true));
            Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
            verify(spylfs).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
            Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
            verify(spylfs).mkdir(eq(nmPriv), eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
        }
    } finally {
        dispatcher.stop();
        delService.stop();
    }
}
static JobManagerProcessSpec processSpecFromConfig(Configuration config) {
    return createMemoryProcessSpec(PROCESS_MEMORY_UTILS.memoryProcessSpecFromConfig(config));
}
@Test
void testLogFailureOfJvmHeapSizeMinSizeVerification() {
    MemorySize jvmHeapMemory = MemorySize.parse("50m");
    Configuration conf = new Configuration();
    conf.set(JobManagerOptions.JVM_HEAP_MEMORY, jvmHeapMemory);
    JobManagerProcessUtils.processSpecFromConfig(conf);
    assertThat(testLoggerResource.getMessages())
            .anyMatch(
                    str ->
                            str.contains(
                                    String.format(
                                            "The configured or derived JVM heap memory size (%s) is less than its recommended minimum value (%s)",
                                            jvmHeapMemory.toHumanReadableString(),
                                            JobManagerOptions.MIN_JVM_HEAP_SIZE.toHumanReadableString())));
}
ClassicGroup getOrMaybeCreateClassicGroup(
    String groupId,
    boolean createIfNotExists
) throws GroupIdNotFoundException {
    Group group = groups.get(groupId);

    if (group == null && !createIfNotExists) {
        throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId));
    }

    if (group == null) {
        ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics);
        groups.put(groupId, classicGroup);
        metrics.onClassicGroupStateTransition(null, classicGroup.currentState());
        return classicGroup;
    } else {
        if (group.type() == CLASSIC) {
            return (ClassicGroup) group;
        } else {
            // We don't support upgrading/downgrading between protocols at the moment so
            // we throw an exception if a group exists with the wrong type.
            throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.", groupId));
        }
    }
}
@Test
public void testStaticMemberRejoinWithKnownLeaderIdToTriggerRebalanceAndFollowerWithChangeofProtocol() throws Exception {
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .build();
    GroupMetadataManagerTestContext.RebalanceResult rebalanceResult = context.staticMembersJoinAndRebalance(
        "group-id",
        "leader-instance-id",
        "follower-instance-id"
    );
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);

    // A static leader rejoin with known member id will trigger rebalance.
    JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
        .withGroupId("group-id")
        .withGroupInstanceId("leader-instance-id")
        .withMemberId(rebalanceResult.leaderId)
        .withProtocolSuperset()
        .withRebalanceTimeoutMs(10000)
        .withSessionTimeoutMs(5000)
        .build();
    GroupMetadataManagerTestContext.JoinResult leaderJoinResult = context.sendClassicGroupJoin(
        request,
        true,
        true
    );

    assertTrue(leaderJoinResult.records.isEmpty());
    assertFalse(leaderJoinResult.joinFuture.isDone());

    // Rebalance completes immediately after follower rejoins.
    GroupMetadataManagerTestContext.JoinResult followerJoinResult = context.sendClassicGroupJoin(
        request.setGroupInstanceId("follower-instance-id")
            .setMemberId(rebalanceResult.followerId),
        true,
        true
    );

    assertTrue(followerJoinResult.records.isEmpty());
    assertTrue(leaderJoinResult.joinFuture.isDone());
    assertTrue(followerJoinResult.joinFuture.isDone());
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    assertEquals(2, group.generationId());

    // Leader should get the same assignment as last round.
    JoinGroupResponseData expectedLeaderResponse = new JoinGroupResponseData()
        .setErrorCode(Errors.NONE.code())
        .setGenerationId(rebalanceResult.generationId + 1) // The group has promoted to the new generation.
        .setMemberId(leaderJoinResult.joinFuture.get().memberId())
        .setLeader(rebalanceResult.leaderId)
        .setProtocolName("range")
        .setProtocolType("consumer")
        .setSkipAssignment(false)
        .setMembers(toJoinResponseMembers(group));

    checkJoinGroupResponse(
        expectedLeaderResponse,
        leaderJoinResult.joinFuture.get(),
        group,
        COMPLETING_REBALANCE,
        mkSet("leader-instance-id", "follower-instance-id")
    );

    JoinGroupResponseData expectedFollowerResponse = new JoinGroupResponseData()
        .setErrorCode(Errors.NONE.code())
        .setGenerationId(rebalanceResult.generationId + 1) // The group has promoted to the new generation.
        .setMemberId(followerJoinResult.joinFuture.get().memberId())
        .setLeader(rebalanceResult.leaderId)
        .setProtocolName("range")
        .setProtocolType("consumer")
        .setSkipAssignment(false)
        .setMembers(Collections.emptyList());

    checkJoinGroupResponse(
        expectedFollowerResponse,
        followerJoinResult.joinFuture.get(),
        group,
        COMPLETING_REBALANCE,
        Collections.emptySet()
    );

    // The follower protocol changed from protocolSuperset to general protocols.
    JoinGroupRequestProtocolCollection protocols = GroupMetadataManagerTestContext.toProtocols("range");

    followerJoinResult = context.sendClassicGroupJoin(
        request.setGroupInstanceId("follower-instance-id")
            .setMemberId(rebalanceResult.followerId)
            .setProtocols(protocols),
        true,
        true
    );

    assertTrue(followerJoinResult.records.isEmpty());
    assertFalse(followerJoinResult.joinFuture.isDone());
    // The group will transition to PreparingRebalance due to protocol change from follower.
    assertTrue(group.isInState(PREPARING_REBALANCE));

    // Advance clock by session timeout to kick leader out and complete join phase.
    List<ExpiredTimeout<Void, CoordinatorRecord>> timeouts = context.sleep(5000);
    // Both leader and follower heartbeat timers may expire. However, the follower heartbeat expiration
    // will not kick the follower out because it is awaiting a join response.
    assertTrue(timeouts.size() <= 2);
    assertTrue(followerJoinResult.joinFuture.isDone());

    String newFollowerId = followerJoinResult.joinFuture.get().memberId();
    expectedFollowerResponse = new JoinGroupResponseData()
        .setErrorCode(Errors.NONE.code())
        .setGenerationId(rebalanceResult.generationId + 2) // The group has promoted to the new generation.
        .setMemberId(newFollowerId)
        .setLeader(newFollowerId)
        .setProtocolName("range")
        .setProtocolType("consumer")
        .setSkipAssignment(false)
        .setMembers(toJoinResponseMembers(group));

    checkJoinGroupResponse(
        expectedFollowerResponse,
        followerJoinResult.joinFuture.get(),
        group,
        COMPLETING_REBALANCE,
        Collections.singleton("follower-instance-id")
    );
}
@Override
public JwtToken getToken(@Nullable @QueryParameter("expiryTimeInMins") Integer expiryTimeInMins,
                         @Nullable @QueryParameter("maxExpiryTimeInMins") Integer maxExpiryTimeInMins) {
    long expiryTime = Long.getLong("EXPIRY_TIME_IN_MINS", DEFAULT_EXPIRY_IN_SEC);

    int maxExpiryTime = Integer.getInteger("MAX_EXPIRY_TIME_IN_MINS", DEFAULT_MAX_EXPIRY_TIME_IN_MIN);

    if (maxExpiryTimeInMins != null) {
        maxExpiryTime = maxExpiryTimeInMins;
    }
    if (expiryTimeInMins != null) {
        if (expiryTimeInMins > maxExpiryTime) {
            throw new ServiceException.BadRequestException(
                String.format("expiryTimeInMins %s can't be greater than %s", expiryTimeInMins, maxExpiryTime));
        }
        expiryTime = expiryTimeInMins * 60;
    }

    Authentication authentication = Jenkins.getAuthentication2();
    String userId = authentication.getName();

    User user = User.get(userId, false, Collections.emptyMap());
    String email = null;
    String fullName = null;
    if (user != null) {
        fullName = user.getFullName();
        userId = user.getId();
        Mailer.UserProperty p = user.getProperty(Mailer.UserProperty.class);
        if (p != null)
            email = p.getAddress();
    }
    Plugin plugin = Jenkins.get().getPlugin("blueocean-jwt");
    String issuer = "blueocean-jwt:" + ((plugin != null) ? plugin.getWrapper().getVersion() : "");

    JwtToken jwtToken = new JwtToken();
    jwtToken.claim.put("jti", UUID.randomUUID().toString().replace("-", ""));
    jwtToken.claim.put("iss", issuer);
    jwtToken.claim.put("sub", userId);
    jwtToken.claim.put("name", fullName);
    long currentTime = System.currentTimeMillis() / 1000;
    jwtToken.claim.put("iat", currentTime);
    jwtToken.claim.put("exp", currentTime + expiryTime);
    jwtToken.claim.put("nbf", currentTime - DEFAULT_NOT_BEFORE_IN_SEC);

    // set claim
    JSONObject context = new JSONObject();
    JSONObject userObject = new JSONObject();
    userObject.put("id", userId);
    userObject.put("fullName", fullName);
    userObject.put("email", email);

    JwtAuthenticationStore authenticationStore = getJwtStore(authentication);
    authenticationStore.store(authentication, context);

    context.put("user", userObject);
    jwtToken.claim.put("context", context);

    return jwtToken;
}
@Test
public void getJwks() throws Exception {
    j.jenkins.setSecurityRealm(j.createDummySecurityRealm());
    JenkinsRule.WebClient webClient = j.createWebClient();

    User user = User.get("alice");
    user.setFullName("Alice Cooper");
    user.addProperty(new Mailer.UserProperty("alice@jenkins-ci.org"));

    webClient.login("alice");

    String token = getToken(webClient); // this call triggers the creation of a RSA key in RSAConfidentialKey::getPrivateKey

    String jwksPayload = webClient.goTo("jwt-auth/jwk-set", "application/json").getWebResponse().getContentAsString();
    System.out.println(jwksPayload);

    JsonWebKeySet jsonWebKeySet = new JsonWebKeySet(jwksPayload);
    JwksVerificationKeyResolver jwksResolver = new JwksVerificationKeyResolver(jsonWebKeySet.getJsonWebKeys());
    JwtConsumer jwtConsumer = new JwtConsumerBuilder()
            .setRequireExpirationTime() // the JWT must have an expiration time
            .setAllowedClockSkewInSeconds(30) // allow some leeway in validating time based claims to account for clock skew
            .setRequireSubject() // the JWT must have a subject claim
            .setVerificationKeyResolver(jwksResolver) // verify the sign with the public key
            .build(); // create the JwtConsumer instance

    JwtClaims claims = jwtConsumer.processToClaims(token);
    Assert.assertEquals("alice", claims.getSubject());

    Map<String, Object> claimMap = claims.getClaimsMap();
    Map<String, Object> context = (Map<String, Object>) claimMap.get("context");
    Map<String, String> userContext = (Map<String, String>) context.get("user");
    Assert.assertEquals("alice", userContext.get("id"));
    Assert.assertEquals("Alice Cooper", userContext.get("fullName"));
    Assert.assertEquals("alice@jenkins-ci.org", userContext.get("email"));
}
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
    if (readerWay.hasTag("hazmat:adr_tunnel_cat", TUNNEL_CATEGORY_NAMES)) {
        HazmatTunnel code = HazmatTunnel.valueOf(readerWay.getTag("hazmat:adr_tunnel_cat"));
        hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, code);
    } else if (readerWay.hasTag("hazmat:tunnel_cat", TUNNEL_CATEGORY_NAMES)) {
        HazmatTunnel code = HazmatTunnel.valueOf(readerWay.getTag("hazmat:tunnel_cat"));
        hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, code);
    } else if (readerWay.hasTag("tunnel", "yes")) {
        HazmatTunnel[] codes = HazmatTunnel.values();
        for (int i = codes.length - 1; i >= 0; i--) {
            if (readerWay.hasTag("hazmat:" + codes[i].name(), "no")) {
                hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, codes[i]);
                break;
            }
        }
    }
}
@Test
public void testIgnoreNonTunnelSubtags() {
    EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
    ReaderWay readerWay = new ReaderWay(1);
    readerWay.setTag("hazmat:B", "no");
    int edgeId = 0;
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(HazmatTunnel.A, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
}
public static void main(String[] args) throws Exception {
    TikaCLI cli = new TikaCLI();
    if (cli.testForHelp(args)) {
        cli.usage();
        return;
    } else if (cli.testForBatch(args)) {
        String[] batchArgs = BatchCommandLineBuilder.build(args);
        BatchProcessDriverCLI batchDriver = new BatchProcessDriverCLI(batchArgs);
        batchDriver.execute();
        return;
    } else if (cli.testForAsync(args)) {
        async(args);
        return;
    }
    if (args.length > 0) {
        for (String arg : args) {
            cli.process(arg);
        }
        if (cli.pipeMode) {
            cli.process("-");
        }
    } else {
        // Started with no arguments. Wait for up to 0.1s to see if
        // we have something waiting in standard input and use the
        // pipe mode if we have. If no input is seen, start the GUI.
        if (System.in.available() == 0) {
            Thread.sleep(100);
        }
        if (System.in.available() > 0) {
            cli.process("-");
        } else {
            cli.process("--gui");
        }
    }
}
@Test
public void testExtractTgz() throws Exception {
    //TIKA-2564
    String[] params = {"--extract-dir=" + extractDir.toAbsolutePath(), "-z", resourcePrefix + "/test-documents.tgz"};
    TikaCLI.main(params);

    String[] tempFileNames = extractDir.toFile().list();
    assertNotNull(tempFileNames);
    String allFiles = String.join(" : ", tempFileNames);

    Path expectedTAR = extractDir.resolve("test-documents.tar");

    assertExtracted(expectedTAR, allFiles);
}
@Override
public boolean apply(InputFile inputFile) {
    return originalPredicate.apply(inputFile) && InputFile.Status.SAME != inputFile.status();
}
@Test
public void apply_when_file_is_changed_and_predicate_is_true() {
    when(inputFile.status()).thenReturn(InputFile.Status.CHANGED);
    when(predicate.apply(inputFile)).thenReturn(true);

    Assertions.assertThat(underTest.apply(inputFile)).isTrue();
    verify(predicate, times(1)).apply(any());
    verify(inputFile, times(1)).status();
}
@Override
public byte[] serialize() {
    final byte[] data = new byte[LENGTH - HEADER_LENGTH];

    final ByteBuffer bb = ByteBuffer.wrap(data);
    bb.putShort(this.systemPriority);
    bb.put(this.systemMac.toBytes());
    bb.putShort(this.key);
    bb.putShort(this.portPriority);
    bb.putShort(this.port);
    bb.put(this.state.toByte());
    bb.put(RESERVED);

    return data;
}
@Test
public void serialize() {
    assertArrayEquals(data, BASE_TLV.serialize());
}
public static FullyQualifiedKotlinType convert(FullyQualifiedJavaType javaType) {
    FullyQualifiedKotlinType kotlinType = convertBaseType(javaType);

    for (FullyQualifiedJavaType argument : javaType.getTypeArguments()) {
        kotlinType.addTypeArgument(convert(argument));
    }

    return kotlinType;
}
@Test
void testByteWrapperArray() {
    FullyQualifiedJavaType jt = new FullyQualifiedJavaType("java.lang.Byte[]");
    FullyQualifiedKotlinType kt = JavaToKotlinTypeConverter.convert(jt);
    assertThat(kt.getShortNameWithTypeArguments()).isEqualTo("Array<Byte>");
    assertThat(kt.getImportList()).isEmpty();
}
public static <T> CompletableFuture<T> supplyAsync(
        SupplierWithException<T, ?> supplier, Executor executor) {
    return CompletableFuture.supplyAsync(
            () -> {
                try {
                    return supplier.get();
                } catch (Throwable e) {
                    throw new CompletionException(e);
                }
            },
            executor);
}
@Test
void testSupplyAsyncFailure() {
    final String exceptionMessage = "Test exception";
    final FlinkException testException = new FlinkException(exceptionMessage);
    final CompletableFuture<Object> future =
            FutureUtils.supplyAsync(
                    () -> {
                        throw testException;
                    },
                    EXECUTOR_RESOURCE.getExecutor());

    assertThatFuture(future)
            .eventuallyFailsWith(ExecutionException.class)
            .withCause(testException);
}
@Override
public Block toBlock(Type desiredType) {
    checkArgument(BIGINT.equals(desiredType), "type doesn't match: %s", desiredType);
    int numberOfRecords = numberOfRecords();
    return new LongArrayBlock(
            numberOfRecords,
            Optional.ofNullable(nulls),
            longs == null ? new long[numberOfRecords] : longs);
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testReadBlockWrongDesiredType() {
    PrestoThriftBlock columnsData = longColumn(null, null);
    columnsData.toBlock(INTEGER);
}
static EfestoInputPMML getEfestoInputPMML(String modelName, PMMLRuntimeContext context) {
    LocalComponentIdPmml modelLocalUriId = new EfestoAppRoot()
            .get(KiePmmlComponentRoot.class)
            .get(PmmlIdFactory.class)
            .get(context.getFileNameNoSuffix(), getSanitizedClassName(modelName));
    return new EfestoInputPMML(modelLocalUriId, context);
}
@Test
void getEfestoInputPMML() {
    String modelName = "modelName";
    EfestoInputPMML retrieved = PMMLRuntimeInternalImpl.getEfestoInputPMML(modelName, pmmlRuntimeContext);
    assertThat(retrieved).isNotNull();
    LocalComponentIdPmml expected = new LocalComponentIdPmml(pmmlRuntimeContext.getFileNameNoSuffix(),
            getSanitizedClassName(modelName));
    assertThat(retrieved.getModelLocalUriId()).isEqualTo(expected);
    assertThat(retrieved.getInputData()).isEqualTo(pmmlRuntimeContext);
}
@Nonnull
@Override
public Collection<DataConnectionResource> listResources() {
    HazelcastInstance instance = getClient();
    try {
        return instance.getDistributedObjects()
                .stream()
                .filter(IMap.class::isInstance)
                .map(o -> new DataConnectionResource(OBJECT_TYPE_IMAP_JOURNAL, o.getName()))
                .collect(Collectors.toList());
    } finally {
        instance.shutdown();
    }
}
@Test
public void list_resources_should_return_map() {
    IMap<Integer, String> map = instance.getMap("my_map");
    map.put(42, "42");

    DataConnectionConfig dataConnectionConfig = sharedDataConnectionConfig(clusterName);
    hazelcastDataConnection = new HazelcastDataConnection(dataConnectionConfig);

    Collection<DataConnectionResource> resources = hazelcastDataConnection.listResources();

    assertThat(resources).contains(new DataConnectionResource("IMapJournal", "my_map"));
}
@Override
public Optional<SensorCacheData> load() {
    String url = URL + "?project=" + project.key();
    if (branchConfiguration.referenceBranchName() != null) {
        url = url + "&branch=" + branchConfiguration.referenceBranchName();
    }
    Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG);
    GetRequest request = new GetRequest(url).setHeader(ACCEPT_ENCODING, "gzip");
    try (WsResponse response = wsClient.call(request); InputStream is = response.contentStream()) {
        Optional<String> contentEncoding = response.header(CONTENT_ENCODING);
        Optional<Integer> length = response.header(CONTENT_LENGTH).map(Integer::parseInt);
        boolean hasGzipEncoding = contentEncoding.isPresent() && contentEncoding.get().equals("gzip");

        SensorCacheData cache = hasGzipEncoding ? decompress(is) : read(is);
        if (length.isPresent()) {
            profiler.stopInfo(LOG_MSG + String.format(" (%s)", humanReadableByteCountSI(length.get())));
        } else {
            profiler.stopInfo(LOG_MSG);
        }

        return Optional.of(cache);
    } catch (HttpException e) {
        if (e.code() == 404) {
            profiler.stopInfo(LOG_MSG + " (404)");
            return Optional.empty();
        }
        throw MessageException.of("Failed to download analysis cache: " + DefaultScannerWsClient.createErrorMessage(e));
    } catch (Exception e) {
        throw new IllegalStateException("Failed to download analysis cache", e);
    }
}
@Test
public void returns_empty_if_404() {
    when(wsClient.call(any())).thenThrow(new HttpException("url", 404, "content"));
    assertThat(loader.load()).isEmpty();
    assertThat(logs.logs()).anyMatch(s -> s.startsWith("Load analysis cache (404) | time="));
}
public void close() {
    sessionExpirationService.shutdown();
    // Update all non-clean sessions with the proper expiry date
    updateNotCleanSessionsWithProperExpire();
    queueRepository.close();
}
@Test
public void testSerializabilityOfPublishedMessage() {
    LOG.info("testSerializabilityOfPublishedMessage");
    MVStore mvStore = new MVStore.Builder()
        .fileName(BrokerConstants.DEFAULT_PERSISTENT_PATH)
        .autoCommitDisabled()
        .open();
    final MVMap.Builder<String, SessionRegistry.PublishedMessage> builder =
        new MVMap.Builder<String, SessionRegistry.PublishedMessage>()
            .valueType(new EnqueuedMessageValueType());

    final ByteBuf payload = Unpooled.wrappedBuffer("Hello World!".getBytes(StandardCharsets.UTF_8));
    SessionRegistry.PublishedMessage msg =
        new SessionRegistry.PublishedMessage(Topic.asTopic("/say"), MqttQoS.AT_LEAST_ONCE, payload, false, Instant.MAX);
    try {
        // store a message in the MVStore
        final String mapName = "test_map";
        MVMap<String, SessionRegistry.PublishedMessage> persistentMap = mvStore.openMap(mapName, builder);
        String key = "message";
        persistentMap.put(key, msg);
        mvStore.close();

        // reopen the MVStore and read it
        mvStore = new MVStore.Builder()
            .fileName(BrokerConstants.DEFAULT_PERSISTENT_PATH)
            .autoCommitDisabled()
            .open();
        final SessionRegistry.PublishedMessage reloadedMsg = mvStore.openMap(mapName, builder).get(key);

        // Verify
        assertEquals("/say", reloadedMsg.topic.toString());
    } finally {
        mvStore.close();
        File dbFile = new File(BrokerConstants.DEFAULT_PERSISTENT_PATH);
        if (dbFile.exists()) {
            dbFile.delete();
        }
        assertFalse(dbFile.exists());
    }
}
public TimelineEntity getEntity(String entityType, String entityId, EnumSet<Field> fields,
        UserGroupInformation callerUGI) throws YarnException, IOException {
    long startTime = Time.monotonicNow();
    metrics.incrGetEntityOps();
    try {
        return doGetEntity(entityType, entityId, fields, callerUGI);
    } finally {
        metrics.addGetEntityTime(Time.monotonicNow() - startTime);
    }
}
@Test
void testGetOldEntityWithOutDomainId() throws Exception {
    TimelineEntity entity = dataManaer.getEntity(
        "OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1", null,
        UserGroupInformation.getCurrentUser());
    assertNotNull(entity);
    assertEquals("OLD_ENTITY_ID_1", entity.getEntityId());
    assertEquals("OLD_ENTITY_TYPE_1", entity.getEntityType());
    assertEquals(TimelineDataManager.DEFAULT_DOMAIN_ID, entity.getDomainId());
}
@Override
public void readFrame(ChannelHandlerContext ctx, ByteBuf input, Http2FrameListener listener)
        throws Http2Exception {
    if (readError) {
        input.skipBytes(input.readableBytes());
        return;
    }
    try {
        do {
            if (readingHeaders && !preProcessFrame(input)) {
                return;
            }
            // The header is complete, fall into the next case to process the payload.
            // This is to ensure the proper handling of zero-length payloads. In this
            // case, we don't want to loop around because there may be no more data
            // available, causing us to exit the loop. Instead, we just want to perform
            // the first pass at payload processing now.

            // Wait until the entire payload has been read.
            if (input.readableBytes() < payloadLength) {
                return;
            }

            // Slice to work only on the frame being read
            ByteBuf framePayload = input.readSlice(payloadLength);
            // We have consumed the data for this frame, next time we read,
            // we will be expecting to read a new frame header.
            readingHeaders = true;
            verifyFrameState();
            processPayloadState(ctx, framePayload, listener);
        } while (input.isReadable());
    } catch (Http2Exception e) {
        readError = !Http2Exception.isStreamError(e);
        throw e;
    } catch (RuntimeException e) {
        readError = true;
        throw e;
    } catch (Throwable cause) {
        readError = true;
        PlatformDependent.throwException(cause);
    }
}
@Test
public void failedWhenPriorityFrameDependsOnItself() throws Http2Exception {
    final ByteBuf input = Unpooled.buffer();
    try {
        writePriorityFrame(input, 1, 1, 10);
        assertThrows(Http2Exception.class, new Executable() {
            @Override
            public void execute() throws Throwable {
                frameReader.readFrame(ctx, input, listener);
            }
        });
    } finally {
        input.release();
    }
}
@Override
public void check(final EncryptRule encryptRule, final ShardingSphereSchema schema,
                  final SelectStatementContext sqlStatementContext) {
    checkSelect(encryptRule, sqlStatementContext);
    for (SelectStatementContext each : sqlStatementContext.getSubqueryContexts().values()) {
        checkSelect(encryptRule, each);
    }
}
@Test
void assertCheckWhenShorthandExpandContainsSubqueryTable() {
    SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
    when(sqlStatementContext.containsTableSubquery()).thenReturn(true);
    when(sqlStatementContext.getSqlStatement().getProjections().getProjections())
            .thenReturn(Collections.singleton(new ShorthandProjectionSegment(0, 0)));
    assertThrows(UnsupportedSQLOperationException.class,
            () -> new EncryptSelectProjectionSupportedChecker().check(mockEncryptRule(), null, sqlStatementContext));
}
public static String escapeXML( String content ) {
    if ( Utils.isEmpty( content ) ) {
        return content;
    }
    return StringEscapeUtils.escapeXml( content );
}
@Test
public void testEscapeXml() {
    final String xml = "<xml xmlns:test=\"http://test\">";
    final String escaped = "&lt;xml xmlns:test=&quot;http://test&quot;&gt;";
    assertNull( Const.escapeXml( null ) );
    assertEquals( escaped, Const.escapeXml( xml ) );
}
@Description("Beta cdf given the a, b parameters and value") @ScalarFunction @SqlType(StandardTypes.DOUBLE) public static double betaCdf( @SqlType(StandardTypes.DOUBLE) double a, @SqlType(StandardTypes.DOUBLE) double b, @SqlType(StandardTypes.DOUBLE) double value) { checkCondition(value >= 0 && value <= 1, INVALID_FUNCTION_ARGUMENT, "betaCdf Function: value must be in the interval [0, 1]"); checkCondition(a > 0, INVALID_FUNCTION_ARGUMENT, "betaCdf Function: a must be > 0"); checkCondition(b > 0, INVALID_FUNCTION_ARGUMENT, "betaCdf Function: b must be > 0"); BetaDistribution distribution = new BetaDistribution(null, a, b, BetaDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY); return distribution.cumulativeProbability(value); }
@Test
public void testBetaCdf() {
    assertFunction("beta_cdf(3, 3.6, 0.0)", DOUBLE, 0.0);
    assertFunction("beta_cdf(3, 3.6, 1.0)", DOUBLE, 1.0);
    assertFunction("beta_cdf(3, 3.6, 0.3)", DOUBLE, 0.21764809997679938);
    assertFunction("beta_cdf(3, 3.6, 0.9)", DOUBLE, 0.9972502881611551);

    assertInvalidFunction("beta_cdf(0, 3, 0.5)", "betaCdf Function: a must be > 0");
    assertInvalidFunction("beta_cdf(3, 0, 0.5)", "betaCdf Function: b must be > 0");
    assertInvalidFunction("beta_cdf(3, 5, -0.1)", "betaCdf Function: value must be in the interval [0, 1]");
    assertInvalidFunction("beta_cdf(3, 5, 1.1)", "betaCdf Function: value must be in the interval [0, 1]");
}
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
    // TODO(kak): Can we share any of this logic w/ MapSubject.containsEntry()?
    checkNotNull(actual);
    if (!actual.containsEntry(key, value)) {
        Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
        ImmutableList<Map.Entry<@Nullable Object, @Nullable Object>> entryList = ImmutableList.of(entry);
        // TODO(cpovirk): If the key is present but not with the right value, we could fail using
        // something like valuesForKey(key).contains(value). Consider whether this is worthwhile.
        if (hasMatchingToStringPair(actual.entries(), entryList)) {
            failWithoutActual(
                fact("expected to contain entry", entry),
                fact("an instance of", objectToTypeName(entry)),
                simpleFact("but did not"),
                fact(
                    "though it did contain",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual.entries(), /* itemsToCheck = */ entryList))),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (actual.containsKey(key)) {
            failWithoutActual(
                fact("expected to contain entry", entry),
                simpleFact("but did not"),
                fact("though it did contain values with that key", actual.asMap().get(key)),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (actual.containsValue(value)) {
            Set<@Nullable Object> keys = new LinkedHashSet<>();
            for (Map.Entry<?, ?> actualEntry : actual.entries()) {
                if (Objects.equal(actualEntry.getValue(), value)) {
                    keys.add(actualEntry.getKey());
                }
            }
            failWithoutActual(
                fact("expected to contain entry", entry),
                simpleFact("but did not"),
                fact("though it did contain keys with that value", keys),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else {
            failWithActual("expected to contain entry", immutableEntry(key, value));
        }
    }
}
@Test
public void containsEntry_failsWithSameToString() throws Exception {
    expectFailureWhenTestingThat(
            ImmutableMultimap.builder().put(1, "1").put(1, 1L).put(1L, 1).put(2, 3).build())
        .containsEntry(1, 1);
    assertFailureKeys(
        "expected to contain entry",
        "an instance of",
        "but did not",
        "though it did contain",
        "full contents");
    assertFailureValue("expected to contain entry", "1=1");
    assertFailureValue("an instance of", "Map.Entry<java.lang.Integer, java.lang.Integer>");
    assertFailureValue(
        "though it did contain",
        "[1=1 (Map.Entry<java.lang.Integer, java.lang.String>), "
            + "1=1 (Map.Entry<java.lang.Integer, java.lang.Long>), "
            + "1=1 (Map.Entry<java.lang.Long, java.lang.Integer>)]");
}
@Override
public AppResponse process(Flow flow, CancelFlowRequest request) {
    if (appAuthenticator != null && "no_nfc".equals(request.getCode())) {
        appAuthenticator.setNfcSupport(false);
    }
    appSession.setAbortCode(request.getCode());
    return new OkResponse();
}
@Test
public void processReturnsOkResponseWithoutNfcCode() {
    // given
    cancelFlowRequest.setCode("otherCode");

    // when
    AppResponse appResponse = aborted.process(mockedFlow, cancelFlowRequest);

    // then
    assertTrue(appResponse instanceof OkResponse);
    Assertions.assertEquals("otherCode", mockedAppSession.getAbortCode());
}
@Override
public RunSharedCacheCleanerTaskResponse runCleanerTask(RunSharedCacheCleanerTaskRequest request)
        throws YarnException {
    checkAcls("runCleanerTask");
    RunSharedCacheCleanerTaskResponse response =
        recordFactory.newRecordInstance(RunSharedCacheCleanerTaskResponse.class);
    this.cleanerService.runCleanerTask();
    // if we are here, then we have submitted the request to the cleaner
    // service, ack the request to the admin client
    response.setAccepted(true);
    return response;
}
@Test
void testRunCleanerTaskCLI() throws Exception {
    String[] args = {"-runCleanerTask"};
    RunSharedCacheCleanerTaskResponse rp = new RunSharedCacheCleanerTaskResponsePBImpl();
    rp.setAccepted(true);
    when(mockAdmin.runCleanerTask(isA(RunSharedCacheCleanerTaskRequest.class))).thenReturn(rp);
    assertEquals(0, adminCLI.run(args));
    rp.setAccepted(false);
    when(mockAdmin.runCleanerTask(isA(RunSharedCacheCleanerTaskRequest.class))).thenReturn(rp);
    assertEquals(1, adminCLI.run(args));
    verify(mockAdmin, times(2)).runCleanerTask(any(RunSharedCacheCleanerTaskRequest.class));
}
public static Write write() {
    return Write.create();
}
@Test
public void testWriteValidationFailsMissingProjectId() {
    BigtableIO.WriteWithResults write =
        BigtableIO.write()
            .withTableId("table")
            .withInstanceId("instance")
            .withBigtableOptions(BigtableOptions.builder().build())
            .withWriteResults();
    thrown.expect(IllegalArgumentException.class);
    write.expand(null);
}
@Override public ModelLocalUriId deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { JsonNode node = p.getCodec().readTree(p); String path = node.get("fullPath").asText(); return new ModelLocalUriId(LocalUri.parse(path)); }
@Test void deserializeEncodedPath() throws IOException { String json = "{\"model\":\"To%2Bdecode%2Bfirst%2Bpart\"," + "\"basePath\":\"/To+decode+second+part/To+decode+third+part\"," + "\"fullPath\":\"/To+decode+first+part/To+decode+second+part/To+decode+third+part\"}"; ObjectMapper mapper = new ObjectMapper(); InputStream stream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); JsonParser parser = mapper.getFactory().createParser(stream); DeserializationContext ctxt = mapper.getDeserializationContext(); ModelLocalUriId retrieved = new ModelLocalUriIdDeSerializer().deserialize(parser, ctxt); String path = "/To+decode+first+part/To+decode+second+part/To+decode+third+part/"; LocalUri parsed = LocalUri.parse(path); ModelLocalUriId expected = new ModelLocalUriId(parsed); assertThat(retrieved).isEqualTo(expected); }
public void logTerminationAck( final int memberId, final long logLeadershipTermId, final long logPosition, final int senderMemberId) { final int length = ClusterEventEncoder.terminationAckLength(); final int captureLength = captureLength(length); final int encodedLength = encodedLength(captureLength); final ManyToOneRingBuffer ringBuffer = this.ringBuffer; final int index = ringBuffer.tryClaim(TERMINATION_ACK.toEventCodeId(), encodedLength); if (index > 0) { try { ClusterEventEncoder.encodeTerminationAck( (UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, memberId, logLeadershipTermId, logPosition, senderMemberId); } finally { ringBuffer.commit(index); } } }
@Test void logTerminationAck() { final long logLeadershipTermId = 96; final long logPosition = 128L; final int memberId = 222; final int senderMemberId = 982374; final int offset = 64; logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset); logger.logTerminationAck(memberId, logLeadershipTermId, logPosition, senderMemberId); verifyLogHeader( logBuffer, offset, TERMINATION_ACK.toEventCodeId(), terminationAckLength(), terminationAckLength()); final int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH; assertEquals(logLeadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN)); assertEquals(logPosition, logBuffer.getLong(index + SIZE_OF_LONG, LITTLE_ENDIAN)); assertEquals(memberId, logBuffer.getInt(index + 2 * SIZE_OF_LONG, LITTLE_ENDIAN)); final StringBuilder sb = new StringBuilder(); ClusterEventDissector.dissectTerminationAck( TERMINATION_ACK, logBuffer, encodedMsgOffset(offset), sb); final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: TERMINATION_ACK " + "\\[24/24]: memberId=222 logLeadershipTermId=96 logPosition=128 senderMemberId=982374"; assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern)); }
@Override public List<String> listSchemaNames(ConnectorSession session) { return ImmutableList.of(JMX_SCHEMA_NAME, HISTORY_SCHEMA_NAME); }
@Test public void testListSchemas() { assertEquals(metadata.listSchemaNames(SESSION), ImmutableList.of(JMX_SCHEMA_NAME, HISTORY_SCHEMA_NAME)); }
@Override public int hashCode() { return timestamp.hashCode(); }
@Test public final void testHashCode() { Timestamped<String> a = new Timestamped<>("a", TS_1_1); Timestamped<String> b = new Timestamped<>("b", TS_1_1); assertTrue("value does not impact hashCode", a.hashCode() == b.hashCode()); }
@Override public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats); TimestampColumnStatsDataInspector aggregateData = timestampInspectorFromStats(aggregateColStats); TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(newColStats); Timestamp lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData)); if (lowValue != null) { aggregateData.setLowValue(lowValue); } Timestamp highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData)); if (highValue != null) { aggregateData.setHighValue(highValue); } aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls())); NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator(); NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator(); List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst); aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(), ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs())); aggregateData.setNdvEstimator(ndvEstimatorsList.get(0)); KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator(); KllHistogramEstimator newKllEst = newData.getHistogramEstimator(); aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst)); aggregateColStats.getStatsData().setTimestampStats(aggregateData); }
@Test public void testMergeNullValues() { ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Timestamp.class) .low(null) .high(null) .numNulls(1) .numDVs(0) .build()); merger.merge(aggrObj, aggrObj); ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Timestamp.class) .low(null) .high(null) .numNulls(2) .numDVs(0) .build(); assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData()); }
@Override public Status check() { if (applicationContext == null) { SpringExtensionInjector springExtensionInjector = SpringExtensionInjector.get(applicationModel); applicationContext = springExtensionInjector.getContext(); } if (applicationContext == null) { return new Status(Status.Level.UNKNOWN); } Map<String, DataSource> dataSources = applicationContext.getBeansOfType(DataSource.class, false, false); if (CollectionUtils.isEmptyMap(dataSources)) { return new Status(Status.Level.UNKNOWN); } Status.Level level = Status.Level.OK; StringBuilder buf = new StringBuilder(); for (Map.Entry<String, DataSource> entry : dataSources.entrySet()) { DataSource dataSource = entry.getValue(); if (buf.length() > 0) { buf.append(", "); } buf.append(entry.getKey()); try (Connection connection = dataSource.getConnection()) { DatabaseMetaData metaData = connection.getMetaData(); try (ResultSet resultSet = metaData.getTypeInfo()) { if (!resultSet.next()) { level = Status.Level.ERROR; } } buf.append(metaData.getURL()); buf.append('('); buf.append(metaData.getDatabaseProductName()); buf.append('-'); buf.append(metaData.getDatabaseProductVersion()); buf.append(')'); } catch (Throwable e) { logger.warn(CONFIG_WARN_STATUS_CHECKER, "", "", e.getMessage(), e); return new Status(level, e.getMessage()); } } return new Status(level, buf.toString()); }
@Test void testWithDatasourceHasNextResult() throws SQLException { Map<String, DataSource> map = new HashMap<String, DataSource>(); DataSource dataSource = mock(DataSource.class); Connection connection = mock(Connection.class, Answers.RETURNS_DEEP_STUBS); given(dataSource.getConnection()).willReturn(connection); given(connection.getMetaData().getTypeInfo().next()).willReturn(true); map.put("mockDatabase", dataSource); given(applicationContext.getBeansOfType(eq(DataSource.class), anyBoolean(), anyBoolean())) .willReturn(map); Status status = dataSourceStatusChecker.check(); assertThat(status.getLevel(), is(Status.Level.OK)); }
@Override public Set<SystemScope> getDefaults() { return Sets.filter(getAll(), isDefault); }
@Test public void getDefaults() { Set<SystemScope> defaults = Sets.newHashSet(defaultDynScope1, defaultDynScope2, defaultScope1, defaultScope2); assertThat(service.getDefaults(), equalTo(defaults)); }
@Override protected ObjectListingChunk getObjectListingChunk(String key, boolean recursive) throws IOException { String delimiter = recursive ? "" : PATH_SEPARATOR; key = PathUtils.normalizePath(key, PATH_SEPARATOR); // In case key is root (empty string) do not normalize prefix key = key.equals(PATH_SEPARATOR) ? "" : key; ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(mBucketNameInternal); request.setPrefix(key); request.setMaxKeys(getListingChunkLength(mUfsConf)); request.setDelimiter(delimiter); ObjectListing result = getObjectListingChunk(request); if (result != null) { return new COSObjectListingChunk(request, result); } return null; }
@Test public void testGetObjectListingChunk() { // test successful get object listing chunk Mockito.when(mClient.listObjects(ArgumentMatchers.any(ListObjectsRequest.class))) .thenReturn(new ObjectListing()); ListObjectsRequest request = new ListObjectsRequest(); Serializable result = mCOSUnderFileSystem.getObjectListingChunk(request); Assert.assertTrue(result instanceof ObjectListing); }
@Override public List<RoleDO> getRoleList() { return roleMapper.selectList(); }
@Test public void testGetRoleList() { // mock data RoleDO dbRole01 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())); roleMapper.insert(dbRole01); RoleDO dbRole02 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())); roleMapper.insert(dbRole02); // invoke List<RoleDO> list = roleService.getRoleList(); // assert assertEquals(2, list.size()); assertPojoEquals(dbRole01, list.get(0)); assertPojoEquals(dbRole02, list.get(1)); }
public RunResponse restartDirectly(WorkflowInstance instance, RunRequest runRequest) { Checks.checkTrue( !runRequest.isFreshRun(), "Cannot restart a workflow instance %s using fresh run policy [%s]", instance.getIdentity(), runRequest.getCurrentPolicy()); if (runRequest.getRestartConfig() != null) { runRequest.validateIdentity(instance); } WorkflowInstance.Status status = instanceDao.getLatestWorkflowInstanceStatus( instance.getWorkflowId(), instance.getWorkflowInstanceId()); if (!status.isTerminal()) { throw new MaestroInvalidStatusException( "Cannot restart workflow instance [%s][%s] as the latest run status is [%s] (non-terminal).", instance.getWorkflowId(), instance.getWorkflowInstanceId(), status); } workflowHelper.updateWorkflowInstance(instance, runRequest); int ret = runStrategyDao.startWithRunStrategy( instance, workflowDao.getRunStrategy(instance.getWorkflowId())); RunResponse runResponse = RunResponse.from(instance, ret); LOG.info("Restart a workflow instance with a response: [{}]", runResponse); return runResponse; }
@Test public void testRestartSubworkflow() { WorkflowInstance wfInstance = new WorkflowInstance(); SubworkflowInitiator initiator = new SubworkflowInitiator(); UpstreamInitiator.Info info = new UpstreamInitiator.Info(); info.setWorkflowId("foo"); info.setInstanceId(123L); info.setRunId(2L); info.setStepId("bar"); initiator.setAncestors(Collections.singletonList(info)); wfInstance.setInitiator(initiator); wfInstance.setStatus(WorkflowInstance.Status.SUCCEEDED); wfInstance.setWorkflowInstanceId(10L); wfInstance.setWorkflowRunId(3L); wfInstance.setWorkflowId("test-workflow"); wfInstance.setRuntimeWorkflow(Workflow.builder().build()); RunRequest request = RunRequest.builder() .initiator(new ManualInitiator()) .currentPolicy(RunPolicy.RESTART_FROM_BEGINNING) .restartConfig( RestartConfig.builder().addRestartNode("test-workflow", 10L, null).build()) .build(); when(runStrategyDao.startWithRunStrategy(any(), any())).thenReturn(1); when(instanceDao.getLatestWorkflowInstanceStatus(any(), anyLong())) .thenReturn(WorkflowInstance.Status.SUCCEEDED); doNothing().when(workflowHelper).updateWorkflowInstance(any(), any()); RunResponse response = actionHandler.restartDirectly(wfInstance, request); assertEquals("test-workflow", response.getWorkflowId()); assertEquals(10L, response.getWorkflowInstanceId()); assertEquals(3L, response.getWorkflowRunId()); assertEquals(RunResponse.Status.WORKFLOW_RUN_CREATED, response.getStatus()); verify(workflowHelper, times(1)).updateWorkflowInstance(any(), any()); }
public long getMsgOutCounter() { return msgOutCounter.longValue(); }
@Test public void testGetMsgOutCounter() { stats.msgOutCounter = 1L; consumer.updateStats(stats); assertEquals(consumer.getMsgOutCounter(), 1L); }
@Override public List<SnowflakeIdentifier> listIcebergTables(SnowflakeIdentifier scope) { StringBuilder baseQuery = new StringBuilder("SHOW ICEBERG TABLES"); String[] queryParams = null; switch (scope.type()) { case ROOT: // account-level listing baseQuery.append(" IN ACCOUNT"); break; case DATABASE: // database-level listing baseQuery.append(" IN DATABASE IDENTIFIER(?)"); queryParams = new String[] {scope.toIdentifierString()}; break; case SCHEMA: // schema-level listing baseQuery.append(" IN SCHEMA IDENTIFIER(?)"); queryParams = new String[] {scope.toIdentifierString()}; break; default: throw new IllegalArgumentException( String.format("Unsupported scope type for listIcebergTables: %s", scope)); } final String finalQuery = baseQuery.toString(); final String[] finalQueryParams = queryParams; List<SnowflakeIdentifier> tables; try { tables = connectionPool.run( conn -> queryHarness.query(conn, finalQuery, TABLE_RESULT_SET_HANDLER, finalQueryParams)); } catch (SQLException e) { throw snowflakeExceptionToIcebergException( scope, e, String.format("Failed to list tables for scope '%s'", scope)); } catch (InterruptedException e) { throw new UncheckedInterruptedException( e, "Interrupted while listing tables for scope '%s'", scope); } tables.forEach( table -> Preconditions.checkState( table.type() == SnowflakeIdentifier.Type.TABLE, "Expected TABLE, got identifier '%s' for scope '%s'", table, scope)); return tables; }
@SuppressWarnings("unchecked") @Test public void testListIcebergTablesSQLExceptionAtRootLevel() throws SQLException, InterruptedException { Exception injectedException = new SQLException(String.format("SQL exception with Error Code %d", 0), "2000", 0, null); when(mockClientPool.run(any(ClientPool.Action.class))).thenThrow(injectedException); assertThatExceptionOfType(UncheckedSQLException.class) .isThrownBy(() -> snowflakeClient.listIcebergTables(SnowflakeIdentifier.ofRoot())) .withMessageContaining("Failed to list tables for scope 'ROOT: '''") .withCause(injectedException); }
@Override public PollResult poll(long currentTimeMs) { if (memberId == null) { return PollResult.EMPTY; } // Send any pending acknowledgements before fetching more records. PollResult pollResult = processAcknowledgements(currentTimeMs); if (pollResult != null) { return pollResult; } if (!fetchMoreRecords || closing) { return PollResult.EMPTY; } Map<Node, ShareSessionHandler> handlerMap = new HashMap<>(); Map<String, Uuid> topicIds = metadata.topicIds(); for (TopicPartition partition : partitionsToFetch()) { Optional<Node> leaderOpt = metadata.currentLeader(partition).leader; if (!leaderOpt.isPresent()) { log.debug("Requesting metadata update for partition {} since current leader node is missing", partition); metadata.requestUpdate(false); continue; } Uuid topicId = topicIds.get(partition.topic()); if (topicId == null) { log.debug("Requesting metadata update for partition {} since topic ID is missing", partition); metadata.requestUpdate(false); continue; } Node node = leaderOpt.get(); if (nodesWithPendingRequests.contains(node.id())) { log.trace("Skipping fetch for partition {} because previous fetch request to {} has not been processed", partition, node.id()); } else { // if there is a leader and no in-flight requests, issue a new fetch ShareSessionHandler handler = handlerMap.computeIfAbsent(node, k -> sessionHandlers.computeIfAbsent(node.id(), n -> new ShareSessionHandler(logContext, n, memberId))); TopicIdPartition tip = new TopicIdPartition(topicId, partition); Acknowledgements acknowledgementsToSend = fetchAcknowledgementsMap.get(tip); if (acknowledgementsToSend != null) { metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size()); } handler.addPartitionToFetch(tip, acknowledgementsToSend); log.debug("Added fetch request for partition {} to node {}", partition, node.id()); } } Map<Node, ShareFetchRequest.Builder> builderMap = new LinkedHashMap<>(); for (Map.Entry<Node, ShareSessionHandler> entry : handlerMap.entrySet()) { builderMap.put(entry.getKey(), entry.getValue().newShareFetchBuilder(groupId, fetchConfig)); } List<UnsentRequest> requests = builderMap.entrySet().stream().map(entry -> { Node target = entry.getKey(); log.trace("Building ShareFetch request to send to node {}", target.id()); ShareFetchRequest.Builder requestBuilder = entry.getValue(); nodesWithPendingRequests.add(target.id()); BiConsumer<ClientResponse, Throwable> responseHandler = (clientResponse, error) -> { if (error != null) { handleShareFetchFailure(target, requestBuilder.data(), error); } else { handleShareFetchSuccess(target, requestBuilder.data(), clientResponse); } }; return new UnsentRequest(requestBuilder, Optional.of(target)).whenComplete(responseHandler); }).collect(Collectors.toList()); return new PollResult(requests); }
@Test public void testParseInvalidRecordBatch() { buildRequestManager(); MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, Compression.NONE, TimestampType.CREATE_TIME, new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes())); ByteBuffer buffer = records.buffer(); // flip some bits to fail the crc buffer.putInt(32, buffer.get(32) ^ 87238423); assignFromSubscribed(singleton(tp0)); // normal fetch assertEquals(1, sendFetches()); client.prepareResponse(fullFetchResponse(tip0, MemoryRecords.readableRecords(buffer), ShareCompletedFetchTest.acquiredRecords(0L, 3), Errors.NONE)); networkClientDelegate.poll(time.timer(0)); assertThrows(KafkaException.class, this::collectFetch); }
static int toInteger(final JsonNode object) { if (object instanceof NumericNode) { return object.intValue(); } if (object instanceof TextNode) { try { return Integer.parseInt(object.textValue()); } catch (final NumberFormatException e) { throw failedStringCoercionException(SqlBaseType.INTEGER); } } throw invalidConversionException(object, SqlBaseType.INTEGER); }
@Test(expected = IllegalArgumentException.class) public void shouldFailWhenConvertingNonIntegerToInteger() { JsonSerdeUtils.toInteger(JsonNodeFactory.instance.booleanNode(true)); }
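A minimal sketch of the coercion above (not part of the source tests). It assumes same-package access, since toInteger is package-private, and that Jackson's JsonNodeFactory is on the classpath:

    JsonNodeFactory nf = JsonNodeFactory.instance;
    JsonSerdeUtils.toInteger(nf.numberNode(42));    // 42: a NumericNode returns intValue() directly
    JsonSerdeUtils.toInteger(nf.textNode("42"));    // 42: a TextNode is parsed with Integer.parseInt
    JsonSerdeUtils.toInteger(nf.textNode("4.2"));   // throws: NumberFormatException becomes a string-coercion failure
    JsonSerdeUtils.toInteger(nf.booleanNode(true)); // throws: invalid conversion, as the test above checks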
public static boolean shouldLoadInIsolation(String name) { return !(EXCLUDE.matcher(name).matches() && !INCLUDE.matcher(name).matches()); }
@Test public void testMirrorClasses() { assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.mirror.MirrorSourceTask") ); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.mirror.MirrorSourceConnector") ); }
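A hedged sketch of the double-negative predicate above: a class loads in isolation unless it matches EXCLUDE without also matching INCLUDE. The exact contents of the EXCLUDE/INCLUDE patterns are assumptions here; only the mirror results follow directly from the test above.

    PluginUtils.shouldLoadInIsolation("com.example.MyConnector");                          // true: no EXCLUDE match, so isolated
    PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.mirror.MirrorSourceTask"); // true: excluded prefix, but re-included
    PluginUtils.shouldLoadInIsolation("java.lang.String");                                 // false: excluded framework class (assumed)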
public MediaType detect(InputStream input, Metadata metadata) throws IOException { if (input == null) { return MediaType.OCTET_STREAM; } input.mark(offsetRangeEnd + length); try { int offset = 0; // Skip bytes at the beginning, using skip() or read() while (offset < offsetRangeBegin) { long n = input.skip(offsetRangeBegin - offset); if (n > 0) { offset += n; } else if (input.read() != -1) { offset += 1; } else { return MediaType.OCTET_STREAM; } } // Fill in the comparison window byte[] buffer = new byte[length + (offsetRangeEnd - offsetRangeBegin)]; int n = input.read(buffer); if (n > 0) { offset += n; } while (n != -1 && offset < offsetRangeEnd + length) { int bufferOffset = offset - offsetRangeBegin; n = input.read(buffer, bufferOffset, buffer.length - bufferOffset); // increment offset - in case not all read (see testDetectStreamReadProblems) if (n > 0) { offset += n; } } if (this.isRegex) { int flags = 0; if (this.isStringIgnoreCase) { flags = Pattern.CASE_INSENSITIVE; } Pattern p = Pattern.compile(new String(this.pattern, UTF_8), flags); ByteBuffer bb = ByteBuffer.wrap(buffer); CharBuffer result = ISO_8859_1.decode(bb); Matcher m = p.matcher(result); boolean match = false; // Loop until we've covered the entire offset range for (int i = 0; i <= offsetRangeEnd - offsetRangeBegin; i++) { m.region(i, length + i); match = m.lookingAt(); // match regex from start of region if (match) { return type; } } } else { if (offset < offsetRangeBegin + length) { return MediaType.OCTET_STREAM; } // Loop until we've covered the entire offset range for (int i = 0; i <= offsetRangeEnd - offsetRangeBegin; i++) { boolean match = true; int masked; for (int j = 0; match && j < length; j++) { masked = (buffer[i + j] & mask[j]); if (this.isStringIgnoreCase) { masked = Character.toLowerCase(masked); } match = (masked == pattern[j]); } if (match) { return type; } } } return MediaType.OCTET_STREAM; } finally { input.reset(); } }
@Test public void testDetectNull() throws Exception { MediaType html = new MediaType("text", "html"); Detector detector = new MagicDetector(html, "<html".getBytes(US_ASCII)); assertEquals(MediaType.OCTET_STREAM, detector.detect(null, new Metadata())); }
public static String[] extractRecordKeysByFields(String recordKey, List<String> fields) { String[] fieldKV = recordKey.split(DEFAULT_RECORD_KEY_PARTS_SEPARATOR); return Arrays.stream(fieldKV).map(kv -> kv.split(DEFAULT_COMPOSITE_KEY_FILED_VALUE, 2)) .filter(kvArray -> kvArray.length == 1 || fields.isEmpty() || (fields.contains(kvArray[0]))) .map(kvArray -> { if (kvArray.length == 1) { return kvArray[0]; } else if (kvArray[1].equals(NULL_RECORDKEY_PLACEHOLDER)) { return null; } else if (kvArray[1].equals(EMPTY_RECORDKEY_PLACEHOLDER)) { return ""; } else { return kvArray[1]; } }).toArray(String[]::new); }
@Test public void testExtractRecordKeysWithFields() { List<String> fields = new ArrayList<>(1); fields.add("id2"); String[] s1 = KeyGenUtils.extractRecordKeysByFields("id1:1,id2:2,id3:3", fields); Assertions.assertArrayEquals(new String[] {"2"}, s1); String[] s2 = KeyGenUtils.extractRecordKeysByFields("id1:1,id2:2,2,id3:3", fields); Assertions.assertArrayEquals(new String[] {"2", "2"}, s2); }
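A hedged sketch (not from the source tests) of how the placeholder branches above surface to callers. The literal placeholder values are assumptions based on Hudi's conventions ("__null__" / "__empty__"); treat them as illustrative:

    List<String> fields = Arrays.asList("id1", "id2");
    String[] keys = KeyGenUtils.extractRecordKeysByFields("id1:__null__,id2:__empty__,id3:3", fields);
    // keys[0] == null (null placeholder), keys[1] == "" (empty placeholder); id3 is filtered out by `fields`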
@CheckForNull public static File resolveSymlinkToFile(@NonNull File link) throws InterruptedException, IOException { String target = resolveSymlink(link); if (target == null) return null; File f = new File(target); if (f.isAbsolute()) return f; // absolute symlink return new File(link.getParentFile(), target); // relative symlink }
@Test @Issue("SECURITY-904") public void resolveSymlinkToFile() throws Exception { assumeFalse(Functions.isWindows()); // root // /a // /aa // aa.txt // /_b => symlink to /root/b // /b // /_a => symlink to /root/a File root = tmp.getRoot(); File a = new File(root, "a"); File aa = new File(a, "aa"); aa.mkdirs(); File aaTxt = new File(aa, "aa.txt"); Files.writeString(aaTxt.toPath(), "aa", StandardCharsets.US_ASCII); File b = new File(root, "b"); b.mkdir(); File _a = new File(b, "_a"); Util.createSymlink(_a.getParentFile(), a.getAbsolutePath(), _a.getName(), TaskListener.NULL); File _b = new File(a, "_b"); Util.createSymlink(_b.getParentFile(), b.getAbsolutePath(), _b.getName(), TaskListener.NULL); assertTrue(Files.isSymbolicLink(_a.toPath())); assertTrue(Files.isSymbolicLink(_b.toPath())); // direct symlinks are resolved assertEquals(Util.resolveSymlinkToFile(_a), a); assertEquals(Util.resolveSymlinkToFile(_b), b); // intermediate symlinks are NOT resolved assertNull(Util.resolveSymlinkToFile(new File(_a, "aa"))); assertNull(Util.resolveSymlinkToFile(new File(_a, "aa/aa.txt"))); }
@Override public List<MailTemplateDO> getMailTemplateList() { return mailTemplateMapper.selectList(); }
@Test public void testGetMailTemplateList() { // mock data MailTemplateDO dbMailTemplate01 = randomPojo(MailTemplateDO.class); mailTemplateMapper.insert(dbMailTemplate01); MailTemplateDO dbMailTemplate02 = randomPojo(MailTemplateDO.class); mailTemplateMapper.insert(dbMailTemplate02); // invoke List<MailTemplateDO> list = mailTemplateService.getMailTemplateList(); // assert assertEquals(2, list.size()); assertPojoEquals(dbMailTemplate01, list.get(0)); assertPojoEquals(dbMailTemplate02, list.get(1)); }
public int generate(Class<? extends CustomResource> crdClass, Writer out) throws IOException { ObjectNode node = nf.objectNode(); Crd crd = crdClass.getAnnotation(Crd.class); if (crd == null) { err(crdClass + " is not annotated with @Crd"); } else { node.put("apiVersion", "apiextensions.k8s.io/" + crdApiVersion) .put("kind", "CustomResourceDefinition") .putObject("metadata") .put("name", crd.spec().names().plural() + "." + crd.spec().group()); if (!labels.isEmpty()) { ((ObjectNode) node.get("metadata")) .putObject("labels") .setAll(labels.entrySet().stream() .collect(Collectors.<Map.Entry<String, String>, String, JsonNode, LinkedHashMap<String, JsonNode>>toMap( Map.Entry::getKey, e -> new TextNode( e.getValue() .replace("%group%", crd.spec().group()) .replace("%plural%", crd.spec().names().plural()) .replace("%singular%", crd.spec().names().singular())), (x, y) -> x, LinkedHashMap::new))); } node.set("spec", buildSpec(crdApiVersion, crd.spec(), crdClass)); } mapper.writeValue(out, node); return numErrors; }
@Test void generateHelmMetadataLabels() throws IOException { Map<String, String> labels = new LinkedHashMap<>(); labels.put("app", "{{ template \"strimzi.name\" . }}"); labels.put("chart", "{{ template \"strimzi.chart\" . }}"); labels.put("component", "%plural%.%group%-crd"); labels.put("release", "{{ .Release.Name }}"); labels.put("heritage", "{{ .Release.Service }}"); CrdGenerator crdGenerator = new CrdGenerator(KubeVersion.V1_16_PLUS, ApiVersion.V1, CrdGenerator.YAML_MAPPER, labels, crdGeneratorReporter, emptyList(), null, null, new CrdGenerator.NoneConversionStrategy(), null); StringWriter w = new StringWriter(); crdGenerator.generate(ExampleCrd.class, w); String s = w.toString(); assertTrue(errors.isEmpty(), "CrdGenerator should not report any errors: " + errors); assertEquals(CrdTestUtils.readResource("simpleTestHelmMetadata.yaml"), s); }
@InvokeOnHeader(Web3jConstants.ETH_GET_STORAGE_AT) void ethGetStorageAt(Message message) throws IOException { String address = message.getHeader(Web3jConstants.ADDRESS, configuration::getAddress, String.class); DefaultBlockParameter atBlock = toDefaultBlockParameter(message.getHeader(Web3jConstants.AT_BLOCK, configuration::getAtBlock, String.class)); BigInteger position = message.getHeader(Web3jConstants.POSITION, configuration::getPosition, BigInteger.class); Request<?, EthGetStorageAt> request = web3j.ethGetStorageAt(address, position, atBlock); setRequestId(message, request); EthGetStorageAt response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getData()); } }
@Test public void ethGetStorageAtTest() throws Exception { EthGetStorageAt response = Mockito.mock(EthGetStorageAt.class); Mockito.when(mockWeb3j.ethGetStorageAt(any(), any(), any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getData()).thenReturn("test"); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_STORAGE_AT); template.send(exchange); String body = exchange.getIn().getBody(String.class); assertEquals("test", body); }
@Override public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment, InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) { int numPartitions = instancePartitions.getNumPartitions(); checkReplication(instancePartitions, _replication, _tableName); int partitionId; if (_partitionColumn == null || numPartitions == 1) { partitionId = 0; } else { // Uniformly spray the segment partitions over the instance partitions if (_tableConfig.getTableType() == TableType.OFFLINE) { partitionId = SegmentAssignmentUtils .getOfflineSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions; } else { partitionId = SegmentAssignmentUtils .getRealtimeSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions; } } return SegmentAssignmentUtils.assignSegmentWithReplicaGroup(currentAssignment, instancePartitions, partitionId); }
@Test public void testAssignSegmentWithPartition() { int numInstancesPerReplicaGroup = NUM_INSTANCES / NUM_REPLICAS; Map<String, Map<String, String>> currentAssignment = new TreeMap<>(); int numInstancesPerPartition = numInstancesPerReplicaGroup / NUM_PARTITIONS; for (int segmentId = 0; segmentId < NUM_SEGMENTS; segmentId++) { String segmentName = SEGMENTS.get(segmentId); List<String> instancesAssigned = _segmentAssignmentWithPartition .assignSegment(segmentName, currentAssignment, _instancePartitionsMapWithPartition); assertEquals(instancesAssigned.size(), NUM_REPLICAS); // Segment 0 (partition 0) should be assigned to instance 0, 6, 12 // Segment 1 (partition 1) should be assigned to instance 2, 8, 14 // Segment 2 (partition 2) should be assigned to instance 4, 10, 16 // Segment 3 (partition 0) should be assigned to instance 1, 7, 13 // Segment 4 (partition 1) should be assigned to instance 3, 9, 15 // Segment 5 (partition 2) should be assigned to instance 5, 11, 17 // Segment 6 (partition 0) should be assigned to instance 0, 6, 12 // Segment 7 (partition 1) should be assigned to instance 2, 8, 14 // ... int partitionId = segmentId % NUM_PARTITIONS; for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) { int expectedAssignedInstanceId = (segmentId % numInstancesPerReplicaGroup) / NUM_PARTITIONS + partitionId * numInstancesPerPartition + replicaGroupId * numInstancesPerReplicaGroup; assertEquals(instancesAssigned.get(replicaGroupId), INSTANCES.get(expectedAssignedInstanceId)); } currentAssignment .put(segmentName, SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE)); } }
@CanIgnoreReturnValue public final Ordered containsExactly() { return containsExactlyEntriesIn(ImmutableMap.of()); }
@Test public void containsExactlyExtraKeyAndMissingKeyAndWrongValue() { ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "march", 3); expectFailureWhenTestingThat(actual).containsExactly("march", 33, "feb", 2); assertFailureKeys( "keys with wrong values", "for key", "expected value", "but got value", "missing keys", "for key", "expected value", "unexpected keys", "for key", "unexpected value", "---", "expected", "but was"); assertFailureValueIndexed("for key", 0, "march"); assertFailureValueIndexed("expected value", 0, "33"); assertFailureValue("but got value", "3"); assertFailureValueIndexed("for key", 1, "feb"); assertFailureValueIndexed("expected value", 1, "2"); assertFailureValueIndexed("for key", 2, "jan"); assertFailureValue("unexpected value", "1"); }
public String type(Class<?> clz) { if (!sourcePkgLevelAccessible(clz)) { clz = Object.class; } if (clz.isArray()) { return getArrayType(clz); } String type = ReflectionUtils.getLiteralName(clz); if (type.startsWith("java.lang")) { if (!type.substring("java.lang.".length()).contains(".")) { String simpleName = clz.getSimpleName(); boolean hasPackage = StringUtils.isNotBlank(pkg); Map<String, Boolean> packageMap = nameConflicts.computeIfAbsent(hasPackage ? pkg : "", p -> new ConcurrentHashMap<>()); Class<?> c = clz; Boolean conflictRes = packageMap.computeIfAbsent( simpleName, sn -> { try { ClassLoader beanClassClassLoader = c.getClassLoader() == null ? Thread.currentThread().getContextClassLoader() : c.getClassLoader(); if (beanClassClassLoader == null) { beanClassClassLoader = Fury.class.getClassLoader(); } beanClassClassLoader.loadClass(hasPackage ? pkg + "." + sn : sn); return Boolean.TRUE; } catch (ClassNotFoundException e) { return Boolean.FALSE; } }); return conflictRes ? clz.getName() : simpleName; } } if (imports.contains(type)) { return clz.getSimpleName(); } else { int index = type.lastIndexOf("."); if (index > 0) { // This might be package name or qualified name of outer class String pkgOrClassName = type.substring(0, index); if (imports.contains(pkgOrClassName + ".*")) { return clz.getSimpleName(); } } return type; } }
@Test public void type() { TypeRef<List<List<String>>> typeRef = new TypeRef<List<List<String>>>() {}; { CodegenContext ctx = new CodegenContext(); ctx.addImport(List.class); Assert.assertEquals("List", ctx.type(List.class)); } CodegenContext ctx = new CodegenContext(); String type = ctx.type(typeRef); Assert.assertEquals("java.util.List", type); Assert.assertEquals("int[][]", ctx.type(int[][].class)); }
public boolean registerClient(final String addr, final HeartbeatData heartbeat, final long timeoutMillis) throws RemotingException, InterruptedException { RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.HEART_BEAT, new HeartbeatRequestHeader()); request.setBody(heartbeat.encode()); RemotingCommand response = this.remotingClient.invokeSync(addr, request, timeoutMillis); return response.getCode() == ResponseCode.SUCCESS; }
@Test public void testRegisterClient() throws RemotingException, InterruptedException { mockInvokeSync(); HeartbeatData heartbeatData = new HeartbeatData(); assertTrue(mqClientAPI.registerClient(defaultBrokerAddr, heartbeatData, defaultTimeout)); }
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof BandwidthProfileAction) { final BandwidthProfileAction that = (BandwidthProfileAction) obj; return this.getClass() == that.getClass() && Objects.equals(this.action, that.action) && Objects.equals(this.dscpClass, that.dscpClass) && Objects.equals(this.ipPrecedence, that.ipPrecedence) && Objects.equals(this.dropPrecedence, that.dropPrecedence); } return false; }
@Test public void testEquals() { BandwidthProfileAction passAction1 = BandwidthProfileAction.builder() .action(Action.PASS) .build(); BandwidthProfileAction passAction2 = BandwidthProfileAction.builder() .action(Action.PASS) .build(); BandwidthProfileAction discardAction1 = BandwidthProfileAction.builder() .action(Action.DISCARD) .build(); BandwidthProfileAction discardAction2 = BandwidthProfileAction.builder() .action(Action.DISCARD) .build(); BandwidthProfileAction remarkAction1 = BandwidthProfileAction.builder() .action(Action.REMARK) .dscpClass(DscpClass.AF11) .build(); BandwidthProfileAction remarkAction2 = BandwidthProfileAction.builder() .action(Action.REMARK) .dscpClass(DscpClass.AF11) .build(); new EqualsTester() .addEqualityGroup(passAction1, passAction2) .addEqualityGroup(discardAction1, discardAction2) .addEqualityGroup(remarkAction1, remarkAction2) .testEquals(); }
public static FusedPipeline fuse(Pipeline p) { return new GreedyPipelineFuser(p).fusedPipeline; }
@Test public void singleEnvironmentAcrossGroupByKeyMultipleStages() { Components components = partialComponents .toBuilder() .putTransforms( "read", PTransform.newBuilder() .setUniqueName("Read") .putInputs("input", "impulse.out") .putOutputs("output", "read.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN) .setPayload( ParDoPayload.newBuilder() .setDoFn(FunctionSpec.newBuilder()) .build() .toByteString())) .setEnvironmentId("py") .build()) .putPcollections("read.out", pc("read.out")) .putTransforms( "groupByKey", PTransform.newBuilder() .setUniqueName("GroupByKey") .putInputs("input", "read.out") .putOutputs("output", "groupByKey.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.GROUP_BY_KEY_TRANSFORM_URN)) .build()) .putPcollections("groupByKey.out", pc("groupByKey.out")) .putTransforms( "parDo", PTransform.newBuilder() .setUniqueName("ParDo") .putInputs("input", "groupByKey.out") .putOutputs("output", "parDo.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN) .setPayload( ParDoPayload.newBuilder() .setDoFn(FunctionSpec.newBuilder()) .build() .toByteString())) .setEnvironmentId("py") .build()) .putPcollections("parDo.out", pc("parDo.out")) .build(); FusedPipeline fused = GreedyPipelineFuser.fuse(Pipeline.newBuilder().setComponents(components).build()); assertThat( fused.getRunnerExecutedTransforms(), containsInAnyOrder( PipelineNode.pTransform("impulse", components.getTransformsOrThrow("impulse")), PipelineNode.pTransform("groupByKey", components.getTransformsOrThrow("groupByKey")))); assertThat( fused.getFusedStages(), containsInAnyOrder( ExecutableStageMatcher.withInput("impulse.out") .withOutputs("read.out") .withTransforms("read"), ExecutableStageMatcher.withInput("groupByKey.out") .withNoOutputs() .withTransforms("parDo"))); }
@Description("Returns the minimum convex geometry that encloses all input geometries") @ScalarFunction("ST_ConvexHull") @SqlType(GEOMETRY_TYPE_NAME) public static Slice stConvexHull(@SqlType(GEOMETRY_TYPE_NAME) Slice input) { OGCGeometry geometry = EsriGeometrySerde.deserialize(input); if (geometry.isEmpty()) { return input; } if (GeometryType.getForEsriGeometryType(geometry.geometryType()) == POINT) { return input; } return EsriGeometrySerde.serialize(geometry.convexHull()); }
@Test public void testSTConvexHull() { // test empty geometry assertConvexHull("POINT EMPTY", "POINT EMPTY"); assertConvexHull("MULTIPOINT EMPTY", "MULTIPOINT EMPTY"); assertConvexHull("LINESTRING EMPTY", "LINESTRING EMPTY"); assertConvexHull("MULTILINESTRING EMPTY", "MULTILINESTRING EMPTY"); assertConvexHull("POLYGON EMPTY", "POLYGON EMPTY"); assertConvexHull("MULTIPOLYGON EMPTY", "MULTIPOLYGON EMPTY"); assertConvexHull("GEOMETRYCOLLECTION EMPTY", "GEOMETRYCOLLECTION EMPTY"); assertConvexHull("GEOMETRYCOLLECTION (POINT (1 1), POINT EMPTY)", "POINT (1 1)"); assertConvexHull("GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 1), GEOMETRYCOLLECTION (POINT (1 5), POINT (4 5), GEOMETRYCOLLECTION (POINT (3 4), POINT EMPTY))))", "POLYGON ((1 1, 1 5, 4 5, 1 1))"); // test single geometry assertConvexHull("POINT (1 1)", "POINT (1 1)"); assertConvexHull("LINESTRING (1 1, 1 9, 2 2)", "POLYGON ((1 1, 1 9, 2 2, 1 1))"); // convex single geometry assertConvexHull("LINESTRING (1 1, 1 9, 2 2, 1 1)", "POLYGON ((1 1, 1 9, 2 2, 1 1))"); assertConvexHull("POLYGON ((0 0, 0 3, 2 4, 4 2, 3 0, 0 0))", "POLYGON ((0 0, 0 3, 2 4, 4 2, 3 0, 0 0))"); // non-convex geometry assertConvexHull("LINESTRING (1 1, 1 9, 2 2, 1 1, 4 0)", "POLYGON ((1 1, 1 9, 4 0, 1 1))"); assertConvexHull("POLYGON ((0 0, 0 3, 4 4, 1 1, 3 0, 0 0))", "POLYGON ((0 0, 0 3, 4 4, 3 0, 0 0))"); // all points are on the same line assertConvexHull("LINESTRING (20 20, 30 30)", "LINESTRING (20 20, 30 30)"); assertConvexHull("MULTILINESTRING ((0 0, 3 3), (1 1, 2 2), (2 2, 4 4), (5 5, 8 8))", "LINESTRING (0 0, 8 8)"); assertConvexHull("MULTIPOINT (0 1, 1 2, 2 3, 3 4, 4 5, 5 6)", "LINESTRING (0 1, 5 6)"); assertConvexHull("GEOMETRYCOLLECTION (POINT (0 0), LINESTRING (1 1, 4 4, 2 2), POINT (10 10), POLYGON ((5 5, 7 7, 6 6, 5 5)), POINT (2 2), LINESTRING (6 6, 9 9))", "LINESTRING (0 0, 10 10)"); assertConvexHull("GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (2 2), POINT (1 1)), POINT (3 3))", "LINESTRING (3 3, 1 1)"); // not all points are on the same line assertConvexHull("MULTILINESTRING ((1 1, 5 1, 6 6), (2 4, 4 0), (2 -4, 4 4), (3 -2, 4 -3))", "POLYGON ((1 1, 2 4, 6 6, 5 1, 4 -3, 2 -4, 1 1))"); assertConvexHull("MULTIPOINT (0 2, 1 0, 3 0, 4 0, 4 2, 2 2, 2 4)", "POLYGON ((0 2, 2 4, 4 2, 4 0, 1 0, 0 2))"); assertConvexHull("MULTIPOLYGON (((0 3, 2 0, 3 6, 0 3), (2 1, 2 3, 5 3, 5 1, 2 1), (1 7, 2 4, 4 2, 5 6, 3 8, 1 7)))", "POLYGON ((0 3, 1 7, 3 8, 5 6, 5 1, 2 0, 0 3))"); assertConvexHull("GEOMETRYCOLLECTION (POINT (2 3), LINESTRING (2 8, 7 10), POINT (8 10), POLYGON ((4 4, 4 8, 9 8, 6 6, 6 4, 8 3, 6 1, 4 4)), POINT (4 2), LINESTRING (3 6, 5 5), POLYGON ((7 5, 7 6, 8 6, 8 5, 7 5)))", "POLYGON ((2 3, 2 8, 7 10, 8 10, 9 8, 8 3, 6 1, 2 3))"); assertConvexHull("GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (2 3), LINESTRING (2 8, 7 10), GEOMETRYCOLLECTION (POINT (8 10))), POLYGON ((4 4, 4 8, 9 8, 6 6, 6 4, 8 3, 6 1, 4 4)), POINT (4 2), LINESTRING (3 6, 5 5), POLYGON ((7 5, 7 6, 8 6, 8 5, 7 5)))", "POLYGON ((2 3, 2 8, 7 10, 8 10, 9 8, 8 3, 6 1, 2 3))"); // single-element multi-geometries and geometry collections assertConvexHull("MULTILINESTRING ((1 1, 5 1, 6 6))", "POLYGON ((1 1, 6 6, 5 1, 1 1))"); assertConvexHull("MULTILINESTRING ((1 1, 5 1, 1 4, 5 4))", "POLYGON ((1 1, 1 4, 5 4, 5 1, 1 1))"); assertConvexHull("MULTIPOINT (0 2)", "POINT (0 2)"); assertConvexHull("MULTIPOLYGON (((0 3, 3 6, 2 0, 0 3)))", "POLYGON ((0 3, 3 6, 2 0, 0 3))"); assertConvexHull("MULTIPOLYGON (((0 0, 4 0, 4 4, 0 4, 2 2, 0 0)))", "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))"); 
assertConvexHull("GEOMETRYCOLLECTION (POINT (2 3))", "POINT (2 3)"); assertConvexHull("GEOMETRYCOLLECTION (LINESTRING (1 1, 5 1, 6 6))", "POLYGON ((1 1, 6 6, 5 1, 1 1))"); assertConvexHull("GEOMETRYCOLLECTION (LINESTRING (1 1, 5 1, 1 4, 5 4))", "POLYGON ((1 1, 1 4, 5 4, 5 1, 1 1))"); assertConvexHull("GEOMETRYCOLLECTION (POLYGON ((0 3, 3 6, 2 0, 0 3)))", "POLYGON ((0 3, 3 6, 2 0, 0 3))"); assertConvexHull("GEOMETRYCOLLECTION (POLYGON ((0 0, 4 0, 4 4, 0 4, 2 2, 0 0)))", "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))"); }
@ScalarFunction(nullableParameters = true) public static byte[] toIntegerSumTupleSketch(@Nullable Object key, @Nullable Integer value) { return toIntegerSumTupleSketch(key, value, CommonConstants.Helix.DEFAULT_TUPLE_SKETCH_LGK); }
@Test public void intTupleSumCreation() { for (Object i : _inputs) { Assert.assertEquals(intTupleEstimate(SketchFunctions.toIntegerSumTupleSketch(i, 1)), 1.0d); Assert.assertEquals(intTupleEstimate(SketchFunctions.toIntegerSumTupleSketch(i, 1, 16)), 1.0d); } Assert.assertEquals(intTupleEstimate(SketchFunctions.toIntegerSumTupleSketch(null, 1)), 0.0d); Assert.assertEquals(intTupleEstimate(SketchFunctions.toIntegerSumTupleSketch(null, 1, 16)), 0.0d); Assert.assertThrows(IllegalArgumentException.class, () -> SketchFunctions.toIntegerSumTupleSketch(new Object(), 1)); Assert.assertThrows(IllegalArgumentException.class, () -> SketchFunctions.toIntegerSumTupleSketch(new Object(), 1, 1024)); }
int interact(Cell c, CellPool pool, Cell[][] cellMatrix) { if (this.candy.getType().equals(Type.REWARD_FRUIT) || c.candy.getType() .equals(Type.REWARD_FRUIT)) { return 0; } else { if (this.candy.name.equals(c.candy.name)) { var pointsWon = this.candy.getPoints() + c.candy.getPoints(); handleCrush(c, pool, cellMatrix); return pointsWon; } else { return 0; } } }
@Test void interactTest() { var c1 = new Candy("green jelly", "jelly", Type.CRUSHABLE_CANDY, 5); var c2 = new Candy("green apple", "apple", Type.REWARD_FRUIT, 10); var matrix = new Cell[4][4]; matrix[0][0] = new Cell(c1, 0, 0); matrix[0][1] = new Cell(c1, 1, 0); matrix[0][2] = new Cell(c2, 2, 0); matrix[0][3] = new Cell(c1, 3, 0); var cp = new CellPool(5); var points1 = matrix[0][0].interact(matrix[0][1], cp, matrix); var points2 = matrix[0][2].interact(matrix[0][3], cp, matrix); assertTrue(points1 > 0 && points2 == 0); }
public static String generateFullNewWalletFile(String password, File destinationDirectory) throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException, CipherException, IOException { return generateNewWalletFile(password, destinationDirectory, true); }
@Test public void testGenerateFullNewWalletFile() throws Exception { String fileName = WalletUtils.generateFullNewWalletFile(PASSWORD, tempDir); testGeneratedNewWalletFile(fileName); }
@Override public int hashCode() { return Objects.hash(partition, topicId); }
@Test public void testCachedSharePartitionEqualsAndHashCode() { Uuid topicId = Uuid.randomUuid(); String topicName = "topic"; int partition = 0; CachedSharePartition cachedSharePartitionWithIdAndName = new CachedSharePartition(topicName, topicId, partition, false); CachedSharePartition cachedSharePartitionWithIdAndNoName = new CachedSharePartition(null, topicId, partition, false); CachedSharePartition cachedSharePartitionWithDifferentIdAndName = new CachedSharePartition(topicName, Uuid.randomUuid(), partition, false); CachedSharePartition cachedSharePartitionWithZeroIdAndName = new CachedSharePartition(topicName, Uuid.ZERO_UUID, partition, false); // CachedSharePartitions with valid topic IDs will compare topic ID and partition but not topic name. assertEquals(cachedSharePartitionWithIdAndName, cachedSharePartitionWithIdAndNoName); assertEquals(cachedSharePartitionWithIdAndName.hashCode(), cachedSharePartitionWithIdAndNoName.hashCode()); assertNotEquals(cachedSharePartitionWithIdAndName, cachedSharePartitionWithDifferentIdAndName); assertNotEquals(cachedSharePartitionWithIdAndName.hashCode(), cachedSharePartitionWithDifferentIdAndName.hashCode()); assertNotEquals(cachedSharePartitionWithIdAndName, cachedSharePartitionWithZeroIdAndName); assertNotEquals(cachedSharePartitionWithIdAndName.hashCode(), cachedSharePartitionWithZeroIdAndName.hashCode()); // CachedSharePartitions with null name and valid IDs will act just like ones with valid names assertNotEquals(cachedSharePartitionWithIdAndNoName, cachedSharePartitionWithDifferentIdAndName); assertNotEquals(cachedSharePartitionWithIdAndNoName.hashCode(), cachedSharePartitionWithDifferentIdAndName.hashCode()); assertNotEquals(cachedSharePartitionWithIdAndNoName, cachedSharePartitionWithZeroIdAndName); assertNotEquals(cachedSharePartitionWithIdAndNoName.hashCode(), cachedSharePartitionWithZeroIdAndName.hashCode()); assertEquals(cachedSharePartitionWithZeroIdAndName.hashCode(), cachedSharePartitionWithZeroIdAndName.hashCode()); }
public abstract void scan(File dir, FileVisitor visitor) throws IOException;
@Test public void globShouldIgnoreDefaultExcludesByRequest() throws Exception { FilePath tmp = new FilePath(tmpRule.getRoot()); try { tmp.child(".gitignore").touch(0); FilePath git = tmp.child(".git"); git.mkdirs(); git.child("HEAD").touch(0); DirScanner glob = new DirScanner.Glob("**/*", null, false); MatchingFileVisitor gitdir = new MatchingFileVisitor("HEAD"); MatchingFileVisitor gitignore = new MatchingFileVisitor(".gitignore"); glob.scan(new File(tmp.getRemote()), gitdir); glob.scan(new File(tmp.getRemote()), gitignore); assertTrue(gitdir.found); assertTrue(gitignore.found); } finally { tmp.deleteRecursive(); } }
public void updateNodeLabels(NodeId node) { synchronized (lock) { newlyRegisteredNodes.add(node); } }
@Test public void testWithNodeLabelUpdateEnabled() throws Exception { conf.setLong(YarnConfiguration.RM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS, 1000); MockRM rm = new MockRM(conf); rm.init(conf); rm.getRMContext().getRMDelegatedNodeLabelsUpdater().nodeLabelsUpdateInterval = 3 * 1000; rm.start(); RMNodeLabelsManager mgr = rm.getRMContext().getNodeLabelManager(); mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y")); NodeId nodeId = toNodeId("h1:1234"); assertEquals(0, mgr.getLabelsOnNode(nodeId).size()); updateNodeLabels(nodeId, "x"); registerNode(rm, nodeId); Thread.sleep(4000); assertCollectionEquals(ImmutableSet.of("x"), mgr.getLabelsOnNode(nodeId)); // Ensure that node labels are updated if NodeLabelsProvider // gives different labels updateNodeLabels(nodeId, "y"); Thread.sleep(4000); assertCollectionEquals(ImmutableSet.of("y"), mgr.getLabelsOnNode(nodeId)); rm.stop(); }
public static String getClassMethodKey(final String className, final String methodName) { return String.join("_", className, methodName); }
@Test public void testGetClassMethodKey() { assertEquals("className_methodName", ApplicationConfigCache.getClassMethodKey("className", "methodName")); }
public static HybridPartitionDataConsumeConstraint getOrDecideHybridPartitionDataConsumeConstraint( Configuration configuration, boolean enableSpeculativeExecution) { final HybridPartitionDataConsumeConstraint hybridPartitionDataConsumeConstraint = configuration .getOptional(JobManagerOptions.HYBRID_PARTITION_DATA_CONSUME_CONSTRAINT) .orElseGet( () -> { HybridPartitionDataConsumeConstraint defaultConstraint = enableSpeculativeExecution ? ONLY_FINISHED_PRODUCERS : UNFINISHED_PRODUCERS; LOG.info( "Set {} to {} as it is not configured", JobManagerOptions .HYBRID_PARTITION_DATA_CONSUME_CONSTRAINT .key(), defaultConstraint.name()); return defaultConstraint; }); if (enableSpeculativeExecution) { Preconditions.checkState( hybridPartitionDataConsumeConstraint != UNFINISHED_PRODUCERS, "For speculative execution, only supports consume finished partition now."); } return hybridPartitionDataConsumeConstraint; }
@Test void testOnlyConsumeFinishedPartitionWillSetForSpeculativeEnable() { HybridPartitionDataConsumeConstraint hybridPartitionDataConsumeConstraint = getOrDecideHybridPartitionDataConsumeConstraint(new Configuration(), true); assertThat(hybridPartitionDataConsumeConstraint.isOnlyConsumeFinishedPartition()).isTrue(); }
public void completeAllJoinFutures( Errors error ) { members.forEach((memberId, member) -> completeJoinFuture( member, new JoinGroupResponseData() .setMemberId(memberId) .setErrorCode(error.code()) )); }
@Test public void testCompleteAllJoinFutures() throws ExecutionException, InterruptedException { JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(); protocols.add(new JoinGroupRequestProtocol() .setName("roundrobin") .setMetadata(new byte[0])); List<ClassicGroupMember> memberList = new ArrayList<>(); for (int i = 0; i < 3; i++) { memberList.add(new ClassicGroupMember( memberId + i, Optional.empty(), clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, protocolType, protocols )); } List<CompletableFuture<JoinGroupResponseData>> joinGroupFutureList = new ArrayList<>(); for (int i = 0; i < 3; i++) { CompletableFuture<JoinGroupResponseData> future = new CompletableFuture<>(); group.add(memberList.get(i), future); joinGroupFutureList.add(future); } assertEquals(3, group.numAwaitingJoinResponse()); group.completeAllJoinFutures(Errors.REBALANCE_IN_PROGRESS); for (int i = 0; i < 3; i++) { assertEquals(Errors.REBALANCE_IN_PROGRESS.code(), joinGroupFutureList.get(i).get().errorCode()); assertEquals(memberId + i, joinGroupFutureList.get(i).get().memberId()); assertFalse(memberList.get(i).isAwaitingJoin()); } assertEquals(0, group.numAwaitingJoinResponse()); }
@Udf(description = "Returns a masked version of the input string. All characters except for the" + " last n will be replaced according to the default masking rules.") @SuppressWarnings("MethodMayBeStatic") // Invoked via reflection public String mask( @UdfParameter("input STRING to be masked") final String input, @UdfParameter("number of characters to keep unmasked at the end") final int numChars ) { return doMask(new Masker(), input, numChars); }
@Test public void shouldMaskOnlySpecifiedCharTypes() { final String result = udf.mask("AbCd#$123xy Z", 5, null, "q", null, "="); assertThat(result, is("AqCq==123xy Z")); }
@Config("resource-groups.config-db-url") public DbResourceGroupConfig setConfigDbUrl(String configUrl) { this.configUrl = configUrl; return this; }
@Test public void testExplicitPropertyMappings() { Map<String, String> properties = new ImmutableMap.Builder<String, String>() .put("resource-groups.config-db-url", "jdbc:mysql//localhost:3306/config?user=presto_admin") .build(); DbResourceGroupConfig expected = new DbResourceGroupConfig() .setConfigDbUrl("jdbc:mysql//localhost:3306/config?user=presto_admin"); assertFullMapping(properties, expected); }
@Override public List<ValidationMessage> validate(ValidationContext context) { return context.query().tokens().stream() .filter(this::isInvalidOperator) .map(token -> { final String errorMessage = String.format(Locale.ROOT, "Query contains invalid operator \"%s\". All AND / OR / NOT operators have to be written uppercase", token.image()); return ValidationMessage.builder(ValidationStatus.WARNING, ValidationType.INVALID_OPERATOR) .errorMessage(errorMessage) .relatedProperty(token.image()) .position(QueryPosition.from(token)) .build(); }).collect(Collectors.toList()); }
@Test void testInvalidOperatorLowercaseOr() { final ValidationContext context = TestValidationContext.create("foo:bar or") .build(); final List<ValidationMessage> messages = sut.validate(context); assertThat(messages.size()).isEqualTo(1); final ValidationMessage message = messages.iterator().next(); assertThat(message.validationType()).isEqualTo(ValidationType.INVALID_OPERATOR); assertThat(message.relatedProperty()).hasValue("or"); }
public Optional<Column> findValueColumn(final ColumnName columnName) { return findColumnMatching(withNamespace(VALUE).and(withName(columnName))); }
@Test public void shouldNotGetKeyColumnFromValue() { assertThat(SOME_SCHEMA.findValueColumn(K0), is(Optional.empty())); }
@Override public void deleteJournals(long deleteToJournalId) { List<Long> dbNames = bdbEnvironment.getDatabaseNamesWithPrefix(prefix); if (dbNames == null) { LOG.info("delete database names is null."); return; } StringBuilder msg = new StringBuilder("existing database names: "); for (long name : dbNames) { msg.append(name).append(" "); } msg.append(", deleteToJournalId is ").append(deleteToJournalId); LOG.info(msg.toString()); for (int i = 1; i < dbNames.size(); i++) { if (deleteToJournalId >= dbNames.get(i)) { long name = dbNames.get(i - 1); String dbName = getFullDatabaseName(name); LOG.info("delete database name {}", dbName); bdbEnvironment.removeDatabase(dbName); } else { LOG.info("database name {} is larger than deleteToJournalId {}, not delete", dbNames.get(i), deleteToJournalId); break; } } }
@Test public void testDeleteJournals(@Mocked BDBEnvironment environment) throws Exception { BDBJEJournal journal = new BDBJEJournal(environment); // failed to get database names; do nothing new Expectations(environment) { { environment.getDatabaseNamesWithPrefix(""); times = 1; result = null; } }; journal.deleteJournals(11); // 2. find journal and delete // current db (3, 23, 45) checkpoint is made on 44, should remove 3, 23 new Expectations(environment) { { environment.getDatabaseNamesWithPrefix(""); times = 1; result = Arrays.asList(3L, 23L, 45L); environment.removeDatabase("3"); times = 1; environment.removeDatabase("23"); times = 1; } }; journal.deleteJournals(45); journal.close(); // no db will closed }
public void setup(final Map<String, InternalTopicConfig> topicConfigs) { log.info("Starting to setup internal topics {}.", topicConfigs.keySet()); final long now = time.milliseconds(); final long deadline = now + retryTimeoutMs; final Map<String, Map<String, String>> streamsSideTopicConfigs = topicConfigs.values().stream() .collect(Collectors.toMap( InternalTopicConfig::name, topicConfig -> topicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention) )); final Set<String> createdTopics = new HashSet<>(); final Set<String> topicStillToCreate = new HashSet<>(topicConfigs.keySet()); while (!topicStillToCreate.isEmpty()) { final Set<NewTopic> newTopics = topicStillToCreate.stream() .map(topicName -> new NewTopic( topicName, topicConfigs.get(topicName).numberOfPartitions(), Optional.of(replicationFactor) ).configs(streamsSideTopicConfigs.get(topicName)) ).collect(Collectors.toSet()); log.info("Going to create internal topics: " + newTopics); final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics); processCreateTopicResults(createTopicsResult, topicStillToCreate, createdTopics, deadline); maybeSleep(Collections.singletonList(topicStillToCreate), deadline, "created"); } log.info("Completed setup of internal topics {}.", topicConfigs.keySet()); }
@Test public void shouldCreateTopics() throws Exception { final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1); final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 1); internalTopicManager.setup(mkMap( mkEntry(topic1, internalTopicConfig1), mkEntry(topic2, internalTopicConfig2) )); final Set<String> newlyCreatedTopics = mockAdminClient.listTopics().names().get(); assertThat(newlyCreatedTopics.size(), is(2)); assertThat(newlyCreatedTopics, hasItem(topic1)); assertThat(newlyCreatedTopics, hasItem(topic2)); }
public void register(String id, SocketChannel socketChannel) throws IOException { ensureNotRegistered(id); registerChannel(id, socketChannel, SelectionKey.OP_READ); this.sensors.connectionCreated.record(); // Default to empty client information as the ApiVersionsRequest is not // mandatory. In this case, we still want to account for the connection. ChannelMetadataRegistry metadataRegistry = this.channel(id).channelMetadataRegistry(); if (metadataRegistry.clientInformation() == null) metadataRegistry.registerClientInformation(ClientInformation.EMPTY); }
@Test public void testInboundConnectionsCountInConnectionCreationMetric() throws Exception { int conns = 5; try (ServerSocketChannel ss = ServerSocketChannel.open()) { ss.bind(new InetSocketAddress(0)); InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress(); for (int i = 0; i < conns; i++) { Thread sender = createSender(serverAddress, randomPayload(1)); sender.start(); try (SocketChannel channel = ss.accept()) { channel.configureBlocking(false); selector.register(Integer.toString(i), channel); } finally { sender.join(); } } } assertEquals((double) conns, getMetric("connection-creation-total").metricValue()); assertEquals((double) conns, getMetric("connection-count").metricValue()); }
@Override public void execute(GraphModel graphModel) { Graph graph = graphModel.getGraphVisible(); execute(graph); }
@Test
public void testDirectedStarOutGraphDegree() {
    GraphModel graphModel = GraphModel.Factory.newInstance();
    DirectedGraph directedGraph = graphModel.getDirectedGraph();
    Node firstNode = graphModel.factory().newNode("0");
    directedGraph.addNode(firstNode);
    for (int i = 1; i <= 5; i++) {
        Node currentNode = graphModel.factory().newNode(String.valueOf(i));
        directedGraph.addNode(currentNode);
        Edge currentEdge = graphModel.factory().newEdge(firstNode, currentNode);
        directedGraph.addEdge(currentEdge);
    }
    DirectedGraph graph = graphModel.getDirectedGraph();
    Node n1 = graph.getNode("0");
    Node n3 = graph.getNode("2");
    WeightedDegree d = new WeightedDegree();
    d.execute(graph);
    double inDegree1 = (Double) n1.getAttribute(WeightedDegree.WINDEGREE);
    double outDegree1 = (Double) n1.getAttribute(WeightedDegree.WOUTDEGREE);
    double degree3 = (Double) n3.getAttribute(WeightedDegree.WDEGREE);
    assertEquals(inDegree1, 0.0);
    assertEquals(outDegree1, 5.0);
    assertEquals(degree3, 1.0);
}
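A short follow-up sketch: WeightedDegree sums edge weights rather than counting edges, so raising one edge's weight should raise the weighted out-degree by the same amount. Edge.setWeight(double) and Graph.getEdge(Node, Node) are assumed from the Gephi graph API.

graph.getEdge(n1, n3).setWeight(3.0);
d.execute(graph);
double newOutDegree = (Double) n1.getAttribute(WeightedDegree.WOUTDEGREE);
assertEquals(newOutDegree, 7.0); // 3.0 plus four edges of weight 1.0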
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) { final String storeName = storeQueryParameters.storeName(); final QueryableStoreType<T> queryableStoreType = storeQueryParameters.queryableStoreType(); final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType); if (!globalStore.isEmpty()) { return queryableStoreType.create(globalStoreProvider, storeName); } return queryableStoreType.create( new WrappingStoreProvider(storeProviders.values(), storeQueryParameters), storeName ); }
@Test public void shouldThrowExceptionWhenLookingForKVStoreWithDifferentType() { assertThrows(InvalidStateStoreException.class, () -> storeProvider.getStore(StoreQueryParameters.fromNameAndType(keyValueStore, QueryableStoreTypes.windowStore())).fetch("1", System.currentTimeMillis())); }
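The matching-type lookup, for contrast: a sketch reusing the fixture above, assuming it registered keyValueStore as a key-value store, which the failing window-store cast in the previous test implies.

final ReadOnlyKeyValueStore<String, String> store = storeProvider.getStore(
    StoreQueryParameters.fromNameAndType(keyValueStore, QueryableStoreTypes.<String, String>keyValueStore()));
assertThat(store, notNullValue());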
public CompletableFuture<Acknowledge> stopWithSavepoint( AsynchronousJobOperationKey operationKey, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, Time timeout) { return registerOperationIdempotently( operationKey, () -> stopWithSavepointFunction.apply( operationKey.getJobId(), targetDirectory, formatType, savepointMode, timeout)); }
@Test public void stopWithSavepointRepeatedly() throws ExecutionException, InterruptedException { CompletableFuture<Acknowledge> firstAcknowledge = handler.stopWithSavepoint( operationKey, targetDirectory, SavepointFormatType.CANONICAL, TriggerSavepointMode.TERMINATE_WITH_SAVEPOINT, TIMEOUT); CompletableFuture<Acknowledge> secondAcknowledge = handler.stopWithSavepoint( operationKey, targetDirectory, SavepointFormatType.CANONICAL, TriggerSavepointMode.TERMINATE_WITH_SAVEPOINT, TIMEOUT); assertThat(stopWithSavepointFunction.getNumberOfInvocations(), is(1)); assertThat( stopWithSavepointFunction.getInvocationParameters().get(0), is( new Tuple4<>( jobID, targetDirectory, SavepointFormatType.CANONICAL, TriggerSavepointMode.TERMINATE_WITH_SAVEPOINT))); assertThat(firstAcknowledge.get(), is(Acknowledge.get())); assertThat(secondAcknowledge.get(), is(Acknowledge.get())); }
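A hedged extension of the same test: idempotency is keyed on the operation key, so a fresh AsynchronousJobOperationKey for the same job should reach the underlying function again. AsynchronousJobOperationKey.of(new TriggerId(), jobID) is assumed from the Flink REST handler API.

handler.stopWithSavepoint(
    AsynchronousJobOperationKey.of(new TriggerId(), jobID),
    targetDirectory,
    SavepointFormatType.CANONICAL,
    TriggerSavepointMode.TERMINATE_WITH_SAVEPOINT,
    TIMEOUT);
assertThat(stopWithSavepointFunction.getNumberOfInvocations(), is(2));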
public static SqlPrimitiveType of(final String typeName) { switch (typeName.toUpperCase()) { case INT: return SqlPrimitiveType.of(SqlBaseType.INTEGER); case VARCHAR: return SqlPrimitiveType.of(SqlBaseType.STRING); default: try { final SqlBaseType sqlType = SqlBaseType.valueOf(typeName.toUpperCase()); return SqlPrimitiveType.of(sqlType); } catch (final IllegalArgumentException e) { throw new SchemaException("Unknown primitive type: " + typeName, e); } } }
@Test public void shouldReturnSqlType() { assertThat(SqlPrimitiveType.of(SqlBaseType.INTEGER).baseType(), is(SqlBaseType.INTEGER)); }
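The string overload above also deserves coverage; this sketch exercises the two aliases and the failure branch exactly as the focal method defines them.

assertThat(SqlPrimitiveType.of("int").baseType(), is(SqlBaseType.INTEGER));
assertThat(SqlPrimitiveType.of("VarChar").baseType(), is(SqlBaseType.STRING));
assertThrows(SchemaException.class, () -> SqlPrimitiveType.of("BOGUS"));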
public static String getShortName(String destinationName) { if (destinationName == null) { throw new IllegalArgumentException("destinationName is null"); } if (destinationName.startsWith("queue:")) { return destinationName.substring(6); } else if (destinationName.startsWith("topic:")) { return destinationName.substring(6); } else { return destinationName; } }
@Test
public void testGetShortName() {
    assertEquals("foo.DestinationNameParserTest", DestinationNameParser.getShortName("topic:foo.DestinationNameParserTest"));
    assertFalse(DestinationNameParser.isTopic("queue:bar.DestinationNameParserTest"), "queue-prefixed name must not be a topic");
    assertFalse(DestinationNameParser.isTopic("bar"), "unprefixed name must not be a topic");
}
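Additional getShortName() cases, grounded directly in the focal method above: both prefixes are six characters long, unprefixed names pass through unchanged, and null is rejected.

assertEquals("bar", DestinationNameParser.getShortName("queue:bar"));
assertEquals("plain", DestinationNameParser.getShortName("plain"));
assertThrows(IllegalArgumentException.class, () -> DestinationNameParser.getShortName(null));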
public static IRubyObject deep(final Ruby runtime, final Object input) { if (input == null) { return runtime.getNil(); } final Class<?> cls = input.getClass(); final Rubyfier.Converter converter = CONVERTER_MAP.get(cls); if (converter != null) { return converter.convert(runtime, input); } return fallbackConvert(runtime, input, cls); }
@Test
public void testDeepListWithString() throws Exception {
    List<String> data = new ArrayList<>();
    data.add("foo");
    @SuppressWarnings("rawtypes")
    RubyArray rubyArray = (RubyArray) Rubyfier.deep(RubyUtil.RUBY, data);
    // toJavaArray does not convert inner elements to Java types \o/
    assertEquals(RubyString.class, rubyArray.toJavaArray()[0].getClass());
    assertEquals("foo", rubyArray.toJavaArray()[0].toString());
}
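A hedged numeric companion: via the converter-map path in the focal method, a Long element should come back as a Ruby integer. convertToInteger() on the resulting element is standard JRuby API, not shown in the source.

List<Object> numbers = new ArrayList<>();
numbers.add(1L);
@SuppressWarnings("rawtypes")
RubyArray converted = (RubyArray) Rubyfier.deep(RubyUtil.RUBY, numbers);
assertEquals(1L, converted.toJavaArray()[0].convertToInteger().getLongValue());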
@Override public void subscribeService(Service service, Subscriber subscriber, String clientId) { Service singleton = ServiceManager.getInstance().getSingletonIfExist(service).orElse(service); Client client = clientManager.getClient(clientId); checkClientIsLegal(client, clientId); client.addServiceSubscriber(singleton, subscriber); client.setLastUpdatedTime(); NotifyCenter.publishEvent(new ClientOperationEvent.ClientSubscribeServiceEvent(singleton, clientId)); }
@Test
void testSubscribeWhenClientPersistent() {
    assertThrows(NacosRuntimeException.class, () -> {
        Client persistentClient = new IpPortBasedClient(ipPortBasedClientId, false);
        when(clientManager.getClient(anyString())).thenReturn(persistentClient);
        // Expected exception: a persistent client must not go through the ephemeral subscribe path
        ephemeralClientOperationServiceImpl.subscribeService(service, subscriber, ipPortBasedClientId);
    });
}
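The happy path, sketched with the same mocks. Assumptions: the boolean flag on IpPortBasedClient marks the client ephemeral, and Client.getAllSubscribeService() exposes registered subscriptions.

Client ephemeralClient = new IpPortBasedClient(ipPortBasedClientId, true);
when(clientManager.getClient(anyString())).thenReturn(ephemeralClient);
ephemeralClientOperationServiceImpl.subscribeService(service, subscriber, ipPortBasedClientId);
assertTrue(ephemeralClient.getAllSubscribeService().contains(service));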
@Override public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException { if(proxy.isSupported(source, target)) { return proxy.copy(source, target, status, callback, listener); } // Copy between encrypted and unencrypted data room if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) { // File key must be set for new upload status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey()); } final Path result = copy.copy(source, target, status, callback, listener); nodeid.cache(target, null); return result.withAttributes(new SDSAttributesFinderFeature(session, nodeid).find(result)); }
@Test
public void testCopyWithRenameToExistingFile() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path folder = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    new SDSDirectoryFeature(session, nodeid).mkdir(folder, new TransferStatus());
    final Path test = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new SDSTouchFeature(session, nodeid).touch(test, new TransferStatus());
    final Path copy = new Path(folder, test.getName(), EnumSet.of(Path.Type.file));
    final SDSCopyFeature proxy = new SDSCopyFeature(session, nodeid);
    final SDSDelegatingCopyFeature feature = new SDSDelegatingCopyFeature(session, nodeid, proxy);
    assertFalse(proxy.isSupported(test, copy));
    assertTrue(feature.isSupported(test, copy));
    assertNotNull(feature.copy(test, copy, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener()).attributes().getVersionId());
    final Find find = new DefaultFindFeature(session);
    final AttributedList<Path> files = new SDSListService(session, nodeid).list(folder, new DisabledListProgressListener());
    assertFalse(files.isEmpty());
    assertTrue(find.find(copy));
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
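A minimal sketch of the encrypted-target branch in SDSDelegatingCopyFeature.copy() above: callers never supply the file key themselves; the feature generates one on the TransferStatus before the client-side copy runs. The status object here is purely illustrative.

final TransferStatus toEncryptedRoom = new TransferStatus();
toEncryptedRoom.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());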