Dataset columns: focal_method (string, lengths 13 to 60.9k) and test_case (string, lengths 25 to 109k).
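Each row pairs a focal method with the unit test that exercises it. As a minimal sketch (not part of the dataset itself), one way to model a single row in Java is shown below; the type name, field names, and helper method are assumptions that simply mirror the two columns above.

// Minimal sketch: a plain value type mirroring the two dataset columns.
// The record name and the isComplete() helper are hypothetical, not part of the dataset.
public record FocalMethodTestPair(String focalMethod, String testCase) {

    // Convenience check: both fields are expected to hold non-empty Java source snippets.
    public boolean isComplete() {
        return focalMethod != null && !focalMethod.isBlank()
                && testCase != null && !testCase.isBlank();
    }
}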
@Override public Collection<ComputeNode> getAllWorkers() { if (hasComputeNode) { return availableID2ComputeNode.values(); } else { return ImmutableList.copyOf(availableID2Backend.values()); } }
@Test public void testGetWorkersPreferringComputeNode() { DefaultWorkerProvider workerProvider; workerProvider = new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, availableId2ComputeNode, true); assertThat(workerProvider.getAllWorkers()) .containsOnlyOnceElementsOf(availableId2ComputeNode.values()); workerProvider = new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, ImmutableMap.of(), true); assertThat(workerProvider.getAllWorkers()) .containsOnlyOnceElementsOf(availableId2Backend.values()); workerProvider = new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, availableId2ComputeNode, false); assertThat(workerProvider.getAllWorkers()) .containsOnlyOnceElementsOf(availableId2ComputeNode.values()); }
public String anonymize(final ParseTree tree) { return build(tree); }
@Test public void shouldAnonymizeCreateTableCorrectly() { final String output = anon.anonymize( "CREATE TABLE my_table (profileId VARCHAR, latitude DOUBLE, longitude DOUBLE)\n" + "WITH (kafka_topic='locations', value_format='json', partitions=1);"); Approvals.verify(output); }
@Override public Result analyze() { checkState(!analyzed); analyzed = true; Result result = analyzeIsFinal(); if (result != null && result != Result.OK) return result; return analyzeIsStandard(); }
@Test public void nonStandardDust() { Transaction standardTx = new Transaction(); standardTx.addInput(MAINNET.getGenesisBlock().getTransactions().get(0).getOutput(0)); standardTx.addOutput(COIN, key1); assertEquals(RiskAnalysis.Result.OK, DefaultRiskAnalysis.FACTORY.create(wallet, standardTx, NO_DEPS).analyze()); Transaction dustTx = new Transaction(); dustTx.addInput(MAINNET.getGenesisBlock().getTransactions().get(0).getOutput(0)); dustTx.addOutput(Coin.SATOSHI, key1); // 1 Satoshi assertEquals(RiskAnalysis.Result.NON_STANDARD, DefaultRiskAnalysis.FACTORY.create(wallet, dustTx, NO_DEPS).analyze()); Transaction edgeCaseTx = new Transaction(); edgeCaseTx.addInput(MAINNET.getGenesisBlock().getTransactions().get(0).getOutput(0)); Coin dustThreshold = new TransactionOutput(null, Coin.COIN, key1).getMinNonDustValue(); edgeCaseTx.addOutput(dustThreshold, key1); assertEquals(RiskAnalysis.Result.OK, DefaultRiskAnalysis.FACTORY.create(wallet, edgeCaseTx, NO_DEPS).analyze()); }
@Override public BytesStreamMessage getStreamMessage(int index) { return _messages.get(index); }
@Test public void testMessageBatchNoStitching() { PulsarConfig config = mock(PulsarConfig.class); when(config.getEnableKeyValueStitch()).thenReturn(false); List<BytesStreamMessage> streamMessages = List.of(PulsarUtils.buildPulsarStreamMessage(_message, config)); PulsarMessageBatch messageBatch = new PulsarMessageBatch(streamMessages, mock(MessageIdStreamOffset.class), false); byte[] valueBytes = messageBatch.getStreamMessage(0).getValue(); assertEquals(valueBytes, _expectedValueBytes); }
public void startAsync() { try { udfLoader.load(); ProcessingLogServerUtils.maybeCreateProcessingLogTopic( serviceContext.getTopicClient(), processingLogConfig, ksqlConfig); if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) { log.warn("processing log auto-create is enabled, but this is not supported " + "for headless mode."); } rocksDBConfigSetterHandler.accept(ksqlConfig); processesQueryFile(readQueriesFile(queriesFile)); showWelcomeMessage(); final Properties properties = new Properties(); ksqlConfig.originals().forEach((key, value) -> { if (nonNull(value)) { properties.put(key, value.toString()); } }); versionChecker.start(KsqlModuleType.SERVER, properties); } catch (final Exception e) { log.error("Failed to start KSQL Server with query file: " + queriesFile, e); throw e; } }
@Test public void shouldNotThrowIfNullValueInKsqlConfig() { standaloneExecutor = new StandaloneExecutor( serviceContext, processingLogConfig, new KsqlConfig(Collections.singletonMap("test", null)), ksqlEngine, queriesFile.toString(), udfLoader, false, versionChecker, injectorFactory, new MetricCollectors(), rocksDBConfigSetterHandler ); // When: standaloneExecutor.startAsync(); }
public Type getType() { return token.getType(); }
@Test public void testTypeDescriptorNested() throws Exception { TypeRememberer<String> rememberer = new TypeRememberer<String>() {}; assertEquals(new TypeToken<String>() {}.getType(), rememberer.descriptorByClass.getType()); assertEquals(new TypeToken<String>() {}.getType(), rememberer.descriptorByInstance.getType()); TypeRememberer<List<String>> genericRememberer = new TypeRememberer<List<String>>() {}; assertEquals( new TypeToken<List<String>>() {}.getType(), genericRememberer.descriptorByClass.getType()); assertEquals( new TypeToken<List<String>>() {}.getType(), genericRememberer.descriptorByInstance.getType()); }
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception { // Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL try { final Address address = getAddressFromRpcURL(rpcURL); if (address.host().isDefined() && address.port().isDefined()) { return new InetSocketAddress(address.host().get(), (int) address.port().get()); } else { throw new MalformedURLException(); } } catch (MalformedURLException e) { throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL); } }
@Test void getHostFromRpcURLHandlesIPv6Addresses() throws Exception { final String ipv6Address = "2001:db8:10:11:12:ff00:42:8329"; final int port = 1234; final InetSocketAddress address = new InetSocketAddress(ipv6Address, port); final String url = "pekko://flink@[" + ipv6Address + "]:" + port + "/user/jobmanager"; final InetSocketAddress result = PekkoUtils.getInetSocketAddressFromRpcURL(url); assertThat(result).isEqualTo(address); }
public static RuleDescriptionSectionContextDto of(String key, String displayName) { return new RuleDescriptionSectionContextDto(key, displayName); }
@Test void check_of_instantiate_object() { RuleDescriptionSectionContextDto context = RuleDescriptionSectionContextDto.of(CONTEXT_KEY, CONTEXT_DISPLAY_NAME); assertThat(context).extracting(RuleDescriptionSectionContextDto::getKey, RuleDescriptionSectionContextDto::getDisplayName).contains(CONTEXT_KEY, CONTEXT_DISPLAY_NAME); }
@VisibleForTesting Set<ConsumedPartitionGroup> getCrossRegionConsumedPartitionGroups() { return Collections.unmodifiableSet(crossRegionConsumedPartitionGroups); }
@Test void testComputingCrossRegionConsumedPartitionGroupsCorrectly() throws Exception { final JobVertex v1 = createJobVertex("v1", 4); final JobVertex v2 = createJobVertex("v2", 3); final JobVertex v3 = createJobVertex("v3", 2); v2.connectNewDataSetAsInput( v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); v3.connectNewDataSetAsInput( v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING); v3.connectNewDataSetAsInput( v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED); final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3)); final JobGraph jobGraph = JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build(); final ExecutionGraph executionGraph = TestingDefaultExecutionGraphBuilder.newBuilder() .setJobGraph(jobGraph) .build(EXECUTOR_RESOURCE.getExecutor()); final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology(); final PipelinedRegionSchedulingStrategy schedulingStrategy = new PipelinedRegionSchedulingStrategy( testingSchedulerOperation, schedulingTopology); final Set<ConsumedPartitionGroup> crossRegionConsumedPartitionGroups = schedulingStrategy.getCrossRegionConsumedPartitionGroups(); assertThat(crossRegionConsumedPartitionGroups).hasSize(1); final ConsumedPartitionGroup expected = executionGraph .getJobVertex(v3.getID()) .getTaskVertices()[1] .getAllConsumedPartitionGroups() .get(0); assertThat(crossRegionConsumedPartitionGroups).contains(expected); }
public static String format(Integer id) { return format(id, " "); }
@Test public void testFormat() { assertEquals(AreaUtils.format(110105), "北京 北京市 朝阳区"); assertEquals(AreaUtils.format(1), "中国"); assertEquals(AreaUtils.format(2), "蒙古"); }
@Override public boolean accept(ProcessingEnvironment processingEnv, TypeMirror type) { return isPrimitiveType(type); }
@Test void testAccept() { assertTrue(builder.accept(processingEnv, zField.asType())); assertTrue(builder.accept(processingEnv, bField.asType())); assertTrue(builder.accept(processingEnv, cField.asType())); assertTrue(builder.accept(processingEnv, sField.asType())); assertTrue(builder.accept(processingEnv, iField.asType())); assertTrue(builder.accept(processingEnv, lField.asType())); assertTrue(builder.accept(processingEnv, fField.asType())); assertTrue(builder.accept(processingEnv, dField.asType())); }
@Override public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException { SQLStatement dalStatement = sqlStatementContext.getSqlStatement(); if (dalStatement instanceof MySQLShowDatabasesStatement) { return new LocalDataMergedResult(Collections.singleton(new LocalDataQueryResultRow(databaseName))); } ShardingSphereSchema schema = getSchema(sqlStatementContext, database); if (dalStatement instanceof MySQLShowTablesStatement) { return new LogicTablesMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } if (dalStatement instanceof MySQLShowTableStatusStatement) { return new ShowTableStatusMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } if (dalStatement instanceof MySQLShowIndexStatement) { return new ShowIndexMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } if (dalStatement instanceof MySQLShowCreateTableStatement) { return new ShowCreateTableMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } return new TransparentMergedResult(queryResults.get(0)); }
@Test void assertMergeForDescribeStatement() throws SQLException { DALStatement dalStatement = new MySQLExplainStatement(); SQLStatementContext sqlStatementContext = mockSQLStatementContext(dalStatement); ShardingDALResultMerger resultMerger = new ShardingDALResultMerger(DefaultDatabase.LOGIC_NAME, mock(ShardingRule.class)); ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME); assertThat(resultMerger.merge(queryResults, sqlStatementContext, database, mock(ConnectionContext.class)), instanceOf(TransparentMergedResult.class)); }
public static CronPattern of(String pattern) { return new CronPattern(pattern); }
@Test public void lastTest() { // any time on the last day of each month CronPattern pattern = CronPattern.of("* * * L * ?"); assertMatch(pattern, "2017-07-31 04:20:00"); assertMatch(pattern, "2017-02-28 04:20:00"); // any time in the last month of the year pattern = CronPattern.of("* * * * L ?"); assertMatch(pattern, "2017-12-02 04:20:00"); // the last moment of any day pattern = CronPattern.of("L L L * * ?"); assertMatch(pattern, "2017-12-02 23:59:59"); }
@Override public Result start( WorkflowSummary workflowSummary, Step step, StepRuntimeSummary runtimeSummary) { try { Artifact artifact = createArtifact(workflowSummary, runtimeSummary); return new Result( State.DONE, Collections.singletonMap(artifact.getType().key(), artifact), Collections.emptyList()); } catch (Exception e) { LOG.error( "Failed to start foreach workflow step runtime for {}{}", workflowSummary.getIdentity(), runtimeSummary.getIdentity(), e); return new Result( State.FATAL_ERROR, Collections.emptyMap(), Collections.singletonList( TimelineDetailsEvent.from( Details.create( e, false, "Failed to start foreach workflow step runtime with an error")))); } }
@Test public void testForeachCreateArtifactWithRestartFromSpecificNotAlongRestartPath() { int restartIterationId = 2; Map<String, Object> evaluatedResult = new LinkedHashMap<>(); evaluatedResult.put("loop_param", new long[] {1, 2, 3}); Map<String, Parameter> params = new LinkedHashMap<>(); params.put( "loop_params", MapParameter.builder() .evaluatedResult(evaluatedResult) .evaluatedTime(System.currentTimeMillis()) .build()); StepRetry stepRetry = new StepRetry(); stepRetry.setRetryable(false); RestartConfig restartConfig = RestartConfig.builder() .addRestartNode("maestro-foreach-wf", restartIterationId, "job2") .addRestartNode("wf", 1, "job1") .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC) .build(); StepRuntimeSummary runtimeSummary = StepRuntimeSummary.builder() .stepId(STEP_ID) .stepAttemptId(STEP_ATTEMPT_ID) .artifacts(new HashMap<>()) .params(params) .stepRetry(stepRetry) .restartConfig(restartConfig) .build(); ForeachArtifact prevArtifact = new ForeachArtifact(); prevArtifact.setForeachWorkflowId("maestro-foreach-wf"); prevArtifact.setNextLoopIndex(1); prevArtifact.setForeachOverview(new ForeachStepOverview()); prevArtifact.getForeachOverview().addOne(1, WorkflowInstance.Status.FAILED, null); prevArtifact.getForeachOverview().refreshDetail(); Mockito.when(stepInstanceDao.getLatestForeachArtifact(anyString(), anyLong(), anyString())) .thenReturn(prevArtifact); StepRuntime.Result res = foreachStepRuntime.start(workflowSummary, null, runtimeSummary); ForeachArtifact artifact = res.getArtifacts().get(Type.FOREACH.key()).asForeach(); assertEquals(RunPolicy.RESTART_FROM_SPECIFIC, artifact.getRunPolicy()); assertNull(artifact.getPendingAction()); }
@Override public void beforeJob(JobExecution jobExecution) { LOG.debug("sending before job execution event [{}]...", jobExecution); producerTemplate.sendBodyAndHeader(endpointUri, jobExecution, EventType.HEADER_KEY, EventType.BEFORE.name()); LOG.debug("sent before job execution event"); }
@Test public void shouldSetBeforeJobEventHeader() throws Exception { // When jobExecutionListener.beforeJob(jobExecution); // Then Exchange beforeJobEvent = consumer().receive("seda:eventQueue"); assertEquals(CamelJobExecutionListener.EventType.BEFORE.name(), beforeJobEvent.getIn().getHeader(CamelJobExecutionListener.EventType.HEADER_KEY)); }
@Override public void run() { Optional<RoutingTable> table = tableSupplier.get(); table.ifPresent(this::reportHealth); reportConfigAge(); }
@Test public void config_age_metric() throws Exception { reporter.run(); // No files exist assertEquals(0D, getMetric(NginxMetricsReporter.CONFIG_AGE_METRIC), Double.MIN_VALUE); // Only temporary file exists Path configRoot = fileSystem.getPath("/opt/vespa/var/vespa-hosted/routing/"); Path tempFile = configRoot.resolve("nginxl4.conf.tmp"); createFile(tempFile, Instant.ofEpochSecond(123)); reporter.run(); assertEquals(123D, getMetric(NginxMetricsReporter.CONFIG_AGE_METRIC), Double.MIN_VALUE); // Only main file exists Files.delete(tempFile); createFile(configRoot.resolve("nginxl4.conf"), Instant.ofEpochSecond(456)); reporter.run(); assertEquals(0D, getMetric(NginxMetricsReporter.CONFIG_AGE_METRIC), Double.MIN_VALUE); // Both files exist createFile(tempFile, Instant.ofEpochSecond(123)); reporter.run(); assertEquals(333D, getMetric(NginxMetricsReporter.CONFIG_AGE_METRIC), Double.MIN_VALUE); }
public List<ReceivedMessage> fetchMessages() { List<ReceivedMessage> messageList = new ArrayList<>(); try (SubscriberStub subscriber = pubsubQueueClient.getSubscriber(subscriberStubSettings)) { String subscriptionName = ProjectSubscriptionName.format(googleProjectId, pubsubSubscriptionId); long startTime = System.currentTimeMillis(); long unAckedMessages = pubsubQueueClient.getNumUnAckedMessages(this.pubsubSubscriptionId); LOG.info("Found unacked messages " + unAckedMessages); while (messageList.size() < unAckedMessages && messageList.size() < maxMessagesPerSync && ((System.currentTimeMillis() - startTime) < (maxFetchTimePerSyncSecs * 1000))) { PullResponse pullResponse = pubsubQueueClient.makePullRequest(subscriber, subscriptionName, batchSize); messageList.addAll(pullResponse.getReceivedMessagesList()); } return messageList; } catch (Exception e) { throw new HoodieException("Error when fetching metadata", e); } }
@Test public void testFetchMessagesZeroTimeout() throws IOException { doNothing().when(mockSubscriber).close(); when(mockPubsubQueueClient.getSubscriber(any())).thenReturn(mockSubscriber); when(mockPubsubQueueClient.getNumUnAckedMessages(SUBSCRIPTION_ID)).thenReturn(100L); PubsubMessagesFetcher fetcher = new PubsubMessagesFetcher( PROJECT_ID, SUBSCRIPTION_ID, SMALL_BATCH_SIZE, MAX_MESSAGES_IN_REQUEST, 0, mockPubsubQueueClient ); List<ReceivedMessage> messages = fetcher.fetchMessages(); assertEquals(0, messages.size()); }
public void ensureActiveGroup() { while (!ensureActiveGroup(time.timer(Long.MAX_VALUE))) { log.warn("still waiting to ensure active group"); } }
@Test public void testWakeupAfterJoinGroupSent() throws Exception { setupCoordinator(); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest && invocations == 1) // simulate wakeup before the request returns throw new WakeupException(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException ignored) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public boolean setCodeVariants(DefaultIssue issue, Set<String> currentCodeVariants, IssueChangeContext context) { Set<String> newCodeVariants = getNewCodeVariants(issue); if (!currentCodeVariants.equals(newCodeVariants)) { issue.setFieldChange(context, CODE_VARIANTS, currentCodeVariants.isEmpty() ? null : CHANGELOG_LIST_JOINER.join(currentCodeVariants), newCodeVariants.isEmpty() ? null : CHANGELOG_LIST_JOINER.join(newCodeVariants)); issue.setCodeVariants(newCodeVariants); issue.setUpdateDate(context.date()); issue.setChanged(true); issue.setSendNotifications(true); return true; } return false; }
@Test void setCodeVariants_whenCodeVariantAdded_shouldBeUpdated() { Set<String> currentCodeVariants = new HashSet<>(Arrays.asList("linux")); Set<String> newCodeVariants = new HashSet<>(Arrays.asList("linux", "windows")); issue.setCodeVariants(newCodeVariants); boolean updated = underTest.setCodeVariants(issue, currentCodeVariants, context); assertThat(updated).isTrue(); assertThat(issue.codeVariants()).contains("linux", "windows"); FieldDiffs.Diff diff = issue.currentChange().get("code_variants"); assertThat(diff.oldValue()).isEqualTo("linux"); assertThat(diff.newValue()).isEqualTo("linux windows"); assertThat(issue.mustSendNotifications()).isTrue(); }
public WebServerResponse getWebserverUrls() { return new WebServerResponse("OK", List.of(new WebService("Mijn DigiD", protocol + "://" + host + "/authn_app"))); }
@Test void getWebServerUrlsTest() { ConfigService configService = new ConfigService("http", "SSSSSSSSSSSSSSS", sharedServiceClient, switchService); WebServerResponse webServerResponse = configService.getWebserverUrls(); Assertions.assertEquals("OK", webServerResponse.getStatus()); Assertions.assertEquals("Mijn DigiD", webServerResponse.getServices().get(0).getName()); Assertions.assertEquals("http://SSSSSSSSSSSSSSS/authn_app", webServerResponse.getServices().get(0).getUrl()); }
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) { return decoder.decodeFunctionResult(rawInput, outputParameters); }
@Test public void testDecodeMultipleDynamicStruct() { String rawInput = "0x00000000000000000000000000000000000000000000000000000000000000a0" + "0000000000000000000000000000000000000000000000000000000000000001" + "000000000000000000000000000000000000000000000000000000000000000a" + "0000000000000000000000000000000000000000000000000000000000000002" + "000000000000000000000000000000000000000000000000000000000000000b" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000002" + "6964000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000004" + "6e616d6500000000000000000000000000000000000000000000000000000000"; assertEquals( FunctionReturnDecoder.decode( rawInput, AbiV2TestFixture.getFooBarBarFunction.getOutputParameters()), Arrays.asList( new AbiV2TestFixture.Foo("id", "name"), new AbiV2TestFixture.Bar(BigInteger.ONE, BigInteger.TEN), new AbiV2TestFixture.Bar(BigInteger.valueOf(2), BigInteger.valueOf(11)))); }
@Override public void onRstStreamRead(ChannelHandlerContext ctx, int streamId, long errorCode) throws Http2Exception { long currentNano = System.nanoTime(); if (currentNano - lastRstFrameNano >= nanosPerWindow) { lastRstFrameNano = currentNano; receivedRstInWindow = 1; } else { receivedRstInWindow++; if (receivedRstInWindow > maxRstFramesPerWindow) { logger.debug("{} Maximum number {} of RST frames reached within {} seconds, " + "closing connection with {} error", ctx.channel(), maxRstFramesPerWindow, TimeUnit.NANOSECONDS.toSeconds(nanosPerWindow), RST_FRAME_RATE_EXCEEDED.error(), RST_FRAME_RATE_EXCEEDED); throw RST_FRAME_RATE_EXCEEDED; } } super.onRstStreamRead(ctx, streamId, errorCode); }
@Test public void testRstFrames() throws Exception { listener = new Http2MaxRstFrameListener(frameListener, 1, 1); listener.onRstStreamRead(ctx, 1, Http2Error.STREAM_CLOSED.code()); Thread.sleep(1100); listener.onRstStreamRead(ctx, 1, Http2Error.STREAM_CLOSED.code()); verify(frameListener, times(2)).onRstStreamRead(eq(ctx), anyInt(), eq(Http2Error.STREAM_CLOSED.code())); }
@Override public boolean syncData(DistroData data, String targetServer) { if (isNoExistTarget(targetServer)) { return true; } DistroDataRequest request = new DistroDataRequest(data, data.getType()); Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { Loggers.DISTRO .warn("[DISTRO] Cancel distro sync caused by target server {} unhealthy, key: {}", targetServer, data.getDistroKey()); return false; } try { Response response = clusterRpcClientProxy.sendRequest(member, request); return checkResponse(response); } catch (NacosException e) { Loggers.DISTRO.error("[DISTRO-FAILED] Sync distro data failed! key: {}", data.getDistroKey(), e); } return false; }
@Test void testSyncDataForMemberUnhealthy() throws NacosException { when(memberManager.hasMember(member.getAddress())).thenReturn(true); when(memberManager.find(member.getAddress())).thenReturn(member); assertFalse(transportAgent.syncData(new DistroData(), member.getAddress())); verify(clusterRpcClientProxy, never()).sendRequest(any(Member.class), any()); }
@Override public VersionManager getVersionManager() { return original.getVersionManager(); }
@Test public void getVersionManager() { assertEquals(pluginManager.getVersionManager(), wrappedPluginManager.getVersionManager()); }
public String getExternalCacheDirectoryPath() { return this.context != null ? absPath(this.context.getExternalCacheDir()) : ""; }
@Test public void getExternalCacheDirectoryPathIsNotEmpty() { assertThat(contextUtil.getExternalCacheDirectoryPath(), containsString("/external-cache")); }
List<TaskDirectory> listAllTaskDirectories() { return listTaskDirectories(pathname -> pathname.isDirectory() && TASK_DIR_PATH_NAME.matcher(pathname.getName()).matches()); }
@Test public void shouldReturnEmptyArrayIfStateDirDoesntExist() throws IOException { cleanup(); assertFalse(stateDir.exists()); assertTrue(directory.listAllTaskDirectories().isEmpty()); }
public void init(String keyId, String applicationKey, String exportService) throws BackblazeCredentialsException, IOException { // Fetch all the available buckets and use that to find which region the user is in ListBucketsResponse listBucketsResponse = null; String userRegion = null; // The Key ID starts with the region identifier number, so reorder the regions such that // the first region is most likely the user's region String regionId = keyId.substring(0, 3); BACKBLAZE_REGIONS.sort( (String region1, String region2) -> { if (region1.endsWith(regionId)) { return -1; } return 0; }); Throwable s3Exception = null; for (String region : BACKBLAZE_REGIONS) { try { s3Client = backblazeS3ClientFactory.createS3Client(keyId, applicationKey, region); listBucketsResponse = s3Client.listBuckets(); userRegion = region; break; } catch (S3Exception e) { s3Exception = e; if (s3Client != null) { s3Client.close(); } if (e.statusCode() == 403) { monitor.debug(() -> String.format("User is not in region %s", region)); } } } if (listBucketsResponse == null || userRegion == null) { throw new BackblazeCredentialsException( "User's credentials or permissions are not valid for any regions available", s3Exception); } bucketName = getOrCreateBucket(s3Client, listBucketsResponse, userRegion, exportService); }
@Test public void testInitListBucketException() throws BackblazeCredentialsException, IOException { when(s3Client.listBuckets()).thenThrow(S3Exception.builder().statusCode(403).build()); BackblazeDataTransferClient client = createDefaultClient(); assertThrows(BackblazeCredentialsException.class, () -> { client.init(KEY_ID, APP_KEY, EXPORT_SERVICE); }); verify(s3Client, atLeast(1)).close(); verify(monitor, atLeast(1)).debug(any()); }
@Override public void onResponse(Call call, okhttp3.Response okHttpResponse) { try { final Response response = OkHttpHttpClient.convertResponse(okHttpResponse); try { @SuppressWarnings("unchecked") final T t = converter == null ? (T) response : converter.convert(response); okHttpFuture.setResult(t); if (callback != null) { callback.onCompleted(t); } } catch (IOException | RuntimeException e) { okHttpFuture.setException(e); if (callback != null) { callback.onThrowable(e); } } } finally { okHttpFuture.finish(); } }
@Test public void shouldReportOAuthException() { handler = new OAuthAsyncCompletionHandler<>(callback, OAUTH_EXCEPTION_RESPONSE_CONVERTER, future); call.enqueue(handler); final Request request = new Request.Builder().url("http://localhost/").build(); final okhttp3.Response response = new okhttp3.Response.Builder() .request(request) .protocol(Protocol.HTTP_1_1) .code(200) .message("ok") .body(ResponseBody.create(new byte[0], MediaType.get("text/plain"))) .build(); handler.onResponse(call, response); assertNull(callback.getResponse()); assertNotNull(callback.getThrowable()); assertTrue(callback.getThrowable() instanceof OAuthException); // verify latch is released assertThrows(ExecutionException.class, new ThrowingRunnable() { @Override public void run() throws Throwable { future.get(); } }); }
public static UpdateRequirement fromJson(String json) { return JsonUtil.parse(json, UpdateRequirementParser::fromJson); }
@Test public void testUpdateRequirementWithoutRequirementTypeCannotParse() { List<String> invalidJson = ImmutableList.of( "{\"type\":null,\"uuid\":\"2cc52516-5e73-41f2-b139-545d41a4e151\"}", "{\"uuid\":\"2cc52516-5e73-41f2-b139-545d41a4e151\"}"); for (String json : invalidJson) { assertThatThrownBy(() -> UpdateRequirementParser.fromJson(json)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse update requirement. Missing field: type"); } }
public int length() { return mName.length(); }
@Test public void length() throws Exception { assertEquals(PropertyKey.Name.HOME.length(), PropertyKey.HOME.length()); }
public static String getOnlineInstanceNodePath(final String instanceId, final InstanceType instanceType) { return String.join("/", "", ROOT_NODE, COMPUTE_NODE, ONLINE_NODE, instanceType.name().toLowerCase(), instanceId); }
@Test void assertGetOnlineInstanceNodePath() { assertThat(ComputeNode.getOnlineInstanceNodePath("foo_instance_1", InstanceType.PROXY), is("/nodes/compute_nodes/online/proxy/foo_instance_1")); assertThat(ComputeNode.getOnlineInstanceNodePath("foo_instance_2", InstanceType.JDBC), is("/nodes/compute_nodes/online/jdbc/foo_instance_2")); }
public ConfigCenterBuilder address(String address) { this.address = address; return getThis(); }
@Test void address() { ConfigCenterBuilder builder = ConfigCenterBuilder.newBuilder(); builder.address("address"); Assertions.assertEquals("address", builder.build().getAddress()); }
@CheckForNull public String getDecoratedSourceAsHtml(@Nullable String sourceLine, @Nullable String highlighting, @Nullable String symbols) { if (sourceLine == null) { return null; } DecorationDataHolder decorationDataHolder = new DecorationDataHolder(); if (StringUtils.isNotBlank(highlighting)) { decorationDataHolder.loadSyntaxHighlightingData(highlighting); } if (StringUtils.isNotBlank(symbols)) { decorationDataHolder.loadLineSymbolReferences(symbols); } HtmlTextDecorator textDecorator = new HtmlTextDecorator(); List<String> decoratedSource = textDecorator.decorateTextWithHtml(sourceLine, decorationDataHolder, 1, 1); if (decoratedSource == null) { return null; } else { if (decoratedSource.isEmpty()) { return ""; } else { return decoratedSource.get(0); } } }
@Test public void should_ignore_missing_highlighting() { String sourceLine = " if (toto < 42) {"; assertThat(sourceDecorator.getDecoratedSourceAsHtml(sourceLine, null, null)).isEqualTo(" if (toto &lt; 42) {"); assertThat(sourceDecorator.getDecoratedSourceAsHtml(sourceLine, "", null)).isEqualTo(" if (toto &lt; 42) {"); }
@Override public SQLToken generateSQLToken(final InsertStatementContext insertStatementContext) { Optional<InsertValuesToken> result = findPreviousSQLToken(); Preconditions.checkState(result.isPresent()); Optional<GeneratedKeyContext> generatedKey = insertStatementContext.getGeneratedKeyContext(); Preconditions.checkState(generatedKey.isPresent()); Iterator<Comparable<?>> generatedValues = generatedKey.get().getGeneratedValues().iterator(); int count = 0; List<List<Object>> params = insertStatementContext.getGroupedParameters(); for (InsertValueContext each : insertStatementContext.getInsertValueContexts()) { InsertValue insertValueToken = result.get().getInsertValues().get(count); DerivedSimpleExpressionSegment expressionSegment = isToAddDerivedLiteralExpression(params, count) ? new DerivedLiteralExpressionSegment(generatedValues.next()) : new DerivedParameterMarkerExpressionSegment(each.getParameterCount()); insertValueToken.getValues().add(expressionSegment); count++; } return result.get(); }
@Test void assertGenerateSQLToken() { InsertStatementContext insertStatementContext = mock(InsertStatementContext.class); GeneratedKeyContext generatedKeyContext = getGeneratedKeyContext(); when(insertStatementContext.getGeneratedKeyContext()).thenReturn(Optional.of(generatedKeyContext)); when(insertStatementContext.getInsertValueContexts()).thenReturn(Collections.singletonList(mock(InsertValueContext.class))); List<List<Object>> parameterGroups = Collections.singletonList(new ArrayList<>(Collections.singleton(new Object()))); when(insertStatementContext.getGroupedParameters()).thenReturn(parameterGroups); GeneratedKeyInsertValuesTokenGenerator generator = new GeneratedKeyInsertValuesTokenGenerator(); generator.setPreviousSQLTokens(getPreviousSQLTokens()); SQLToken sqlToken = generator.generateSQLToken(insertStatementContext); assertThat(sqlToken, instanceOf(InsertValuesToken.class)); assertThat(((InsertValuesToken) sqlToken).getInsertValues().get(0).getValues().get(0), instanceOf(DerivedParameterMarkerExpressionSegment.class)); parameterGroups.get(0).clear(); ((InsertValuesToken) sqlToken).getInsertValues().get(0).getValues().clear(); sqlToken = generator.generateSQLToken(insertStatementContext); assertThat(((InsertValuesToken) sqlToken).getInsertValues().get(0).getValues().get(0), instanceOf(DerivedLiteralExpressionSegment.class)); }
public static int[] getCutIndices(String s, String splitChar, int index) { int found = 0; char target = splitChar.charAt(0); for (int i = 0; i < s.length(); i++) { if (s.charAt(i) == target) { found++; } if (found == index) { int begin = i; if (begin != 0) { begin += 1; } int end = s.indexOf(target, i + 1); // End will be -1 if this is the last token in the string and there is no other occurrence. if (end == -1) { end = s.length(); } return new int[]{begin, end}; } } return new int[]{0, 0}; }
@Test public void testCutIndicesWithLastToken() throws Exception { String s = "<10> 07 Aug 2013 somesubsystem"; int[] result = SplitAndIndexExtractor.getCutIndices(s, " ", 4); assertEquals(17, result[0]); assertEquals(s.length(), result[1]); }
@VisibleForTesting public KMSClientProvider[] getProviders() { return providers; }
@Test public void testCreation() throws Exception { Configuration conf = new Configuration(); KeyProvider kp = new KMSClientProvider.Factory().createProvider(new URI( "kms://http@host1:9600/kms/foo"), conf); assertTrue(kp instanceof LoadBalancingKMSClientProvider); KMSClientProvider[] providers = ((LoadBalancingKMSClientProvider) kp).getProviders(); assertEquals(1, providers.length); assertEquals(new HashSet<>(Collections.singleton("http://host1:9600/kms/foo/v1/")), new HashSet<>(Collections.singleton(providers[0].getKMSUrl()))); kp = new KMSClientProvider.Factory().createProvider(new URI( "kms://http@host1;host2;host3:9600/kms/foo"), conf); assertTrue(kp instanceof LoadBalancingKMSClientProvider); providers = ((LoadBalancingKMSClientProvider) kp).getProviders(); assertEquals(3, providers.length); assertEquals(new HashSet<>(Arrays.asList("http://host1:9600/kms/foo/v1/", "http://host2:9600/kms/foo/v1/", "http://host3:9600/kms/foo/v1/")), new HashSet<>(Arrays.asList(providers[0].getKMSUrl(), providers[1].getKMSUrl(), providers[2].getKMSUrl()))); kp = new KMSClientProvider.Factory().createProvider(new URI( "kms://http@host1;host2;host3:9600/kms/foo"), conf); assertTrue(kp instanceof LoadBalancingKMSClientProvider); providers = ((LoadBalancingKMSClientProvider) kp).getProviders(); assertEquals(3, providers.length); assertEquals(new HashSet<>(Arrays.asList("http://host1:9600/kms/foo/v1/", "http://host2:9600/kms/foo/v1/", "http://host3:9600/kms/foo/v1/")), new HashSet<>(Arrays.asList(providers[0].getKMSUrl(), providers[1].getKMSUrl(), providers[2].getKMSUrl()))); }
@Override public void commit(TableMetadata base, TableMetadata metadata) { boolean isStageCreate = Boolean.parseBoolean(metadata.properties().get(CatalogConstants.IS_STAGE_CREATE_KEY)); super.commit(base, metadata); if (isStageCreate) { disableRefresh(); /* disable forced refresh */ } }
@Test void testDoCommitDoesntPersistForStagedTable() { TableMetadata metadata = BASE_TABLE_METADATA.replaceProperties( ImmutableMap.of(CatalogConstants.IS_STAGE_CREATE_KEY, "true")); openHouseInternalTableOperations.commit(null, metadata); // Assert TableMetadata is already set for TableOperations Assertions.assertNotNull(openHouseInternalTableOperations.currentMetadataLocation()); Assertions.assertNotNull(openHouseInternalTableOperations.current()); // Assert houseTableRepository.save() was not called for doCommit() verify(mockHouseTableRepository, times(0)).save(null); // Assert houseTableRepository.findById() was not called for doRefresh() verify(mockHouseTableRepository, times(0)).findById(null); Assertions.assertFalse( DynFields.builder() .hiddenImpl(BaseMetastoreTableOperations.class, "shouldRefresh") .<Boolean>build(openHouseInternalTableOperations) .get()); }
public static void retainMatching(Collection<String> values, String... patterns) { retainMatching(values, Arrays.asList(patterns)); }
@Test public void testRetainMatchingWithMatchingPattern() throws Exception { Collection<String> values = stringToList("A"); StringCollectionUtil.retainMatching(values, "A"); assertTrue(values.contains("A")); }
@Override public void loadData(Priority priority, DataCallback<? super T> callback) { this.callback = callback; serializer.startRequest(priority, url, this); }
@Test public void testRequestComplete_whenCancelledAndUnauthorized_callsCallbackWithNullError() throws Exception { UrlResponseInfo info = getInfo(0, HttpURLConnection.HTTP_FORBIDDEN); fetcher.loadData(Priority.HIGH, callback); Callback urlCallback = urlRequestListenerCaptor.getValue(); urlCallback.onResponseStarted(request, info); urlCallback.onCanceled(request, info); verify(callback, timeout(1000)).onLoadFailed(ArgumentMatchers.<Exception>isNull()); }
@Override public ObjectNode encode(K8sIpam ipam, CodecContext context) { checkNotNull(ipam, "Kubernetes IPAM cannot be null"); return context.mapper().createObjectNode() .put(IPAM_ID, ipam.ipamId()) .put(IP_ADDRESS, ipam.ipAddress().toString()) .put(NETWORK_ID, ipam.networkId()); }
@Test public void testK8sIpamEncode() { K8sIpam ipam = new DefaultK8sIpam("network-1-10.10.10.10", IpAddress.valueOf("10.10.10.10"), "network-1"); ObjectNode nodeJson = k8sIpamCodec.encode(ipam, context); assertThat(nodeJson, matchesK8sIpam(ipam)); }
@Override public ByteBuf setFloat(int index, float value) { setInt(index, Float.floatToRawIntBits(value)); return this; }
@Test public void testSetFloatAfterRelease() { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().setFloat(0, 1); } }); }
@Override @Nonnull public ListIterator<T> listIterator(final int initialIndex) { final Iterator<T> initialIterator; try { initialIterator = iterator(initialIndex); } catch (NoSuchElementException ex) { throw new IndexOutOfBoundsException(); } return new AbstractListIterator<T>() { private int index = initialIndex - 1; @Nullable private Iterator<T> forwardIterator = initialIterator; @Nonnull private Iterator<T> getForwardIterator() { if (forwardIterator == null) { try { forwardIterator = iterator(index+1); } catch (IndexOutOfBoundsException ex) { throw new NoSuchElementException(); } } return forwardIterator; } @Override public boolean hasNext() { return getForwardIterator().hasNext(); } @Override public boolean hasPrevious() { return index >= 0; } @Override public T next() { T ret = getForwardIterator().next(); index++; return ret; } @Override public int nextIndex() { return index+1; } @Override public T previous() { forwardIterator = null; try { return iterator(index--).next(); } catch (IndexOutOfBoundsException ex) { throw new NoSuchElementException(); } } @Override public int previousIndex() { return index; } }; }
@Test public void testAlternatingIteration() { ListIterator<Integer> iter = list.listIterator(50); for (int i=0; i<10; i++) { Assert.assertTrue(iter.hasNext()); Assert.assertTrue(iter.hasPrevious()); Assert.assertEquals(50, iter.nextIndex()); Assert.assertEquals(49, iter.previousIndex()); Assert.assertEquals(50, iter.next().intValue()); Assert.assertTrue(iter.hasNext()); Assert.assertTrue(iter.hasPrevious()); Assert.assertEquals(51, iter.nextIndex()); Assert.assertEquals(50, iter.previousIndex()); Assert.assertEquals(50, iter.previous().intValue()); } }
public void setProperty(String name, String value) { if (value == null) { return; } Method setter = aggregationAssessor.findSetterMethod(name); if (setter == null) { addWarn("No setter for property [" + name + "] in " + objClass.getName() + "."); } else { try { setProperty(setter, value); } catch (PropertySetterException ex) { addWarn("Failed to set property [" + name + "] to value \"" + value + "\". ", ex); } } }
@Test public void testDuration() { setter.setProperty("duration", "1.4 seconds"); assertEquals(1400, house.getDuration().getMilliseconds()); }
protected Authorization parseAuthLine(String line) throws ParseException { String[] tokens = line.split("\\s+"); String keyword = tokens[0].toLowerCase(); switch (keyword) { case "topic": return createAuthorization(line, tokens); case "user": m_parsingUsersSpecificSection = true; m_currentUser = tokens[1]; m_parsingPatternSpecificSection = false; return null; case "pattern": m_parsingUsersSpecificSection = false; m_currentUser = ""; m_parsingPatternSpecificSection = true; return createAuthorization(line, tokens); default: throw new ParseException(String.format("invalid line definition found %s", line), 1); } }
@Test public void testParseAuthLineValid_topic_with_space() throws ParseException { Authorization expected = new Authorization(new Topic("/weather/eastern italy/anemometer")); Authorization authorization = authorizator.parseAuthLine("topic readwrite /weather/eastern italy/anemometer"); // Verify assertEquals(expected, authorization); }
@Override public int serializeKV(DataOutputStream out, SizedWritable<?> key, SizedWritable<?> value) throws IOException { return serializePartitionKV(out, -1, key, value); }
@Test public void testSerializeKV() throws IOException { final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class); Mockito.when(dataOut.hasUnFlushedData()).thenReturn(true); Mockito.when(dataOut.shortOfSpace(key.length + value.length + Constants.SIZEOF_KV_LENGTH)).thenReturn(true); final int written = serializer.serializeKV(dataOut, key, value); // flush once, write 4 int, and 2 byte array Mockito.verify(dataOut, Mockito.times(1)).flush(); Mockito.verify(dataOut, Mockito.times(4)).writeInt(anyInt()); Mockito.verify(dataOut, Mockito.times(2)).write(any(byte[].class), anyInt(), anyInt()); Assert.assertEquals(written, key.length + value.length + Constants.SIZEOF_KV_LENGTH); }
@NonNull public AuthorizationResponse auth(@NonNull AuthorizationRequest request) { validateAuthorizationRequest(request); var verifier = generatePkceCodeVerifier(); var codeChallenge = calculateS256CodeChallenge(verifier); var relyingPartyCallback = baseUri.resolve("/auth/callback"); var step1 = authenticationFlow.start( new AuthenticationFlow.Session( request.state(), request.nonce(), relyingPartyCallback, codeChallenge, federationConfig.scopes())); var identityProviders = step1.fetchIdpOptions(); var sessionId = IdGenerator.generateID(); var session = Session.create() .id(sessionId) .state(request.state()) .nonce(request.nonce()) .redirectUri(request.redirectUri()) .clientId(request.clientId()) .codeVerifier(verifier) .selectSectoralIdpStep(step1) .build(); sessionRepo.save(session); return new AuthorizationResponse(identityProviders, sessionId); }
@Test void auth_badScopes() { var rpConfig = new RelyingPartyConfig(null, List.of(REDIRECT_URI)); var sut = new AuthService(BASE_URI, rpConfig, null, null, null, null); var scope = "openid email"; var state = UUID.randomUUID().toString(); var nonce = UUID.randomUUID().toString(); var responseType = "code"; var clientId = "myapp"; // when var req = new AuthorizationRequest(scope, state, responseType, clientId, REDIRECT_URI, nonce); var e = assertThrows(ValidationException.class, () -> sut.auth(req)); // then assertEquals( "https://myapp.example.com?error=invalid_scope&error_description=error.unsupportedScope&state=%s" .formatted(state), e.seeOther().toString()); }
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) { return aggregate(initializer, Materialized.with(null, null)); }
@Test public void namedParamShouldSetName() { final StreamsBuilder builder = new StreamsBuilder(); final KStream<String, String> stream = builder.stream(TOPIC, Consumed .with(Serdes.String(), Serdes.String())); groupedStream = stream.groupByKey(Grouped.with(Serdes.String(), Serdes.String())); groupedStream.cogroup(MockAggregator.TOSTRING_ADDER) .windowedBy(SlidingWindows.withTimeDifferenceAndGrace(ofMillis(WINDOW_SIZE_MS), ofMillis(2000L))) .aggregate(MockInitializer.STRING_INIT, Named.as("foo")); assertThat(builder.build().describe().toString(), equalTo( "Topologies:\n" + " Sub-topology: 0\n" + " Source: KSTREAM-SOURCE-0000000000 (topics: [topic])\n" + " --> foo-cogroup-agg-0\n" + " Processor: foo-cogroup-agg-0 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" + " --> foo-cogroup-merge\n" + " <-- KSTREAM-SOURCE-0000000000\n" + " Processor: foo-cogroup-merge (stores: [])\n" + " --> none\n" + " <-- foo-cogroup-agg-0\n\n")); }
@Override public String getServiceId() { return serviceId; }
@Test public void testGetNacosServiceId() { String groupName = "group"; String format = "%s__%s"; when(nacosContextProperties.getGroup()).thenReturn(groupName); String serviceId = polarisRegistration1.getServiceId(); assertThat(String.format(format, groupName, SERVICE_PROVIDER).equals(serviceId)).isTrue(); }
@Override public Float getFloat(K name) { return null; }
@Test public void testGetFloat() { assertNull(HEADERS.getFloat("name1")); }
public void set(int index) { Preconditions.checkArgument(index < bitLength && index >= 0); int byteIndex = index >>> 3; byte current = memorySegment.get(offset + byteIndex); current |= (1 << (index & BYTE_INDEX_MASK)); memorySegment.put(offset + byteIndex, current); }
@TestTemplate void verifyInputIndex2() { assertThatThrownBy(() -> bitSet.set(-1)).isInstanceOf(IllegalArgumentException.class); }
<K> OperationNode createPartialGroupByKeyOperation( Network<Node, Edge> network, ParallelInstructionNode node, PipelineOptions options, DataflowExecutionContext<?> executionContext, DataflowOperationContext operationContext) throws Exception { ParallelInstruction instruction = node.getParallelInstruction(); PartialGroupByKeyInstruction pgbk = instruction.getPartialGroupByKey(); OutputReceiver[] receivers = getOutputReceivers(network, node); Coder<?> windowedCoder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(pgbk.getInputElementCodec())); if (!(windowedCoder instanceof WindowedValueCoder)) { throw new IllegalArgumentException( String.format( "unexpected kind of input coder for PartialGroupByKeyOperation: %s", windowedCoder)); } Coder<?> elemCoder = ((WindowedValueCoder<?>) windowedCoder).getValueCoder(); if (!(elemCoder instanceof KvCoder)) { throw new IllegalArgumentException( String.format( "unexpected kind of input element coder for PartialGroupByKeyOperation: %s", elemCoder)); } @SuppressWarnings("unchecked") KvCoder<K, ?> keyedElementCoder = (KvCoder<K, ?>) elemCoder; CloudObject cloudUserFn = pgbk.getValueCombiningFn() != null ? CloudObject.fromSpec(pgbk.getValueCombiningFn()) : null; ParDoFn fn = PartialGroupByKeyParDoFns.create( options, keyedElementCoder, cloudUserFn, pgbk.getSideInputs(), Arrays.<Receiver>asList(receivers), executionContext, operationContext); return OperationNode.create(new ParDoOperation(fn, receivers, operationContext)); }
@Test public void testCreatePartialGroupByKeyOperation() throws Exception { int producerIndex = 1; int producerOutputNum = 2; ParallelInstructionNode instructionNode = ParallelInstructionNode.create( createPartialGroupByKeyInstruction(producerIndex, producerOutputNum), ExecutionLocation.UNKNOWN); when(network.successors(instructionNode)) .thenReturn( ImmutableSet.<Node>of( IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet) .apply( InstructionOutputNode.create( instructionNode.getParallelInstruction().getOutputs().get(0), PCOLLECTION_ID)))); when(network.outDegree(instructionNode)).thenReturn(1); Node operationNode = mapTaskExecutorFactory .createOperationTransformForParallelInstructionNodes( STAGE, network, PipelineOptionsFactory.create(), readerRegistry, sinkRegistry, BatchModeExecutionContext.forTesting(options, counterSet, "testStage")) .apply(instructionNode); assertThat(operationNode, instanceOf(OperationNode.class)); assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ParDoOperation.class)); ParDoOperation pgbkOperation = (ParDoOperation) ((OperationNode) operationNode).getOperation(); assertEquals(1, pgbkOperation.receivers.length); assertEquals(0, pgbkOperation.receivers[0].getReceiverCount()); assertEquals(Operation.InitializationState.UNSTARTED, pgbkOperation.initializationState); }
@Override public ConcurrentJobModificationResolveResult resolve(Job localJob, Job storageProviderJob) { //why: we use the JobVersioner to bump the version so it matches the one from the DB new JobVersioner(localJob); storageProvider.save(localJob); return ConcurrentJobModificationResolveResult.succeeded(localJob); }
@Test void ifJobHasEnqueuedStateAndWasScheduledTooEarlyByJobZooKeeperItWillResolveLocalJobAndSaveIt() { final Job scheduledJob = aJob() .withFailedState() .withScheduledState() .withVersion(5) .build(); final Job enqueuedJob = aCopyOf(scheduledJob) .withEnqueuedState(Instant.now()) .withVersion(4) .build(); final ConcurrentJobModificationResolveResult resolveResult = allowedStateChange.resolve(enqueuedJob, scheduledJob); verify(storageProvider).save(jobCaptor.capture()); assertThat(jobCaptor.getValue()) .hasVersion(5) .hasStates(StateName.FAILED, StateName.SCHEDULED, StateName.ENQUEUED); assertThat(resolveResult.failed()).isFalse(); assertThat(resolveResult.getLocalJob()).isEqualTo(enqueuedJob); }
Optional<PriorityAndResource> getPriorityAndResource( final TaskExecutorProcessSpec taskExecutorProcessSpec) { tryAdaptAndAddTaskExecutorResourceSpecIfNotExist(taskExecutorProcessSpec); return Optional.ofNullable( taskExecutorProcessSpecToPriorityAndResource.get(taskExecutorProcessSpec)); }
@Test void testExternalResourceFailHadoopVersionNotSupported() { assumeThat(isExternalResourceSupported()).isFalse(); assertThatThrownBy( () -> getAdapterWithExternalResources( SUPPORTED_EXTERNAL_RESOURCE_NAME, SUPPORTED_EXTERNAL_RESOURCE_CONFIG_KEY) .getPriorityAndResource( TASK_EXECUTOR_PROCESS_SPEC_WITH_EXTERNAL_RESOURCE)) .isInstanceOf(IllegalStateException.class); }
public void write(D datum, Encoder out) throws IOException { Objects.requireNonNull(out, "Encoder cannot be null"); try { write(root, datum, out); } catch (TracingNullPointException | TracingClassCastException | TracingAvroTypeException e) { throw e.summarize(root); } }
@Test void unionUnresolvedExceptionExplicitWhichField() throws IOException { Schema s = schemaWithExplicitNullDefault(); GenericRecord r = new GenericData.Record(s); r.put("f", 100); ByteArrayOutputStream bao = new ByteArrayOutputStream(); try { new GenericDatumWriter<>(s).write(r, EncoderFactory.get().jsonEncoder(s, bao)); fail(); } catch (final UnresolvedUnionException uue) { assertEquals("Not in union [\"null\",\"string\"]: 100 (field=f)", uue.getMessage()); } }
@Override public int indexOfMax() { int index = 0; double value = Double.NEGATIVE_INFINITY; for (int i = 0; i < elements.length; i++) { double tmp = get(i); if (tmp > value) { index = i; value = tmp; } } return index; }
@Test public void maxIndex() { DenseVector s = generateVectorB(); assertEquals(5,s.indexOfMax()); }
@Override public void init(DatabaseMetaData metaData) throws SQLException { checkDbVersion(metaData, MIN_SUPPORTED_VERSION); }
@Test public void init_throws_MessageException_if_mssql_2012() throws Exception { assertThatThrownBy(() -> { DatabaseMetaData metadata = newMetadata( 11, 0); underTest.init(metadata); }) .isInstanceOf(MessageException.class) .hasMessage("Unsupported mssql version: 11.0. Minimal supported version is 12.0."); }
public String render(Object o) { StringBuilder result = new StringBuilder(template.length()); render(o, result); return result.toString(); }
@Test(expected = IllegalArgumentException.class) public void missingFieldShouldException() { Template template = new Template("Hello {{wtf}} "); template.render(foo); }
public static <InputT, OutputT> MapElements<InputT, OutputT> via( final InferableFunction<InputT, OutputT> fn) { return new MapElements<>(fn, fn.getInputTypeDescriptor(), fn.getOutputTypeDescriptor()); }
@Test public void testPolymorphicSimpleFunction() throws Exception { pipeline.enableAbandonedNodeEnforcement(false); pipeline .apply(Create.of(1, 2, 3)) // This is the function that needs to propagate the input T to output T .apply("Polymorphic Identity", MapElements.via(new PolymorphicSimpleFunction<>())) // This is a consumer to ensure that all coder inference logic is executed. .apply( "Test Consumer", MapElements.via( new SimpleFunction<Integer, Integer>() { @Override public Integer apply(Integer input) { return input; } })); }
public static <K, V> WithKeys<K, V> of(SerializableFunction<V, K> fn) { checkNotNull( fn, "WithKeys constructed with null function. Did you mean WithKeys.of((Void) null)?"); return new WithKeys<>(fn, null); }
@Test @Category(NeedsRunner.class) public void testExtractKeys() { PCollection<String> input = p.apply(Create.of(Arrays.asList(COLLECTION)).withCoder(StringUtf8Coder.of())); PCollection<KV<Integer, String>> output = input.apply(WithKeys.of(new LengthAsKey())); PAssert.that(output).containsInAnyOrder(WITH_KEYS); p.run(); }
@VisibleForTesting public void updateSlowDiskReportAsync(long now) { if (isUpdateInProgress.compareAndSet(false, true)) { lastUpdateTime = now; new Thread(new Runnable() { @Override public void run() { slowDisksReport = getSlowDisks(diskIDLatencyMap, maxDisksToReport, now); cleanUpOldReports(now); isUpdateInProgress.set(false); } }).start(); } }
@Test public void testEmptyReports() { tracker.updateSlowDiskReportAsync(timer.monotonicNow()); assertTrue(getSlowDisksReportForTesting(tracker).isEmpty()); }
private void setRequestId(Message message, Request<?, ?> request) { final Long id = message.getHeader(Web3jConstants.ID, Long.class); LOG.debug("setRequestId {}", id); if (id != null) { request.setId(id); } }
@Test public void setRequestIdTest() throws Exception { Web3ClientVersion response = Mockito.mock(Web3ClientVersion.class); Mockito.when(mockWeb3j.web3ClientVersion()).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Exchange exchange = createExchangeWithBodyAndHeader(null, Web3jConstants.ID, Long.valueOf(1)); template.send(exchange); Mockito.verify(request).setId(1L); }
@Override public DarkClusterConfigMap getDarkClusterConfigMap(String clusterName) throws ServiceUnavailableException { FutureCallback<DarkClusterConfigMap> darkClusterConfigMapFutureCallback = new FutureCallback<>(); getDarkClusterConfigMap(clusterName, darkClusterConfigMapFutureCallback); try { return darkClusterConfigMapFutureCallback.get(_timeout, _unit); } catch (ExecutionException | TimeoutException | IllegalStateException | InterruptedException e) { if (e instanceof TimeoutException || e.getCause() instanceof TimeoutException) { DarkClusterConfigMap darkClusterConfigMap = getDarkClusterConfigMapFromCache(clusterName); if (darkClusterConfigMap != null) { _log.info("Got dark cluster config map for {} timed out, used cached value instead.", clusterName); return darkClusterConfigMap; } } die("ClusterInfo", "PEGA_1018, unable to retrieve dark cluster info for cluster: " + clusterName + ", exception" + ": " + e); return new DarkClusterConfigMap(); } }
@Test public void testClusterInfoProviderGetDarkClustersNoCluster() throws InterruptedException, ExecutionException, ServiceUnavailableException { MockStore<ServiceProperties> serviceRegistry = new MockStore<>(); MockStore<ClusterProperties> clusterRegistry = new MockStore<>(); MockStore<UriProperties> uriRegistry = new MockStore<>(); SimpleLoadBalancer loadBalancer = setupLoadBalancer(serviceRegistry, clusterRegistry, uriRegistry); loadBalancer.getDarkClusterConfigMap(NONEXISTENT_CLUSTER, new Callback<DarkClusterConfigMap>() { @Override public void onError(Throwable e) { Assert.fail("getDarkClusterConfigMap threw exception", e); } @Override public void onSuccess(DarkClusterConfigMap returnedDarkClusterConfigMap) { Assert.assertEquals(returnedDarkClusterConfigMap.size(), 0, "expected empty map"); } }); }
public static boolean isDirectory(URL resourceURL) throws URISyntaxException { final String protocol = resourceURL.getProtocol(); switch (protocol) { case "jar": try { final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection(); final JarEntry entry = jarConnection.getJarEntry(); if (entry.isDirectory()) { return true; } // WARNING! Heuristics ahead. // It turns out that JarEntry#isDirectory() really just tests whether the filename ends in a '/'. // If you try to open the same URL without a trailing '/', it'll succeed — but the result won't be // what you want. We try to get around this by calling getInputStream() on the file inside the jar. // This seems to return null for directories (though that behavior is undocumented as far as I // can tell). If you have a better idea, please improve this. final String relativeFilePath = entry.getName(); final JarFile jarFile = jarConnection.getJarFile(); final ZipEntry zipEntry = jarFile.getEntry(relativeFilePath); final InputStream inputStream = jarFile.getInputStream(zipEntry); return inputStream == null; } catch (IOException e) { throw new ResourceNotFoundException(e); } case "file": return new File(resourceURL.toURI()).isDirectory(); default: throw new IllegalArgumentException("Unsupported protocol " + resourceURL.getProtocol() + " for resource " + resourceURL); } }
@Test void isDirectoryReturnsFalseForFilesWithSpacesInJars() throws Exception { final URL url = new URL("jar:" + resourceJar.toExternalForm() + "!/file with space.txt"); assertThat(url.getProtocol()).isEqualTo("jar"); assertThat(ResourceURL.isDirectory(url)).isFalse(); }
@Override public UpdateStatistics removeStatistics(long snapshotId) { statisticsToSet.put(snapshotId, Optional.empty()); return this; }
@TestTemplate
public void testRemoveStatistics() {
    // Create a snapshot
    table.newFastAppend().commit();
    assertThat(version()).isEqualTo(1);
    TableMetadata base = readMetadata();
    long snapshotId = base.currentSnapshot().snapshotId();
    GenericStatisticsFile statisticsFile =
        new GenericStatisticsFile(snapshotId, "/some/statistics/file.puffin", 100, 42, ImmutableList.of());
    table.updateStatistics().setStatistics(snapshotId, statisticsFile).commit();
    TableMetadata metadata = readMetadata();
    assertThat(version()).isEqualTo(2);
    assertThat(metadata.statisticsFiles()).containsExactly(statisticsFile);
    table.updateStatistics().removeStatistics(snapshotId).commit();
    metadata = readMetadata();
    assertThat(version()).isEqualTo(3);
    assertThat(metadata.statisticsFiles()).isEmpty();
}
@Override @CheckForNull public Instant forkDate(String referenceBranchName, Path projectBaseDir) { return null; }
@Test public void forkDate_returns_null() { assertThat(newScmProvider().forkDate("unknown", worktree)).isNull(); }
@Override public int get(PageId pageId, int pageOffset, int bytesToRead, ReadTargetBuffer target, CacheContext cacheContext) { getOrUpdateShadowCache(pageId, bytesToRead, cacheContext); return mCacheManager.get(pageId, pageOffset, bytesToRead, target, cacheContext); }
@Test public void getNotExist() throws Exception { assertEquals(0, mCacheManager.get(PAGE_ID1, PAGE1.length, mBuf, 0)); }
public static String getInstanceWorkerIdNodePath(final String instanceId) { return String.join("/", "", ROOT_NODE, COMPUTE_NODE, WORKER_ID, instanceId); }
@Test void assertGetInstanceWorkerIdNodePath() { assertThat(ComputeNode.getInstanceWorkerIdNodePath("foo_instance"), is("/nodes/compute_nodes/worker_id/foo_instance")); }
@Transactional @PreAuthorize(value = "@permissionValidator.isSuperAdmin()") @PostMapping(value = "/consumers") public ConsumerInfo create( @RequestBody ConsumerCreateRequestVO requestVO, @RequestParam(value = "expires", required = false) @DateTimeFormat(pattern = "yyyyMMddHHmmss") Date expires ) { if (StringUtils.isBlank(requestVO.getAppId())) { throw BadRequestException.appIdIsBlank(); } if (StringUtils.isBlank(requestVO.getName())) { throw BadRequestException.appNameIsBlank(); } if (StringUtils.isBlank(requestVO.getOwnerName())) { throw BadRequestException.ownerNameIsBlank(); } if (StringUtils.isBlank(requestVO.getOrgId())) { throw BadRequestException.orgIdIsBlank(); } Consumer createdConsumer = consumerService.createConsumer(convertToConsumer(requestVO)); if (Objects.isNull(expires)) { expires = DEFAULT_EXPIRES; } ConsumerToken consumerToken = consumerService.generateAndSaveConsumerToken(createdConsumer, expires); if (requestVO.isAllowCreateApplication()) { consumerService.assignCreateApplicationRoleToConsumer(consumerToken.getToken()); } return consumerService.getConsumerInfoByAppId(requestVO.getAppId()); }
@Test
void createWithBadRequest() {
    ConsumerService consumerService = Mockito.mock(ConsumerService.class);
    ConsumerController consumerController = new ConsumerController(consumerService);
    ConsumerCreateRequestVO requestVO = new ConsumerCreateRequestVO();

    // blank appId
    assertThrows(BadRequestException.class, () -> consumerController.create(requestVO, null));
    requestVO.setAppId("appId1");

    // blank name
    assertThrows(BadRequestException.class, () -> consumerController.create(requestVO, null));
    requestVO.setName("app 1");

    // blank ownerName
    assertThrows(BadRequestException.class, () -> consumerController.create(requestVO, null));
    requestVO.setOwnerName("user1");

    // blank orgId
    assertThrows(BadRequestException.class, () -> consumerController.create(requestVO, null));
    requestVO.setOrgId("orgId1");
}
protected void setMethod() { boolean activateBody = RestMeta.isActiveBody( wMethod.getText() ); boolean activateParams = RestMeta.isActiveParameters( wMethod.getText() ); wlBody.setEnabled( activateBody ); wBody.setEnabled( activateBody ); wApplicationType.setEnabled( activateBody ); wlParameters.setEnabled( activateParams ); wParameters.setEnabled( activateParams ); wGet.setEnabled( activateParams ); wlMatrixParameters.setEnabled( activateParams ); wMatrixParameters.setEnabled( activateParams ); wMatrixGet.setEnabled( activateParams ); }
@Test public void testSetMethod_HEAD() { doReturn( RestMeta.HTTP_METHOD_HEAD ).when( method ).getText(); dialog.setMethod(); verify( bodyl, times( 1 ) ).setEnabled( false ); verify( body, times( 1 ) ).setEnabled( false ); verify( type, times( 1 ) ).setEnabled( false ); verify( paramsl, times( 1 ) ).setEnabled( false ); verify( params, times( 1 ) ).setEnabled( false ); verify( paramsb, times( 1 ) ).setEnabled( false ); verify( matrixl, times( 1 ) ).setEnabled( false ); verify( matrix, times( 1 ) ).setEnabled( false ); verify( matrixb, times( 1 ) ).setEnabled( false ); }
public static Set<Result> anaylze(String log) { Set<Result> results = new HashSet<>(); for (Rule rule : Rule.values()) { Matcher matcher = rule.pattern.matcher(log); if (matcher.find()) { results.add(new Result(rule, log, matcher)); } } return results; }
@Test public void optifineIsNotCompatibleWithForge() throws IOException { CrashReportAnalyzer.Result result = findResultByRule( CrashReportAnalyzer.anaylze(loadLog("/logs/optifine_is_not_compatible_with_forge.txt")), CrashReportAnalyzer.Rule.OPTIFINE_IS_NOT_COMPATIBLE_WITH_FORGE); }
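The rule scan above amounts to "run every rule's regex over the whole log and collect the hits". A minimal, self-contained sketch of that pattern follows; the rules and class names here are hypothetical illustrations, not HMCL's actual Rule enum or Result class.

import java.util.EnumSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

enum LogRule {
    // hypothetical rules for illustration only
    OUT_OF_MEMORY("java\\.lang\\.OutOfMemoryError"),
    CLASS_NOT_FOUND("java\\.lang\\.ClassNotFoundException: (\\S+)");

    final Pattern pattern;
    LogRule(String regex) { this.pattern = Pattern.compile(regex); }
}

public class LogScan {
    // collect every rule whose pattern occurs somewhere in the log
    static Set<LogRule> analyze(String log) {
        Set<LogRule> hits = EnumSet.noneOf(LogRule.class);
        for (LogRule rule : LogRule.values()) {
            Matcher m = rule.pattern.matcher(log);
            if (m.find()) {
                hits.add(rule);
            }
        }
        return hits;
    }

    public static void main(String[] args) {
        // prints [OUT_OF_MEMORY]
        System.out.println(analyze("Caused by: java.lang.OutOfMemoryError: Java heap space"));
    }
}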
public boolean offer(Serializable event) { if (queue == null) { throw new IllegalStateException("client has no event queue"); } return queue.offer(event); }
@Test public void testOfferEventSequenceAndRun() throws Exception { for (int i = 0; i < 10; i++) { client.offer(TEST_EVENT + i); } Thread thread = new Thread(client); thread.start(); thread.join(1000); assertFalse(thread.isAlive()); ObjectInputStream ois = new ObjectInputStream( new ByteArrayInputStream(outputStream.toByteArray())); for (int i = 0; i < 10; i++) { assertEquals(TEST_EVENT + i, ois.readObject()); } }
@Override public boolean contains(CharSequence name, CharSequence value) { return contains(name, value, false); }
@Test public void testContainsName() { Http2Headers headers = new DefaultHttp2Headers(); headers.add(CONTENT_LENGTH, "36"); assertFalse(headers.contains("Content-Length")); assertTrue(headers.contains("content-length")); assertTrue(headers.contains(CONTENT_LENGTH)); headers.remove(CONTENT_LENGTH); assertFalse(headers.contains("Content-Length")); assertFalse(headers.contains("content-length")); assertFalse(headers.contains(CONTENT_LENGTH)); assertFalse(headers.contains("non-existent-name")); assertFalse(headers.contains(new AsciiString("non-existent-name"))); }
private static MethodDescriptor.MethodType getMethodType(final MethodDescriptor.MethodType methodType) { MethodDescriptor.MethodType grpcMethodType; switch (methodType) { case UNARY: grpcMethodType = MethodDescriptor.MethodType.UNARY; break; case CLIENT_STREAMING: grpcMethodType = MethodDescriptor.MethodType.CLIENT_STREAMING; break; case SERVER_STREAMING: grpcMethodType = MethodDescriptor.MethodType.SERVER_STREAMING; break; case BIDI_STREAMING: grpcMethodType = MethodDescriptor.MethodType.BIDI_STREAMING; break; default: grpcMethodType = MethodDescriptor.MethodType.UNKNOWN; } return grpcMethodType; }
@Test public void getMethodTypeTest() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { final Method getMethodType = JsonMessage.class.getDeclaredMethod("getMethodType", MethodDescriptor.MethodType.class); getMethodType.setAccessible(true); assertEquals(MethodDescriptor.MethodType.UNARY, getMethodType.invoke(null, MethodDescriptor.MethodType.UNARY)); assertEquals(MethodDescriptor.MethodType.CLIENT_STREAMING, getMethodType.invoke(null, MethodDescriptor.MethodType.CLIENT_STREAMING)); assertEquals(MethodDescriptor.MethodType.SERVER_STREAMING, getMethodType.invoke(null, MethodDescriptor.MethodType.SERVER_STREAMING)); assertEquals(MethodDescriptor.MethodType.BIDI_STREAMING, getMethodType.invoke(null, MethodDescriptor.MethodType.BIDI_STREAMING)); assertEquals(MethodDescriptor.MethodType.UNKNOWN, getMethodType.invoke(null, MethodDescriptor.MethodType.UNKNOWN)); }
public static void partitionSort(List<Map.Entry<String, ? extends Comparable>> arr, int k) { int start = 0; int end = arr.size() - 1; int target = k - 1; while (start < end) { int lo = start; int hi = end; int mid = lo; var pivot = arr.get(hi).getValue(); while (mid <= hi) { int cmp = pivot.compareTo(arr.get(mid).getValue()); if (cmp < 0) { var tmp = arr.get(lo); arr.set(lo++, arr.get(mid)); arr.set(mid++, tmp); } else if (cmp > 0) { var tmp = arr.get(mid); arr.set(mid, arr.get(hi)); arr.set(hi--, tmp); } else { mid++; } } if (lo <= target && target < mid) { end = lo; break; } if (target < lo) { end = lo - 1; } else { start = mid; } } Collections.sort(arr.subList(0, end), (a, b) -> b.getValue().compareTo(a.getValue())); }
@Test public void testPartitionSort() { Random rand = new Random(); List<Map.Entry<String, ? extends Comparable>> actual = new ArrayList<>(); List<Map.Entry<String, ? extends Comparable>> expected = new ArrayList<>(); for (int j = 0; j < 100; j++) { Map<String, Integer> map = new HashMap<>(); int max = rand.nextInt(10) + 1; for (int i = 0; i < max; i++) { int val = rand.nextInt(max); map.put("" + i, val); } actual.clear(); expected.clear(); for (var etr : map.entrySet()) { actual.add(etr); expected.add(etr); } int topk = rand.nextInt(max) + 1; TopKBundles.partitionSort(actual, topk); Collections.sort(expected, (a, b) -> b.getValue().compareTo(a.getValue())); String errorMsg = null; for (int i = 0; i < topk; i++) { Integer l = (Integer) actual.get(i).getValue(); Integer r = (Integer) expected.get(i).getValue(); if (!l.equals(r)) { errorMsg = String.format("Diff found at i=%d, %d != %d, actual:%s, expected:%s", i, l, r, actual, expected); } assertNull(errorMsg); } } }
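As a cross-check of what partitionSort is meant to compute, the sketch below produces the same first-k values with a plain full sort. It is an O(n log n) illustrative reference, not Pulsar's implementation, which uses the quickselect-style three-way partition above to avoid sorting the whole list.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class TopKSketch {
    // sort the whole list descending by value; the first k values are what
    // the first k positions of partitionSort are expected to agree with
    static <K, V extends Comparable<V>> List<Map.Entry<K, V>> topK(Map<K, V> source, int k) {
        List<Map.Entry<K, V>> entries = new ArrayList<>(source.entrySet());
        entries.sort((a, b) -> b.getValue().compareTo(a.getValue()));
        return entries.subList(0, Math.min(k, entries.size()));
    }

    public static void main(String[] args) {
        Map<String, Integer> load = Map.of("bundle-a", 7, "bundle-b", 3, "bundle-c", 9);
        System.out.println(topK(load, 2)); // [bundle-c=9, bundle-a=7]
    }
}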
@Get(uri = "icons")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Plugins"}, summary = "Get plugins icons")
public MutableHttpResponse<Map<String, PluginIcon>> icons() {
    Map<String, PluginIcon> icons = pluginRegistry.plugins()
        .stream()
        .flatMap(plugin -> Stream.of(
                plugin.getTasks().stream(),
                plugin.getTriggers().stream(),
                plugin.getConditions().stream(),
                plugin.getTaskRunners().stream()
            )
            .flatMap(i -> i)
            .map(e -> new AbstractMap.SimpleEntry<>(
                e.getName(),
                new PluginIcon(
                    e.getSimpleName(),
                    plugin.icon(e),
                    FlowableTask.class.isAssignableFrom(e)
                )
            ))
        )
        .filter(entry -> entry.getKey() != null)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a1, a2) -> a1));

    // add aliases
    Map<String, PluginIcon> aliasIcons = pluginRegistry.plugins().stream()
        .flatMap(plugin -> plugin.getAliases().values().stream().map(e -> new AbstractMap.SimpleEntry<>(
            e.getKey(),
            new PluginIcon(
                e.getKey().substring(e.getKey().lastIndexOf('.') + 1),
                plugin.icon(e.getValue()),
                FlowableTask.class.isAssignableFrom(e.getValue())
            ))))
        .filter(entry -> entry.getKey() != null)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a1, a2) -> a1));
    icons.putAll(aliasIcons);

    return HttpResponse.ok(icons).header(HttpHeaders.CACHE_CONTROL, CACHE_DIRECTIVE);
}
@Test
void icons() throws URISyntaxException {
    Helpers.runApplicationContext((applicationContext, embeddedServer) -> {
        ReactorHttpClient client = ReactorHttpClient.create(embeddedServer.getURL());

        Map<String, PluginIcon> list = client.toBlocking().retrieve(
            HttpRequest.GET("/api/v1/plugins/icons"),
            Argument.mapOf(String.class, PluginIcon.class)
        );

        assertThat(list.entrySet().stream().filter(e -> e.getKey().equals(Log.class.getName())).findFirst().orElseThrow().getValue().getIcon(), is(notNullValue()));
        // test an alias
        assertThat(list.entrySet().stream().filter(e -> e.getKey().equals("io.kestra.core.tasks.log.Log")).findFirst().orElseThrow().getValue().getIcon(), is(notNullValue()));
    });
}
@Override public <T> T convert(DataTable dataTable, Type type) { return convert(dataTable, type, false); }
@Test void convert_to_map_of_string_to_map() { DataTable table = parse("", "| | lat | lon |", "| KMSY | 29.993333 | -90.258056 |", "| KSFO | 37.618889 | -122.375 |", "| KSEA | 47.448889 | -122.309444 |", "| KJFK | 40.639722 | -73.778889 |"); Map<String, Map<String, String>> expected = new HashMap<String, Map<String, String>>() { { put("KMSY", new HashMap<String, String>() { { put("lat", "29.993333"); put("lon", "-90.258056"); } }); put("KSFO", new HashMap<String, String>() { { put("lat", "37.618889"); put("lon", "-122.375"); } }); put("KSEA", new HashMap<String, String>() { { put("lat", "47.448889"); put("lon", "-122.309444"); } }); put("KJFK", new HashMap<String, String>() { { put("lat", "40.639722"); put("lon", "-73.778889"); } }); } }; assertEquals(expected, converter.convert(table, MAP_OF_STRING_TO_MAP)); }
@Override public void execute(String commandName, BufferedReader reader, BufferedWriter writer) throws Py4JException, IOException { char subCommand = safeReadLine(reader).charAt(0); String returnCommand = null; if (subCommand == ARRAY_GET_SUB_COMMAND_NAME) { returnCommand = getArray(reader); } else if (subCommand == ARRAY_SET_SUB_COMMAND_NAME) { returnCommand = setArray(reader); } else if (subCommand == ARRAY_SLICE_SUB_COMMAND_NAME) { returnCommand = sliceArray(reader); } else if (subCommand == ARRAY_LEN_SUB_COMMAND_NAME) { returnCommand = lenArray(reader); } else if (subCommand == ARRAY_CREATE_SUB_COMMAND_NAME) { returnCommand = createArray(reader); } else { returnCommand = Protocol.getOutputErrorCommand("Unknown Array SubCommand Name: " + subCommand); } logger.finest("Returning command: " + returnCommand); writer.write(returnCommand); writer.flush(); }
@Test public void testGet() { String inputCommand = ArrayCommand.ARRAY_GET_SUB_COMMAND_NAME + "\n" + target + "\ni1\ne\n"; try { command.execute("a", new BufferedReader(new StringReader(inputCommand)), writer); assertEquals("!ys111\n", sWriter.toString()); } catch (Exception e) { e.printStackTrace(); fail(); } }
public CompletableFuture<Map<ExecutionAttemptID, Collection<ThreadInfoSample>>> requestThreadInfoSamples( Map<Long, ExecutionAttemptID> threads, final ThreadInfoSamplesRequest requestParams) { checkNotNull(threads, "threads must not be null"); checkNotNull(requestParams, "requestParams must not be null"); CompletableFuture<Map<ExecutionAttemptID, Collection<ThreadInfoSample>>> resultFuture = new CompletableFuture<>(); scheduledExecutor.execute( () -> requestThreadInfoSamples( threads, requestParams.getNumSamples(), requestParams.getDelayBetweenSamples(), requestParams.getMaxStackTraceDepth(), CollectionUtil.newHashMapWithExpectedSize(threads.size()), resultFuture)); return resultFuture; }
@Test void testSampleTaskThreadInfo() throws Exception { Set<IdleTestTask> tasks = new HashSet<>(); executeWithTerminationGuarantee( () -> { tasks.add(new IdleTestTask()); tasks.add(new IdleTestTask()); Thread.sleep(2000); Map<Long, ExecutionAttemptID> threads = collectExecutionAttempts(tasks); final Map<ExecutionAttemptID, Collection<ThreadInfoSample>> threadInfoSamples = threadInfoSampleService .requestThreadInfoSamples(threads, requestParams) .get(); int count = 0; for (Collection<ThreadInfoSample> samples : threadInfoSamples.values()) { for (ThreadInfoSample sample : samples) { count++; StackTraceElement[] traces = sample.getStackTrace(); assertThat(traces).hasSizeLessThanOrEqualTo(MAX_STACK_TRACK_DEPTH); } } assertThat(count).isEqualTo(NUMBER_OF_SAMPLES * 2); }, tasks); }
public static Status unblock( final UnsafeBuffer logMetaDataBuffer, final UnsafeBuffer termBuffer, final int blockedOffset, final int tailOffset, final int termId) { Status status = NO_ACTION; int frameLength = frameLengthVolatile(termBuffer, blockedOffset); if (frameLength < 0) { resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength); status = UNBLOCKED; } else if (0 == frameLength) { int currentOffset = blockedOffset + FRAME_ALIGNMENT; while (currentOffset < tailOffset) { frameLength = frameLengthVolatile(termBuffer, currentOffset); if (frameLength != 0) { if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset)) { final int length = currentOffset - blockedOffset; resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length); status = UNBLOCKED; } break; } currentOffset += FRAME_ALIGNMENT; } if (currentOffset == termBuffer.capacity()) { if (0 == frameLengthVolatile(termBuffer, blockedOffset)) { final int length = currentOffset - blockedOffset; resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length); status = UNBLOCKED_TO_END; } } } return status; }
@Test void shouldTakeNoActionIfMessageNonCommittedAfterScan() { final int messageLength = HEADER_LENGTH * 4; final int termOffset = 0; final int tailOffset = messageLength * 2; when(mockTermBuffer.getIntVolatile(termOffset)) .thenReturn(0) .thenReturn(-messageLength); when(mockTermBuffer.getIntVolatile(messageLength)) .thenReturn(messageLength); assertEquals( NO_ACTION, TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID)); }
public Schema getSchema() { return context.getSchema(); }
@Test public void testNestedSchema() { ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(Nested.getDescriptor()); Schema schema = schemaProvider.getSchema(); assertEquals(NESTED_SCHEMA, schema); }
public static DataSchema avroToDataSchema(String avroSchemaInJson, AvroToDataSchemaTranslationOptions options)
    throws IllegalArgumentException {
  ValidationOptions validationOptions = SchemaParser.getDefaultSchemaParserValidationOptions();
  validationOptions.setAvroUnionMode(true);

  SchemaParserFactory parserFactory = SchemaParserFactory.instance(validationOptions);
  DataSchemaResolver resolver = getResolver(parserFactory, options);
  PegasusSchemaParser parser = parserFactory.create(resolver);

  parser.parse(avroSchemaInJson);
  if (parser.hasError()) {
    throw new IllegalArgumentException(parser.errorMessage());
  }
  assert(parser.topLevelDataSchemas().size() == 1);
  DataSchema dataSchema = parser.topLevelDataSchemas().get(0);
  DataSchema resultDataSchema = null;

  AvroToDataSchemaTranslationMode translationMode = options.getTranslationMode();
  if (translationMode == AvroToDataSchemaTranslationMode.RETURN_EMBEDDED_SCHEMA ||
      translationMode == AvroToDataSchemaTranslationMode.VERIFY_EMBEDDED_SCHEMA) {
    // check for embedded schema
    Object dataProperty = dataSchema.getProperties().get(SchemaTranslator.DATA_PROPERTY);
    if (dataProperty != null && dataProperty.getClass() == DataMap.class) {
      Object schemaProperty = ((DataMap) dataProperty).get(SchemaTranslator.SCHEMA_PROPERTY);
      if (schemaProperty.getClass() == DataMap.class) {
        SchemaParser embeddedSchemaParser = SchemaParserFactory.instance().create(null);
        embeddedSchemaParser.parse(Arrays.asList(schemaProperty));
        if (embeddedSchemaParser.hasError()) {
          throw new IllegalArgumentException("Embedded schema is invalid\n" + embeddedSchemaParser.errorMessage());
        }
        assert(embeddedSchemaParser.topLevelDataSchemas().size() == 1);
        resultDataSchema = embeddedSchemaParser.topLevelDataSchemas().get(0);

        if (translationMode == AvroToDataSchemaTranslationMode.VERIFY_EMBEDDED_SCHEMA) {
          // additional verification to make sure that embedded schema translates to Avro schema
          DataToAvroSchemaTranslationOptions dataToAvroSchemaOptions = new DataToAvroSchemaTranslationOptions();
          Object optionalDefaultModeProperty = ((DataMap) dataProperty).get(SchemaTranslator.OPTIONAL_DEFAULT_MODE_PROPERTY);
          dataToAvroSchemaOptions.setOptionalDefaultMode(OptionalDefaultMode.valueOf(optionalDefaultModeProperty.toString()));
          Schema avroSchemaFromEmbedded = dataToAvroSchema(resultDataSchema, dataToAvroSchemaOptions);
          Schema avroSchemaFromJson = AvroCompatibilityHelper.parse(avroSchemaInJson, SchemaParseConfiguration.STRICT, null).getMainSchema();
          Object embededSchemaPropertyVal = avroSchemaFromJson.getObjectProp(DATA_PROPERTY);
          if (embededSchemaPropertyVal != null) {
            avroSchemaFromEmbedded.addProp(DATA_PROPERTY, embededSchemaPropertyVal);
          }
          if (!avroSchemaFromEmbedded.equals(avroSchemaFromJson)) {
            throw new IllegalArgumentException("Embedded schema does not translate to input Avro schema: " + avroSchemaInJson);
          }
        }
      }
    }
  }
  if (resultDataSchema == null) {
    // translationMode == TRANSLATE or no embedded schema
    DataSchemaTraverse traverse = new DataSchemaTraverse();
    traverse.traverse(dataSchema, AvroToDataSchemaConvertCallback.INSTANCE);
    // convert default values
    traverse.traverse(dataSchema, DefaultAvroToDataConvertCallback.INSTANCE);
    // make sure it can round-trip
    String dataSchemaJson = dataSchema.toString();
    resultDataSchema = DataTemplateUtil.parseSchema(dataSchemaJson);
  }
  return resultDataSchema;
}
@Test public void testAvroUnionModeChaining() throws IOException { String expectedSchema = "{ " + " \"type\" : \"record\", " + " \"name\" : \"A\", " + " \"namespace\" : \"com.linkedin.pegasus.test\", " + " \"fields\" : [ " + " { " + " \"name\" : \"someBorC\", " + " \"type\" : [ " + " { " + " \"type\" : \"record\", " + " \"name\" : \"B\", " + " \"fields\" : [ " + " { " + " \"name\" : \"someAorC\", " + " \"type\" : [ " + " \"A\", " + " { " + " \"type\" : \"record\", " + " \"name\" : \"C\", " + " \"fields\" : [ " + " { " + " \"name\" : \"something\", " + " \"type\" : \"int\", " + " \"optional\" : true, " + " \"default\" : 42" + " } " + " ] " + " } " + " ] " + " } " + " ] " + " }, " + " \"C\" " + " ] " + " } " + " ]" + "}"; String avroRootUrl = getClass().getClassLoader().getResource("avro").getFile(); String avroRootDir = new File(avroRootUrl).getAbsolutePath(); String avroFilePath = avroRootDir + FS + "com" + FS + "linkedin" + FS + "pegasus" + FS + "test" + FS + "A.avsc"; File avroFile = new File(avroFilePath); String schema = readFile(avroFile); AvroToDataSchemaTranslationOptions options = new AvroToDataSchemaTranslationOptions(AvroToDataSchemaTranslationMode.TRANSLATE).setFileResolutionPaths(avroRootDir); DataSchema pdscSchema = SchemaTranslator.avroToDataSchema(schema, options); DataMap actual = TestUtil.dataMapFromString(pdscSchema.toString()); DataMap expected = TestUtil.dataMapFromString(expectedSchema); assertEquals(actual, expected); }
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception { return fromXmlPartial(toInputStream(partial, UTF_8), o); }
@Test void shouldLoadPartialConfigWithPipeline() throws Exception { String partialConfigWithPipeline = (""" <cruise schemaVersion='%d'> <pipelines group="first"> <pipeline name="pipeline"> <materials> <hg url="/hgrepo"/> </materials> <stage name="mingle"> <jobs> <job name="functional"> <artifacts> <log src="artifact1.xml" dest="cruise-output" /> </artifacts> </job> </jobs> </stage> </pipeline> </pipelines> </cruise> """).formatted(CONFIG_SCHEMA_VERSION); PartialConfig partialConfig = xmlLoader.fromXmlPartial(partialConfigWithPipeline, PartialConfig.class); assertThat(partialConfig.getGroups().size()).isEqualTo(1); PipelineConfig pipeline = partialConfig.getGroups().get(0).getPipelines().get(0); assertThat(pipeline.name()).isEqualTo(new CaseInsensitiveString("pipeline")); assertThat(pipeline.size()).isEqualTo(1); assertThat(pipeline.findBy(new CaseInsensitiveString("mingle")).jobConfigByInstanceName("functional", true)).isNotNull(); }
public static void main(String[] args) {
  if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
    System.out.println(usage);
    return;
  }

  // Copy args, because CommandFormat mutates the list.
  List<String> argsList = new ArrayList<String>(Arrays.asList(args));
  CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
  try {
    cf.parse(argsList);
  } catch (UnknownOptionException e) {
    terminate(1, "unrecognized option");
    return;
  }

  String classPath = System.getProperty("java.class.path");

  if (cf.getOpt("-glob")) {
    // The classpath returned from the property has been globbed already.
    System.out.println(classPath);
  } else if (cf.getOpt("-jar")) {
    if (argsList.isEmpty() || argsList.get(0) == null || argsList.get(0).isEmpty()) {
      terminate(1, "-jar option requires path of jar file to write");
      return;
    }

    // Write the classpath into the manifest of a temporary jar file.
    Path workingDir = new Path(System.getProperty("user.dir"));
    final String tmpJarPath;
    try {
      tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir, System.getenv())[0];
    } catch (IOException e) {
      terminate(1, "I/O error creating jar: " + e.getMessage());
      return;
    }

    // Rename the temporary file to its final location.
    String jarPath = argsList.get(0);
    try {
      FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
    } catch (IOException e) {
      terminate(1, "I/O error renaming jar temporary file to path: " + e.getMessage());
      return;
    }
  }
}
@Test public void testJar() throws IOException { File file = new File(TEST_DIR, "classpath.jar"); Classpath.main(new String[] { "--jar", file.getAbsolutePath() }); assertTrue(stdout.toByteArray().length == 0); assertTrue(stderr.toByteArray().length == 0); assertTrue(file.exists()); assertJar(file); }
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() != ChatMessageType.SPAM) { return; } var message = event.getMessage(); if (FISHING_CATCH_REGEX.matcher(message).find()) { session.setLastFishCaught(Instant.now()); spotOverlay.setHidden(false); fishingSpotMinimapOverlay.setHidden(false); } if (message.equals("A flying fish jumps up and eats some of your minnows!")) { notifier.notify(config.flyingFishNotification(), "A flying fish is eating your minnows!"); } }
@Test public void testCormorant() { ChatMessage chatMessage = new ChatMessage(); chatMessage.setType(ChatMessageType.SPAM); chatMessage.setMessage("Your cormorant returns with its catch."); fishingPlugin.onChatMessage(chatMessage); assertNotNull(fishingPlugin.getSession().getLastFishCaught()); }
@Override public int compare( Object data1, Object data2 ) throws KettleValueException { InetAddress inet1 = getInternetAddress( data1 ); InetAddress inet2 = getInternetAddress( data2 ); int cmp = 0; if ( inet1 == null ) { if ( inet2 == null ) { cmp = 0; } else { cmp = -1; } } else if ( inet2 == null ) { cmp = 1; } else { BigDecimal bd1 = getBigNumber( inet1 ); BigDecimal bd2 = getBigNumber( inet2 ); cmp = bd1.compareTo( bd2 ); } if ( isSortedDescending() ) { return -cmp; } else { return cmp; } }
@Test public void testCompare() throws UnknownHostException, KettleValueException { ValueMetaInternetAddress vm = new ValueMetaInternetAddress(); InetAddress smaller = InetAddress.getByName( "127.0.0.1" ); InetAddress larger = InetAddress.getByName( "127.0.1.1" ); assertTrue( vm.isSortedAscending() ); assertFalse( vm.isSortedDescending() ); assertEquals( 0, vm.compare( null, null ) ); assertEquals( -1, vm.compare( null, smaller ) ); assertEquals( 1, vm.compare( smaller, null ) ); assertEquals( 0, vm.compare( smaller, smaller ) ); assertEquals( -1, vm.compare( smaller, larger ) ); assertEquals( 1, vm.compare( larger, smaller ) ); vm.setSortedDescending( true ); assertFalse( vm.isSortedAscending() ); assertTrue( vm.isSortedDescending() ); assertEquals( 0, vm.compare( null, null ) ); assertEquals( 1, vm.compare( null, smaller ) ); assertEquals( -1, vm.compare( smaller, null ) ); assertEquals( 0, vm.compare( smaller, smaller ) ); assertEquals( 1, vm.compare( smaller, larger ) ); assertEquals( -1, vm.compare( larger, smaller ) ); }
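The compare() above works by converting each address to a number first. A reasonable guess at that conversion (the actual Kettle getBigNumber() may differ, so treat this as an assumption) is to read the raw address bytes as an unsigned big-endian integer, which is why 127.0.1.1 orders after 127.0.0.1 in the test:

import java.math.BigInteger;
import java.net.InetAddress;

public class InetCompareSketch {
    // interpret the address bytes as an unsigned big-endian integer
    static BigInteger toNumber(InetAddress addr) {
        return new BigInteger(1, addr.getAddress());
    }

    public static void main(String[] args) throws Exception {
        InetAddress smaller = InetAddress.getByName("127.0.0.1");
        InetAddress larger = InetAddress.getByName("127.0.1.1");
        System.out.println(toNumber(smaller).compareTo(toNumber(larger))); // -1
    }
}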
protected String getTag(ILoggingEvent event) {
    // format tag based on encoder layout; truncate if max length
    // exceeded (only necessary for isLoggable(), which throws
    // IllegalArgumentException)
    String tag = (this.tagEncoder != null) ? this.tagEncoder.getLayout().doLayout(event) : event.getLoggerName();
    if (checkLoggable && (tag.length() > MAX_TAG_LENGTH)) {
        tag = tag.substring(0, MAX_TAG_LENGTH - 1) + "*";
    }
    return tag;
}
@Test public void longTagAllowedIfNotCheckLoggable() { LoggingEvent event = new LoggingEvent(); event.setMessage(TAG); boolean checkLoggable = false; setTagPattern(TAG, checkLoggable); String actualTag = logcatAppender.getTag(event); assertThat(TRUNCATED_TAG, is(not(actualTag))); assertThat(TAG, is(actualTag)); }
@Override public boolean readyToExecute() throws UserException { if (checkReadyToExecuteFast()) { return true; } KafkaRoutineLoadJob kafkaRoutineLoadJob = (KafkaRoutineLoadJob) job; Map<Integer, Long> latestOffsets = KafkaUtil.getLatestOffsets(kafkaRoutineLoadJob.getBrokerList(), kafkaRoutineLoadJob.getTopic(), ImmutableMap.copyOf(kafkaRoutineLoadJob.getConvertedCustomProperties()), new ArrayList<>(partitionIdToOffset.keySet()), warehouseId); for (Map.Entry<Integer, Long> entry : latestOffsets.entrySet()) { kafkaRoutineLoadJob.setPartitionOffset(entry.getKey(), entry.getValue()); } for (Map.Entry<Integer, Long> entry : partitionIdToOffset.entrySet()) { int partitionId = entry.getKey(); Long latestOffset = latestOffsets.get(partitionId); Long consumeOffset = entry.getValue(); if (latestOffset != null) { if (latestOffset > consumeOffset) { this.latestPartOffset = latestOffsets; return true; } else if (latestOffset < consumeOffset) { throw new RoutineLoadPauseException( ERR_ROUTINE_LOAD_OFFSET_INVALID.formatErrorMsg(consumeOffset, latestOffset, partitionId)); } } } return false; }
@Test
public void testReadyToExecute(@Injectable KafkaRoutineLoadJob kafkaRoutineLoadJob) throws Exception {
    new MockUp<RoutineLoadMgr>() {
        @Mock
        public RoutineLoadJob getJob(long jobId) {
            return kafkaRoutineLoadJob;
        }
    };
    new MockUp<KafkaUtil>() {
        @Mock
        public Map<Integer, Long> getLatestOffsets(String brokerList, String topic,
                                                   ImmutableMap<String, String> properties,
                                                   List<Integer> partitions,
                                                   long warehouseId) throws UserException {
            Map<Integer, Long> offsets = Maps.newHashMap();
            offsets.put(0, 100L);
            offsets.put(1, 100L);
            return offsets;
        }
    };

    Map<Integer, Long> offset1 = Maps.newHashMap();
    offset1.put(0, 99L);
    KafkaTaskInfo kafkaTaskInfo1 = new KafkaTaskInfo(UUID.randomUUID(), kafkaRoutineLoadJob,
            System.currentTimeMillis(), System.currentTimeMillis(), offset1,
            Config.routine_load_task_timeout_second * 1000);
    Assert.assertEquals("kafka", kafkaTaskInfo1.dataSourceType());
    Assert.assertTrue(kafkaTaskInfo1.readyToExecute());

    Map<Integer, Long> offset2 = Maps.newHashMap();
    offset2.put(0, 100L);
    KafkaTaskInfo kafkaTaskInfo2 = new KafkaTaskInfo(UUID.randomUUID(), kafkaRoutineLoadJob,
            System.currentTimeMillis(), System.currentTimeMillis(), offset2,
            Config.routine_load_task_timeout_second * 1000);
    Assert.assertFalse(kafkaTaskInfo2.readyToExecute());

    // consume offset > latest offset
    Map<Integer, Long> offset3 = Maps.newHashMap();
    offset3.put(0, 101L);
    KafkaTaskInfo kafkaTaskInfo3 = new KafkaTaskInfo(UUID.randomUUID(), kafkaRoutineLoadJob,
            System.currentTimeMillis(), System.currentTimeMillis(), offset3,
            Config.routine_load_task_timeout_second * 1000);
    ExceptionChecker.expectThrowsWithMsg(RoutineLoadPauseException.class,
            "Consume offset: 101 is greater than the latest offset: 100 in kafka partition: 0. "
                    + "You can modify 'kafka_offsets' property through ALTER ROUTINE LOAD and RESUME the job",
            () -> kafkaTaskInfo3.readyToExecute());
}
@Override public void addDocInfo(final UpstreamInstance instance, final String docInfoJson, final String oldMd5, final Consumer<DocInfo> callback) { if (StringUtils.isEmpty(docInfoJson)) { return; } String newMd5 = DigestUtils.md5DigestAsHex(docInfoJson.getBytes(StandardCharsets.UTF_8)); if (Objects.equals(newMd5, oldMd5)) { return; } DocInfo docInfo = getDocInfo(instance.getClusterName(), docInfoJson); if (Objects.isNull(docInfo) || CollectionUtils.isEmpty(docInfo.getDocModuleList())) { return; } docInfo.setDocMd5(newMd5); List<DocModule> docModules = docInfo.getDocModuleList(); docModules.forEach(docModule -> docModule.getDocItems().forEach(docItem -> { ApiDocRegisterDTO build = ApiDocRegisterDTO.builder() .consume(this.getProduceConsume(docItem.getConsumes())) .produce(this.getProduceConsume(docItem.getProduces())) .httpMethod(this.getHttpMethod(docItem)) .contextPath(docInfo.getContextPath()) .ext(this.buildExtJson(instance, docItem)) .document(JsonUtils.toJson(docItem)) .rpcType(RpcTypeEnum.HTTP.getName()) .version(API_DOC_VERSION) .apiDesc(docItem.getDescription()) .tags(Collections.singletonList(docInfo.getContextPath())) .apiPath(docItem.getName()) .apiSource(ApiSourceEnum.SWAGGER.getValue()) .state(ApiStateEnum.UNPUBLISHED.getState()) .apiOwner("admin") .eventType(EventType.REGISTER) .build(); registerApiDocService.registerApiDocument(build); })); callback.accept(docInfo); }
@Test public void testAddDocInfo() { UpstreamInstance instance = new UpstreamInstance(); instance.setContextPath("/testClusterName"); AtomicBoolean atomicBoolean = new AtomicBoolean(false); docManager.addDocInfo(instance, SwaggerDocParserTest.DOC_INFO_JSON, "", docInfo -> { Assertions.assertEquals(docInfo.getTitle(), "shenyu-examples-http-swagger2 API"); Assertions.assertEquals(docInfo.getClusterName(), "testClusterName"); atomicBoolean.set(true); }); Assertions.assertTrue(atomicBoolean.get()); }
@Override public void replaceNode(ResourceId path, DataNode node) { super.replaceNode(toAbsoluteId(path), node); }
@Test public void testReplaceNode() { view.replaceNode(relIntf, node); assertTrue(ResourceIds.isPrefix(rid, realPath)); }
@Override public boolean isEnabled() { return true; }
@Test public void isEnabled_returnsTrue() { assertThat(underTest.isEnabled()).isTrue(); }
@Override
public List<BlockWorkerInfo> getPreferredWorkers(WorkerClusterView workerClusterView, String fileId, int count)
    throws ResourceExhaustedException {
  if (workerClusterView.size() < count) {
    throw new ResourceExhaustedException(String.format(
        "Not enough workers in the cluster %d workers in the cluster but %d required",
        workerClusterView.size(), count));
  }
  Set<WorkerIdentity> workerIdentities = workerClusterView.workerIds();
  mHashProvider.refresh(workerIdentities);
  List<WorkerIdentity> workers = mHashProvider.getMultiple(fileId, count);
  if (workers.size() != count) {
    throw new ResourceExhaustedException(String.format(
        "Found %d workers from the hash ring but %d required", workers.size(), count));
  }
  ImmutableList.Builder<BlockWorkerInfo> builder = ImmutableList.builder();
  for (WorkerIdentity worker : workers) {
    Optional<WorkerInfo> optionalWorkerInfo = workerClusterView.getWorkerById(worker);
    final WorkerInfo workerInfo;
    if (optionalWorkerInfo.isPresent()) {
      workerInfo = optionalWorkerInfo.get();
    } else {
      // the worker returned by the policy does not exist in the cluster view
      // supplied by the client.
      // this can happen when the membership changes and some callers fail to update
      // to the latest worker cluster view.
      // in this case, just skip this worker
      LOG.debug("Inconsistency between caller's view of cluster and that of "
          + "the consistent hash policy's: worker {} selected by policy does not exist in "
          + "caller's view {}. Skipping this worker.", worker, workerClusterView);
      continue;
    }
    BlockWorkerInfo blockWorkerInfo = new BlockWorkerInfo(
        worker, workerInfo.getAddress(), workerInfo.getCapacityBytes(),
        workerInfo.getUsedBytes(), workerInfo.getState() == WorkerState.LIVE);
    builder.add(blockWorkerInfo);
  }
  List<BlockWorkerInfo> infos = builder.build();
  return infos;
}
@Test
public void workerAddrUpdateWithIdUnchanged() throws Exception {
  ConsistentHashPolicy policy = new ConsistentHashPolicy(mConf);
  List<WorkerInfo> workers = new ArrayList<>();
  workers.add(new WorkerInfo().setIdentity(WorkerIdentityTestUtils.ofLegacyId(1L))
      .setAddress(new WorkerNetAddress().setHost("host1"))
      .setCapacityBytes(0)
      .setUsedBytes(0)
      .setState(WorkerState.LIVE));
  workers.add(new WorkerInfo().setIdentity(WorkerIdentityTestUtils.ofLegacyId(2L))
      .setAddress(new WorkerNetAddress().setHost("host2"))
      .setCapacityBytes(0)
      .setUsedBytes(0)
      .setState(WorkerState.LIVE));
  List<BlockWorkerInfo> selectedWorkers =
      policy.getPreferredWorkers(new WorkerClusterView(workers), "fileId", 2);
  assertEquals("host1", selectedWorkers.stream()
      .filter(w -> w.getIdentity().equals(WorkerIdentityTestUtils.ofLegacyId(1L)))
      .findFirst()
      .get()
      .getNetAddress()
      .getHost());

  // now the worker 1 has migrated to host 3
  workers.set(0, new WorkerInfo().setIdentity(WorkerIdentityTestUtils.ofLegacyId(1L))
      .setAddress(new WorkerNetAddress().setHost("host3"))
      .setCapacityBytes(0)
      .setUsedBytes(0)
      .setState(WorkerState.LIVE));
  List<BlockWorkerInfo> updatedWorkers =
      policy.getPreferredWorkers(new WorkerClusterView(workers), "fileId", 2);
  assertEquals(
      selectedWorkers.stream().map(BlockWorkerInfo::getIdentity).collect(Collectors.toList()),
      updatedWorkers.stream().map(BlockWorkerInfo::getIdentity).collect(Collectors.toList()));
  assertEquals("host3", updatedWorkers.stream()
      .filter(w -> w.getIdentity().equals(WorkerIdentityTestUtils.ofLegacyId(1L)))
      .findFirst()
      .get()
      .getNetAddress()
      .getHost());
}
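The policy above delegates the choice of workers to a consistent-hash provider keyed by fileId, which is why the selected worker identities stay stable in the test even after a worker's address changes. The toy ring below illustrates the idea with a TreeMap; it is only a sketch, and Alluxio's actual ConsistentHashProvider uses virtual nodes and different hashing.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;

public class HashRingSketch {
    private final TreeMap<Integer, String> ring = new TreeMap<>();

    void add(String workerId) {
        ring.put(workerId.hashCode(), workerId);
    }

    // walk clockwise from the key's hash until `count` distinct workers are found
    List<String> getMultiple(String key, int count) {
        int want = Math.min(count, ring.size());
        List<String> picked = new ArrayList<>();
        if (want == 0) {
            return picked;
        }
        Integer start = ring.ceilingKey(key.hashCode());
        if (start == null) {
            start = ring.firstKey();
        }
        Iterator<String> it = ring.tailMap(start, true).values().iterator();
        while (picked.size() < want) {
            if (!it.hasNext()) {
                it = ring.values().iterator(); // wrap around the ring
            }
            String worker = it.next();
            if (!picked.contains(worker)) {
                picked.add(worker);
            }
        }
        return picked;
    }

    public static void main(String[] args) {
        HashRingSketch ring = new HashRingSketch();
        ring.add("worker-1");
        ring.add("worker-2");
        ring.add("worker-3");
        // the same key always maps to the same workers as long as membership is unchanged
        System.out.println(ring.getMultiple("fileId", 2));
    }
}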
@Override public long extractWatermark(IcebergSourceSplit split) { return split.task().files().stream() .map( scanTask -> { Preconditions.checkArgument( scanTask.file().lowerBounds() != null && scanTask.file().lowerBounds().get(eventTimeFieldId) != null, "Missing statistics for column name = %s in file = %s", eventTimeFieldName, eventTimeFieldId, scanTask.file()); return timeUnit.toMillis( Conversions.fromByteBuffer( Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId))); }) .min(Comparator.comparingLong(l -> l)) .get(); }
@TestTemplate
public void testEmptyStatistics() throws IOException {
  assumeThat(columnName).isEqualTo("timestamp_column");

  // Create an extractor for a column we do not have statistics
  ColumnStatsWatermarkExtractor extractor = new ColumnStatsWatermarkExtractor(10, "missing_field");
  assertThatThrownBy(() -> extractor.extractWatermark(split(0)))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessageContaining("Missing statistics for column");
}
@Override public PendingSplitsCheckpoint<FileSourceSplit> snapshotState(long checkpointId) throws Exception { final PendingSplitsCheckpoint<FileSourceSplit> checkpoint = PendingSplitsCheckpoint.fromCollectionSnapshot( splitAssigner.remainingSplits(), pathsAlreadyProcessed); LOG.debug("Source Checkpoint is {}", checkpoint); return checkpoint; }
@Test
void testDiscoverSplitWhenNoReaderRegistered() throws Exception {
    final TestingFileEnumerator fileEnumerator = new TestingFileEnumerator();
    final TestingSplitEnumeratorContext<FileSourceSplit> context = new TestingSplitEnumeratorContext<>(4);
    final ContinuousFileSplitEnumerator enumerator = createEnumerator(fileEnumerator, context);

    // make one split available and trigger the periodic discovery
    final FileSourceSplit split = createRandomSplit();
    fileEnumerator.addSplits(split);
    context.triggerAllActions();

    assertThat(enumerator.snapshotState(1L).getSplits()).contains(split);
}
long getNodeResultLimit(int ownedPartitions) { return isQueryResultLimitEnabled ? (long) ceil(resultLimitPerPartition * ownedPartitions) : Long.MAX_VALUE; }
@Test public void testNodeResultLimitSinglePartition() { initMocksWithConfiguration(200000, 3); assertEquals(849, limiter.getNodeResultLimit(1)); }
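The expected value 849 for a single owned partition follows from the per-partition limit formula, assuming (my assumption, not stated in these excerpts) Hazelcast's default of 271 partitions and a 1.15 head-room factor applied to the configured limit of 200000. A small sketch of that arithmetic:

public class ResultLimitSketch {
    public static void main(String[] args) {
        // assumed defaults: 271 partitions, 1.15 head-room factor (not taken from this document)
        double resultLimitPerPartition = 200_000 * 1.15 / 271; // roughly 848.7
        long limitForOnePartition = (long) Math.ceil(resultLimitPerPartition * 1);
        System.out.println(limitForOnePartition); // 849
    }
}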
@Override public UserCodeNamespaceConfig setName(@Nonnull String name) { Objects.requireNonNull(name, "Namespace name cannot be null"); this.name = name; return this; }
@Test (expected = NullPointerException.class) public void testNullName() { userCodeNamespaceConfig.setName(null); }
public void setPrefix(String prefix) { this.prefix = prefix; }
@Test public void customMetricsPrefix() throws Exception { iqtp.setPrefix(PREFIX); iqtp.start(); assertThat(metricRegistry.getNames()) .overridingErrorMessage("Custom metrics prefix doesn't match") .allSatisfy(name -> assertThat(name).startsWith(PREFIX)); iqtp.stop(); assertThat(metricRegistry.getMetrics()) .overridingErrorMessage("The default metrics prefix was changed") .isEmpty(); }
@Udf public <T> boolean contains( @UdfParameter final List<T> array, @UdfParameter final T val ) { return array != null && array.contains(val); }
@Test public void shouldFindDoublesInList() { assertTrue(udf.contains(Arrays.asList(1.0, 2.0, 3.0), 2.0)); assertFalse(udf.contains(Arrays.asList(1.0, 2.0, 3.0), 4.0)); assertFalse(udf.contains(Arrays.asList(1.0, 2.0, 3.0), "1")); assertFalse(udf.contains(Arrays.asList(1.0, 2.0, 3.0), "aaa")); }
public boolean isReusableTable() { return isReusableTable; }
@Test public void testIsReusableTable() { assertTrue(CONFIGURATION_1.isReusableTable()); assertFalse(CONFIGURATION_2.isReusableTable()); }
public void addChild(Entry entry) { childEntries.add(entry); entry.setParent(this); }
@Test public void findsAncestorComponent(){ Component component = mock(Component.class); Entry structureWithEntry = new Entry(); new EntryAccessor().setComponent(structureWithEntry, component); final Entry level1child = new Entry(); structureWithEntry.addChild(level1child); final Entry level2child = new Entry(); level1child.addChild(level2child); assertThat(new EntryAccessor().getAncestorComponent(level2child), equalTo(component)); }
@Override
public void registerStore(final StateStore store,
                          final StateRestoreCallback stateRestoreCallback,
                          final CommitCallback commitCallback) {
    final String storeName = store.name();

    // TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always
    // fail-crash; in this case we would not need to immediately close the state store before throwing
    if (CHECKPOINT_FILE_NAME.equals(storeName)) {
        store.close();
        throw new IllegalArgumentException(format("%sIllegal store name: %s, which collides with the pre-defined " +
            "checkpoint file name", logPrefix, storeName));
    }

    if (stores.containsKey(storeName)) {
        store.close();
        throw new IllegalArgumentException(format("%sStore %s has already been registered.", logPrefix, storeName));
    }

    if (stateRestoreCallback instanceof StateRestoreListener) {
        log.warn("The registered state restore callback is also implementing the state restore listener interface, " +
            "which is not expected and would be ignored");
    }

    final StateStoreMetadata storeMetadata = isLoggingEnabled(storeName) ?
        new StateStoreMetadata(
            store,
            getStorePartition(storeName),
            stateRestoreCallback,
            commitCallback,
            converterForStore(store)) :
        new StateStoreMetadata(store, commitCallback);

    // register the store first, so that if later an exception is thrown then eventually while we call `close`
    // on the state manager this state store would be closed as well
    stores.put(storeName, storeMetadata);

    if (!stateUpdaterEnabled) {
        maybeRegisterStoreWithChangelogReader(storeName);
    }

    log.debug("Registered state store {} to its state manager", storeName);
}
@Test public void shouldThrowIllegalArgumentExceptionOnRegisterWhenStoreHasAlreadyBeenRegistered() { final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE); stateManager.registerStore(persistentStore, persistentStore.stateRestoreCallback, null); assertThrows(IllegalArgumentException.class, () -> stateManager.registerStore(persistentStore, persistentStore.stateRestoreCallback, null)); }