Dataset columns: focal_method (string, lengths 13 to 60.9k) and test_case (string, lengths 25 to 109k).
public IssueDto addImpact(ImpactDto impact) {
    impacts.stream()
        .filter(impactDto -> impactDto.getSoftwareQuality() == impact.getSoftwareQuality())
        .findFirst()
        .ifPresent(impactDto -> {
            throw new IllegalStateException(format("Impact already defined on issue for Software Quality [%s]", impact.getSoftwareQuality()));
        });
    impacts.add(impact);
    return this;
}
@Test
void addImpact_whenSoftwareQualityAlreadyDefined_shouldThrowISE() {
    IssueDto dto = new IssueDto();
    dto.addImpact(newImpactDto(MAINTAINABILITY, LOW));
    ImpactDto duplicatedImpact = newImpactDto(MAINTAINABILITY, HIGH);
    assertThatThrownBy(() -> dto.addImpact(duplicatedImpact))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Impact already defined on issue for Software Quality [MAINTAINABILITY]");
}
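The test above covers only the duplicate-quality path. A minimal happy-path sketch, assuming the same newImpactDto test helper and a second software-quality constant (RELIABILITY) for illustration: impacts for distinct software qualities are accepted, because the stream filter only matches an existing impact with the same quality.

    IssueDto dto = new IssueDto();
    dto.addImpact(newImpactDto(MAINTAINABILITY, LOW))
       .addImpact(newImpactDto(RELIABILITY, HIGH)); // different quality, so no IllegalStateException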
public List<FileStoreInfo> listFileStore() throws DdlException {
    try {
        return client.listFileStore(serviceId);
    } catch (StarClientException e) {
        throw new DdlException("Failed to list file store, error: " + e.getMessage());
    }
}
@Test
public void testListFileStore() throws StarClientException, DdlException {
    S3FileStoreInfo s3FsInfo = S3FileStoreInfo.newBuilder()
            .setRegion("region").setEndpoint("endpoint").build();
    FileStoreInfo fsInfo = FileStoreInfo.newBuilder().setFsKey("test-fskey")
            .setFsName("test-fsname").setFsType(FileStoreType.S3).setS3FsInfo(s3FsInfo).build();
    new Expectations() {
        {
            client.listFileStore("1");
            result = new ArrayList<>(Arrays.asList(fsInfo));
            minTimes = 0;

            client.listFileStore("2");
            result = new StarClientException(StatusCode.INVALID_ARGUMENT, "mocked exception");
        }
    };

    Deencapsulation.setField(starosAgent, "serviceId", "1");
    Assert.assertEquals(1, starosAgent.listFileStore().size());
    Assert.assertEquals("test-fskey", starosAgent.listFileStore().get(0).getFsKey());

    Deencapsulation.setField(starosAgent, "serviceId", "2");
    ExceptionChecker.expectThrowsWithMsg(DdlException.class,
            "Failed to list file store, error: INVALID_ARGUMENT:mocked exception",
            () -> starosAgent.listFileStore());
}
public static Read read() {
    return new AutoValue_RabbitMqIO_Read.Builder()
        .setQueueDeclare(false)
        .setExchangeDeclare(false)
        .setMaxReadTime(null)
        .setMaxNumRecords(Long.MAX_VALUE)
        .setUseCorrelationId(false)
        .build();
}
@Test
public void testDeclareIncompatibleExchangeFails() throws Exception {
    RabbitMqIO.Read read = RabbitMqIO.read().withExchange("IncompatibleExchange", "direct", "unused");
    try {
        doExchangeTest(new ExchangeTestPlan(read, 1), true);
        fail("Expected to have failed to declare an incompatible exchange");
    } catch (Exception e) {
        Throwable cause = Throwables.getRootCause(e);
        if (cause instanceof ShutdownSignalException) {
            ShutdownSignalException sse = (ShutdownSignalException) cause;
            Method reason = sse.getReason();
            if (reason instanceof com.rabbitmq.client.AMQP.Connection.Close) {
                com.rabbitmq.client.AMQP.Connection.Close close = (com.rabbitmq.client.AMQP.Connection.Close) reason;
                assertEquals("Expected failure is 530: not-allowed", 530, close.getReplyCode());
            } else {
                fail("Unexpected ShutdownSignalException reason. Expected Connection.Close. Got: " + reason);
            }
        } else {
            fail("Expected to fail with ShutdownSignalException. Instead failed with " + cause);
        }
    }
}
public static UriTemplate create(String template, Charset charset) {
    return new UriTemplate(template, true, charset);
}
@Test
void nullTemplate() {
    assertThrows(IllegalArgumentException.class, () -> UriTemplate.create(null, Util.UTF_8));
}
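For contrast with the null case, a small sketch of successful creation; the template path here is hypothetical, chosen only for illustration, and create simply delegates to the constructor:

    UriTemplate template = UriTemplate.create("/repos/{owner}/{repo}", Util.UTF_8); // hypothetical template
    assertNotNull(template);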
@Deprecated
public PassiveCompletableFuture<TaskExecutionState> deployLocalTask(@NonNull TaskGroup taskGroup) {
    return deployLocalTask(taskGroup, Thread.currentThread().getContextClassLoader(), emptyList());
}
@Test
public void testCriticalCallTime() throws InterruptedException {
    AtomicBoolean stopMark = new AtomicBoolean(false);
    CopyOnWriteArrayList<Long> stopTime = new CopyOnWriteArrayList<>();
    int count = 100;
    // Must be the same as the timer timeout
    int callTime = 50;

    // Create tasks with critical delays
    List<Task> criticalTask = buildStopTestTask(callTime, count, stopMark, stopTime);

    TaskExecutionService taskExecutionService = server.getTaskExecutionService();

    CompletableFuture<TaskExecutionState> taskCts =
            taskExecutionService.deployLocalTask(
                    new TaskGroupDefaultImpl(
                            new TaskGroupLocation(jobId, pipeLineId, FLAKE_ID_GENERATOR.newId()),
                            "t1",
                            Lists.newArrayList(criticalTask)));

    // Run it for a while
    Thread.sleep(taskRunTime);

    // Stop the task
    stopMark.set(true);

    // Check that all tasks end correctly
    await().atMost(count * callTime, TimeUnit.MILLISECONDS)
            .untilAsserted(() -> assertEquals(FINISHED, taskCts.get().getExecutionState()));

    // Check that each Task is only done once
    assertEquals(count, stopTime.size());
}
public boolean isVersionActive(final ClientPlatform platform, final Semver version) {
    final Map<Semver, ClientRelease> releasesByVersion = clientReleasesByPlatform.get(platform);

    return releasesByVersion != null
        && releasesByVersion.containsKey(version)
        && releasesByVersion.get(version).expiration().isAfter(clock.instant());
}
@Test
void isVersionActive() {
    final Semver iosVersion = new Semver("1.2.3");
    final Semver desktopVersion = new Semver("4.5.6");
    when(clientReleases.getClientReleases()).thenReturn(Map.of(
            ClientPlatform.DESKTOP,
            Map.of(desktopVersion, new ClientRelease(ClientPlatform.DESKTOP, desktopVersion,
                    clock.instant(), clock.instant().plus(Duration.ofDays(90)))),
            ClientPlatform.IOS,
            Map.of(iosVersion, new ClientRelease(ClientPlatform.IOS, iosVersion,
                    clock.instant().minus(Duration.ofDays(91)), clock.instant().minus(Duration.ofDays(1))))
    ));

    clientReleaseManager.refreshClientVersions();

    assertTrue(clientReleaseManager.isVersionActive(ClientPlatform.DESKTOP, desktopVersion));
    assertFalse(clientReleaseManager.isVersionActive(ClientPlatform.DESKTOP, iosVersion));
    assertFalse(clientReleaseManager.isVersionActive(ClientPlatform.IOS, iosVersion));
    assertFalse(clientReleaseManager.isVersionActive(ClientPlatform.ANDROID, new Semver("7.8.9")));
}
@Override
public void publish(ScannerReportWriter writer) {
    Optional<String> targetBranch = getTargetBranch();
    if (targetBranch.isPresent()) {
        Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG);
        int count = writeChangedLines(scmConfiguration.provider(), writer, targetBranch.get());
        LOG.debug("SCM reported changed lines for {} {} in the branch", count, ScannerUtils.pluralize("file", count));
        profiler.stopInfo();
    }
}
@Test
public void skip_if_not_pr() {
    when(branchConfiguration.isPullRequest()).thenReturn(false);

    publisher.publish(writer);

    verifyNoInteractions(inputComponentStore, inputModuleHierarchy, provider);
    assertNotPublished();
}
@Override
public MetadataNode child(String name) {
    if (name.equals(ClusterImageBrokersNode.NAME)) {
        return new ClusterImageBrokersNode(image);
    } else if (name.equals(ClusterImageControllersNode.NAME)) {
        return new ClusterImageControllersNode(image);
    } else {
        return null;
    }
}
@Test
public void testBrokersChild() {
    MetadataNode child = NODE.child("brokers");
    assertNotNull(child);
    assertEquals(ClusterImageBrokersNode.class, child.getClass());
}
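The else branch of the focal method returns null for any name that matches neither child constant; a minimal grounded sketch using a deliberately unknown name:

    MetadataNode unknown = NODE.child("no-such-child"); // hypothetical name, matches neither NAME constant
    assertNull(unknown);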
@Override
public State cancel() throws IOException {
    State state = delegate.cancel();
    this.terminalMetrics = delegate.metrics();
    this.terminalState = state;
    this.cancel.run();
    return state;
}
@Test
public void cancelStopsExecutable_reportsTerminalState() throws IOException {
    PipelineResult delegate = mock(PipelineResult.class);
    when(delegate.cancel()).thenReturn(PipelineResult.State.CANCELLED);
    PrismPipelineResult underTest = new PrismPipelineResult(delegate, exec::stop);
    assertThat(underTest.cancel()).isEqualTo(PipelineResult.State.CANCELLED);
}
public static String getAppName() {
    String appName = getAppNameByProjectName();
    if (appName != null) {
        return appName;
    }

    appName = getAppNameByServerHome();
    if (appName != null) {
        return appName;
    }

    return DEFAULT_APP_NAME;
}
@Test
void testGetAppNameByServerTypeForJboss() {
    System.setProperty("jboss.server.home.dir", "/home/admin/testAppName/");
    String appName = AppNameUtils.getAppName();
    assertEquals("testAppName", appName);
}
@ShellMethod(key = "clean showpartitions", value = "Show partition level details of a clean") public String showCleanPartitions( @ShellOption(value = {"--clean"}, help = "clean to show") final String instantTime, @ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit, @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField, @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending, @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly) throws Exception { HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline(); HoodieTimeline timeline = activeTimeline.getCleanerTimeline().filterCompletedInstants(); HoodieInstant cleanInstant = new HoodieInstant(false, HoodieTimeline.CLEAN_ACTION, instantTime); if (!timeline.containsInstant(cleanInstant)) { return "Clean " + instantTime + " not found in metadata " + timeline; } HoodieCleanMetadata cleanMetadata = TimelineMetadataUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(cleanInstant).get()); List<Comparable[]> rows = new ArrayList<>(); for (Map.Entry<String, HoodieCleanPartitionMetadata> entry : cleanMetadata.getPartitionMetadata().entrySet()) { String path = entry.getKey(); HoodieCleanPartitionMetadata stats = entry.getValue(); String policy = stats.getPolicy(); int totalSuccessDeletedFiles = stats.getSuccessDeleteFiles().size(); int totalFailedDeletedFiles = stats.getFailedDeleteFiles().size(); rows.add(new Comparable[] {path, policy, totalSuccessDeletedFiles, totalFailedDeletedFiles}); } TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION_PATH) .addTableHeaderField(HoodieTableHeaderFields.HEADER_CLEANING_POLICY) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_SUCCESSFULLY_DELETED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FAILED_DELETIONS); return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, rows); }
@Test
public void testShowCleanPartitions() {
    // Check that the properties file exists.
    assertNotNull(propsFilePath, "Not found properties file");

    // First, run clean with two partitions.
    SparkMain.clean(jsc(), HoodieCLI.basePath, propsFilePath.toString(), new ArrayList<>());
    assertEquals(1, metaClient.getActiveTimeline().reload().getCleanerTimeline().countInstants(),
            "Loaded 1 clean and the count should match");

    HoodieInstant clean = metaClient.getActiveTimeline().reload().getCleanerTimeline().getInstantsAsStream().findFirst().get();

    Object result = shell.evaluate(() -> "clean showpartitions --clean " + clean.getTimestamp());
    assertTrue(ShellEvaluationResultUtil.isSuccess(result));

    TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION_PATH)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_CLEANING_POLICY)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_SUCCESSFULLY_DELETED)
            .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FAILED_DELETIONS);

    // There should be three partition paths.
    List<Comparable[]> rows = new ArrayList<>();
    rows.add(new Comparable[] {HoodieTestCommitMetadataGenerator.DEFAULT_SECOND_PARTITION_PATH,
            HoodieCleaningPolicy.KEEP_LATEST_COMMITS, "1", "0"});
    rows.add(new Comparable[] {HoodieTestCommitMetadataGenerator.DEFAULT_THIRD_PARTITION_PATH,
            HoodieCleaningPolicy.KEEP_LATEST_COMMITS, "0", "0"});
    rows.add(new Comparable[] {HoodieTestCommitMetadataGenerator.DEFAULT_FIRST_PARTITION_PATH,
            HoodieCleaningPolicy.KEEP_LATEST_COMMITS, "1", "0"});

    String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
    expected = removeNonWordAndStripSpace(expected);
    String got = removeNonWordAndStripSpace(result.toString());
    assertEquals(expected, got);
}
@Override
public Metrics toDay() {
    MaxLabeledFunction metrics = (MaxLabeledFunction) createNew();
    metrics.setEntityId(getEntityId());
    metrics.setTimeBucket(toTimeBucketInDay());
    metrics.setServiceId(getServiceId());
    metrics.getValue().copyFrom(getValue());
    return metrics;
}
@Test
public void testToDay() {
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1);
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_2);
    function.calculate();

    final MaxLabeledFunction dayFunction = (MaxLabeledFunction) function.toDay();
    dayFunction.calculate();

    assertThat(dayFunction.getValue()).isEqualTo(HTTP_CODE_COUNT_3);
}
public static boolean isEntropyInjecting(FileSystem fs, Path target) {
    final EntropyInjectingFileSystem entropyFs = getEntropyFs(fs);
    return entropyFs != null
            && entropyFs.getEntropyInjectionKey() != null
            && target.getPath().contains(entropyFs.getEntropyInjectionKey());
}
@Test
void testIsEntropyFs() throws Exception {
    final String entropyKey = "_test_";
    final FileSystem efs = new TestEntropyInjectingFs(entropyKey, "ignored");
    final File folder = TempDirUtils.newFolder(tempFolder);
    final Path path = new Path(Path.fromLocalFile(folder), entropyKey + "/path/");

    assertThat(EntropyInjector.isEntropyInjecting(efs, path)).isTrue();
}
public NodeResources availableCapacityOf(Node host) {
    return availableCapacityOf(host, false, true);
}
@Test
public void availableCapacityOf() {
    assertEquals(new NodeResources(5, 40, 80, 2, NodeResources.DiskSpeed.fast,
                    NodeResources.StorageType.remote, NodeResources.Architecture.x86_64),
            capacity.availableCapacityOf(host1));
    assertEquals(new NodeResources(5, 60, 80, 4.5, NodeResources.DiskSpeed.fast,
                    NodeResources.StorageType.remote, NodeResources.Architecture.x86_64),
            capacity.availableCapacityOf(host3));
    assertEquals(NodeResources.zero(), capacity.availableCapacityOf(host4));
}
public static ChannelBuffer wrappedBuffer(byte[] array, int offset, int length) {
    if (array == null) {
        throw new NullPointerException("array == null");
    }
    byte[] dest = new byte[length];
    System.arraycopy(array, offset, dest, 0, length);
    return wrappedBuffer(dest);
}
@Test
void testWrappedBuffer() {
    byte[] bytes = new byte[16];
    ChannelBuffer channelBuffer = ChannelBuffers.wrappedBuffer(bytes, 0, 15);
    Assertions.assertTrue(channelBuffer instanceof HeapChannelBuffer);
    Assertions.assertEquals(channelBuffer.capacity(), 15);

    channelBuffer = ChannelBuffers.wrappedBuffer(new byte[] {});
    Assertions.assertEquals(channelBuffer, EMPTY_BUFFER);

    ByteBuffer byteBuffer = ByteBuffer.allocate(16);
    channelBuffer = ChannelBuffers.wrappedBuffer(byteBuffer);
    Assertions.assertTrue(channelBuffer instanceof HeapChannelBuffer);

    byteBuffer = ByteBuffer.allocateDirect(16);
    channelBuffer = ChannelBuffers.wrappedBuffer(byteBuffer);
    Assertions.assertTrue(channelBuffer instanceof ByteBufferBackedChannelBuffer);

    byteBuffer.position(byteBuffer.limit());
    channelBuffer = ChannelBuffers.wrappedBuffer(byteBuffer);
    Assertions.assertEquals(channelBuffer, EMPTY_BUFFER);
}
String substituteParametersInSqlString(String sql, SqlParameterSource paramSource) {
    ParsedSql parsedSql = NamedParameterUtils.parseSqlStatement(sql);
    List<SqlParameter> declaredParams = NamedParameterUtils.buildSqlParameterList(parsedSql, paramSource);
    if (declaredParams.isEmpty()) {
        return sql;
    }

    for (SqlParameter declaredParam : declaredParams) {
        String paramName = declaredParam.getName();
        if (!paramSource.hasValue(paramName)) {
            continue;
        }
        Object value = paramSource.getValue(paramName);
        if (value instanceof SqlParameterValue) {
            value = ((SqlParameterValue) value).getValue();
        }
        if (!(value instanceof Iterable)) {
            String valueForSQLQuery = getValueForSQLQuery(value);
            sql = sql.replace(":" + paramName, valueForSQLQuery);
            continue;
        }

        // Iterable: render as a comma-separated list
        int count = 0;
        String valueArrayStr = "";
        for (Object valueTemp : (Iterable) value) {
            if (count > 0) {
                valueArrayStr += ", ";
            }
            String valueForSQLQuery = getValueForSQLQuery(valueTemp);
            valueArrayStr += valueForSQLQuery;
            ++count;
        }
        sql = sql.replace(":" + paramName, valueArrayStr);
    }
    return sql;
}
@Test
public void substituteParametersInSqlString_UuidListType() {
    List<UUID> guids = List.of(UUID.fromString("634a8d03-6871-4e01-94d0-876bf3e67dff"),
            UUID.fromString("3adbb5b8-4dc6-4faf-80dc-681a7b518b5e"),
            UUID.fromString("63a50f0c-2058-4d1d-8f15-812eb7f84412"));
    String sql = "Select * from Table Where guid IN (:guids)";
    String sqlToUse = "Select * from Table Where guid IN ('634a8d03-6871-4e01-94d0-876bf3e67dff', '3adbb5b8-4dc6-4faf-80dc-681a7b518b5e', '63a50f0c-2058-4d1d-8f15-812eb7f84412')";
    ctx.addUuidListParameter("guids", guids);
    String sqlToUseResult = queryLog.substituteParametersInSqlString(sql, ctx);
    assertEquals(sqlToUse, sqlToUseResult);
}
@VisibleForTesting
Database getDatabase( LoggingObjectInterface parentObject, PGBulkLoaderMeta pgBulkLoaderMeta ) {
    DatabaseMeta dbMeta = pgBulkLoaderMeta.getDatabaseMeta();
    // If dbNameOverride is present, clone the original db meta and override the DB name
    String dbNameOverride = environmentSubstitute( pgBulkLoaderMeta.getDbNameOverride() );
    if ( !Utils.isEmpty( dbNameOverride ) ) {
        dbMeta = (DatabaseMeta) pgBulkLoaderMeta.getDatabaseMeta().clone();
        dbMeta.setDBName( dbNameOverride.trim() );
        logDebug( "DB name overridden to the value: " + dbNameOverride );
    }
    return new Database( parentObject, dbMeta );
}
@Test
public void testDBNameNOTOverridden_IfDbNameOverrideEmpty() throws Exception {
    // Db Name Override is empty
    PGBulkLoaderMeta pgBulkLoaderMock = getPgBulkLoaderMock( DB_NAME_EMPTY );
    Database database = pgBulkLoader.getDatabase( pgBulkLoader, pgBulkLoaderMock );

    assertNotNull( database );
    // Verify DB name is NOT overridden
    assertEquals( CONNECTION_DB_NAME, database.getDatabaseMeta().getDatabaseName() );
    // Check the other connection information as well
    assertEquals( CONNECTION_NAME, database.getDatabaseMeta().getName() );
    assertEquals( CONNECTION_DB_HOST, database.getDatabaseMeta().getHostname() );
    assertEquals( CONNECTION_DB_PORT, database.getDatabaseMeta().getDatabasePortNumberString() );
    assertEquals( CONNECTION_DB_USERNAME, database.getDatabaseMeta().getUsername() );
    assertEquals( CONNECTION_DB_PASSWORD, database.getDatabaseMeta().getPassword() );
}
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl, List<AclEntry> inAclSpec)
        throws AclException {
    ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
    ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
    // Replacement is done separately for each scope: access and default.
    EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class);
    EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
    EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
    for (AclEntry aclSpecEntry : aclSpec) {
        scopeDirty.add(aclSpecEntry.getScope());
        if (aclSpecEntry.getType() == MASK) {
            providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
            maskDirty.add(aclSpecEntry.getScope());
        } else {
            aclBuilder.add(aclSpecEntry);
        }
    }
    // Copy existing entries if the scope was not replaced.
    for (AclEntry existingEntry : existingAcl) {
        if (!scopeDirty.contains(existingEntry.getScope())) {
            if (existingEntry.getType() == MASK) {
                providedMask.put(existingEntry.getScope(), existingEntry);
            } else {
                aclBuilder.add(existingEntry);
            }
        }
    }
    copyDefaultsIfNeeded(aclBuilder);
    calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
    return buildAndValidateAcl(aclBuilder);
}
@Test(expected = AclException.class)
public void testReplaceAclEntriesMissingGroup() throws AclException {
    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, GROUP, READ))
        .add(aclEntry(ACCESS, OTHER, NONE))
        .build();
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "bruce", READ_WRITE),
        aclEntry(ACCESS, GROUP, "sales", ALL),
        aclEntry(ACCESS, MASK, ALL),
        aclEntry(ACCESS, OTHER, NONE));
    replaceAclEntries(existing, aclSpec);
}
public WorkerService getWorkerService() throws UnsupportedOperationException {
    return functionWorkerService.orElseThrow(() -> new UnsupportedOperationException(
            "Pulsar Function Worker is not enabled, probably functionsWorkerEnabled is set to false"));
}
@Test
public void testGetWorkerService() throws Exception {
    ServiceConfiguration configuration = new ServiceConfiguration();
    configuration.setMetadataStoreUrl("zk:localhost");
    configuration.setClusterName("clusterName");
    configuration.setFunctionsWorkerEnabled(true);
    configuration.setBrokerShutdownTimeoutMs(0L);
    configuration.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d));
    WorkerService expectedWorkerService = mock(WorkerService.class);
    @Cleanup
    PulsarService pulsarService = spy(new PulsarService(configuration, new WorkerConfig(),
            Optional.of(expectedWorkerService), (exitCode) -> {}));

    WorkerService actualWorkerService = pulsarService.getWorkerService();
    assertSame(expectedWorkerService, actualWorkerService);
}
public static Node build(final List<JoinInfo> joins) {
    Node root = null;

    for (final JoinInfo join : joins) {
        if (root == null) {
            root = new Leaf(join.getLeftSource());
        }

        if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) {
            throw new KsqlException("Cannot perform circular join - both " + join.getRightSource()
                    + " and " + join.getLeftJoinExpression()
                    + " are already included in the current join tree: " + root.debugString(0));
        } else if (root.containsSource(join.getLeftSource())) {
            root = new Join(root, new Leaf(join.getRightSource()), join);
        } else if (root.containsSource(join.getRightSource())) {
            root = new Join(root, new Leaf(join.getLeftSource()), join.flip());
        } else {
            throw new KsqlException(
                "Cannot build JOIN tree; neither source in the join is the FROM source or included "
                    + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0)
            );
        }
    }

    return root;
}
@Test
public void shouldIgnoreOuterJoinsWhenComputingViableKeys() {
    // Given:
    when(j1.getLeftSource()).thenReturn(a);
    when(j1.getRightSource()).thenReturn(b);
    when(j2.getLeftSource()).thenReturn(a);
    when(j2.getRightSource()).thenReturn(c);
    when(j1.getType()).thenReturn(JoinType.OUTER);
    when(j2.getLeftJoinExpression()).thenReturn(col1);
    when(j2.getRightJoinExpression()).thenReturn(col2);
    final Node root = JoinTree.build(ImmutableList.of(j1, j2));

    // When:
    final List<?> keys = root.viableKeyColumns();

    // Then:
    assertThat(keys, contains(col1, col2));
}
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
    final Fetch<K, V> fetch = Fetch.empty();
    final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
    int recordsRemaining = fetchConfig.maxPollRecords;

    try {
        while (recordsRemaining > 0) {
            final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();

            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                final CompletedFetch completedFetch = fetchBuffer.peek();

                if (completedFetch == null)
                    break;

                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // Remove a completedFetch upon a parse with exception if (1) it contains no records, and
                        // (2) there are no fetched records with actual content preceding this exception.
                        // The first condition ensures that the completedFetches is not stuck with the same
                        // completedFetch in cases such as the TopicAuthorizationException, and the second condition
                        // ensures that there is no potential data loss due to an exception in a following record.
                        if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
                            fetchBuffer.poll();

                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }

                fetchBuffer.poll();
            } else if (subscriptions.isPaused(nextInLineFetch.partition)) {
                // When the partition is paused, we add the records back to the completedFetches queue instead of
                // draining them so that they can be returned on a subsequent poll if the partition is resumed at
                // that time.
                log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
                pausedCompletedFetches.add(nextInLineFetch);
                fetchBuffer.setNextInLineFetch(null);
            } else {
                final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
                recordsRemaining -= nextFetch.numRecords();
                fetch.add(nextFetch);
            }
        }
    } catch (KafkaException e) {
        if (fetch.isEmpty())
            throw e;
    } finally {
        // Add any polled completed fetches for paused partitions back to the completed fetches queue to be
        // re-evaluated in the next poll.
        fetchBuffer.addAll(pausedCompletedFetches);
    }

    return fetch;
}
@Test
public void testCollectFetchInitializationOffsetOutOfRangeErrorWithNullPosition() {
    final TopicPartition topicPartition0 = new TopicPartition("topic", 0);
    final SubscriptionState subscriptions = mock(SubscriptionState.class);
    when(subscriptions.hasValidPosition(topicPartition0)).thenReturn(true);
    when(subscriptions.positionOrNull(topicPartition0)).thenReturn(null);
    final FetchCollector<String, String> fetchCollector = createFetchCollector(subscriptions);

    FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
            .setPartitionIndex(topicPartition0.partition())
            .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code());
    final CompletedFetch completedFetch = new CompletedFetchBuilder()
            .partitionData(partitionData)
            .partition(topicPartition0).build();
    final FetchBuffer fetchBuffer = mock(FetchBuffer.class);
    when(fetchBuffer.nextInLineFetch()).thenReturn(null);
    when(fetchBuffer.peek()).thenReturn(completedFetch).thenReturn(null);

    final Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);

    assertTrue(fetch.isEmpty());
    verify(fetchBuffer).setNextInLineFetch(null);
}
public static Void unwrapAndThrowException(ServiceException se) throws IOException, YarnException {
    Throwable cause = se.getCause();
    if (cause == null) {
        // SE generated by the RPC layer itself.
        throw new IOException(se);
    } else {
        if (cause instanceof RemoteException) {
            RemoteException re = (RemoteException) cause;
            Class<?> realClass = null;
            try {
                realClass = Class.forName(re.getClassName());
            } catch (ClassNotFoundException cnf) {
                // Assume this to be a new exception type added to YARN. This isn't
                // absolutely correct since the RPC layer could add an exception as
                // well.
                throw instantiateYarnException(YarnException.class, re);
            }

            if (YarnException.class.isAssignableFrom(realClass)) {
                throw instantiateYarnException(realClass.asSubclass(YarnException.class), re);
            } else if (IOException.class.isAssignableFrom(realClass)) {
                throw instantiateIOException(realClass.asSubclass(IOException.class), re);
            } else if (RuntimeException.class.isAssignableFrom(realClass)) {
                throw instantiateRuntimeException(realClass.asSubclass(RuntimeException.class), re);
            } else {
                throw re;
            }
            // RemoteException contains useful information as against the
            // java.lang.reflect exceptions.
        } else if (cause instanceof IOException) {
            // RPC Client exception.
            throw (IOException) cause;
        } else if (cause instanceof RuntimeException) {
            // RPC RuntimeException
            throw (RuntimeException) cause;
        } else {
            // Should not be generated.
            throw new IOException(se);
        }
    }
}
@Test
void testRPCServiceExceptionUnwrapping() {
    String message = "ServiceExceptionMessage";
    ServiceException se = new ServiceException(message);

    Throwable t = null;
    try {
        RPCUtil.unwrapAndThrowException(se);
    } catch (Throwable thrown) {
        t = thrown;
    }

    assertTrue(IOException.class.isInstance(t));
    assertTrue(t.getMessage().contains(message));
}
@Override
public VirtualNetwork getVirtualNetwork(NetworkId networkId) {
    checkNotNull(networkId, NETWORK_NULL);
    return store.getNetwork(networkId);
}
@Test(expected = NullPointerException.class)
public void testGetVirtualForNullVirtualNetworkId() {
    manager.getVirtualNetwork(null);
}
public Header setContentType(String contentType) {
    if (contentType == null) {
        contentType = MediaType.APPLICATION_JSON;
    }
    return addParam(HttpHeaderConsts.CONTENT_TYPE, contentType);
}
@Test
void testSetContentType() {
    Header header = Header.newInstance();
    header.setContentType(null);
    assertEquals(MediaType.APPLICATION_JSON, header.getValue(HttpHeaderConsts.CONTENT_TYPE));

    header.setContentType(MediaType.MULTIPART_FORM_DATA);
    assertEquals(MediaType.MULTIPART_FORM_DATA, header.getValue(HttpHeaderConsts.CONTENT_TYPE));
}
public Collection<String> getTrimmedStringCollection(String name) {
    String valueString = get(name);
    if (null == valueString) {
        Collection<String> empty = new ArrayList<String>();
        return empty;
    }
    return StringUtils.getTrimmedStringCollection(valueString);
}
@Test
public void testGetTrimmedStringCollection() {
    Configuration c = new Configuration();
    c.set("x", "a, b, c");
    Collection<String> strs = c.getStringCollection("x");
    assertEquals(3, strs.size());
    assertArrayEquals(new String[]{ "a", " b", " c" }, strs.toArray(new String[0]));

    // Check that the result is mutable
    strs.add("z");

    // Make sure same is true for missing config
    strs = c.getStringCollection("does-not-exist");
    assertEquals(0, strs.size());
    strs.add("z");
}
@Override
protected Result[] run(String value) {
    // the extractor instance is rebuilt every second anyway
    final Grok grok = grokPatternRegistry.cachedGrokForPattern(this.pattern, this.namedCapturesOnly);

    final Match match = grok.match(value);
    final Map<String, Object> matches = match.captureFlattened();
    final List<Result> results = new ArrayList<>(matches.size());

    for (final Map.Entry<String, Object> entry : matches.entrySet()) {
        // never add null values to the results, those don't make sense for us
        if (entry.getValue() != null) {
            results.add(new Result(entry.getValue(), entry.getKey(), -1, -1));
        }
    }

    return results.toArray(new Result[0]);
}
@Test
public void testFlattenValue() {
    final Map<String, Object> config = new HashMap<>();

    final GrokExtractor extractor1 = makeExtractor("%{TWOBASENUMS}", config);

    /* Test flatten with a multiple, non-unique result [ 22, 23 ] */
    Extractor.Result[] result1 = extractor1.run("22 23");
    assertThat(result1)
            .hasSize(2)
            .contains(new Extractor.Result("22 23", "TWOBASENUMS", -1, -1),
                    new Extractor.Result(Arrays.asList("22", "23"), "BASE10NUM", -1, -1));

    /* Test flatten with a multiple but unique result [ 22, 22 ] */
    Extractor.Result[] result2 = extractor1.run("22 22");
    assertThat(result2)
            .hasSize(2)
            .contains(new Extractor.Result("22 22", "TWOBASENUMS", -1, -1),
                    new Extractor.Result("22", "BASE10NUM", -1, -1));
}
@VisibleForTesting
void validateMobileUnique(Long id, String mobile) {
    if (StrUtil.isBlank(mobile)) {
        return;
    }
    AdminUserDO user = userMapper.selectByMobile(mobile);
    if (user == null) {
        return;
    }
    // If id is null, there is no need to check whether the match is the same user
    if (id == null) {
        throw exception(USER_MOBILE_EXISTS);
    }
    if (!user.getId().equals(id)) {
        throw exception(USER_MOBILE_EXISTS);
    }
}
@Test
public void testValidateMobileUnique_mobileExistsForUpdate() {
    // Prepare parameters
    Long id = randomLongId();
    String mobile = randomString();
    // Mock data
    userMapper.insert(randomAdminUserDO(o -> o.setMobile(mobile)));
    // Call and verify the exception
    assertServiceException(() -> userService.validateMobileUnique(id, mobile), USER_MOBILE_EXISTS);
}
public static String format(Object x) {
    if (x != null) {
        return format(x.toString());
    } else {
        return StrUtil.EMPTY;
    }
}
@Test
public void issue3579Test() {
    assertEquals("ZERO AND CENTS TEN ONLY", NumberWordFormatter.format(0.1));
    assertEquals("ZERO AND CENTS ONE ONLY", NumberWordFormatter.format(0.01));
}
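From the focal method's null branch, a null argument short-circuits to the empty string; a minimal grounded sketch:

    assertEquals(StrUtil.EMPTY, NumberWordFormatter.format(null)); // null input yields "" rather than an NPE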
@Override
public void setBigNumber( BigDecimal number ) {
    string = number.toString();
}
@Test
public void testSetBigNumber() {
    ValueString vs = new ValueString();
    try {
        vs.setBigNumber( null );
        // assertNull(vs.getString());
        fail( "expected NullPointerException" );
    } catch ( NullPointerException ex ) {
        // This is the original behaviour
    }
    vs.setBigNumber( BigDecimal.ZERO );
    assertEquals( "0", vs.getString() );
}
public Properties getProperties() {
    return properties;
}
@Test
public void testGetProperties() {
}
@Override
public synchronized Snapshot record(long duration, TimeUnit durationUnit, Outcome outcome) {
    totalAggregation.record(duration, durationUnit, outcome);
    moveWindowByOne().record(duration, durationUnit, outcome);
    return new SnapshotImpl(totalAggregation);
}
@Test
public void testRecordSlowSuccess() {
    Metrics metrics = new FixedSizeSlidingWindowMetrics(5);
    Snapshot snapshot = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SLOW_SUCCESS);
    assertThat(snapshot.getTotalNumberOfCalls()).isEqualTo(1);
    assertThat(snapshot.getNumberOfSuccessfulCalls()).isEqualTo(1);
    assertThat(snapshot.getNumberOfFailedCalls()).isZero();
    assertThat(snapshot.getTotalNumberOfSlowCalls()).isEqualTo(1);
    assertThat(snapshot.getNumberOfSlowSuccessfulCalls()).isEqualTo(1);
    assertThat(snapshot.getNumberOfSlowFailedCalls()).isZero();
    assertThat(snapshot.getTotalDuration().toMillis()).isEqualTo(100);
    assertThat(snapshot.getAverageDuration().toMillis()).isEqualTo(100);
    assertThat(snapshot.getFailureRate()).isZero();
}
public Map<MetricId, Number> metrics() {
    return metrics;
}
@Test
public void builder_applies_output_names() {
    String ONE = "one";
    String TWO = "two";
    String THREE = "three";
    String NON_EXISTENT = "non-existent";
    MetricId ONE_ID = toMetricId(ONE);
    MetricId TWO_ID = toMetricId(TWO);
    MetricId THREE_ID = toMetricId(THREE);
    MetricId NON_EXISTENT_ID = toMetricId(NON_EXISTENT);

    Map<MetricId, List<MetricId>> outputNamesById = Map.of(
            ONE_ID, List.of(ONE_ID),
            TWO_ID, List.of(TWO_ID, toMetricId("dos")),
            THREE_ID, List.of(toMetricId("3")),
            NON_EXISTENT_ID, List.of(NON_EXISTENT_ID));

    MetricsPacket packet = new MetricsPacket.Builder(toServiceId("foo"))
            .putMetrics(List.of(
                    new Metric(ONE_ID, 1),
                    new Metric(TWO_ID, 2),
                    new Metric(THREE_ID, 3)))
            .applyOutputNames(outputNamesById)
            .build();

    // Only the original name
    assertTrue(packet.metrics().containsKey(ONE_ID));

    // Both names
    assertTrue(packet.metrics().containsKey(TWO_ID));
    assertTrue(packet.metrics().containsKey(toMetricId("dos")));

    // Only the new name
    assertFalse(packet.metrics().containsKey(THREE_ID));
    assertTrue(packet.metrics().containsKey(toMetricId("3")));

    // Non-existent metric not added
    assertFalse(packet.metrics().containsKey(NON_EXISTENT_ID));
}
public double getZ() {
    return position.z();
}
@Test
public void testGetZ() throws Exception {
    World world = mock(World.class);
    Location location = new Location(world, Vector3.at(0, 0, TEST_VALUE));
    assertEquals(TEST_VALUE, location.getZ(), EPSILON);
}
public void setBuildFile(String buildFile) {
    this.buildFile = buildFile;
}
@Test
public void antTaskShouldNormalizeBuildFile() {
    AntTask task = new AntTask();
    task.setBuildFile("pavan\\build.xml");
    assertThat(task.arguments(), containsString("\"pavan/build.xml\""));
}
public static NacosAsyncRestTemplate getNacosAsyncRestTemplate(Logger logger) {
    return getNacosAsyncRestTemplate(new DefaultHttpClientFactory(logger));
}
@Test
void testGetNacosAsyncRestTemplateWithCustomFactory() {
    assertTrue(restAsyncMap.isEmpty());
    HttpClientBeanHolder.getNacosAsyncRestTemplate((Logger) null);
    assertEquals(1, restAsyncMap.size());

    NacosAsyncRestTemplate actual = HttpClientBeanHolder.getNacosAsyncRestTemplate(mockFactory);
    assertEquals(2, restAsyncMap.size());
    assertEquals(mockAsyncRestTemplate, actual);
}
@Override
public String getRomOAID() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
        try {
            String oaid = Settings.Global.getString(mContext.getContentResolver(), "pps_oaid");
            if (!TextUtils.isEmpty(oaid)) {
                SALog.i(TAG, "Get oaid from global settings");
                return oaid;
            }
        } catch (Throwable t) {
            SALog.i(TAG, t);
        }
    }

    String oaid = null;
    String[] packages = new String[]{"com.huawei.hwid", "com.huawei.hwid.tv", "com.huawei.hms"};
    for (String pg : packages) {
        if (TextUtils.isEmpty(oaid)) {
            oaid = realLoadOaid(pg);
        }
    }
    return oaid;
}
@Test
public void getRomOAID() {
    HuaweiImpl huawei = new HuaweiImpl(mApplication);
    // if (huawei.isSupported()) {
    //     Assert.assertNull(huawei.getRomOAID());
    // }
}
public static void setField(final Object object, final String fieldName, final Object fieldNewValue) {
    try {
        traverseClassHierarchy(
            object.getClass(),
            NoSuchFieldException.class,
            (InsideTraversal<Void>) traversalClass -> {
                Field field = traversalClass.getDeclaredField(fieldName);
                field.setAccessible(true);
                field.set(object, fieldNewValue);
                return null;
            });
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
@Test
public void setFieldReflectively_setsInheritedFields() {
    ExampleDescendant example = new ExampleDescendant();
    example.setNotOverridden(5);
    ReflectionHelpers.setField(example, "notOverridden", 10);
    assertThat(example.getNotOverridden()).isEqualTo(10);
}
@Override
public CiConfiguration loadConfiguration() {
    // https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables
    // https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
    String revision = system.envVariable("ghprbActualCommit");
    if (StringUtils.isNotBlank(revision)) {
        return new CiConfigurationImpl(revision, getName());
    }

    revision = system.envVariable("GIT_COMMIT");

    if (StringUtils.isNotBlank(revision)) {
        if (StringUtils.isNotBlank(system.envVariable("CHANGE_ID"))) {
            String jenkinsGitPrSha1 = getJenkinsGitPrSha1();
            if (StringUtils.isNotBlank(jenkinsGitPrSha1)) {
                return new CiConfigurationImpl(jenkinsGitPrSha1, getName());
            }
        }
        return new CiConfigurationImpl(revision, getName());
    }

    revision = system.envVariable("SVN_COMMIT");
    return new CiConfigurationImpl(revision, getName());
}
@Test
public void loadConfiguration_of_git_repo_with_branch_plugin_without_git_repo() throws IOException {
    // prepare fake git clone
    Path baseDir = temp.newFolder().toPath();
    when(project.getBaseDir()).thenReturn(baseDir);

    setEnvVariable("CHANGE_ID", "3");
    setEnvVariable("GIT_BRANCH", "PR-3");
    setEnvVariable("GIT_COMMIT", "abc");

    assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("abc");
}
@Override
public void handle(final RoutingContext routingContext) {
    routingContext.addEndHandler(ar -> {
        // After the response is complete, log results here.
        final int status = routingContext.request().response().getStatusCode();

        if (!loggingRateLimiter.shouldLog(logger, routingContext.request().path(), status)) {
            return;
        }

        final long contentLength = routingContext.request().response().bytesWritten();
        final HttpVersion version = routingContext.request().version();
        final HttpMethod method = routingContext.request().method();
        final String uri = enableQueryLogging
                ? routingContext.request().uri()
                : routingContext.request().path();

        if (endpointFilter.isPresent() && endpointFilter.get().matcher(uri).matches()) {
            return;
        }

        final long requestBodyLength = routingContext.request().bytesRead();
        final String versionFormatted;
        switch (version) {
            case HTTP_1_0:
                versionFormatted = "HTTP/1.0";
                break;
            case HTTP_1_1:
                versionFormatted = "HTTP/1.1";
                break;
            case HTTP_2:
                versionFormatted = "HTTP/2.0";
                break;
            default:
                versionFormatted = "-";
        }

        final String name = Optional.ofNullable((ApiUser) routingContext.user())
                .map(u -> u.getPrincipal().getName())
                .orElse("-");
        final String userAgent = Optional.ofNullable(
                routingContext.request().getHeader(HTTP_HEADER_USER_AGENT)).orElse("-");
        final String timestamp = Utils.formatRFC1123DateTime(clock.millis());
        final SocketAddress socketAddress = routingContext.request().remoteAddress();
        final String message = String.format(
                "%s - %s [%s] \"%s %s %s\" %d %d \"-\" \"%s\" %d",
                socketAddress == null ? "null" : socketAddress.host(),
                name,
                timestamp,
                method,
                uri,
                versionFormatted,
                status,
                contentLength,
                userAgent,
                requestBodyLength);
        doLog(status, message);
    });
    routingContext.next();
}
@Test
public void shouldProduceLogWithRandomFilter() {
    // Given:
    when(response.getStatusCode()).thenReturn(200);
    config = new KsqlRestConfig(
        ImmutableMap.of(KsqlRestConfig.KSQL_ENDPOINT_LOGGING_IGNORED_PATHS_REGEX_CONFIG, ".*random.*")
    );
    when(server.getConfig()).thenReturn(config);
    loggingHandler = new LoggingHandler(server, loggingRateLimiter, logger, clock);

    // When:
    loggingHandler.handle(routingContext);
    verify(routingContext).addEndHandler(endCallback.capture());
    endCallback.getValue().handle(null);

    // Then:
    verify(logger).info(logStringCaptor.capture());
    assertThat(logStringCaptor.getValue(),
        is("123.111.222.333 - - [Sun, 12 Nov 2023 18:23:54 GMT] \"POST /query HTTP/1.1\" 200 5678 \"-\" \"bot\" 3456"));
}
@Override
public boolean supports(Job job) {
    JobDetails jobDetails = job.getJobDetails();
    return !jobDetails.hasStaticFieldName() && Modifier.isStatic(getJobMethod(jobDetails).getModifiers());
}
@Test
void doesNotSupportJobIfJobMethodIsNotStatic() {
    Job job = anEnqueuedJob()
            .<TestService>withJobDetails(ts -> ts.doWorkThatFails())
            .build();

    assertThat(backgroundStaticJobWithoutIocRunner.supports(job)).isFalse();
}
public void onRequest(FilterRequestContext requestContext,
        RestLiFilterResponseContextFactory filterResponseContextFactory) {
    // Initiate the filter chain iterator. The RestLiCallback will be passed to the method invoker
    // at the end of the filter chain.
    _filterChainIterator.onRequest(requestContext, filterResponseContextFactory,
        new RestLiCallback(requestContext, filterResponseContextFactory, this));
}
@SuppressWarnings("unchecked") @Test public void testFilterInvocationRequestThrowsError() throws Exception { _restLiFilterChain = new RestLiFilterChain(Arrays.asList(_filters), _mockFilterChainDispatcher, _mockFilterChainCallback); _filters[1] = new CountFilterRequestThrowsError(); when(_responseHandler.buildExceptionResponseData(eq(_method), any(RestLiServiceException.class), anyMap(), anyList())) .thenReturn(_mockRestLiResponseData); when(_mockRestLiResponseData.getResponseEnvelope()).thenReturn(mock(RestLiResponseEnvelope.class)); _restLiFilterChain.onRequest(_mockFilterRequestContext, new RestLiFilterResponseContextFactory(_request, _method, _responseHandler)); verifySecondFilterRequestException(); }
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source,
        Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables,
        Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles,
        FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
    CommonTokenStream tokens = new CommonTokenStream( lexer );
    FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );

    ParserHelper parserHelper = new ParserHelper(eventsManager);
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler( new FEELErrorHandler() );
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );

    // pre-loads the parser with symbols
    defineVariables( inputVariableTypes, inputVariables, parser );

    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
@Test
void comparisonInFixOp() {
    String inputExpression = "foo >= bar * 10";
    BaseNode infix = parse( inputExpression );

    assertThat( infix ).isInstanceOf(InfixOpNode.class);
    assertThat( infix.getResultType() ).isEqualTo(BuiltInType.BOOLEAN);
    assertThat( infix.getText() ).isEqualTo(inputExpression);

    InfixOpNode in = (InfixOpNode) infix;
    assertThat( in.getLeft() ).isInstanceOf(NameRefNode.class);
    assertThat( in.getLeft().getText() ).isEqualTo("foo");

    assertThat( in.getRight() ).isInstanceOf(InfixOpNode.class);
    assertThat( in.getRight().getText() ).isEqualTo( "bar * 10" );
}
@Udf
public String lcase(
        @UdfParameter(description = "The string to lower-case") final String input) {
    if (input == null) {
        return null;
    }
    return input.toLowerCase();
}
@Test
public void shouldReturnEmptyForEmptyInput() {
    final String result = udf.lcase("");
    assertThat(result, is(""));
}
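The two remaining branches of the focal method are equally direct; a minimal grounded sketch of both, assuming the usual Hamcrest matchers are in scope:

    assertThat(udf.lcase("KSQL"), is("ksql")); // non-null input is lower-cased
    assertThat(udf.lcase(null), is(nullValue())); // null passes through unchanged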
@Override
public void add(Component file, Duplication duplication) {
    checkFileComponentArgument(file);
    checkNotNull(duplication, "duplication can not be null");
    duplications.put(file.getKey(), duplication);
}
@Test
@UseDataProvider("allComponentTypesButFile")
public void addDuplication_inner_throws_IAE_if_file_type_is_not_FILE(Component.Type type) {
    assertThatThrownBy(() -> {
        Component component = mockComponentGetType(type);
        underTest.add(component, SOME_DUPLICATION);
    })
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("type of file must be FILE");
}
@Override
public void onGestureTypingInput(int x, int y, long eventTime) {}
@Test
public void testOnGestureTypingInput() {
    mUnderTest.onGestureTypingInput(66, 99, 1231);
    Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction);
}
@Override
public String getName() {
    return ANALYZER_NAME;
}
@Test
public void testAnalyzePinnedInstallJsonV010() throws Exception {
    try (Engine engine = new Engine(getSettings())) {
        final Dependency result = new Dependency(BaseTest.getResourceAsFile(this, "maven_install_v010.json"));
        engine.addDependency(result);
        analyzer.analyze(result, engine);
        assertFalse(ArrayUtils.contains(engine.getDependencies(), result));
        assertEquals(10, engine.getDependencies().length);
        boolean found = false;
        for (Dependency d : engine.getDependencies()) {
            if ("com.google.errorprone:error_prone_annotations".equals(d.getName())) {
                found = true;
                assertEquals("2.3.4", d.getVersion());
                assertEquals(Ecosystem.JAVA, d.getEcosystem());
            }
        }
        assertTrue("Expected to find com.google.errorprone:error_prone_annotations:2.3.4", found);
    }
}
@Override
public boolean syncVerifyData(DistroData verifyData, String targetServer) {
    if (isNoExistTarget(targetServer)) {
        return true;
    }
    // Replace the target server with the self server so that the callback can be invoked.
    verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress());
    DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY);
    Member member = memberManager.find(targetServer);
    if (checkTargetServerStatusUnhealthy(member)) {
        Loggers.DISTRO.warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}",
                targetServer, verifyData.getDistroKey());
        return false;
    }
    try {
        Response response = clusterRpcClientProxy.sendRequest(member, request);
        return checkResponse(response);
    } catch (NacosException e) {
        Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e);
    }
    return false;
}
@Test
void testSyncVerifyDataWithCallbackException2() throws NacosException {
    DistroData verifyData = new DistroData();
    verifyData.setDistroKey(new DistroKey());
    when(memberManager.hasMember(member.getAddress())).thenReturn(true);
    when(memberManager.find(member.getAddress())).thenReturn(member);
    member.setState(NodeState.UP);
    when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
    doAnswer(invocationOnMock -> {
        RequestCallBack<Response> callback = invocationOnMock.getArgument(2);
        callback.onException(new NacosException());
        return null;
    }).when(clusterRpcClientProxy).asyncRequest(eq(member), any(), any());

    transportAgent.syncVerifyData(verifyData, member.getAddress(), distroCallback);
    verify(distroCallback).onFailed(any(NacosException.class));
}
public static Schema schemaFromPojoClass(
        TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) {
    return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier);
}
@Test
public void testSimplePOJO() {
    Schema schema = POJOUtils.schemaFromPojoClass(
        new TypeDescriptor<SimplePOJO>() {}, JavaFieldTypeSupplier.INSTANCE);
    assertEquals(SIMPLE_POJO_SCHEMA, schema);
}
@Override
@MethodNotAvailable
public void evictAll() {
    throw new MethodNotAvailableException();
}
@Test(expected = MethodNotAvailableException.class)
public void testEvictAll() {
    adapter.evictAll();
}
@SuppressWarnings("unchecked") public static Builder fromMap(final Map<String, Object> data) { final String tableName = ObjectHelper.cast(String.class, data.get(TABLE_NAME)); final StitchSchema schema = StitchSchema.builder() .addKeywords(ObjectHelper.cast(Map.class, data.getOrDefault(SCHEMA, Collections.emptyMap()))) .build(); final Collection<StitchMessage> messages = (Collection<StitchMessage>) ObjectHelper .cast(Collection.class, data.getOrDefault(MESSAGES, Collections.emptyList())) .stream() .filter(ObjectHelper::isNotEmpty) .map(message -> StitchMessage .fromMap(ObjectHelper.cast(Map.class, message)) .build()) .collect(Collectors.toList()); final Collection<String> keyNames = ObjectHelper.cast(Collection.class, data.get(KEY_NAMES)); return new Builder() .withSchema(schema) .withTableName(tableName) .withKeyNames(keyNames) .withSchema(schema) .addMessages(messages); }
@Test
void testIfNotCreateRequestBodyFromInvalidMap() {
    final Map<String, Object> data = new LinkedHashMap<>();
    data.put(StitchRequestBody.TABLE_NAME, "table");
    data.put(StitchRequestBody.SCHEMA, 1);
    data.put(StitchRequestBody.MESSAGES, Collections.emptyList());
    data.put(StitchRequestBody.KEY_NAMES, Collections.emptySet());

    assertThrows(IllegalArgumentException.class, () -> StitchRequestBody.fromMap(data));

    final Map<String, Object> data2 = new LinkedHashMap<>();
    data2.put(StitchRequestBody.TABLE_NAME, "table");
    data2.put(StitchRequestBody.SCHEMA, Collections.emptyMap());
    data2.put(StitchRequestBody.MESSAGES, 12);
    data2.put(StitchRequestBody.KEY_NAMES, Collections.emptySet());

    assertThrows(IllegalArgumentException.class, () -> StitchRequestBody.fromMap(data2));
}
@Override
public void characters(char[] ch, int start, int length) throws SAXException {
    filter(ch, start, length, charactersOutput);
}
@Test
public void testInvalidCharacters() throws SAXException {
    safe.characters("ab\u0007".toCharArray(), 0, 3);
    safe.characters("a\u000Bc".toCharArray(), 0, 3);
    safe.characters("\u0019bc".toCharArray(), 0, 3);
    assertEquals("ab\ufffda\ufffdc\ufffdbc", output.toString());
}
public static String getOperatingSystemCompleteName() {
    return OS_COMPLETE_NAME;
}
@Test
@EnabledOnOs(OS.LINUX)
public void shouldGetCompleteNameOnLinux() {
    assertThat(SystemInfo.getOperatingSystemCompleteName()).matches("[ \\w]+ [0-9.]+( \\w+)?( \\(.*\\))?");
}
@Override
public List<? extends SortKey> getSortKeys() {
    return isSorted() ? Collections.singletonList(sortkey) : Collections.emptyList();
}
@Test
public void toggleSortOrder_none() {
    assertSame(emptyList(), sorter.getSortKeys());
}
@Override
public Graph<EntityDescriptor> resolveNativeEntity(EntityDescriptor entityDescriptor) {
    final MutableGraph<EntityDescriptor> mutableGraph = GraphBuilder.directed().build();
    mutableGraph.addNode(entityDescriptor);

    final ModelId modelId = entityDescriptor.id();
    final Optional<LookupTableDto> lookupTableDto = lookupTableService.get(modelId.id());

    lookupTableDto.map(LookupTableDto::dataAdapterId)
            .map(this::adapterDescriptor)
            .ifPresent(dataAdapter -> mutableGraph.putEdge(entityDescriptor, dataAdapter));
    lookupTableDto.map(LookupTableDto::cacheId)
            .map(this::cacheDescriptor)
            .ifPresent(cache -> mutableGraph.putEdge(entityDescriptor, cache));

    return ImmutableGraph.copyOf(mutableGraph);
}
@Test
@MongoDBFixtures({"LookupCacheFacadeTest.json", "LookupDataAdapterFacadeTest.json", "LookupTableFacadeTest.json"})
public void resolveEntityDescriptor() {
    final EntityDescriptor descriptor = EntityDescriptor.create("5adf24dd4b900a0fdb4e530d", ModelTypes.LOOKUP_TABLE_V1);
    final Graph<EntityDescriptor> graph = facade.resolveNativeEntity(descriptor);
    assertThat(graph.nodes())
            .hasSize(3)
            .containsOnly(
                    descriptor,
                    EntityDescriptor.create("5adf24a04b900a0fdb4e52c8", ModelTypes.LOOKUP_ADAPTER_V1),
                    EntityDescriptor.create("5adf24b24b900a0fdb4e52dd", ModelTypes.LOOKUP_CACHE_V1));
}
public Span nextSpan(TraceContextOrSamplingFlags extracted) {
    if (extracted == null) throw new NullPointerException("extracted == null");
    TraceContext context = extracted.context();
    if (context != null) return newChild(context);

    TraceIdContext traceIdContext = extracted.traceIdContext();
    if (traceIdContext != null) {
        return _toSpan(null, decorateContext(
            InternalPropagation.instance.flags(extracted.traceIdContext()),
            traceIdContext.traceIdHigh(),
            traceIdContext.traceId(),
            0L,
            0L,
            0L,
            extracted.extra()
        ));
    }

    SamplingFlags samplingFlags = extracted.samplingFlags();
    List<Object> extra = extracted.extra();

    TraceContext parent = currentTraceContext.get();
    int flags;
    long traceIdHigh = 0L, traceId = 0L, localRootId = 0L, spanId = 0L;
    if (parent != null) {
        // At this point, we didn't extract trace IDs, but do have a trace in progress. Since typical
        // trace sampling is up front, we retain the decision from the parent.
        flags = InternalPropagation.instance.flags(parent);
        traceIdHigh = parent.traceIdHigh();
        traceId = parent.traceId();
        localRootId = parent.localRootId();
        spanId = parent.spanId();
        extra = concat(extra, parent.extra());
    } else {
        flags = InternalPropagation.instance.flags(samplingFlags);
    }
    return _toSpan(parent, decorateContext(flags, traceIdHigh, traceId, localRootId, spanId, 0L, extra));
}
@Test
void localRootId_nextSpan_ids_sampled() {
    TraceIdContext context1 = TraceIdContext.newBuilder().traceId(1).sampled(true).build();
    TraceIdContext context2 = TraceIdContext.newBuilder().traceId(2).sampled(true).build();
    localRootId(context1, context2, ctx -> tracer.nextSpan(ctx));
}
static Timestamp toTimestamp(final JsonNode object) {
    if (object instanceof NumericNode) {
        return new Timestamp(object.asLong());
    }

    if (object instanceof TextNode) {
        try {
            return new Timestamp(Long.parseLong(object.textValue()));
        } catch (final NumberFormatException e) {
            throw failedStringCoercionException(SqlBaseType.TIMESTAMP);
        }
    }

    throw invalidConversionException(object, SqlBaseType.TIMESTAMP);
}
@Test
public void shouldConvertStringToTimestampCorrectly() {
    final Timestamp d = JsonSerdeUtils.toTimestamp(JsonNodeFactory.instance.textNode("100"));
    assertThat(d.getTime(), equalTo(100L));
}
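The NumericNode branch is just as direct as the TextNode one; a small grounded sketch based on the method body:

    final Timestamp t = JsonSerdeUtils.toTimestamp(JsonNodeFactory.instance.numberNode(100L));
    assertThat(t.getTime(), equalTo(100L)); // numeric nodes are read as epoch millis via asLong()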
public URI qualifiedURI(String filename) throws IOException {
    try {
        URI fileURI = new URI(filename);
        if (RESOURCE_URI_SCHEME.equals(fileURI.getScheme())) {
            return fileURI;
        }
    } catch (URISyntaxException ignore) {
    }
    return qualifiedPath(filename).toUri();
}
@Test
public void qualifiedURITest() throws IOException {
    URI uri = this.command.qualifiedURI(FILE_PATH);
    Assert.assertEquals("/var/tmp/test.parquet", uri.getPath());
}
@Override
public Iterator<Map.Entry<String, Object>> getIterator() {
    return variables.getIterator();
}
@Test
public void testGetIteratorIsUnmodifiable() {
    Iterator<Map.Entry<String, Object>> iterator = unmodifiables.getIterator();
    assertThat(iterator.hasNext(), CoreMatchers.is(true));
    iterator.next();
    assertThrowsUnsupportedOperation(iterator::remove);
}
public static String normalizeUri(String uri) throws URISyntaxException {
    // try to parse using the simpler and faster Camel URI parser
    String[] parts = CamelURIParser.fastParseUri(uri);
    if (parts != null) {
        // we optimized specially if an empty array is returned
        if (parts == URI_ALREADY_NORMALIZED) {
            return uri;
        }
        // use the faster and more simple normalizer
        return doFastNormalizeUri(parts);
    } else {
        // use the legacy normalizer as the uri is complex and may have unsafe URL characters
        return doComplexNormalizeUri(uri);
    }
}
@Test
public void testRawParameterCurly() throws Exception {
    String out = URISupport.normalizeUri(
            "xmpp://camel-user@localhost:123/test-user@localhost?password=RAW{++?w0rd}&serviceName=some chat");
    assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW{++?w0rd}&serviceName=some+chat",
            out);

    String out2 = URISupport.normalizeUri(
            "xmpp://camel-user@localhost:123/test-user@localhost?password=RAW{foo %% bar}&serviceName=some chat");
    // Just make sure the RAW parameter is resolved correctly; the % must be replaced with %25
    assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW{foo %25%25 bar}&serviceName=some+chat",
            out2);
}
@Override
public CiConfiguration loadConfiguration() {
    String revision = system.envVariable(PROPERTY_COMMIT);
    if (isEmpty(revision)) {
        LoggerFactory.getLogger(getClass()).warn("Missing environment variable " + PROPERTY_COMMIT);
    }
    return new CiConfigurationImpl(revision, getName());
}
@Test
public void configuration_of_pull_request() {
    setEnvVariable("CIRRUS_PR", "1234");
    setEnvVariable("CIRRUS_BASE_SHA", "abd12fc");
    setEnvVariable("CIRRUS_CHANGE_IN_REPO", "fd355db");
    assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("fd355db");
}
public static Node build(final List<JoinInfo> joins) {
    Node root = null;

    for (final JoinInfo join : joins) {
        if (root == null) {
            root = new Leaf(join.getLeftSource());
        }

        if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) {
            throw new KsqlException("Cannot perform circular join - both " + join.getRightSource()
                    + " and " + join.getLeftJoinExpression()
                    + " are already included in the current join tree: " + root.debugString(0));
        } else if (root.containsSource(join.getLeftSource())) {
            root = new Join(root, new Leaf(join.getRightSource()), join);
        } else if (root.containsSource(join.getRightSource())) {
            root = new Join(root, new Leaf(join.getLeftSource()), join.flip());
        } else {
            throw new KsqlException(
                "Cannot build JOIN tree; neither source in the join is the FROM source or included "
                    + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0)
            );
        }
    }

    return root;
}
@Test public void shouldIncludeOnlyColFromLastInViableKeyEvenWithoutOverlap() { // Given: when(j1.getLeftSource()).thenReturn(a); when(j1.getRightSource()).thenReturn(b); when(j2.getLeftSource()).thenReturn(a); when(j2.getRightSource()).thenReturn(c); when(j1.getLeftJoinExpression()).thenReturn(e1); when(j1.getRightJoinExpression()).thenReturn(e2); when(j2.getLeftJoinExpression()).thenReturn(col1); when(j2.getRightJoinExpression()).thenReturn(e3); final List<JoinInfo> joins = ImmutableList.of(j1, j2); final Node root = JoinTree.build(joins); // When: final List<?> keys = root.viableKeyColumns(); // Then: assertThat(keys, contains(col1)); }
public static synchronized String getFullName(String cweId) { final String name = getName(cweId); if (name != null) { return cweId + " " + name; } return cweId; }
@Test public void testGetFullName() { String cweId = "CWE-16"; String expResult = "CWE-16 Configuration"; String result = CweDB.getFullName(cweId); assertEquals(expResult, result); cweId = "CWE-260000"; expResult = "CWE-260000"; result = CweDB.getFullName(cweId); assertEquals(expResult, result); }
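A hedged usage sketch of the lookup contract above, mirroring the test: a known id is expanded to "id name", an unknown id passes through unchanged.

String known = CweDB.getFullName("CWE-16");       // "CWE-16 Configuration"
String unknown = CweDB.getFullName("CWE-260000"); // "CWE-260000" (no name found, id returned as-is)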
public void ensureActiveGroup() { while (!ensureActiveGroup(time.timer(Long.MAX_VALUE))) { log.warn("still waiting to ensure active group"); } }
@Test public void testWakeupInOnJoinComplete() throws Exception { setupCoordinator(); coordinator.wakeupOnJoinComplete = true; mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException ignored) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); // the join group completes in this poll() coordinator.wakeupOnJoinComplete = false; consumerClient.poll(mockTime.timer(0)); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
@Override public void pluginLoaded(GoPluginDescriptor pluginDescriptor) { if (notificationExtension.canHandlePlugin(pluginDescriptor.id())) { try { notificationPluginRegistry.registerPlugin(pluginDescriptor.id()); List<String> notificationsInterestedIn = notificationExtension.getNotificationsOfInterestFor(pluginDescriptor.id()); if (notificationsInterestedIn != null && !notificationsInterestedIn.isEmpty()) { checkNotificationTypes(pluginDescriptor, notificationsInterestedIn); notificationPluginRegistry.registerPluginInterests(pluginDescriptor.id(), notificationsInterestedIn); } } catch (Exception e) { LOGGER.warn("Error occurred during plugin notification interest registration.", e); } } }
@Test public void shouldLogWarningIfPluginTriesToRegisterForInvalidNotificationType() { NotificationPluginRegistrar notificationPluginRegistrar = new NotificationPluginRegistrar(pluginManager, notificationExtension, notificationPluginRegistry); try (LogFixture logging = LogFixture.logFixtureFor(NotificationPluginRegistrar.class, Level.WARN)) { notificationPluginRegistrar.pluginLoaded(GoPluginDescriptor.builder().id(PLUGIN_ID_1).isBundledPlugin(true).build()); assertTrue(logging.contains(Level.WARN, "Plugin 'plugin-id-1' is trying to register for 'pipeline-status' which is not a valid notification type. Valid notification types are")); assertTrue(logging.contains(Level.WARN, "Plugin 'plugin-id-1' is trying to register for 'job-status' which is not a valid notification type. Valid notification types are")); } }
@Override public DescribeTopicsResult describeTopics(final TopicCollection topics, DescribeTopicsOptions options) { if (topics instanceof TopicIdCollection) return DescribeTopicsResult.ofTopicIds(handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options)); else if (topics instanceof TopicNameCollection) return DescribeTopicsResult.ofTopicNames(handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(((TopicNameCollection) topics).topicNames(), options)); else throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics."); }
@SuppressWarnings({"NPathComplexity", "CyclomaticComplexity"}) @Test public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() throws ExecutionException, InterruptedException { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); String topicName0 = "test-0"; String topicName1 = "test-1"; String topicName2 = "test-2"; Map<String, Uuid> topics = new HashMap<>(); topics.put(topicName0, Uuid.randomUuid()); topics.put(topicName1, Uuid.randomUuid()); topics.put(topicName2, Uuid.randomUuid()); env.kafkaClient().prepareResponse( prepareDescribeClusterResponse(0, env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 2, MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED) ); DescribeTopicPartitionsResponseData dataFirstPart = new DescribeTopicPartitionsResponseData(); addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), singletonList(0)); addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName1, topics.get(topicName1), singletonList(0)); dataFirstPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName1) .setPartitionIndex(1)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 3) return false; if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; if (!request.topics().get(2).name().equals(topicName2)) return false; return request.cursor() == null; }, new DescribeTopicPartitionsResponse(dataFirstPart)); DescribeTopicPartitionsResponseData dataSecondPart = new DescribeTopicPartitionsResponseData(); addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), singletonList(1)); addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName2, topics.get(topicName2), singletonList(0)); dataSecondPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName2) .setPartitionIndex(1)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 2) return false; if (!request.topics().get(0).name().equals(topicName1)) return false; if (!request.topics().get(1).name().equals(topicName2)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); return cursor != null && cursor.topicName().equals(topicName1) && cursor.partitionIndex() == 1; }, new DescribeTopicPartitionsResponse(dataSecondPart)); DescribeTopicPartitionsResponseData dataThirdPart = new DescribeTopicPartitionsResponseData(); addPartitionToDescribeTopicPartitionsResponse(dataThirdPart, topicName2, topics.get(topicName2), singletonList(1)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 1) return false; if (!request.topics().get(0).name().equals(topicName2)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); return cursor != null && cursor.topicName().equals(topicName2) && cursor.partitionIndex() == 1; }, new DescribeTopicPartitionsResponse(dataThirdPart)); DescribeTopicsResult result = env.adminClient().describeTopics( asList(topicName1, topicName0, topicName2), new DescribeTopicsOptions() ); Map<String, 
TopicDescription> topicDescriptions = result.allTopicNames().get(); assertEquals(3, topicDescriptions.size()); TopicDescription topicDescription = topicDescriptions.get(topicName0); assertEquals(1, topicDescription.partitions().size()); assertEquals(0, topicDescription.partitions().get(0).partition()); topicDescription = topicDescriptions.get(topicName1); assertEquals(2, topicDescription.partitions().size()); topicDescription = topicDescriptions.get(topicName2); assertEquals(2, topicDescription.partitions().size()); assertNull(topicDescription.authorizedOperations()); } }
public static String getValue( Object object, Field field ) { try { Method getMethod = getDeclaredMethod( object.getClass(), GET_PREFIX + StringUtils.capitalize( field.getName() ) ); return (String) getMethod.invoke( object ); } catch ( NoSuchMethodException | IllegalAccessException | InvocationTargetException e ) { return null; } }
@Test public void testGetValue() throws NoSuchFieldException { TestConnectionWithBucketsDetailsChild testConnectionDetails = new TestConnectionWithBucketsDetailsChild(); testConnectionDetails.setPassword( PASSWORD ); testConnectionDetails.setPassword3( PASSWORD3 ); String value = EncryptUtils.getValue( testConnectionDetails, testConnectionDetails.getClass().getDeclaredField( "password3" ) ); Assert.assertNotNull( value ); Assert.assertEquals( PASSWORD3, value ); }
@Override public double getStdDev() { // two-pass algorithm for variance, avoids numeric overflow if (values.length <= 1) { return 0; } final double mean = getMean(); double variance = 0; for (int i = 0; i < values.length; i++) { final double diff = values[i] - mean; variance += normWeights[i] * diff * diff; } return Math.sqrt(variance); }
@Test public void calculatesTheStdDev() { assertThat(snapshot.getStdDev()) .isEqualTo(1.2688, offset(0.0001)); }
public int getCurrentTableCapacity() { return table.length; }
@Test void testSizeComparator() { KeyMap<String, String> map1 = new KeyMap<>(5); KeyMap<String, String> map2 = new KeyMap<>(80); assertThat(map1.getCurrentTableCapacity()).isLessThan(map2.getCurrentTableCapacity()); assertThat(KeyMap.CapacityDescendingComparator.INSTANCE.compare(map1, map1)).isZero(); assertThat(KeyMap.CapacityDescendingComparator.INSTANCE.compare(map2, map2)).isZero(); assertThat(KeyMap.CapacityDescendingComparator.INSTANCE.compare(map1, map2)).isPositive(); assertThat(KeyMap.CapacityDescendingComparator.INSTANCE.compare(map2, map1)).isNegative(); }
public static void main(String[] args) throws IOException, ClassNotFoundException { final var dataSource = createDataSource(); deleteSchema(dataSource); createSchema(dataSource); // Initialize the Country object for China final var china = new Country( 86, "China", "Asia", "Chinese" ); // Initialize the Country object for the United Arab Emirates final var unitedArabEmirates = new Country( 971, "United Arab Emirates", "Asia", "Arabic" ); // Wrap each Country together with the data source in a CountrySchemaSql final var serializedChina = new CountrySchemaSql(china, dataSource); final var serializedUnitedArabEmirates = new CountrySchemaSql(unitedArabEmirates, dataSource); /* CountrySchemaSql.insertCountry() serializes the wrapped Country to a byte array and persists it to the database. See CountrySchemaSql.java for details. */ serializedChina.insertCountry(); serializedUnitedArabEmirates.insertCountry(); /* CountrySchemaSql.selectCountry() reads the bytes back from the database and deserializes them into a Country object. See CountrySchemaSql.java for details. */ serializedChina.selectCountry(); serializedUnitedArabEmirates.selectCountry(); }
@Test void shouldExecuteSerializedEntityWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
public String doLayout(ILoggingEvent event) { if (!isStarted()) { return CoreConstants.EMPTY_STRING; } return writeLoopOnConverters(event); }
@Test public void testNopExceptionHandler() { pl.setPattern("%nopex %m%n"); pl.start(); String val = pl.doLayout(makeLoggingEvent(aMessage, ex)); assertFalse(val.contains("java.lang.Exception: Bogus exception")); }
public static <T> DequeCoder<T> of(Coder<T> elemCoder) { return new DequeCoder<>(elemCoder); }
@Test public void structuralValueDecodeEncodeEqualIterable() throws Exception { DequeCoder<byte[]> coder = DequeCoder.of(ByteArrayCoder.of()); Deque<byte[]> value = new ArrayDeque<>(Collections.singletonList(new byte[] {1, 2, 3, 4})); CoderProperties.structuralValueDecodeEncodeEqualIterable(coder, value); }
public T allowDuplicateContentLengths(boolean allow) { this.allowDuplicateContentLengths = allow; return get(); }
@Test void allowDuplicateContentLengths() { checkDefaultAllowDuplicateContentLengths(conf); conf.allowDuplicateContentLengths(true); assertThat(conf.allowDuplicateContentLengths()).as("allow duplicate Content-Length headers").isTrue(); checkDefaultMaxInitialLineLength(conf); checkDefaultMaxHeaderSize(conf); checkDefaultMaxChunkSize(conf); checkDefaultValidateHeaders(conf); checkDefaultInitialBufferSize(conf); }
@Override public OAuth2AccessTokenDO grantImplicit(Long userId, Integer userType, String clientId, List<String> scopes) { return oauth2TokenService.createAccessToken(userId, userType, clientId, scopes); }
@Test public void testGrantImplicit() { // prepare parameters Long userId = randomLongId(); Integer userType = randomEle(UserTypeEnum.values()).getValue(); String clientId = randomString(); List<String> scopes = Lists.newArrayList("read", "write"); // mock the method OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class); when(oauth2TokenService.createAccessToken(eq(userId), eq(userType), eq(clientId), eq(scopes))).thenReturn(accessTokenDO); // invoke and assert assertPojoEquals(accessTokenDO, oauth2GrantService.grantImplicit( userId, userType, clientId, scopes)); }
@Override public boolean nukeExistingCluster() throws Exception { log.info("Nuking metadata of existing cluster, ledger root path: {}", ledgersRootPath); if (!store.exists(ledgersRootPath + "/" + INSTANCEID).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { log.info("There is no existing cluster with ledgersRootPath: {}, so exiting nuke operation", ledgersRootPath); return true; } @Cleanup RegistrationClient registrationClient = new PulsarRegistrationClient(store, ledgersRootPath); Collection<BookieId> rwBookies = registrationClient.getWritableBookies() .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS).getValue(); if (rwBookies != null && !rwBookies.isEmpty()) { log.error("Bookies are still up and connected to this cluster, " + "stop all bookies before nuking the cluster"); return false; } Collection<BookieId> roBookies = registrationClient.getReadOnlyBookies() .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS).getValue(); if (roBookies != null && !roBookies.isEmpty()) { log.error("Readonly Bookies are still up and connected to this cluster, " + "stop all bookies before nuking the cluster"); return false; } LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRootPath); LedgerManagerFactory ledgerManagerFactory = new PulsarLedgerManagerFactory(); ledgerManagerFactory.initialize(conf, layoutManager, LegacyHierarchicalLedgerManagerFactory.CUR_VERSION); return ledgerManagerFactory.validateAndNukeExistingCluster(conf, layoutManager); }
@Test(dataProvider = "impl") public void testNukeExistingCluster(String provider, Supplier<String> urlSupplier) throws Exception { methodSetup(urlSupplier); assertTrue(registrationManager.initNewCluster()); assertClusterExists(); assertTrue(registrationManager.nukeExistingCluster()); assertClusterNotExists(); }
public static String composeFullyQualifiedTableName(String catalog, String schema, String tableName, char separator) { StringBuilder sb = new StringBuilder(); if (stringHasValue(catalog)) { sb.append(catalog); sb.append(separator); } if (stringHasValue(schema)) { sb.append(schema); sb.append(separator); } else { if (sb.length() > 0) { sb.append(separator); } } sb.append(tableName); return sb.toString(); }
@Test void testNoSchema() { String answer = StringUtility.composeFullyQualifiedTableName("catalog", null, "table", '.'); assertEquals("catalog..table", answer); }
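The separator handling above is deliberately asymmetric: when a catalog is present but the schema is missing, an extra separator is kept as a placeholder so the catalog is not mistaken for a schema. A sketch of the four combinations (names are illustrative):

StringUtility.composeFullyQualifiedTableName("cat", "sch", "t", '.'); // "cat.sch.t"
StringUtility.composeFullyQualifiedTableName(null, "sch", "t", '.');  // "sch.t"
StringUtility.composeFullyQualifiedTableName("cat", null, "t", '.');  // "cat..t" (placeholder separator)
StringUtility.composeFullyQualifiedTableName(null, null, "t", '.');   // "t"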
@Override public boolean putRowWait( RowMetaInterface rowMeta, Object[] rowData, long time, TimeUnit tu ) { return putRow( rowMeta, rowData ); }
@Test public void testPutRowWait() throws Exception { rowSet.putRowWait( new RowMeta(), row, 1, TimeUnit.SECONDS ); assertSame( row, rowSet.getRowWait( 1, TimeUnit.SECONDS ) ); }
public ControllerResult<ElectMasterResponseHeader> electMaster(final ElectMasterRequestHeader request, final ElectPolicy electPolicy) { final String brokerName = request.getBrokerName(); final Long brokerId = request.getBrokerId(); final ControllerResult<ElectMasterResponseHeader> result = new ControllerResult<>(new ElectMasterResponseHeader()); final ElectMasterResponseHeader response = result.getResponse(); if (!isContainsBroker(brokerName)) { // this broker set hasn't been registered result.setCodeAndRemark(ResponseCode.CONTROLLER_BROKER_NEED_TO_BE_REGISTERED, "Broker hasn't been registered"); return result; } final SyncStateInfo syncStateInfo = this.syncStateSetInfoTable.get(brokerName); final BrokerReplicaInfo brokerReplicaInfo = this.replicaInfoTable.get(brokerName); final Set<Long> syncStateSet = syncStateInfo.getSyncStateSet(); final Long oldMaster = syncStateInfo.getMasterBrokerId(); Set<Long> allReplicaBrokers = controllerConfig.isEnableElectUncleanMaster() ? brokerReplicaInfo.getAllBroker() : null; Long newMaster = null; if (syncStateInfo.isFirstTimeForElect()) { // If never have a master in this broker set, in other words, it is the first time to elect a master // elect it as the first master newMaster = brokerId; } // elect by policy if (newMaster == null || newMaster == -1) { // we should assign this assignedBrokerId when the brokerAddress need to be elected by force Long assignedBrokerId = request.getDesignateElect() ? brokerId : null; newMaster = electPolicy.elect(brokerReplicaInfo.getClusterName(), brokerReplicaInfo.getBrokerName(), syncStateSet, allReplicaBrokers, oldMaster, assignedBrokerId); } if (newMaster != null && newMaster.equals(oldMaster)) { // old master still valid, change nothing String err = String.format("The old master %s is still alive, not need to elect new master for broker %s", oldMaster, brokerReplicaInfo.getBrokerName()); LOGGER.warn("{}", err); // the master still exist response.setMasterEpoch(syncStateInfo.getMasterEpoch()); response.setSyncStateSetEpoch(syncStateInfo.getSyncStateSetEpoch()); response.setMasterBrokerId(oldMaster); response.setMasterAddress(brokerReplicaInfo.getBrokerAddress(oldMaster)); result.setBody(new ElectMasterResponseBody(syncStateSet).encode()); result.setCodeAndRemark(ResponseCode.CONTROLLER_MASTER_STILL_EXIST, err); return result; } // a new master is elected if (newMaster != null) { final int masterEpoch = syncStateInfo.getMasterEpoch(); final int syncStateSetEpoch = syncStateInfo.getSyncStateSetEpoch(); final HashSet<Long> newSyncStateSet = new HashSet<>(); newSyncStateSet.add(newMaster); response.setMasterBrokerId(newMaster); response.setMasterAddress(brokerReplicaInfo.getBrokerAddress(newMaster)); response.setMasterEpoch(masterEpoch + 1); response.setSyncStateSetEpoch(syncStateSetEpoch + 1); ElectMasterResponseBody responseBody = new ElectMasterResponseBody(newSyncStateSet); BrokerMemberGroup brokerMemberGroup = buildBrokerMemberGroup(brokerReplicaInfo); if (null != brokerMemberGroup) { responseBody.setBrokerMemberGroup(brokerMemberGroup); } result.setBody(responseBody.encode()); final ElectMasterEvent event = new ElectMasterEvent(brokerName, newMaster); result.addEvent(event); LOGGER.info("Elect new master {} for broker {}", newMaster, brokerName); return result; } // If elect failed and the electMaster is triggered by controller (we can figure it out by brokerAddress), // we still need to apply an ElectMasterEvent to tell the statemachine // that the master was shutdown and no new master was elected. 
if (request.getBrokerId() == null || request.getBrokerId() == -1) { final ElectMasterEvent event = new ElectMasterEvent(false, brokerName); result.addEvent(event); result.setCodeAndRemark(ResponseCode.CONTROLLER_MASTER_NOT_AVAILABLE, "Old master has gone down and no new master could be elected"); } else { result.setCodeAndRemark(ResponseCode.CONTROLLER_ELECT_MASTER_FAILED, "Failed to elect a new master"); } LOGGER.warn("Failed to elect a new master for broker {}", brokerName); return result; }
@Test public void testElectMasterPreferHigherPriorityWhenEpochAndOffsetEquals() { mockMetaData(); final ElectMasterRequestHeader request = new ElectMasterRequestHeader(DEFAULT_BROKER_NAME); ElectPolicy electPolicy = new DefaultElectPolicy(this.heartbeatManager::isBrokerActive, this.heartbeatManager::getBrokerLiveInfo); mockHeartbeatDataHigherPriority(); final ControllerResult<ElectMasterResponseHeader> cResult = this.replicasInfoManager.electMaster(request, electPolicy); final ElectMasterResponseHeader response = cResult.getResponse(); assertEquals(DEFAULT_IP[2], response.getMasterAddress()); assertEquals(3L, response.getMasterBrokerId().longValue()); assertEquals(2, response.getMasterEpoch().intValue()); }
public void replayEndTransactionMarker( long producerId, TransactionResult result ) throws RuntimeException { Offsets pendingOffsets = pendingTransactionalOffsets.remove(producerId); if (pendingOffsets == null) { log.debug("Replayed end transaction marker with result {} for producer id {} but " + "no pending offsets are present. Ignoring it.", result, producerId); return; } pendingOffsets.offsetsByGroup.keySet().forEach(groupId -> { TimelineHashSet<Long> openTransactions = openTransactionsByGroup.get(groupId); if (openTransactions != null) { openTransactions.remove(producerId); if (openTransactions.isEmpty()) { openTransactionsByGroup.remove(groupId); } } }); if (result == TransactionResult.COMMIT) { log.debug("Committed transactional offset commits for producer id {}.", producerId); pendingOffsets.offsetsByGroup.forEach((groupId, topicOffsets) -> { topicOffsets.forEach((topicName, partitionOffsets) -> { partitionOffsets.forEach((partitionId, offsetAndMetadata) -> { OffsetAndMetadata existingOffsetAndMetadata = offsets.get( groupId, topicName, partitionId ); // We always keep the most recent committed offset when we have a mix of transactional and regular // offset commits. Without preserving information of the commit record offset, compaction of the // __consumer_offsets topic itself may result in the wrong offset commit being materialized. if (existingOffsetAndMetadata == null || offsetAndMetadata.recordOffset > existingOffsetAndMetadata.recordOffset) { log.debug("Committed transactional offset commit {} for producer id {} in group {} " + "with topic {} and partition {}.", offsetAndMetadata, producerId, groupId, topicName, partitionId); OffsetAndMetadata previousValue = offsets.put( groupId, topicName, partitionId, offsetAndMetadata ); if (previousValue == null) { metrics.incrementNumOffsets(); } } else { log.info("Skipped the materialization of transactional offset commit {} for producer id {} in group {} with topic {}, " + "partition {} since its record offset {} is smaller than the record offset {} of the last committed offset.", offsetAndMetadata, producerId, groupId, topicName, partitionId, offsetAndMetadata.recordOffset, existingOffsetAndMetadata.recordOffset); } }); }); }); } else { log.debug("Aborted transactional offset commits for producer id {}.", producerId); } }
@Test public void testOffsetCommitsNumberMetricWithTransactionalOffsets() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); // Add pending transactional commit for producer id 4. verifyTransactionalReplay(context, 4L, "foo", "bar", 0, new OffsetAndMetadata( 0L, 100L, OptionalInt.empty(), "small", context.time.milliseconds(), OptionalLong.empty() )); // Add pending transactional commit for producer id 5. verifyTransactionalReplay(context, 5L, "foo", "bar", 0, new OffsetAndMetadata( 1L, 101L, OptionalInt.empty(), "small", context.time.milliseconds(), OptionalLong.empty() )); // Add pending transactional commit for producer id 6. verifyTransactionalReplay(context, 6L, "foo", "bar", 1, new OffsetAndMetadata( 2L, 200L, OptionalInt.empty(), "small", context.time.milliseconds(), OptionalLong.empty() )); // Commit all the transactions. context.replayEndTransactionMarker(4L, TransactionResult.COMMIT); context.replayEndTransactionMarker(5L, TransactionResult.COMMIT); context.replayEndTransactionMarker(6L, TransactionResult.COMMIT); // Verify the sensor is called twice as we have only // two partitions. verify(context.metrics, times(2)).incrementNumOffsets(); }
public static String getSchemaKind(String json) { int i = json.indexOf("\"kind\""); if (i >= 0) { int s = json.indexOf("\"", i + 6); if (s >= 0) { int e = json.indexOf("\"", s + 1); if (e >= 0) { return json.substring(s + 1, e); } } } return null; }
@Test public void testGetSchemaKind() throws Exception { File file = ResourceUtils.getResourceAsFile("json/aop.json"); String json = PackageHelper.loadText(file); assertEquals("model", PackageHelper.getSchemaKind(json)); }
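A short sketch of the scan above: it is a plain substring search for the first quoted value after a literal "kind" key, not a full JSON parse (the input strings are illustrative):

String kind = PackageHelper.getSchemaKind("{ \"kind\": \"model\", \"name\": \"aop\" }"); // "model"
String none = PackageHelper.getSchemaKind("{ \"name\": \"aop\" }");                      // null (no "kind" key)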
@Override public Table getTable(long id, String name, List<Column> schema, String dbName, String catalogName, Map<String, String> properties) throws DdlException { Map<String, String> newProp = new HashMap<>(properties); newProp.putIfAbsent(JDBCTable.JDBC_TABLENAME, "\"" + dbName + "\".\"" + name + "\""); return new JDBCTable(id, name, schema, dbName, catalogName, newProp); }
@Test public void testGetTable() throws SQLException { new Expectations() { { dataSource.getConnection(); result = connection; minTimes = 0; connection.getCatalog(); result = "t1"; minTimes = 0; connection.getMetaData().getColumns("t1", "test", "tbl1", "%"); result = columnResult; minTimes = 0; } }; try { JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource); Table table = jdbcMetadata.getTable("test", "tbl1"); Assert.assertTrue(table instanceof JDBCTable); Assert.assertEquals("catalog.test.tbl1", table.getUUID()); Assert.assertEquals("tbl1", table.getName()); Assert.assertNull(properties.get(JDBCTable.JDBC_TABLENAME)); Assert.assertTrue(table.getColumn("a").getType().isFloat()); Assert.assertTrue(table.getColumn("b").getType().isDouble()); Assert.assertTrue(table.getColumn("c").getType().isDecimalV3()); Assert.assertTrue(table.getColumn("d").getType().isStringType()); Assert.assertTrue(table.getColumn("e").getType().isStringType()); Assert.assertTrue(table.getColumn("f").getType().isBinaryType()); Assert.assertTrue(table.getColumn("g").getType().isStringType()); Assert.assertTrue(table.getColumn("h").getType().isDate()); Assert.assertTrue(table.getColumn("i").getType().isStringType()); Assert.assertTrue(table.getColumn("j").getType().isStringType()); Assert.assertTrue(table.getColumn("k").getType().isStringType()); Assert.assertTrue(table.getColumn("l").getType().isBinaryType()); } catch (Exception e) { System.out.println(e.getMessage()); Assert.fail(); } }
public static String getRootCauseMessage(Throwable t) { return formatMessageCause(getRootCause(t)); }
@Test public void getRootCauseMessage() { assertThat(ExceptionUtils.getRootCauseMessage(new Exception("cause1", new Exception("root")))).satisfies(m -> { assertThat(m).isNotBlank(); assertThat(m).isEqualTo("root."); }); }
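A hedged sketch of the behavior the test asserts: the deepest cause's message wins, and formatMessageCause appends a trailing period (the exception types here are illustrative):

Throwable t = new IllegalStateException("outer", new java.io.IOException("disk full"));
String msg = ExceptionUtils.getRootCauseMessage(t); // "disk full."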
@Override public int partition(StatisticsOrRecord wrapper, int numPartitions) { if (wrapper.hasStatistics()) { this.delegatePartitioner = delegatePartitioner(wrapper.statistics()); return (int) (roundRobinCounter(numPartitions).getAndIncrement() % numPartitions); } else { if (delegatePartitioner != null) { return delegatePartitioner.partition(wrapper.record(), numPartitions); } else { int partition = (int) (roundRobinCounter(numPartitions).getAndIncrement() % numPartitions); LOG.trace("Statistics not available. Round robin to partition {}", partition); return partition; } } }
@Test public void testRoundRobinStatisticsWrapper() { RangePartitioner partitioner = new RangePartitioner(SCHEMA, SORT_ORDER); Set<Integer> results = Sets.newHashSetWithExpectedSize(numPartitions); for (int i = 0; i < numPartitions; ++i) { GlobalStatistics statistics = GlobalStatistics.fromRangeBounds(1L, new SortKey[] {CHAR_KEYS.get("a")}); results.add( partitioner.partition(StatisticsOrRecord.fromStatistics(statistics), numPartitions)); } // round-robin. every partition should get an assignment assertThat(results).containsExactlyInAnyOrder(0, 1, 2, 3); }
@Override public ObjectNode encode(OpenstackNode node, CodecContext context) { checkNotNull(node, "Openstack node cannot be null"); ObjectNode result = context.mapper().createObjectNode() .put(HOST_NAME, node.hostname()) .put(TYPE, node.type().name()) .put(STATE, node.state().name()) .put(MANAGEMENT_IP, node.managementIp().toString()); OpenstackNode.NodeType type = node.type(); // serialize uplink port only for gateway node if (type == OpenstackNode.NodeType.GATEWAY) { result.put(UPLINK_PORT, node.uplinkPort()); } // serialize keystone config for controller node if (type == OpenstackNode.NodeType.CONTROLLER) { ObjectNode keystoneConfigJson = context.codec(KeystoneConfig.class) .encode(node.keystoneConfig(), context); result.set(KEYSTONE_CONFIG, keystoneConfigJson); // serialize neutron config for controller node if (node.neutronConfig() != null) { ObjectNode neutronConfigJson = context.codec(NeutronConfig.class) .encode(node.neutronConfig(), context); result.set(NEUTRON_CONFIG, neutronConfigJson); } } // serialize integration bridge config if (node.intgBridge() != null) { result.put(INTEGRATION_BRIDGE, node.intgBridge().toString()); } // serialize VLAN interface, it is valid only if any VLAN interface presents if (node.vlanIntf() != null) { result.put(VLAN_INTF_NAME, node.vlanIntf()); } // serialize data IP only if it presents if (node.dataIp() != null) { result.put(DATA_IP, node.dataIp().toString()); } // serialize physical interfaces, it is valid only if any of physical interface presents if (node.phyIntfs() != null && !node.phyIntfs().isEmpty()) { ArrayNode phyIntfs = context.mapper().createArrayNode(); node.phyIntfs().forEach(phyIntf -> { ObjectNode phyIntfJson = context.codec(OpenstackPhyInterface.class).encode(phyIntf, context); phyIntfs.add(phyIntfJson); }); result.set(PHYSICAL_INTERFACES, phyIntfs); } // serialize controllers, it is valid only if any of controller presents if (node.controllers() != null && !node.controllers().isEmpty()) { ArrayNode controllers = context.mapper().createArrayNode(); node.controllers().forEach(controller -> { ObjectNode controllerJson = context.codec(ControllerInfo.class).encode(controller, context); controllers.add(controllerJson); }); result.set(CONTROLLERS, controllers); } // serialize SSH authentication info, it is valid only if auth info presents if (node.sshAuthInfo() != null) { ObjectNode sshAuthJson = context.codec(OpenstackSshAuth.class) .encode(node.sshAuthInfo(), context); result.set(SSH_AUTH, sshAuthJson); } // serialize DPDK config, it is valid only if dpdk config presents if (node.dpdkConfig() != null) { ObjectNode dpdkConfigJson = context.codec(DpdkConfig.class) .encode(node.dpdkConfig(), context); result.set(DPDK_CONFIG, dpdkConfigJson); } return result; }
@Test public void testOpenstackDpdkComputeNodeEncode() { DpdkInterface dpdkInterface1 = DefaultDpdkInterface.builder() .deviceName("br-int") .intf("dpdk0") .mtu(Long.valueOf(1600)) .pciAddress("0000:85:00.0") .type(DpdkInterface.Type.DPDK) .build(); DpdkInterface dpdkInterface2 = DefaultDpdkInterface.builder() .deviceName("br-tun") .intf("dpdk1") .pciAddress("0000:85:00.1") .type(DpdkInterface.Type.DPDK) .build(); Collection<DpdkInterface> dpdkInterfaceCollection = new ArrayList<>(); dpdkInterfaceCollection.add(dpdkInterface1); dpdkInterfaceCollection.add(dpdkInterface2); DpdkConfig dpdkConfig = DefaultDpdkConfig.builder() .datapathType(DpdkConfig.DatapathType.NETDEV) .dpdkIntfs(dpdkInterfaceCollection) .build(); OpenstackNode node = DefaultOpenstackNode.builder() .hostname("compute") .type(OpenstackNode.NodeType.COMPUTE) .state(NodeState.INIT) .managementIp(IpAddress.valueOf("10.10.10.1")) .intgBridge(DeviceId.deviceId("br-int")) .vlanIntf("vlan") .dataIp(IpAddress.valueOf("20.20.20.2")) .dpdkConfig(dpdkConfig) .build(); ObjectNode nodeJson = openstackNodeCodec.encode(node, context); assertThat(nodeJson, matchesOpenstackNode(node)); }
public synchronized Topology addSource(final String name, final String... topics) { internalTopologyBuilder.addSource(null, name, null, null, null, topics); return this; }
@Test public void shouldNotAllowToAddTopicTwice() { topology.addSource("source", "topic-1"); try { topology.addSource("source-2", "topic-1"); fail("Should throw TopologyException for already used topic"); } catch (final TopologyException expected) { } }
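A sketch of the invariant the test exercises: a topic may back at most one source node in a Topology, so registering it twice fails (names are illustrative):

Topology topology = new Topology();
topology.addSource("orders-source", "orders");
topology.addSource("orders-copy", "orders"); // throws TopologyException (the topic is already registered)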
@Override public synchronized void editSchedule() { updateConfigIfNeeded(); long startTs = clock.getTime(); CSQueue root = scheduler.getRootQueue(); Resource clusterResources = Resources.clone(scheduler.getClusterResource()); containerBasedPreemptOrKill(root, clusterResources); if (LOG.isDebugEnabled()) { LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms."); } }
@Test public void testSkipAMContainer() { int[][] qData = new int[][] { // / A B { 100, 50, 50 }, // abs { 100, 100, 100 }, // maxcap { 100, 100, 0 }, // used { 70, 20, 50 }, // pending { 0, 0, 0 }, // reserved { 5, 4, 1 }, // apps { -1, 1, 1 }, // req granularity { 2, 0, 0 }, // subqueues }; setAMContainer = true; ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); policy.editSchedule(); // By skipping AM Container, all other 24 containers of appD will be // preempted verify(mDisp, times(24)).handle(argThat(new IsPreemptionRequestFor(appD))); // By skipping AM Container, all other 24 containers of appC will be // preempted verify(mDisp, times(24)).handle(argThat(new IsPreemptionRequestFor(appC))); // Since AM containers of appC and appD are saved, 2 containers from appB // has to be preempted. verify(mDisp, times(2)).handle(argThat(new IsPreemptionRequestFor(appB))); setAMContainer = false; }
@Override public int getMaxCatalogNameLength() { return 0; }
@Test void assertGetMaxCatalogNameLength() { assertThat(metaData.getMaxCatalogNameLength(), is(0)); }
public static String formatTM(TimeZone tz, Date date) { return formatTM(tz, date, new DatePrecision()); }
@Test public void testFormatTM() { assertEquals("020000.000", DateUtils.formatTM(tz, new Date(0))); }
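A sketch of the DICOM TM encoding above: HHmmss plus fractional seconds, rendered in the supplied time zone. The "020000.000" in the test suggests its tz fixture is UTC+2; that zone is an assumption here:

String tm = DateUtils.formatTM(TimeZone.getTimeZone("GMT+02:00"), new Date(0));
// -> "020000.000": epoch midnight UTC is 02:00 in a UTC+2 zone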
public static List<FieldSchema> convert(Schema schema) { return schema.columns().stream() .map(col -> new FieldSchema(col.name(), convertToTypeString(col.type()), col.doc())) .collect(Collectors.toList()); }
@Test public void testComplexSchemaConvertToIcebergSchema() { assertThat(HiveSchemaUtil.convert(COMPLEX_HIVE_SCHEMA).asStruct()) .isEqualTo(COMPLEX_ICEBERG_SCHEMA.asStruct()); }
public static Labels fromString(String stringLabels) throws IllegalArgumentException { Map<String, String> labels = new HashMap<>(); try { if (stringLabels != null && !stringLabels.isEmpty()) { String[] labelsArray = stringLabels.split(","); for (String label : labelsArray) { String[] fields = label.split("="); labels.put(fields[0].trim(), fields[1].trim()); } } } catch (Exception e) { throw new IllegalArgumentException("Failed to parse labels from string " + stringLabels, e); } return new Labels(labels); }
@Test public void testParseNullLabels() { String validLabels = null; assertThat(Labels.fromString(validLabels), is(Labels.EMPTY)); }
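A sketch of the parsing contract above: comma-separated key=value pairs with surrounding whitespace trimmed; null yields an empty Labels, and a pair without '=' surfaces as IllegalArgumentException (the label keys are illustrative):

Labels parsed = Labels.fromString("app=kafka, strimzi.io/kind=Kafka"); // two entries, trimmed
Labels none = Labels.fromString(null);        // no entries (equal to Labels.EMPTY, per the test)
Labels bad = Labels.fromString("no-value");   // throws IllegalArgumentException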
public static void forceMkdir(String path) throws IOException { FileUtils.forceMkdir(new File(path)); }
@Test void testForceMkdirWithPath() throws IOException { Path path = Paths.get(TMP_PATH, UUID.randomUUID().toString(), UUID.randomUUID().toString()); DiskUtils.forceMkdir(path.toString()); File file = path.toFile(); assertTrue(file.exists()); file.deleteOnExit(); }
@SuppressWarnings({"unchecked", "UnstableApiUsage"}) @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement) { if (!(statement.getStatement() instanceof DropStatement)) { return statement; } final DropStatement dropStatement = (DropStatement) statement.getStatement(); if (!dropStatement.isDeleteTopic()) { return statement; } final SourceName sourceName = dropStatement.getName(); final DataSource source = metastore.getSource(sourceName); if (source != null) { if (source.isSource()) { throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text()); } checkTopicRefs(source); deleteTopic(source); final Closer closer = Closer.create(); closer.register(() -> deleteKeySubject(source)); closer.register(() -> deleteValueSubject(source)); try { closer.close(); } catch (final KsqlException e) { throw e; } catch (final Exception e) { throw new KsqlException(e); } } else if (!dropStatement.getIfExists()) { throw new KsqlException("Could not find source to delete topic for: " + statement); } final T withoutDelete = (T) dropStatement.withoutDeleteClause(); final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";"; return statement.withStatement(withoutDeleteText, withoutDelete); }
@Test public void shouldThrowExceptionIfSourceDoesNotExist() { // Given: final ConfiguredStatement<DropStream> dropStatement = givenStatement( "DROP SOMETHING", new DropStream(SourceName.of("SOMETHING_ELSE"), false, true)); // When: final Exception e = assertThrows( RuntimeException.class, () -> deleteInjector.inject(dropStatement) ); // Then: assertThat(e.getMessage(), containsString("Could not find source to delete topic for")); }
@Override public void getFields( RowMetaInterface r, String origin, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore ) { // Check compatibility mode boolean compatibilityMode = ValueMetaBase.convertStringToBoolean( space.getVariable( Const.KETTLE_COMPATIBILITY_MEMORY_GROUP_BY_SUM_AVERAGE_RETURN_NUMBER_TYPE, "N" ) ); // re-assemble a new row of metadata // RowMetaInterface fields = new RowMeta(); // Add the grouping fields in the correct order... // for ( int i = 0; i < groupField.length; i++ ) { ValueMetaInterface valueMeta = r.searchValueMeta( groupField[i] ); if ( valueMeta != null ) { valueMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL ); fields.addValueMeta( valueMeta ); } } // Re-add aggregates // for ( int i = 0; i < subjectField.length; i++ ) { ValueMetaInterface subj = r.searchValueMeta( subjectField[i] ); if ( subj != null || aggregateType[i] == TYPE_GROUP_COUNT_ANY ) { String value_name = aggregateField[i]; int value_type = ValueMetaInterface.TYPE_NONE; int length = -1; int precision = -1; switch ( aggregateType[i] ) { case TYPE_GROUP_FIRST: case TYPE_GROUP_LAST: case TYPE_GROUP_FIRST_INCL_NULL: case TYPE_GROUP_LAST_INCL_NULL: case TYPE_GROUP_MIN: case TYPE_GROUP_MAX: value_type = subj.getType(); break; case TYPE_GROUP_COUNT_DISTINCT: case TYPE_GROUP_COUNT_ALL: case TYPE_GROUP_COUNT_ANY: value_type = ValueMetaInterface.TYPE_INTEGER; break; case TYPE_GROUP_CONCAT_COMMA: value_type = ValueMetaInterface.TYPE_STRING; break; case TYPE_GROUP_SUM: case TYPE_GROUP_AVERAGE: if ( !compatibilityMode && subj.isNumeric() ) { value_type = subj.getType(); } else { value_type = ValueMetaInterface.TYPE_NUMBER; } break; case TYPE_GROUP_MEDIAN: case TYPE_GROUP_PERCENTILE: case TYPE_GROUP_STANDARD_DEVIATION: value_type = ValueMetaInterface.TYPE_NUMBER; break; case TYPE_GROUP_CONCAT_STRING: value_type = ValueMetaInterface.TYPE_STRING; break; default: break; } if ( aggregateType[i] == TYPE_GROUP_COUNT_ALL || aggregateType[i] == TYPE_GROUP_COUNT_DISTINCT || aggregateType[i] == TYPE_GROUP_COUNT_ANY ) { length = ValueMetaInterface.DEFAULT_INTEGER_LENGTH; precision = 0; } else if ( aggregateType[i] == TYPE_GROUP_SUM && value_type != ValueMetaInterface.TYPE_INTEGER && value_type != ValueMetaInterface.TYPE_NUMBER && value_type != ValueMetaInterface.TYPE_BIGNUMBER ) { // If it ain't numeric, we change it to Number // value_type = ValueMetaInterface.TYPE_NUMBER; precision = -1; length = -1; } if ( value_type != ValueMetaInterface.TYPE_NONE ) { ValueMetaInterface v; try { v = ValueMetaFactory.createValueMeta( value_name, value_type ); } catch ( KettlePluginException e ) { log.logError( BaseMessages.getString( PKG, "MemoryGroupByMeta.Exception.UnknownValueMetaType" ), value_type, e ); v = new ValueMetaNone( value_name ); } v.setOrigin( origin ); v.setLength( length, precision ); if ( subj != null ) { v.setConversionMask( subj.getConversionMask() ); } fields.addValueMeta( v ); } } } // Now that we have all the fields we want, we should clear the original row and replace the values... // r.clear(); r.addRowMeta( fields ); }
@Test public void testGetFields() { final String stepName = "this step name"; MemoryGroupByMeta meta = new MemoryGroupByMeta(); meta.setDefault(); meta.allocate( 1, 17 ); // Declare input fields RowMetaInterface rm = getInputRowMeta(); String[] groupFields = new String[2]; groupFields[0] = "myGroupField1"; groupFields[1] = "myGroupField2"; String[] aggregateFields = new String[24]; String[] subjectFields = new String[24]; int[] aggregateTypes = new int[24]; String[] valueFields = new String[24]; subjectFields[0] = "myString"; aggregateTypes[0] = MemoryGroupByMeta.TYPE_GROUP_CONCAT_COMMA; aggregateFields[0] = "ConcatComma"; valueFields[0] = null; subjectFields[1] = "myString"; aggregateTypes[1] = MemoryGroupByMeta.TYPE_GROUP_CONCAT_STRING; aggregateFields[1] = "ConcatString"; valueFields[1] = "|"; subjectFields[2] = "myString"; aggregateTypes[2] = MemoryGroupByMeta.TYPE_GROUP_COUNT_ALL; aggregateFields[2] = "CountAll"; valueFields[2] = null; subjectFields[3] = "myString"; aggregateTypes[3] = MemoryGroupByMeta.TYPE_GROUP_COUNT_ANY; aggregateFields[3] = "CountAny"; valueFields[3] = null; subjectFields[4] = "myString"; aggregateTypes[4] = MemoryGroupByMeta.TYPE_GROUP_COUNT_DISTINCT; aggregateFields[4] = "CountDistinct"; valueFields[4] = null; subjectFields[5] = "myString"; aggregateTypes[5] = MemoryGroupByMeta.TYPE_GROUP_FIRST; aggregateFields[5] = "First(String)"; valueFields[5] = null; subjectFields[6] = "myInteger"; aggregateTypes[6] = MemoryGroupByMeta.TYPE_GROUP_FIRST; aggregateFields[6] = "First(Integer)"; valueFields[6] = null; subjectFields[7] = "myNumber"; aggregateTypes[7] = MemoryGroupByMeta.TYPE_GROUP_FIRST_INCL_NULL; aggregateFields[7] = "FirstInclNull(Number)"; valueFields[7] = null; subjectFields[8] = "myBigNumber"; aggregateTypes[8] = MemoryGroupByMeta.TYPE_GROUP_FIRST_INCL_NULL; aggregateFields[8] = "FirstInclNull(BigNumber)"; valueFields[8] = null; subjectFields[9] = "myBinary"; aggregateTypes[9] = MemoryGroupByMeta.TYPE_GROUP_LAST; aggregateFields[9] = "Last(Binary)"; valueFields[9] = null; subjectFields[10] = "myBoolean"; aggregateTypes[10] = MemoryGroupByMeta.TYPE_GROUP_LAST; aggregateFields[10] = "Last(Boolean)"; valueFields[10] = null; subjectFields[11] = "myDate"; aggregateTypes[11] = MemoryGroupByMeta.TYPE_GROUP_LAST_INCL_NULL; aggregateFields[11] = "LastInclNull(Date)"; valueFields[11] = null; subjectFields[12] = "myTimestamp"; aggregateTypes[12] = MemoryGroupByMeta.TYPE_GROUP_LAST_INCL_NULL; aggregateFields[12] = "LastInclNull(Timestamp)"; valueFields[12] = null; subjectFields[13] = "myInternetAddress"; aggregateTypes[13] = MemoryGroupByMeta.TYPE_GROUP_MAX; aggregateFields[13] = "Max(InternetAddress)"; valueFields[13] = null; subjectFields[14] = "myString"; aggregateTypes[14] = MemoryGroupByMeta.TYPE_GROUP_MAX; aggregateFields[14] = "Max(String)"; valueFields[14] = null; subjectFields[15] = "myInteger"; aggregateTypes[15] = MemoryGroupByMeta.TYPE_GROUP_MEDIAN; // Always returns Number aggregateFields[15] = "Median(Integer)"; valueFields[15] = null; subjectFields[16] = "myNumber"; aggregateTypes[16] = MemoryGroupByMeta.TYPE_GROUP_MIN; aggregateFields[16] = "Min(Number)"; valueFields[16] = null; subjectFields[17] = "myBigNumber"; aggregateTypes[17] = MemoryGroupByMeta.TYPE_GROUP_MIN; aggregateFields[17] = "Min(BigNumber)"; valueFields[17] = null; subjectFields[18] = "myBinary"; aggregateTypes[18] = MemoryGroupByMeta.TYPE_GROUP_PERCENTILE; aggregateFields[18] = "Percentile(Binary)"; valueFields[18] = "0.5"; subjectFields[19] = "myBoolean"; aggregateTypes[19] = 
MemoryGroupByMeta.TYPE_GROUP_STANDARD_DEVIATION; aggregateFields[19] = "StandardDeviation(Boolean)"; valueFields[19] = null; subjectFields[20] = "myDate"; aggregateTypes[20] = MemoryGroupByMeta.TYPE_GROUP_SUM; aggregateFields[20] = "Sum(Date)"; valueFields[20] = null; subjectFields[21] = "myInteger"; aggregateTypes[21] = MemoryGroupByMeta.TYPE_GROUP_SUM; aggregateFields[21] = "Sum(Integer)"; valueFields[21] = null; subjectFields[22] = "myInteger"; aggregateTypes[22] = MemoryGroupByMeta.TYPE_GROUP_AVERAGE; aggregateFields[22] = "Average(Integer)"; valueFields[22] = null; subjectFields[23] = "myDate"; aggregateTypes[23] = MemoryGroupByMeta.TYPE_GROUP_AVERAGE; aggregateFields[23] = "Average(Date)"; valueFields[23] = null; meta.setGroupField( groupFields ); meta.setSubjectField( subjectFields ); meta.setAggregateType( aggregateTypes ); meta.setAggregateField( aggregateFields ); meta.setValueField( valueFields ); Variables vars = new Variables(); meta.getFields( rm, stepName, null, null, vars, null, null ); assertNotNull( rm ); assertEquals( 26, rm.size() ); assertTrue( rm.indexOfValue( "myGroupField1" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( rm.indexOfValue( "myGroupField1" ) ).getType() ); assertTrue( rm.indexOfValue( "myGroupField2" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( rm.indexOfValue( "myGroupField2" ) ).getType() ); assertTrue( rm.indexOfValue( "myGroupField2" ) > rm.indexOfValue( "myGroupField1" ) ); assertTrue( rm.indexOfValue( "ConcatComma" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( rm.indexOfValue( "ConcatComma" ) ).getType() ); assertTrue( rm.indexOfValue( "ConcatString" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( rm.indexOfValue( "ConcatString" ) ).getType() ); assertTrue( rm.indexOfValue( "CountAll" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INTEGER, rm.getValueMeta( rm.indexOfValue( "CountAll" ) ).getType() ); assertTrue( rm.indexOfValue( "CountAny" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INTEGER, rm.getValueMeta( rm.indexOfValue( "CountAny" ) ).getType() ); assertTrue( rm.indexOfValue( "CountDistinct" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INTEGER, rm.getValueMeta( rm.indexOfValue( "CountDistinct" ) ).getType() ); assertTrue( rm.indexOfValue( "First(String)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( rm.indexOfValue( "First(String)" ) ).getType() ); assertTrue( rm.indexOfValue( "First(Integer)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INTEGER, rm.getValueMeta( rm.indexOfValue( "First(Integer)" ) ).getType() ); assertTrue( rm.indexOfValue( "FirstInclNull(Number)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "FirstInclNull(Number)" ) ).getType() ); assertTrue( rm.indexOfValue( "FirstInclNull(BigNumber)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_BIGNUMBER, rm.getValueMeta( rm.indexOfValue( "FirstInclNull(BigNumber)" ) ).getType() ); assertTrue( rm.indexOfValue( "Last(Binary)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_BINARY, rm.getValueMeta( rm.indexOfValue( "Last(Binary)" ) ).getType() ); assertTrue( rm.indexOfValue( "Last(Boolean)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_BOOLEAN, rm.getValueMeta( rm.indexOfValue( "Last(Boolean)" ) ).getType() ); assertTrue( rm.indexOfValue( "LastInclNull(Date)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_DATE, rm.getValueMeta( rm.indexOfValue( "LastInclNull(Date)" ) ).getType() ); assertTrue( 
rm.indexOfValue( "LastInclNull(Timestamp)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_TIMESTAMP, rm.getValueMeta( rm.indexOfValue( "LastInclNull(Timestamp)" ) ).getType() ); assertTrue( rm.indexOfValue( "Max(InternetAddress)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INET, rm.getValueMeta( rm.indexOfValue( "Max(InternetAddress)" ) ).getType() ); assertTrue( rm.indexOfValue( "Max(String)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( rm.indexOfValue( "Max(String)" ) ).getType() ); assertTrue( rm.indexOfValue( "Median(Integer)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "Median(Integer)" ) ).getType() ); assertTrue( rm.indexOfValue( "Min(Number)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "Min(Number)" ) ).getType() ); assertTrue( rm.indexOfValue( "Min(BigNumber)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_BIGNUMBER, rm.getValueMeta( rm.indexOfValue( "Min(BigNumber)" ) ).getType() ); assertTrue( rm.indexOfValue( "Percentile(Binary)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "Percentile(Binary)" ) ).getType() ); assertTrue( rm.indexOfValue( "StandardDeviation(Boolean)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "StandardDeviation(Boolean)" ) ).getType() ); assertTrue( rm.indexOfValue( "Sum(Date)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "Sum(Date)" ) ).getType() ); // Force changed to Numeric assertTrue( rm.indexOfValue( "Sum(Integer)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INTEGER, rm.getValueMeta( rm.indexOfValue( "Sum(Integer)" ) ).getType() ); assertTrue( rm.indexOfValue( "Average(Integer)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_INTEGER, rm.getValueMeta( rm.indexOfValue( "Average(Integer)" ) ).getType() ); assertTrue( rm.indexOfValue( "Average(Date)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "Average(Date)" ) ).getType() ); // Test Compatibility rm = getInputRowMeta(); vars.setVariable( Const.KETTLE_COMPATIBILITY_MEMORY_GROUP_BY_SUM_AVERAGE_RETURN_NUMBER_TYPE, "Y" ); meta.getFields( rm, stepName, null, null, vars, null, null ); assertNotNull( rm ); assertEquals( 26, rm.size() ); assertTrue( rm.indexOfValue( "Average(Integer)" ) >= 0 ); assertEquals( ValueMetaInterface.TYPE_NUMBER, rm.getValueMeta( rm.indexOfValue( "Average(Integer)" ) ).getType() ); }
@Override public Health health() { final Health.Builder health = Health.unknown(); if (!jobRunrProperties.getBackgroundJobServer().isEnabled()) { health .up() .withDetail("backgroundJobServer", "disabled"); } else { final BackgroundJobServer backgroundJobServer = backgroundJobServerProvider.getObject(); if (backgroundJobServer.isRunning()) { health .up() .withDetail("backgroundJobServer", "enabled") .withDetail("backgroundJobServerStatus", "running"); } else { health .down() .withDetail("backgroundJobServer", "enabled") .withDetail("backgroundJobServerStatus", "stopped"); } } return health.build(); }
@Test void givenEnabledBackgroundJobServerAndBackgroundJobServerStopped_ThenHealthIsDown() { when(backgroundJobServerProperties.isEnabled()).thenReturn(true); when(backgroundJobServer.isRunning()).thenReturn(false); assertThat(jobRunrHealthIndicator.health().getStatus()).isEqualTo(Status.DOWN); }
protected final AnyKeyboardViewBase getMiniKeyboard() { return mMiniKeyboard; }
@Test public void testShortPressWithLabelWhenNoPrimaryKeyAndNoPopupItemsShouldNotOutput() throws Exception { ExternalAnyKeyboard anyKeyboard = new ExternalAnyKeyboard( new DefaultAddOn(getApplicationContext(), getApplicationContext()), getApplicationContext(), keyboard_with_keys_with_no_codes, keyboard_with_keys_with_no_codes, "test", 0, 0, "en", "", "", KEYBOARD_ROW_MODE_NORMAL); anyKeyboard.loadKeyboard(mViewUnderTest.mKeyboardDimens); mViewUnderTest.setKeyboard(anyKeyboard, 0); Assert.assertEquals(7, anyKeyboard.getKeys().size()); Assert.assertNull(mViewUnderTest.getMiniKeyboard()); Assert.assertFalse(mViewUnderTest.mMiniKeyboardPopup.isShowing()); final AnyKeyboard.AnyKey key = (AnyKeyboard.AnyKey) anyKeyboard.getKeys().get(4); Assert.assertEquals(0, key.getPrimaryCode()); Assert.assertEquals(0, key.getCodesCount()); Assert.assertEquals(0, key.popupResId); Assert.assertEquals("d", key.label); Assert.assertNull(key.popupCharacters); ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 30, true, false); Assert.assertNull(mViewUnderTest.getMiniKeyboard()); Assert.assertFalse(mViewUnderTest.mMiniKeyboardPopup.isShowing()); ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 30, false, true); Mockito.verify(mMockKeyboardListener).onKey(eq(0), same(key), eq(0), any(), anyBoolean()); }
@Override public CompletableFuture<Versioned<Set<BookieId>>> getReadOnlyBookies() { return getBookiesThenFreshCache(bookieReadonlyRegistrationPath); }
@Test(dataProvider = "impl") public void testGetReadonlyBookies(String provider, Supplier<String> urlSupplier) throws Exception { @Cleanup MetadataStoreExtended store = MetadataStoreExtended.create(urlSupplier.get(), MetadataStoreConfig.builder().fsyncEnable(false).build()); String ledgersRoot = "/test/ledgers-" + UUID.randomUUID(); @Cleanup RegistrationManager rm = new PulsarRegistrationManager(store, ledgersRoot, mock(AbstractConfiguration.class)); @Cleanup RegistrationClient rc = new PulsarRegistrationClient(store, ledgersRoot); Set<BookieId> addresses = prepareNBookies(10); List<String> children = new ArrayList<>(); for (BookieId address : addresses) { children.add(address.toString()); rm.registerBookie(address, true, new BookieServiceInfo()); } Versioned<Set<BookieId>> result = result(rc.getReadOnlyBookies()); assertEquals(result.getValue().size(), addresses.size()); }
public static URI getProxyUri(URI originalUri, URI proxyUri, ApplicationId id) { try { String path = getPath(id, originalUri == null ? "/" : originalUri.getPath()); return new URI(proxyUri.getScheme(), proxyUri.getAuthority(), path, originalUri == null ? null : originalUri.getQuery(), originalUri == null ? null : originalUri.getFragment()); } catch (URISyntaxException e) { throw new RuntimeException("Could not proxy "+originalUri, e); } }
@Test void testGetProxyUri() throws Exception { URI originalUri = new URI("http://host.com/static/foo?bar=bar"); URI proxyUri = new URI("http://proxy.net:8080/"); ApplicationId id = BuilderUtils.newApplicationId(6384623l, 5); URI expected = new URI("http://proxy.net:8080/proxy/application_6384623_0005/static/foo?bar=bar"); URI result = ProxyUriUtils.getProxyUri(originalUri, proxyUri, id); assertEquals(expected, result); }
public static void substituteDeprecatedConfigPrefix( Configuration config, String deprecatedPrefix, String designatedPrefix) { // set the designated key only if it is not set already final int prefixLen = deprecatedPrefix.length(); Configuration replacement = new Configuration(); for (String key : config.keySet()) { if (key.startsWith(deprecatedPrefix)) { String newKey = designatedPrefix + key.substring(prefixLen); if (!config.containsKey(newKey)) { replacement.setString(newKey, config.getString(key, null)); } } } config.addAll(replacement); }
@Test void testSubstituteConfigKeyPrefix() { String deprecatedPrefix1 = "deprecated-prefix"; String deprecatedPrefix2 = "-prefix-2"; String deprecatedPrefix3 = "prefix-3"; String designatedPrefix1 = "p1"; String designatedPrefix2 = "ppp"; String designatedPrefix3 = "zzz"; String depr1 = deprecatedPrefix1 + "var"; String depr2 = deprecatedPrefix2 + "env"; String depr3 = deprecatedPrefix2 + "x"; String desig1 = designatedPrefix1 + "var"; String desig2 = designatedPrefix2 + "env"; String desig3 = designatedPrefix2 + "x"; String val1 = "1"; String val2 = "2"; String val3Depr = "3-"; String val3Desig = "3+"; // config contains only deprecated key 1, and for key 2 both deprecated and designated Configuration cfg = new Configuration(); cfg.setString(depr1, val1); cfg.setString(depr2, val2); cfg.setString(depr3, val3Depr); cfg.setString(desig3, val3Desig); BootstrapTools.substituteDeprecatedConfigPrefix(cfg, deprecatedPrefix1, designatedPrefix1); BootstrapTools.substituteDeprecatedConfigPrefix(cfg, deprecatedPrefix2, designatedPrefix2); BootstrapTools.substituteDeprecatedConfigPrefix(cfg, deprecatedPrefix3, designatedPrefix3); assertThat(cfg.getString(desig1, null)).isEqualTo(val1); assertThat(cfg.getString(desig2, null)).isEqualTo(val2); assertThat(cfg.getString(desig3, null)).isEqualTo(val3Desig); // check that nothing with prefix 3 is contained for (String key : cfg.keySet()) { assertThat(key.startsWith(designatedPrefix3)).isFalse(); assertThat(key.startsWith(deprecatedPrefix3)).isFalse(); } }
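A compact sketch of the precedence rule above: the deprecated key's value is copied only when the designated key is not already set, and the deprecated key itself is left in place (key names are illustrative):

Configuration cfg = new Configuration();
cfg.setString("old.prefix.host", "a");
cfg.setString("new.prefix.host", "b"); // already set, so it wins
BootstrapTools.substituteDeprecatedConfigPrefix(cfg, "old.prefix.", "new.prefix.");
// cfg.getString("new.prefix.host", null) -> "b"; "old.prefix.host" is still present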
@Override public Long getSmsTemplateCountByChannelId(Long channelId) { return smsTemplateMapper.selectCountByChannelId(channelId); }
@Test public void testGetSmsTemplateCountByChannelId() { // mock data SmsTemplateDO dbSmsTemplate = randomPojo(SmsTemplateDO.class, o -> o.setChannelId(1L)); smsTemplateMapper.insert(dbSmsTemplate); // a row whose channelId does not match smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setChannelId(2L))); // prepare parameters Long channelId = 1L; // invoke Long count = smsTemplateService.getSmsTemplateCountByChannelId(channelId); // assert assertEquals(1, count); }
protected boolean init() { return true; }
@Test public void testEmbedSetup() { assertTrue( avroInput.init( (StepMetaInterface) mockStepMeta, mockStepDataInterface ) ); }
@Override public void validateKeyPresent(final SourceName sinkName) { getSource().validateKeyPresent(sinkName, projection); }
@Test public void shouldValidateKeysByCallingSourceWithProjection() { // When: projectNode.validateKeyPresent(SOURCE_NAME); // Then: verify(source).validateKeyPresent(SOURCE_NAME, Projection.of(selects)); }