focal_method | test_case
---|---
public static String getGeneratedResourceString(GeneratedResource generatedResource) throws JsonProcessingException {
return objectMapper.writeValueAsString(generatedResource);
} | @Test
void getGeneratedResourceString() throws JsonProcessingException {
String fullClassName = "full.class.Name";
GeneratedResource generatedResource = new GeneratedClassResource(fullClassName);
String expected = String.format("{\"step-type\":\"class\",\"fullClassName\":\"%s\"}", fullClassName);
String retrieved = JSONUtils.getGeneratedResourceString(generatedResource);
assertThat(retrieved).isEqualTo(expected);
LocalUri modelLocalUriId = new ReflectiveAppRoot("test")
.get(ComponentFoo.class)
.get("this", "is", "modelLocalUriId")
.asLocalUri();
ModelLocalUriId localUriId = new ModelLocalUriId(modelLocalUriId);
String target = LocalComponentIdFoo.PREFIX;
generatedResource = new GeneratedRedirectResource(localUriId, target);
expected = String.format("{\"step-type\":\"redirect\",\"modelLocalUriId\":%s,\"target\":\"%s\"}",
JSONUtils.getModelLocalUriIdString(localUriId), target);
retrieved = JSONUtils.getGeneratedResourceString(generatedResource);
assertThat(retrieved).isEqualTo(expected);
generatedResource = new GeneratedExecutableResource(localUriId, Collections.singletonList(fullClassName));
expected = String.format("{\"step-type\":\"executable\",\"modelLocalUriId\":%s,\"fullClassNames\":[\"%s\"]}",
JSONUtils.getModelLocalUriIdString(localUriId), fullClassName);
retrieved = JSONUtils.getGeneratedResourceString(generatedResource);
assertThat(retrieved).isEqualTo(expected);
} |
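The expected strings above pair a "step-type" discriminator with the payload fields, which is the shape Jackson produces for annotation-driven polymorphism. Below is a minimal sketch of one way to get that output; the Step/ClassStep hierarchy is hypothetical and only illustrates the discriminator, it is not the project's actual GeneratedResource code.
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;
// Hypothetical hierarchy demonstrating the "step-type" property seen in the assertions
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "step-type")
@JsonSubTypes(@JsonSubTypes.Type(value = ClassStep.class, name = "class"))
abstract class Step {
}
class ClassStep extends Step {
    public final String fullClassName;
    ClassStep(String fullClassName) {
        this.fullClassName = fullClassName;
    }
}
class StepTypeDemo {
    public static void main(String[] args) throws Exception {
        // prints {"step-type":"class","fullClassName":"full.class.Name"}
        System.out.println(new ObjectMapper().writeValueAsString(new ClassStep("full.class.Name")));
    }
}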
public TaskRunScheduler getTaskRunScheduler() {
return taskRunScheduler;
} | @Test
public void testTaskRunNotMerge() {
TaskRunManager taskRunManager = new TaskRunManager();
Task task = new Task("test");
task.setDefinition("select 1");
long taskId = 1;
TaskRun taskRun1 = TaskRunBuilder
.newBuilder(task)
.setExecuteOption(DEFAULT_NO_MERGE_OPTION)
.build();
long now = System.currentTimeMillis();
taskRun1.setTaskId(taskId);
taskRun1.initStatus("1", now);
taskRun1.getStatus().setPriority(0);
TaskRun taskRun2 = TaskRunBuilder
.newBuilder(task)
.setExecuteOption(DEFAULT_NO_MERGE_OPTION)
.build();
taskRun2.setTaskId(taskId);
taskRun2.initStatus("2", now);
taskRun2.getStatus().setPriority(10);
TaskRun taskRun3 = TaskRunBuilder
.newBuilder(task)
.setExecuteOption(DEFAULT_NO_MERGE_OPTION)
.build();
taskRun3.setTaskId(taskId);
taskRun3.initStatus("3", now + 10);
taskRun3.getStatus().setPriority(10);
taskRunManager.arrangeTaskRun(taskRun2, false);
taskRunManager.arrangeTaskRun(taskRun1, false);
taskRunManager.arrangeTaskRun(taskRun3, false);
TaskRunScheduler taskRunScheduler = taskRunManager.getTaskRunScheduler();
Collection<TaskRun> taskRuns = taskRunScheduler.getPendingTaskRunsByTaskId(taskId);
Assert.assertNotNull(taskRuns);
Assert.assertEquals(3, taskRuns.size());
} |
@Override
public void triggerForProject(String projectUuid) {
try (DbSession dbSession = dbClient.openSession(false)) {
// remove already existing indexing task, if any
removeExistingIndexationTasksForProject(dbSession, projectUuid);
dbClient.branchDao().updateAllNeedIssueSyncForProject(dbSession, projectUuid);
List<BranchDto> branchInNeedOfIssueSync = dbClient.branchDao().selectBranchNeedingIssueSyncForProject(dbSession, projectUuid);
LOG.info("{} branch(es) found in need of issue sync for project.", branchInNeedOfIssueSync.size());
List<CeTaskSubmit> tasks = new ArrayList<>();
for (BranchDto branch : branchInNeedOfIssueSync) {
tasks.add(buildTaskSubmit(branch));
}
ceQueue.massSubmit(tasks);
dbSession.commit();
}
} | @Test
public void triggerForProject() {
ProjectDto projectDto = dbTester.components().insertPrivateProject().getProjectDto();
BranchDto dto = new BranchDto()
.setBranchType(BRANCH)
.setKey("branchName")
.setUuid("branch_uuid")
.setProjectUuid(projectDto.getUuid())
.setIsMain(true);
dbTester.components().insertProjectBranch(projectDto, dto);
underTest.triggerForProject(projectDto.getUuid());
Optional<BranchDto> branch = dbClient.branchDao().selectByUuid(dbTester.getSession(), "branch_uuid");
assertThat(branch).isPresent();
assertThat(branch.get().isNeedIssueSync()).isTrue();
verify(ceQueue, times(2)).prepareSubmit();
verify(ceQueue, times(1)).massSubmit(anyCollection());
assertThat(logTester.logs(Level.INFO))
.contains("2 branch(es) found in need of issue sync for project.");
} |
public void refreshLogRetentionSettings() throws IOException {
if (getServiceState() == STATE.STARTED) {
Configuration conf = createConf();
setConfig(conf);
stopRMClient();
stopTimer();
scheduleLogDeletionTasks();
} else {
LOG.warn("Failed to execute refreshLogRetentionSettings : Aggregated Log Deletion Service is not started");
}
} | @Test
void testRefreshLogRetentionSettings() throws Exception {
long now = System.currentTimeMillis();
long before2000Secs = now - (2000 * 1000);
long before50Secs = now - (50 * 1000);
int checkIntervalSeconds = 2;
int checkIntervalMilliSeconds = checkIntervalSeconds * 1000;
Configuration conf = setupConfiguration(1800, 1);
LogAggregationTestcase testcase = LogAggregationTestcaseBuilder.create(conf)
.withRootPath(ROOT)
.withRemoteRootLogPath(REMOTE_ROOT_LOG_DIR)
.withUserDir(USER_ME, before50Secs)
.withSuffixDir(SUFFIX, before50Secs)
.withBucketDir(before50Secs)
.withApps(Lists.newArrayList(
//Set time last modified of app1Dir directory and its files to before2000Secs
new AppDescriptor(before2000Secs, Lists.newArrayList(
Pair.of(DIR_HOST1, before2000Secs))),
//Set time last modified of app2Dir directory and its files to before50Secs
new AppDescriptor(before50Secs, Lists.newArrayList(
Pair.of(DIR_HOST1, before50Secs))))
)
.withFinishedApps(1, 2)
.withRunningApps()
.build();
testcase
.startDeletionService()
//app1Dir should be deleted since it is older than the log retention period
.verifyAppDirDeleted(1, 10000L)
//app2Dir is not expected to be deleted since it is below the threshold
.verifyAppDirNotDeleted(2, 3000L);
//Now, let's change the log aggregation retention configs
conf.setInt(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, 50);
conf.setInt(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,
checkIntervalSeconds);
testcase
//We have not called refreshLogRetentionSettings, hence don't expect to see
// the changed conf values
.verifyCheckIntervalMilliSecondsNotEqualTo(checkIntervalMilliSeconds)
//refresh the log settings
.refreshLogRetentionSettings()
//Check interval time should reflect the new value
.verifyCheckIntervalMilliSecondsEqualTo(checkIntervalMilliSeconds)
//app2Dir should now be deleted since it exceeds the new retention threshold
.verifyAppDirDeleted(2, 10000L)
//Close expected 2 times: once for refresh and once for stopping
.teardown(2);
} |
public static boolean isLong(String s) {
if (StrUtil.isBlank(s)) {
return false;
}
try {
Long.parseLong(s);
} catch (NumberFormatException e) {
return false;
}
return true;
} | @Test
public void isLongTest() {
assertTrue(NumberUtil.isLong("-12"));
assertTrue(NumberUtil.isLong("256"));
assertTrue(NumberUtil.isLong("0256"));
assertTrue(NumberUtil.isLong("0"));
assertFalse(NumberUtil.isLong("23.4"));
assertFalse(NumberUtil.isLong(null));
assertFalse(NumberUtil.isLong(""));
assertFalse(NumberUtil.isLong(" "));
} |
@Override
public synchronized InetSocketAddress getConfAddress() throws UnavailableException {
return mMasterSelectionPolicy.getPrimaryMasterAddressCached(mMasterInquireClient);
} | @Test
public void specificMaster() throws UnavailableException {
int masterIndex = 2;
AbstractMasterClient client = new TestAbstractClient(
mMockMasterClientContext,
MasterSelectionPolicy.Factory.specifiedMaster(mAddress.get(masterIndex))
);
Assert.assertEquals(mAddress.get(masterIndex), client.getRemoteSockAddress());
Assert.assertEquals(mAddress.get(mPrimaryMasterIndex), client.getConfAddress());
} |
public static <FnT extends DoFn<?, ?>> DoFnSignature signatureForDoFn(FnT fn) {
return getSignature(fn.getClass());
} | @Test
public void testSimpleStateIdNamedDoFn() throws Exception {
class DoFnForTestSimpleStateIdNamedDoFn extends DoFn<KV<String, Integer>, Long> {
@StateId("foo")
private final StateSpec<ValueState<Integer>> bizzle = StateSpecs.value(VarIntCoder.of());
@ProcessElement
public void foo(ProcessContext context) {}
}
// Test classes at the bottom of the file
DoFnSignature sig = DoFnSignatures.signatureForDoFn(new DoFnForTestSimpleStateIdNamedDoFn());
assertThat(sig.stateDeclarations().size(), equalTo(1));
DoFnSignature.StateDeclaration decl = sig.stateDeclarations().get("foo");
assertThat(decl.id(), equalTo("foo"));
assertThat(
decl.field(), equalTo(DoFnForTestSimpleStateIdNamedDoFn.class.getDeclaredField("bizzle")));
assertThat(
decl.stateType(),
Matchers.<TypeDescriptor<?>>equalTo(new TypeDescriptor<ValueState<Integer>>() {}));
} |
public Set<Device> getDevicesFromPath(String path) throws IOException {
MutableInt counter = new MutableInt(0);
try (Stream<Path> stream = Files.walk(Paths.get(path), 1)) {
return stream.filter(p -> p.toFile().getName().startsWith("veslot"))
.map(p -> toDevice(p, counter))
.collect(Collectors.toSet());
}
} | @Test
public void testDetectSingleOnlineDevice() throws IOException {
createVeSlotFile(0);
createOsStateFile(0);
when(mockCommandExecutor.getOutput())
.thenReturn("8:1:character special file");
when(udevUtil.getSysPath(anyInt(), anyChar())).thenReturn(testFolder);
Set<Device> devices = discoverer.getDevicesFromPath(testFolder);
assertEquals("Number of devices", 1, devices.size());
Device device = devices.iterator().next();
assertEquals("Device ID", 0, device.getId());
assertEquals("Major number", 8, device.getMajorNumber());
assertEquals("Minor number", 1, device.getMinorNumber());
assertEquals("Status", "ONLINE", device.getStatus());
assertTrue("Device is not healthy", device.isHealthy());
} |
@Override
public Long clusterCountKeysInSlot(int slot) {
RedisClusterNode node = clusterGetNodeForSlot(slot);
MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort()));
RFuture<Long> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot);
return syncFuture(f);
} | @Test
public void testClusterCountKeysInSlot() {
Long t = connection.clusterCountKeysInSlot(1);
assertThat(t).isZero();
} |
@Override
public ConfigInfoStateWrapper findConfigInfoState(final String dataId, final String group, final String tenant) {
final String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO);
final String sql = configInfoMapper.select(
Arrays.asList("id", "data_id", "group_id", "tenant_id", "gmt_modified"),
Arrays.asList("data_id", "group_id", "tenant_id"));
return databaseOperate.queryOne(sql, new Object[] {dataId, group, tenantTmp},
CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER);
} | @Test
void testFindConfigInfoState() {
String dataId = "dataId1324";
String group = "group23546";
String tenant = "tenant13245";
//mock select config state
ConfigInfoStateWrapper mockedConfig = new ConfigInfoStateWrapper();
mockedConfig.setLastModified(2345678L);
mockedConfig.setId(23456789098765L);
when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(mockedConfig);
//execute return mock obj
ConfigInfoStateWrapper configInfoStateWrapper = embeddedConfigInfoPersistService.findConfigInfoState(dataId, group, tenant);
//expect check schema & tags.
assertEquals(mockedConfig.getId(), configInfoStateWrapper.getId());
assertEquals(mockedConfig.getLastModified(), configInfoStateWrapper.getLastModified());
} |
@Override
public String getUrl() {
return String.format("/%sartifact/%s", run.getUrl(), artifact.getHref());
} | @Test
public void findUniqueArtifactsWithSameName() throws IllegalAccessException, NoSuchFieldException {
//mock artifacts
assumeTrue("TODO in Java 12+ final cannot be removed", Runtime.version().feature() < 12);
FieldUtils.removeFinalModifier(Run.Artifact.class.getField("relativePath"));
Run.Artifact artifact1 = mock(Run.Artifact.class);
Run.Artifact artifact2 = mock(Run.Artifact.class);
//artifact1 mocks
when(artifact1.getFileName()).thenReturn("test-suite.log");
// FIXME: revisit with JUnit 5
Whitebox.setInternalState(artifact1, "relativePath", "path1/test-suite.log");
when(artifact1.getHref()).thenReturn("path1/test-suite.log");
//artifact2 mocks
when(artifact2.getFileName()).thenReturn("test-suite.log");
Whitebox.setInternalState(artifact2, "relativePath", "path2/test-suite.log");
when(artifact2.getHref()).thenReturn("path2/test-suite.log");
//list of artifacts
List artifactList = new ArrayList<>();
artifactList.add(artifact1);
artifactList.add(artifact2);
//mock run
Run run = mock(Run.class);
when(run.getUrl()).thenReturn("job/myfolder/job/myjob/1/");
when(run.getArtifacts()).thenReturn(artifactList);
Link parentLink = mock(Link.class);
ArtifactImpl a1 = new ArtifactImpl(run, artifact1, parentLink);
ArtifactImpl a2 = new ArtifactImpl(run, artifact2, parentLink);
assertThat(a1.getId(), is(not(a2.getId())));
} |
public Chapter readChapter(@NonNull FrameHeader frameHeader) throws IOException, ID3ReaderException {
int chapterStartedPosition = getPosition();
String elementId = readIsoStringNullTerminated(100);
long startTime = readInt();
skipBytes(12); // Ignore end time, start offset, end offset
Chapter chapter = new Chapter();
chapter.setStart(startTime);
chapter.setChapterId(elementId);
// Read sub-frames
while (getPosition() < chapterStartedPosition + frameHeader.getSize()) {
FrameHeader subFrameHeader = readFrameHeader();
readChapterSubFrame(subFrameHeader, chapter);
}
return chapter;
} | @Test
public void testReadChapterWithoutSubframes() throws IOException, ID3ReaderException {
FrameHeader header = new FrameHeader(ChapterReader.FRAME_ID_CHAPTER,
CHAPTER_WITHOUT_SUBFRAME.length, (short) 0);
CountingInputStream inputStream = new CountingInputStream(new ByteArrayInputStream(CHAPTER_WITHOUT_SUBFRAME));
Chapter chapter = new ChapterReader(inputStream).readChapter(header);
assertEquals(CHAPTER_WITHOUT_SUBFRAME_START_TIME, chapter.getStart());
} |
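readChapter above consumes a null-terminated element ID, a 4-byte start time, and then skips 12 bytes, matching the ID3v2 CHAP frame body layout (element ID, then start time, end time, start offset, and end offset as 4-byte big-endian integers). A sketch of how such a frame body could be assembled for a fixture like CHAPTER_WITHOUT_SUBFRAME; the builder class is hypothetical, not the project's actual test scaffolding.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
// Hypothetical fixture builder for a CHAP frame body without sub-frames
class ChapFrameFixture {
    static byte[] chapterBody(String elementId, int startTimeMs) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(elementId.getBytes(StandardCharsets.ISO_8859_1));
        out.write(0); // null terminator ending the element ID
        out.write(ByteBuffer.allocate(16)
                .putInt(startTimeMs) // start time, consumed by readInt()
                .putInt(0)           // end time     \
                .putInt(0)           // start offset  > the 12 bytes skipBytes(12) ignores
                .putInt(0)           // end offset   /
                .array());
        return out.toByteArray();
    }
}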
public String getChineseMonth() {
return getChineseMonth(false);
} | @Test
public void getChineseMonthTest(){
ChineseDate chineseDate = new ChineseDate(2020,6,15);
assertEquals("2020-08-04 00:00:00", chineseDate.getGregorianDate().toString());
assertEquals("六月", chineseDate.getChineseMonth());
chineseDate = new ChineseDate(2020,4,15);
assertEquals("2020-06-06 00:00:00", chineseDate.getGregorianDate().toString());
assertEquals("闰四月", chineseDate.getChineseMonth());
chineseDate = new ChineseDate(2020,5,15);
assertEquals("2020-07-05 00:00:00", chineseDate.getGregorianDate().toString());
assertEquals("五月", chineseDate.getChineseMonth());
} |
@Override
public int hashCode() {
return Objects.hash(mInstantCreated, mWorkers);
} | @Test
public void hashCodeImpl() {
WorkerIdentity worker1 = WorkerIdentityTestUtils.ofLegacyId(1);
WorkerIdentity worker2 = WorkerIdentityTestUtils.ofLegacyId(2);
WorkerIdentity worker3 = WorkerIdentityTestUtils.ofLegacyId(3);
List<WorkerInfo> workers = ImmutableList.of(
new WorkerInfo().setIdentity(worker1),
new WorkerInfo().setIdentity(worker2),
new WorkerInfo().setIdentity(worker3)
);
Instant time = Instant.now();
WorkerClusterView view1 = new WorkerClusterView(workers, time);
WorkerClusterView view2 = new WorkerClusterView(Lists.reverse(workers), time);
assertEquals(view1, view2);
assertEquals(view1.hashCode(), view2.hashCode());
} |
@Override
public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant,
Option<HoodieInstant> lastSuccessfulInstant) {
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
// To find which instants are conflicting, we apply the following logic
// 1. Get completed instants timeline only for commits that have happened since the last successful write.
// 2. Get any scheduled or completed compaction or clustering operations that have started and/or finished
// after the current instant. We need to check for write conflicts since they may have mutated the same files
// that are being newly created by the current write.
Stream<HoodieInstant> completedCommitsInstantStream = activeTimeline
.getCommitsTimeline()
.filterCompletedInstants()
.findInstantsAfter(lastSuccessfulInstant.isPresent() ? lastSuccessfulInstant.get().getTimestamp() : HoodieTimeline.INIT_INSTANT_TS)
.getInstantsAsStream();
Stream<HoodieInstant> compactionAndClusteringPendingTimeline = activeTimeline
.filterPendingReplaceClusteringAndCompactionTimeline()
.filter(instant -> ClusteringUtils.isClusteringInstant(activeTimeline, instant)
|| HoodieTimeline.COMPACTION_ACTION.equals(instant.getAction()))
.findInstantsAfter(currentInstant.getTimestamp())
.getInstantsAsStream();
return Stream.concat(completedCommitsInstantStream, compactionAndClusteringPendingTimeline);
} | @Test
public void testNoConcurrentWrites() throws Exception {
String newInstantTime = HoodieTestTable.makeNewCommitTime();
createCommit(newInstantTime, metaClient);
// consider commits before this are all successful
Option<HoodieInstant> lastSuccessfulInstant = metaClient.getCommitsTimeline().filterCompletedInstants().lastInstant();
newInstantTime = HoodieTestTable.makeNewCommitTime();
Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, newInstantTime));
SimpleConcurrentFileWritesConflictResolutionStrategy strategy = new SimpleConcurrentFileWritesConflictResolutionStrategy();
Stream<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant);
Assertions.assertEquals(0, candidateInstants.count());
} |
public void putWorkerMetrics(String source, List<Metric> metrics) {
if (metrics.isEmpty() || source == null) {
return;
}
try (LockResource r = new LockResource(mLock.readLock())) {
putReportedMetrics(InstanceType.WORKER, metrics);
}
LOG.debug("Put {} metrics of worker {}", metrics.size(), source);
} | @Test
public void putWorkerUfsMetrics() {
String readBytes = MetricKey.WORKER_BYTES_READ_UFS.getName();
String writtenBytes = MetricKey.WORKER_BYTES_WRITTEN_UFS.getName();
String ufsName1 = MetricsSystem.escape(new AlluxioURI("/my/local/folder"));
String readBytesUfs1 = Metric.getMetricNameWithTags(readBytes, MetricInfo.TAG_UFS, ufsName1);
String writtenBytesUfs1 = Metric
.getMetricNameWithTags(writtenBytes, MetricInfo.TAG_UFS, ufsName1);
String ufsName2 = MetricsSystem.escape(new AlluxioURI("s3://my/s3/bucket/"));
String readBytesUfs2 = Metric.getMetricNameWithTags(readBytes, MetricInfo.TAG_UFS, ufsName2);
String writtenBytesUfs2 = Metric
.getMetricNameWithTags(writtenBytes, MetricInfo.TAG_UFS, ufsName2);
String host1 = "192_1_1_1";
List<Metric> metrics1 = Lists.newArrayList(
Metric.from(readBytesUfs1 + "." + host1, 10, MetricType.COUNTER),
Metric.from(writtenBytesUfs1 + "." + host1, 20, MetricType.COUNTER),
Metric.from(writtenBytesUfs2 + "." + host1, 7, MetricType.COUNTER));
mMetricStore.putWorkerMetrics(host1, metrics1);
String host2 = "192_1_1_2";
List<Metric> metrics2 = Lists.newArrayList(
Metric.from(readBytesUfs1 + "." + host2, 5, MetricType.COUNTER),
Metric.from(writtenBytesUfs1 + "." + host2, 12, MetricType.COUNTER),
Metric.from(readBytesUfs2 + "." + host2, 33, MetricType.COUNTER));
mMetricStore.putWorkerMetrics(host2, metrics2);
assertEquals(15, MetricsSystem.counter(
Metric.getMetricNameWithTags(MetricKey.CLUSTER_BYTES_READ_UFS.getName(),
MetricInfo.TAG_UFS, ufsName1)).getCount());
assertEquals(33, MetricsSystem.counter(
Metric.getMetricNameWithTags(MetricKey.CLUSTER_BYTES_READ_UFS.getName(),
MetricInfo.TAG_UFS, ufsName2)).getCount());
assertEquals(48,
MetricsSystem.counter(MetricKey.CLUSTER_BYTES_READ_UFS_ALL.getName()).getCount());
assertEquals(32, MetricsSystem.counter(
Metric.getMetricNameWithTags(MetricKey.CLUSTER_BYTES_WRITTEN_UFS.getName(),
MetricInfo.TAG_UFS, ufsName1)).getCount());
assertEquals(7, MetricsSystem.counter(
Metric.getMetricNameWithTags(MetricKey.CLUSTER_BYTES_WRITTEN_UFS.getName(),
MetricInfo.TAG_UFS, ufsName2)).getCount());
assertEquals(39,
MetricsSystem.counter(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_ALL.getName()).getCount());
} |
public abstract HttpHeaders set(String name, Object value); | @Test
public void testSetNullHeaderValueNotValidate() {
final HttpHeaders headers = new DefaultHttpHeaders(false);
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
headers.set(of("test"), (CharSequence) null);
}
});
} |
@Override
public URL getResource(String name) {
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resource '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
URL url = null;
switch (classLoadingSource) {
case APPLICATION:
url = super.getResource(name);
break;
case PLUGIN:
url = findResource(name);
break;
case DEPENDENCIES:
url = findResourceFromDependencies(name);
break;
}
if (url != null) {
log.trace("Found resource '{}' in {} classpath", name, classLoadingSource);
return url;
} else {
log.trace("Couldn't find resource '{}' in {}", name, classLoadingSource);
}
}
return null;
} | @Test
void parentFirstGetResourceNonExisting() {
assertNull(parentFirstPluginClassLoader.getResource("META-INF/non-existing-file"));
} |
public static ElasticSearchLogCollectClient getElasticSearchLogCollectClient() {
return ELASTICSEARCH_LOG_COLLECT_CLIENT;
} | @Test
public void testGetRocketMqLogCollectClient() {
Assertions.assertEquals(LoggingElasticSearchPluginDataHandler.getElasticSearchLogCollectClient().getClass(), ElasticSearchLogCollectClient.class);
} |
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
} | @Test
public void shouldHandleSelectStructAllOnNestedStruct() {
// Given:
final SingleStatementContext stmt =
givenQuery("SELECT NESTED_ORDER_COL->ITEMINFO->* FROM NESTED_STREAM;");
// When:
final Query result = (Query) builder.buildStatement(stmt);
// Then:
assertThat(result.getSelect(), is(new Select(ImmutableList.of(
new StructAll(
new DereferenceExpression(
Optional.empty(),
new UnqualifiedColumnReferenceExp(ColumnName.of("NESTED_ORDER_COL")),
"ITEMINFO"
))
))));
} |
public List<PhotoAlbum> split(int numberOfNewAlbums) {
return IntStream.range(1, numberOfNewAlbums + 1)
.mapToObj(
i ->
new PhotoAlbum(
String.format("%s-pt%d", id, i),
String.format("%s (%d/%d)", id, i, numberOfNewAlbums),
description))
.collect(Collectors.toList());
} | @Test
public void splitSimple() {
PhotoAlbum originalAlbum = new PhotoAlbum("123", "MyAlbum", DESCRIPTION);
List<PhotoAlbum> actual = originalAlbum.split(3);
Truth.assertThat(actual)
.containsExactly(
new PhotoAlbum("123-pt1", "123 (1/3)", DESCRIPTION),
new PhotoAlbum("123-pt2", "123 (2/3)", DESCRIPTION),
new PhotoAlbum("123-pt3", "123 (3/3)", DESCRIPTION));
} |
public static Properties parseKeyValueArgs(List<String> args) {
return parseKeyValueArgs(args, true);
} | @Test
public void testParseEmptyArg() {
List<String> argArray = Arrays.asList("my.empty.property=");
assertThrows(IllegalArgumentException.class, () -> CommandLineUtils.parseKeyValueArgs(argArray, false));
} |
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
SinkConfig mergedConfig = clone(existingConfig);
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Sink Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
.equals(existingConfig.getSourceSubscriptionName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().putIfAbsent(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getTopicToSerdeClassName() != null) {
newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getTopicToSchemaType() != null) {
newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
if (!newConfig.getInputSpecs().isEmpty()) {
SinkConfig finalMergedConfig = mergedConfig;
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getTransformFunction() != null) {
mergedConfig.setTransformFunction(newConfig.getTransformFunction());
}
if (newConfig.getTransformFunctionClassName() != null) {
mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
}
if (newConfig.getTransformFunctionConfig() != null) {
mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
}
return mergedConfig;
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Subscription Name cannot be altered")
public void testMergeDifferentSubname() {
SinkConfig sinkConfig = createSinkConfig();
SinkConfig newSinkConfig = createUpdatedSinkConfig("sourceSubscriptionName", "Different");
SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
} |
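validateUpdate treats the identity fields (tenant, namespace, name) plus the subscription name, input topics, processing guarantees, ordering flags, and auto-ack as immutable, and merges everything else onto a clone of the existing config. A sketch of an update it should accept, assuming Pulsar's SinkConfig setters; the values are illustrative only.
// Illustrative only: identity fields match, and only a mergeable field changes.
class ValidUpdateSketch {
    static SinkConfig updateParallelism() {
        SinkConfig existing = new SinkConfig();
        existing.setTenant("public");
        existing.setNamespace("default");
        existing.setName("my-sink");
        SinkConfig update = new SinkConfig();
        update.setTenant("public");
        update.setNamespace("default");
        update.setName("my-sink");
        update.setParallelism(4); // mergeable: copied onto the merged config
        // identity fields survive unchanged, parallelism becomes 4
        return SinkConfigUtils.validateUpdate(existing, update);
    }
}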
public static List<String> getErrorMessages(final Throwable e) {
return getThrowables(e).stream()
.map(ErrorMessageUtil::getErrorMessage)
.collect(Collectors.toList());
} | @Test
public void shouldBuildErrorMessageChain() {
// Given:
final Throwable e = new Exception("root", new Exception("cause"));
// Then:
assertThat(getErrorMessages(e), equalTo(ImmutableList.of("root", "cause")));
} |
public void clear(long index) {
assert index >= 0;
int prefix = (int) (index >>> Integer.SIZE);
if (prefix == lastPrefix) {
if (lastStorage.clear((int) index)) {
lastPrefix = -1;
lastStorage = null;
// cleanup the empty storage
storages.clear(prefix);
}
} else {
SparseIntArray<E> storage = storages.get(prefix);
if (storage != null) {
if (storage.clear((int) index)) {
// cleanup the empty storage
storages.clear(prefix);
} else {
lastPrefix = prefix;
lastStorage = storage;
}
}
}
} | @Test
public void testClear() {
// try to clear empty array
actual.clear();
verify();
// at the beginning
for (long i = 0; i < 1000; ++i) {
set(i);
}
for (long i = 0; i < 1000 + 100; ++i) {
clear(i);
verify();
// try nonexistent
clear(i);
verify();
}
// offset
for (long i = 1000000; i < 1000000 + 1000; ++i) {
set(i);
}
for (long i = 1000000 + 1000 + 100; i >= 1000000; --i) {
clear(i);
verify();
// try nonexistent
clear(i);
verify();
}
// test empty again
actual.clear();
verify();
// try gaps
for (long i = 0; i < 1000; ++i) {
set(i * i);
}
for (long i = 0; i < 1000; ++i) {
clear(i * i);
verify();
}
// try larger gaps
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
set(i * i);
}
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
clear(i * i);
verify();
}
// try larger 2-element gaps
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
set(i * i);
set(i * i - 1);
}
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
clear(i * i);
verify();
clear(i * i - 1);
verify();
}
// try some edge cases
for (long i = 0; i <= 2; ++i) {
set(i);
}
for (long i = 0; i <= 2; ++i) {
clear(i);
verify();
}
for (long i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
set(i);
}
for (long i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
clear(i);
verify();
}
for (long i = Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
set(i);
}
for (long i = Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
clear(i);
verify();
}
for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 2; --i) {
set(i);
}
for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 2; --i) {
clear(i);
verify();
}
} |
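The clear() implementation above splits each 64-bit index into a 32-bit prefix that selects a SparseIntArray bucket (with a one-slot cache in lastPrefix/lastStorage) and a 32-bit offset within that bucket. A minimal illustration of the split:
class IndexSplitDemo {
    public static void main(String[] args) {
        long index = (5L << 32) | 42L;
        int prefix = (int) (index >>> Integer.SIZE); // 5: selects the storage bucket
        int offset = (int) index;                    // 42: the slot inside that bucket
        System.out.println(prefix + " " + offset);   // prints "5 42"
    }
}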
@Override
public void handleTenantMenu(TenantMenuHandler handler) {
// If tenant support is disabled, skip the logic
if (isTenantDisable()) {
return;
}
// Get the tenant, then resolve its menus
TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
Set<Long> menuIds;
if (isSystemTenant(tenant)) { // system tenant gets the full menu set
menuIds = CollectionUtils.convertSet(menuService.getMenuList(), MenuDO::getId);
} else {
menuIds = tenantPackageService.getTenantPackage(tenant.getPackageId()).getMenuIds();
}
// Invoke the handler
handler.handle(menuIds);
} | @Test
public void testHandleTenantMenu_disable() {
// Prepare parameters
TenantMenuHandler handler = mock(TenantMenuHandler.class);
// mock the disabled state
when(tenantProperties.getEnable()).thenReturn(false);
// Call the method under test
tenantService.handleTenantMenu(handler);
// Assert
verify(handler, never()).handle(any());
} |
public static ShenyuAdminResult error(final String msg) {
return error(CommonErrorCode.ERROR, msg);
} | @Test
public void testError() {
final ShenyuAdminResult result = ShenyuAdminResult.error("msg");
assertEquals(CommonErrorCode.ERROR, result.getCode().intValue());
assertEquals("msg", result.getMessage());
assertNull(result.getData());
assertEquals(3871218, result.hashCode());
assertEquals("ShenyuAdminResult{code=500, message='msg', data=null}", result.toString());
} |
public static boolean isComplete(Object obj) throws IllegalArgumentException {
requireNonNull(obj);
Field[] fields = obj.getClass().getDeclaredFields();
StringBuilder error = new StringBuilder();
for (Field field : fields) {
if (field.isAnnotationPresent(FieldContext.class)) {
field.setAccessible(true);
Object value;
try {
value = field.get(obj);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
if (log.isDebugEnabled()) {
log.debug("Validating configuration field '{}' = '{}'", field.getName(), value);
}
boolean isRequired = field.getAnnotation(FieldContext.class).required();
long minValue = field.getAnnotation(FieldContext.class).minValue();
long maxValue = field.getAnnotation(FieldContext.class).maxValue();
if (isRequired && isEmpty(value)) {
error.append(String.format("Required %s is null,", field.getName()));
}
if (value != null && Number.class.isAssignableFrom(value.getClass())) {
long fieldVal = ((Number) value).longValue();
boolean valid = fieldVal >= minValue && fieldVal <= maxValue;
if (!valid) {
error.append(String.format("%s value %d doesn't fit in given range (%d, %d),", field.getName(),
fieldVal, minValue, maxValue));
}
}
}
}
if (error.length() > 0) {
throw new IllegalArgumentException(error.substring(0, error.length() - 1));
}
return true;
} | @Test
public void testComplete() throws Exception {
TestCompleteObject complete = new TestCompleteObject();
assertTrue(isComplete(complete));
} |
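isComplete collects every violation across the @FieldContext-annotated fields before throwing a single IllegalArgumentException. A hypothetical object that would trip both the required-null and the range check, assuming Pulsar's org.apache.pulsar.common.configuration.FieldContext annotation used by the focal method:
import org.apache.pulsar.common.configuration.FieldContext;
// Hypothetical: isComplete(new TestIncompleteObject()) should throw an
// IllegalArgumentException mentioning both violations below.
class TestIncompleteObject {
    @FieldContext(required = true)
    String requiredButNull; // -> "Required requiredButNull is null"
    @FieldContext(minValue = 1, maxValue = 10)
    int outOfRange = 99;    // -> "outOfRange value 99 doesn't fit in given range (1, 10)"
}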
@Override
public Map<SubClusterId, List<ResourceRequest>> splitResourceRequests(
List<ResourceRequest> resourceRequests,
Set<SubClusterId> timedOutSubClusters) throws YarnException {
// object used to accumulate statistics about the answer, initialize with
// active subclusters. Create a new instance per call because this method
// can be called concurrently.
AllocationBookkeeper bookkeeper = new AllocationBookkeeper();
bookkeeper.reinitialize(getActiveSubclusters(), timedOutSubClusters, conf);
List<ResourceRequest> nonLocalizedRequests = new ArrayList<>();
SubClusterId targetId = null;
Set<SubClusterId> targetIds = null;
// if the RR is resolved to a local subcluster add it directly (node and
// resolvable racks)
for (ResourceRequest rr : resourceRequests) {
targetId = null;
targetIds = null;
// Handle: ANY (accumulated for later)
if (ResourceRequest.isAnyLocation(rr.getResourceName())) {
nonLocalizedRequests.add(rr);
continue;
}
// Handle "node" requests
try {
targetId = resolver.getSubClusterForNode(rr.getResourceName());
// If needed, reroute node requests based on subcluster load
boolean loadBasedSCSelectorEnabled =
conf.getBoolean(LOAD_BASED_SC_SELECTOR_ENABLED, DEFAULT_LOAD_BASED_SC_SELECTOR_ENABLED);
if (loadBasedSCSelectorEnabled) {
int maxPendingThreshold = conf.getInt(LOAD_BASED_SC_SELECTOR_THRESHOLD,
DEFAULT_LOAD_BASED_SC_SELECTOR_THRESHOLD);
targetId = routeNodeRequestIfNeeded(targetId, maxPendingThreshold,
bookkeeper.getActiveAndEnabledSC());
}
LOG.debug("Node request {}", rr.getResourceName());
} catch (YarnException e) {
// this might happen as we can't differentiate node from rack names
// we log altogether later
}
if (bookkeeper.isActiveAndEnabled(targetId)) {
bookkeeper.addLocalizedNodeRR(targetId, rr);
continue;
}
// Handle "rack" requests
try {
targetIds = resolver.getSubClustersForRack(rr.getResourceName());
} catch (YarnException e) {
// this might happen as we can't differentiate node from rack names
// we log altogether later
}
if (targetIds != null && targetIds.size() > 0) {
boolean hasActive = false;
for (SubClusterId tid : targetIds) {
if (bookkeeper.isActiveAndEnabled(tid)) {
bookkeeper.addRackRR(tid, rr);
hasActive = true;
}
}
if (hasActive) {
continue;
}
}
// Handle node/rack requests that the SubClusterResolver cannot map to
// any cluster. Pick a random sub-cluster from active and enabled ones.
targetId = getSubClusterForUnResolvedRequest(bookkeeper,
rr.getAllocationRequestId());
LOG.debug("ERROR resolving sub-cluster for resourceName: {}, picked a "
+ "random subcluster to forward:{}", rr.getResourceName(), targetId);
if (targetIds != null && targetIds.size() > 0) {
bookkeeper.addRackRR(targetId, rr);
} else {
bookkeeper.addLocalizedNodeRR(targetId, rr);
}
}
// handle all non-localized requests (ANY)
splitAnyRequests(nonLocalizedRequests, bookkeeper);
// Take the split result, feed into the askBalancer
Map<SubClusterId, List<ResourceRequest>> answer = bookkeeper.getAnswer();
LOG.info("Before split {} RRs: {}", resourceRequests.size(),
prettyPrintRequests(resourceRequests, this.printRRMax));
for (Map.Entry<SubClusterId, List<ResourceRequest>> entry : bookkeeper.getAnswer().entrySet()) {
LOG.info("After split {} has {} RRs: {}", entry.getKey(), entry.getValue().size(),
prettyPrintRequests(entry.getValue(), this.printRRMax));
}
return answer;
} | @Test
public void testCancelWithLocalizedResource() throws YarnException {
// Configure policy to be 100% headroom based
getPolicyInfo().setHeadroomAlpha(1.0f);
initializePolicy();
List<ResourceRequest> resourceRequests = new ArrayList<>();
// Initialize the headroom map
prepPolicyWithHeadroom(true);
// Cancel at ANY level only
resourceRequests.add(FederationPoliciesTestUtil.createResourceRequest(0L,
"subcluster0-rack0-host0", 1024, 1, 1, 1, null, false));
resourceRequests.add(FederationPoliciesTestUtil.createResourceRequest(0L,
"subcluster0-rack0", 1024, 1, 1, 1, null, false));
resourceRequests.add(FederationPoliciesTestUtil.createResourceRequest(0L,
ResourceRequest.ANY, 1024, 1, 1, 0, null, false));
Map<SubClusterId, List<ResourceRequest>> response =
((FederationAMRMProxyPolicy) getPolicy()).splitResourceRequests(
resourceRequests, new HashSet<SubClusterId>());
checkExpectedAllocation(response, "subcluster0", 3, 1);
checkExpectedAllocation(response, "subcluster1", 1, 0);
checkExpectedAllocation(response, "subcluster2", 1, 0);
checkExpectedAllocation(response, "subcluster3", -1, -1);
checkExpectedAllocation(response, "subcluster4", -1, -1);
checkExpectedAllocation(response, "subcluster5", -1, -1);
resourceRequests.clear();
// Cancel at node level only
resourceRequests.add(FederationPoliciesTestUtil.createResourceRequest(0L,
"subcluster0-rack0-host0", 1024, 1, 1, 0, null, false));
resourceRequests.add(FederationPoliciesTestUtil.createResourceRequest(0L,
"subcluster0-rack0", 1024, 1, 1, 0, null, false));
resourceRequests.add(FederationPoliciesTestUtil.createResourceRequest(0L,
ResourceRequest.ANY, 1024, 1, 1, 100, null, false));
response = ((FederationAMRMProxyPolicy) getPolicy())
.splitResourceRequests(resourceRequests, new HashSet<SubClusterId>());
/*
* Since the node request is a cancel, it should not be considered associated
* with localized requests. Subcluster5 advertises unknown headroom, so it
* gets 1/4th of the load (25 of the 100 containers); subcluster1 gets none
* as it advertises zero headroom. The remaining 75 are split proportionally
* to the advertised headroom (40 and 10): 75 * 40/50 = 60 go to subcluster0
* and 75 * 10/50 = 15 go to subcluster2.
*/
checkExpectedAllocation(response, "subcluster0", 3, 60);
checkExpectedAllocation(response, "subcluster1", 1, -1);
checkExpectedAllocation(response, "subcluster2", 1, 15);
checkExpectedAllocation(response, "subcluster5", 1, 25);
checkTotalContainerAllocation(response, 100);
} |
public static void handleUncaughtException(
CompletableFuture<?> completableFuture,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
handleUncaughtException(
completableFuture, uncaughtExceptionHandler, FatalExitExceptionHandler.INSTANCE);
} | @Test
void testHandleUncaughtExceptionWithExceptionallyCompletion() {
final CompletableFuture<String> future = new CompletableFuture<>();
final TestingUncaughtExceptionHandler uncaughtExceptionHandler =
new TestingUncaughtExceptionHandler();
FutureUtils.handleUncaughtException(future, uncaughtExceptionHandler);
assertThat(uncaughtExceptionHandler.hasBeenCalled()).isFalse();
future.completeExceptionally(new FlinkException("barfoo"));
assertThat(uncaughtExceptionHandler.hasBeenCalled()).isTrue();
} |
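The one-argument overload above delegates to a two-argument variant with FatalExitExceptionHandler as the fallback. The forwarding itself is presumably a completion callback; a sketch assuming whenComplete-based wiring, not Flink's actual implementation:
import java.util.concurrent.CompletableFuture;
// Illustrative sketch only, assuming whenComplete-based wiring
class UncaughtForwardingSketch {
    static void handleUncaughtException(CompletableFuture<?> future,
                                        Thread.UncaughtExceptionHandler handler) {
        future.whenComplete((ignored, error) -> {
            if (error != null) {
                handler.uncaughtException(Thread.currentThread(), error);
            }
        });
    }
}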
@Override
public void pluginUnLoaded(GoPluginDescriptor descriptor) {
PluggableTaskConfigStore.store().removePreferenceFor(descriptor.id());
} | @Test
public void shouldRemoveConfigForTheTaskCorrespondingToGivenPluginId() throws Exception {
final GoPluginDescriptor descriptor = mock(GoPluginDescriptor.class);
String pluginId = "test-plugin-id";
when(descriptor.id()).thenReturn(pluginId);
final Task task = mock(Task.class);
TaskConfig config = new TaskConfig();
TaskView taskView = mock(TaskView.class);
when(task.config()).thenReturn(config);
when(task.view()).thenReturn(taskView);
PluggableTaskConfigStore.store().setPreferenceFor(pluginId, new TaskPreference(task));
PluginManager pluginManager = mock(PluginManager.class);
PluggableTaskPreferenceLoader pluggableTaskPreferenceLoader = new PluggableTaskPreferenceLoader(pluginManager, taskExtension);
assertThat(PluggableTaskConfigStore.store().hasPreferenceFor(pluginId), is(true));
pluggableTaskPreferenceLoader.pluginUnLoaded(descriptor);
assertThat(PluggableTaskConfigStore.store().hasPreferenceFor(pluginId), is(false));
verify(pluginManager).addPluginChangeListener(pluggableTaskPreferenceLoader);
} |
public static String findANummer(List<Container> categorieList){
return findValue(categorieList, CATEGORIE_IDENTIFICATIENUMMERS, ELEMENT_A_NUMMER);
} | @Test
public void testFindAnummer() {
assertThat(CategorieUtil.findANummer(createFullCategories()), is("a-nummer"));
} |
public static MetadataExtractor create(String metadataClassName) {
String metadataExtractorClassName = metadataClassName;
try {
LOGGER.info("Instantiating MetadataExtractor class {}", metadataExtractorClassName);
MetadataExtractor metadataExtractor = (MetadataExtractor) Class.forName(metadataExtractorClassName).newInstance();
return metadataExtractor;
} catch (Exception e) {
LOGGER.warn("No metadata extractor class passed in, using default");
return new DefaultMetadataExtractor();
}
} | @Test
public void testConfiguredMetadataProvider() {
Assert.assertTrue(
MetadataExtractorFactory.create(DefaultMetadataExtractor.class.getName()) instanceof DefaultMetadataExtractor);
} |
public Certificate add(X509Certificate cert) {
final Certificate db;
try {
db = Certificate.from(cert);
} catch (CertificateEncodingException e) {
logger.error("Encoding error in certificate", e);
throw new RuntimeException("Encoding error in certificate", e);
}
try {
// Special case for first CSCA certificate for this document type
if (repository.countByDocumentType(db.getDocumentType()) == 0) {
cert.verify(cert.getPublicKey());
logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType());
} else {
verify(cert, allowAddingExpired ? cert.getNotAfter() : null);
}
} catch (GeneralSecurityException | VerificationException e) {
logger.error(
String.format("Could not verify certificate of %s issued by %s",
cert.getSubjectX500Principal(), cert.getIssuerX500Principal()
), e
);
throw new BadRequestException("Could not verify certificate", e);
}
return repository.saveAndFlush(db);
} | @Test
public void shouldDisallowToAddCRLIfNotNewer() throws Exception {
certificateRepo.saveAndFlush(loadCertificate("rdw/02.cer", true));
crlRepo.saveAndFlush(loadCRL("rdw/02.crl"));
Exception exception = assertThrows(BadRequestException.class, () -> {
service.add(readCRL("rdw/02.crl"));
});
assertEquals("CRL is not newer, refuse to update", exception.getMessage());
} |
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
try {
final StoregateApiClient client = session.getClient();
final MoveFileRequest move = new MoveFileRequest()
.name(renamed.getName())
.parentID(fileid.getFileId(renamed.getParent()))
.mode(1); // Overwrite
final HttpEntityEnclosingRequestBase request;
request = new HttpPost(String.format("%s/v4.2/files/%s/move", client.getBasePath(), fileid.getFileId(file)));
if(status.getLockId() != null) {
request.addHeader("X-Lock-Id", status.getLockId().toString());
}
request.setEntity(new StringEntity(new JSON().getContext(move.getClass()).writeValueAsString(move),
ContentType.create("application/json", StandardCharsets.UTF_8.name())));
request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
final HttpResponse response = client.getClient().execute(request);
try {
switch(response.getStatusLine().getStatusCode()) {
case HttpStatus.SC_NO_CONTENT:
final PathAttributes attr = new PathAttributes(file.attributes());
fileid.cache(file, null);
fileid.cache(renamed, attr.getFileId());
return renamed.withAttributes(attr);
default:
throw new StoregateExceptionMappingService(fileid).map("Cannot rename {0}",
new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
}
}
finally {
EntityUtils.consume(response.getEntity());
}
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
}
} | @Test
public void testMoveToDifferentParentAndRename() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final String filename = new AlphanumericRandomStringService().random();
final Path test = new StoregateTouchFeature(session, nodeid).touch(new Path(room, filename, EnumSet.of(Path.Type.file)), new TransferStatus());
final Path target = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new StoregateTouchFeature(session, nodeid).touch(target, new TransferStatus());
new StoregateMoveFeature(session, nodeid).move(test, target, new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new DefaultFindFeature(session).find(new Path(room, filename, EnumSet.of(Path.Type.file))));
assertTrue(new DefaultFindFeature(session).find(target));
new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public DdlCommand create(
final String sqlExpression,
final DdlStatement ddlStatement,
final SessionConfig config
) {
return FACTORIES
.getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> {
throw new KsqlException(
"Unable to find ddl command factory for statement:"
+ statement.getClass()
+ " valid statements:"
+ FACTORIES.keySet()
);
})
.handle(
this,
new CallInfo(sqlExpression, config),
ddlStatement);
} | @Test
public void shouldCreateCommandForCreateStream() {
// Given:
final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, false, true, withProperties, false);
// When:
final DdlCommand result = commandFactories
.create(sqlExpression, statement, SessionConfig.of(ksqlConfig, emptyMap()));
assertThat(result, is(createStreamCommand));
verify(createSourceFactory).createStreamCommand(statement, ksqlConfig);
} |
public static Config fromHocon(ConfigOrigin origin, com.typesafe.config.Config config) {
var object = config.root();
var path = ConfigValuePath.root();
var value = toObject(origin, object, path);
return new SimpleConfig(origin, value);
} | @Test
void testFromHocon() {
var hocon = ConfigFactory.parseString("""
testObject {
f1 = "test1"
}
test {
f1 = "test1"
f2 = prefix_${testObject.f1}_suffix
f3 = 10
f4 = 15 seconds
f5 = true
}
testArray = [1, 2, 3]
""")
.resolve();
var config = HoconConfigFactory.fromHocon(new SimpleConfigOrigin(""), hocon);
assertThat(config.get("testObject")).isInstanceOf(ConfigValue.ObjectValue.class);
assertThat(config.get("testObject.f1")).isInstanceOf(ConfigValue.StringValue.class)
.extracting(ConfigValue::asString).isEqualTo("test1");
assertThat(config.get("test")).isInstanceOf(ConfigValue.ObjectValue.class);
assertThat(config.get("test.f1")).isInstanceOf(ConfigValue.StringValue.class)
.extracting(ConfigValue::asString).isEqualTo("test1");
assertThat(config.get("test.f2")).isInstanceOf(ConfigValue.StringValue.class)
.extracting(ConfigValue::asString).isEqualTo("prefix_test1_suffix");
assertThat(config.get("test.f3")).isInstanceOf(ConfigValue.NumberValue.class)
.extracting(ConfigValue::asNumber).isEqualTo(10);
assertThat(config.get("test.f4")).isInstanceOf(ConfigValue.StringValue.class)
.extracting(ConfigValue::asString).isEqualTo("15 seconds");
assertThat(config.get("test.f5")).isInstanceOf(ConfigValue.BooleanValue.class)
.extracting(ConfigValue::asBoolean).isEqualTo(true);
assertThat(config.get("testArray")).isInstanceOf(ConfigValue.ArrayValue.class)
.extracting(v -> v.asArray().value().stream().map(ConfigValue::value).toList())
.isEqualTo(List.of(1, 2, 3));
} |
public String process(final Expression expression) {
return formatExpression(expression);
} | @Test
public void shouldGenerateCorrectCodeForTimestampStringEQ() {
// Given:
final ComparisonExpression compExp = new ComparisonExpression(
Type.EQUAL,
TIMESTAMPCOL,
new StringLiteral("2020-01-01T00:00:00")
);
// When:
final String java = sqlToJavaVisitor.process(compExp);
// Then:
assertThat(java, containsString("(((java.sql.Timestamp) arguments.get(\"COL10\")).compareTo(SqlTimeTypes.parseTimestamp(\"2020-01-01T00:00:00\")) == 0)"));
} |
static PrintStream create(
Handler handler, String loggerName, Level messageLevel, Charset charset) {
try {
return new JulHandlerPrintStream(handler, loggerName, messageLevel, charset);
} catch (UnsupportedEncodingException exc) {
throw new RuntimeException("Encoding not supported: " + charset.name(), exc);
}
} | @Test
public void testLogRecordMetadata() {
PrintStream printStream =
JulHandlerPrintStreamAdapterFactory.create(
handler, "fooLogger", Level.WARNING, StandardCharsets.UTF_8);
printStream.println("anyMessage");
assertThat(handler.getLogs(), not(empty()));
LogRecord log = Iterables.get(handler.getLogs(), 0);
assertThat(log.getLevel(), is(Level.WARNING));
assertThat(log.getLoggerName(), is("fooLogger"));
} |
@Override
public void handle(final ClassicHttpResponse response) {
final int statusCode = response.getCode();
try {
if (statusCode == HttpStatus.SC_OK && response.getEntity().getContent() != null) {
final String content = EntityUtils.toString(response.getEntity());
if (content.length() > 0) {
log.warn(content);
}
}
} catch (final ParseException | IOException e) {
log.error("Error while parsing the Version check response ", e);
}
} | @Test
public void testHandle() throws IOException {
// Given
final ClassicHttpResponse response = mock(ClassicHttpResponse.class);
final HttpEntity entity = mock(HttpEntity.class);
final Logger log = mock(Logger.class);
expect(response.getCode()).andReturn(HttpStatus.SC_OK).once();
expect(response.getEntity()).andReturn(entity).times(2);
final ByteArrayInputStream bais = new ByteArrayInputStream("yolo".getBytes(StandardCharsets.UTF_8));
expect(entity.getContent()).andReturn(bais).times(2);
expect(entity.getContentType()).andReturn("content-type").times(1);
expect(entity.getContentLength()).andReturn(4L).times(1);
log.warn("yolo");
expectLastCall().once();
replay(response, entity, log);
final KsqlVersionCheckerResponseHandler kvcr = new KsqlVersionCheckerResponseHandler(log);
// When
kvcr.handle(response);
// Then
verify(response, entity, log);
} |
@Override
public boolean processLine(String line) throws IOException {
BugPatternInstance pattern = new Gson().fromJson(line, BugPatternInstance.class);
pattern.severity = severityRemapper.apply(pattern);
result.add(pattern);
// replace spaces in filename with underscores
Path checkPath = Paths.get(pattern.name.replace(' ', '_') + ".md");
try (Writer writer = Files.newBufferedWriter(outputDir.resolve(checkPath), UTF_8)) {
// load side-car explanation file, if it exists
Path sidecarExplanation = explanationDir.resolve(checkPath);
if (Files.exists(sidecarExplanation)) {
if (!pattern.explanation.isEmpty()) {
throw new AssertionError(
String.format(
"%s specifies an explanation via @BugPattern and side-car", pattern.name));
}
pattern.explanation = new String(Files.readAllBytes(sidecarExplanation), UTF_8).trim();
}
// Construct an appropriate page for this {@code BugPattern}. Include altNames if
// there are any, and explain the correct way to suppress.
ImmutableMap.Builder<String, Object> templateData =
ImmutableMap.<String, Object>builder()
.put("tags", Joiner.on(", ").join(pattern.tags))
.put("severity", pattern.severity)
.put("name", pattern.name)
.put("className", pattern.className)
.put("summary", pattern.summary.trim())
.put("altNames", Joiner.on(", ").join(pattern.altNames))
.put("explanation", pattern.explanation.trim());
if (baseUrl != null) {
templateData.put("baseUrl", baseUrl);
}
if (generateFrontMatter) {
ImmutableMap<String, String> frontmatterData =
ImmutableMap.<String, String>builder()
.put("title", pattern.name)
.put("summary", pattern.summary)
.put("layout", "bugpattern")
.put("tags", Joiner.on(", ").join(pattern.tags))
.put("severity", pattern.severity.toString())
.buildOrThrow();
DumperOptions options = new DumperOptions();
options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
Yaml yaml =
new Yaml(
new SafeConstructor(new LoaderOptions()),
new Representer(new DumperOptions()),
options);
Writer yamlWriter = new StringWriter();
yamlWriter.write("---\n");
yaml.dump(frontmatterData, yamlWriter);
yamlWriter.write("---\n");
templateData.put("frontmatter", yamlWriter.toString());
}
if (pattern.documentSuppression) {
String suppressionString;
if (pattern.suppressionAnnotations.length == 0) {
suppressionString = "This check may not be suppressed.";
} else {
suppressionString =
pattern.suppressionAnnotations.length == 1
? "Suppress false positives by adding the suppression annotation %s to the "
+ "enclosing element."
: "Suppress false positives by adding one of these suppression annotations to "
+ "the enclosing element: %s";
suppressionString =
String.format(
suppressionString,
Arrays.stream(pattern.suppressionAnnotations)
.map((String anno) -> standardizeAnnotation(anno, pattern.name))
.collect(Collectors.joining(", ")));
}
templateData.put("suppression", suppressionString);
}
MustacheFactory mf = new DefaultMustacheFactory();
Mustache mustache = mf.compile("com/google/errorprone/resources/bugpattern.mustache");
mustache.execute(writer, templateData.buildOrThrow());
}
return true;
} | @Test
public void regressionTest_frontmatter_pygments() throws Exception {
BugPatternFileGenerator generator =
new BugPatternFileGenerator(
wikiDir, explanationDirBase, true, null, input -> input.severity);
generator.processLine(BUGPATTERN_LINE);
String expected =
CharStreams.toString(
new InputStreamReader(
getClass().getResourceAsStream("testdata/DeadException_frontmatter_pygments.md"),
UTF_8));
String actual =
CharStreams.toString(Files.newBufferedReader(wikiDir.resolve("DeadException.md"), UTF_8));
assertThat(actual.trim()).isEqualTo(expected.trim());
} |
@Override
public void putAll(Map<? extends K, ? extends Writable> t) {
for (Map.Entry<? extends K, ? extends Writable> e:
t.entrySet()) {
put(e.getKey(), e.getValue());
}
} | @Test(timeout = 10000)
public void testPutAll() {
SortedMapWritable<Text> map1 = new SortedMapWritable<Text>();
SortedMapWritable<Text> map2 = new SortedMapWritable<Text>();
map1.put(new Text("key"), new Text("value"));
map2.putAll(map1);
assertEquals("map1 entries don't match map2 entries", map1, map2);
assertTrue(
"map2 doesn't have class information from map1",
map2.classToIdMap.containsKey(Text.class)
&& map2.idToClassMap.containsValue(Text.class));
} |
@Override
public void run() {
try { // make sure we call afterRun() even on crashes
// and operate countdown latches, else we may hang the parallel runner
if (steps == null) {
beforeRun();
}
if (skipped) {
return;
}
int count = steps.size();
int index = 0;
while ((index = nextStepIndex()) < count) {
currentStep = steps.get(index);
execute(currentStep);
if (currentStepResult != null) { // can be null if debug step-back or hook skip
result.addStepResult(currentStepResult);
}
}
} catch (Exception e) {
if (currentStepResult != null) {
result.addStepResult(currentStepResult);
}
logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
} finally {
if (!skipped) {
afterRun();
if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
featureRuntime.suite.abort();
}
}
if (caller.isNone()) {
logAppender.close(); // reclaim memory
}
}
} | @Test
void testCallKarateFeature() {
run(
"def b = 'bar'",
"def res = call read('called1.feature')"
);
matchVar("res", "{ a: 1, foo: { hello: 'world' } }");
run(
"def b = 'bar'",
"def res = call read('called1.feature') { foo: 'bar' }"
);
matchVar("res", "{ a: 1, foo: { hello: 'world' } }");
run(
"def b = 'bar'",
"def res = call read('called1.feature') [{ foo: 'bar' }]"
);
matchVar("res", "[{ a: 1, foo: { hello: 'world' } }]");
run(
"def b = 'bar'",
"def fun = function(i){ if (i == 1) return null; return { index: i } }",
"def res = call read('called1.feature') fun"
);
matchVar("res", "[{ a: 1, foo: { hello: 'world' }, index: 0 }]");
} |
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
} | @Test
public void poolReSubscribesAndReadsRecordsWithTrimHorizon() throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis.stubSubscribeToShard("shard-000", eventWithRecords(3));
kinesis.stubSubscribeToShard("shard-001", eventWithRecords(11, 3));
KinesisReaderCheckpoint initialCheckpoint =
new KinesisReaderCheckpoint(
ImmutableList.of(
trimHorizonCheckpoint("shard-000"), trimHorizonCheckpoint("shard-001")));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis);
pool.start(initialCheckpoint);
PoolAssertion.assertPool(pool)
.givesCheckPointedRecords(
ShardAssertion.shard("shard-000")
.gives(KinesisRecordView.generate("shard-000", 0, 3))
.withLastCheckpointSequenceNumber(2),
ShardAssertion.shard("shard-001")
.gives(KinesisRecordView.generate("shard-001", 11, 3))
.withLastCheckpointSequenceNumber(13));
assertThat(kinesis.subscribeRequestsSeen())
.containsExactlyInAnyOrder(
subscribeTrimHorizon("shard-000"),
subscribeTrimHorizon("shard-001"),
subscribeAfterSeqNumber("shard-000", "2"),
subscribeAfterSeqNumber("shard-001", "13"));
} |
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof TCLStatement) {
return new ShardingDatabaseBroadcastRoutingEngine();
}
if (sqlStatement instanceof DDLStatement) {
if (sqlStatementContext instanceof CursorAvailable) {
return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
}
return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
}
if (sqlStatement instanceof DALStatement) {
return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
}
if (sqlStatement instanceof DCLStatement) {
return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
}
return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
} | @Test
void assertNewInstanceForAlwaysFalse() {
SQLStatement sqlStatement = mock(SQLStatement.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement);
QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
ShardingRouteEngine actual =
ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
assertThat(actual, instanceOf(ShardingUnicastRoutingEngine.class));
} |
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("System");
setAttribute(protobuf, "Server ID", server.getId());
setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel());
setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode());
setAttribute(protobuf, "Container", containerSupport.isRunningInContainer());
setAttribute(protobuf, "High Availability", true);
setAttribute(protobuf, "External Users and Groups Provisioning",
commonSystemInformation.getManagedInstanceProviderName());
setAttribute(protobuf, "External User Authentication",
commonSystemInformation.getExternalUserAuthentication());
addIfNotEmpty(protobuf, "Accepted external identity providers",
commonSystemInformation.getEnabledIdentityProviders());
addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up",
commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders());
setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication());
return protobuf.build();
} | @Test
public void toProtobuf_whenInstanceIsManaged_shouldWriteItsProviderName() {
when(commonSystemInformation.getManagedInstanceProviderName()).thenReturn("Okta");
ProtobufSystemInfo.Section protobuf = underTest.toProtobuf();
assertThatAttributeIs(protobuf, "External Users and Groups Provisioning", "Okta");
} |
public void upgrade() {
viewService.streamAll().forEach(view -> {
final Optional<User> user = view.owner().map(userService::load);
if (user.isPresent() && !user.get().isLocalAdmin()) {
final GRNType grnType = ViewDTO.Type.DASHBOARD.equals(view.type()) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH;
final GRN target = grnType.toGRN(view.id());
ensureGrant(user.get(), target);
}
});
} | @Test
@DisplayName("dont migrate admin owners")
void dontMigrateAdminOwners() {
final GRN testuserGRN = GRNTypes.USER.toGRN("testuser");
final GRN search = GRNTypes.SEARCH.toGRN("54e3deadbeefdeadbeef0001");
final User testuser = mock(User.class);
when(testuser.getName()).thenReturn("testuser");
when(testuser.getId()).thenReturn("testuser");
final User adminuser = mock(User.class);
when(adminuser.isLocalAdmin()).thenReturn(true);
when(userService.load("testuser")).thenReturn(testuser);
when(userService.load("admin")).thenReturn(adminuser);
migration.upgrade();
assertThat(grantService.hasGrantFor(testuserGRN, Capability.OWN, search)).isFalse();
} |
@Override
public void preflight(final Path source, final Path target) throws BackgroundException {
if(!CteraTouchFeature.validate(target.getName())) {
throw new InvalidFilenameException(MessageFormat.format(LocaleFactory.localizedString("Cannot rename {0}", "Error"), source.getName())).withFile(source);
}
assumeRole(source, DELETEPERMISSION);
// defaults to Acl.EMPTY (disabling role checking) if target does not exist
assumeRole(target, WRITEPERMISSION);
// no createfilespermission required for now
if(source.isDirectory()) {
assumeRole(target.getParent(), target.getName(), CREATEDIRECTORIESPERMISSION);
}
} | @Test
public void testPreflightFileAccessGrantedCustomProps() throws Exception {
final Path source = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
source.setAttributes(source.attributes().withAcl(new Acl(new Acl.CanonicalUser(), CteraAttributesFinderFeature.DELETEPERMISSION)));
final Path target = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
target.setAttributes(target.attributes().withAcl(new Acl(new Acl.CanonicalUser(), CteraAttributesFinderFeature.WRITEPERMISSION)));
new CteraMoveFeature(session).preflight(source, target);
// assert no fail
} |
@Bean("EsClient")
public EsClient provide(Configuration config) {
Settings.Builder esSettings = Settings.builder();
// mandatory property defined by bootstrap process
esSettings.put("cluster.name", config.get(CLUSTER_NAME.getKey()).get());
boolean clusterEnabled = config.getBoolean(CLUSTER_ENABLED.getKey()).orElse(false);
boolean searchNode = !clusterEnabled || SEARCH.equals(NodeType.parse(config.get(CLUSTER_NODE_TYPE.getKey()).orElse(null)));
List<HttpHost> httpHosts;
if (clusterEnabled && !searchNode) {
httpHosts = getHttpHosts(config);
LOGGER.info("Connected to remote Elasticsearch: [{}]", displayedAddresses(httpHosts));
} else {
// defaults provided in:
// * in org.sonar.process.ProcessProperties.Property.SEARCH_HOST
// * in org.sonar.process.ProcessProperties.Property.SEARCH_PORT
HostAndPort host = HostAndPort.fromParts(config.get(SEARCH_HOST.getKey()).get(), config.getInt(SEARCH_PORT.getKey()).get());
httpHosts = Collections.singletonList(toHttpHost(host, config));
LOGGER.info("Connected to local Elasticsearch: [{}]", displayedAddresses(httpHosts));
}
return new EsClient(config.get(CLUSTER_SEARCH_PASSWORD.getKey()).orElse(null),
config.get(CLUSTER_ES_HTTP_KEYSTORE.getKey()).orElse(null),
config.get(CLUSTER_ES_HTTP_KEYSTORE_PASSWORD.getKey()).orElse(null),
httpHosts.toArray(new HttpHost[0]));
} | @Test
public void es_client_provider_must_throw_IAE_when_incorrect_port_is_used() {
settings.setProperty(CLUSTER_ENABLED.getKey(), true);
settings.setProperty(CLUSTER_NODE_TYPE.getKey(), "search");
settings.setProperty(SEARCH_HOST.getKey(), "localhost");
settings.setProperty(SEARCH_PORT.getKey(), "100000");
assertThatThrownBy(() -> underTest.provide(settings.asConfig()))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Port out of range: 100000");
} |
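
The "Port out of range" message asserted in the test is raised by Guava's HostAndPort, which validates the port at construction time. A minimal sketch of that behavior (the exact message wording is quoted from the test above):

import com.google.common.net.HostAndPort;

public class HostAndPortSketch {
    public static void main(String[] args) {
        System.out.println(HostAndPort.fromParts("localhost", 9200)); // localhost:9200
        try {
            HostAndPort.fromParts("localhost", 100000); // ports must be in [0, 65535]
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // "Port out of range: 100000"
        }
    }
}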
public void setRemoteHost(String remoteHost) {
this.remoteHost = remoteHost;
} | @Test
public void testStartNoPort() throws Exception {
receiver.setRemoteHost(TEST_HOST_NAME);
receiver.start();
assertFalse(receiver.isStarted());
int count = lc.getStatusManager().getCount();
Status status = lc.getStatusManager().getCopyOfStatusList().get(count - 1);
assertTrue(status.getMessage().contains("port"));
} |
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
checkNotNull(actual);
if (!actual.entrySet().contains(entry)) {
List<@Nullable Object> keyList = singletonList(key);
List<@Nullable Object> valueList = singletonList(value);
if (actual.containsKey(key)) {
Object actualValue = actual.get(key);
/*
* In the case of a null expected or actual value, clarify that the key *is* present and
* *is* expected to be present. That is, get() isn't returning null to indicate that the key
* is missing, and the user isn't making an assertion that the key is missing.
*/
StandardSubjectBuilder check = check("get(%s)", key);
if (value == null || actualValue == null) {
check = check.withMessage("key is present but with a different value");
}
// See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
} else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain keys",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (actual.containsValue(value)) {
Set<@Nullable Object> keys = new LinkedHashSet<>();
for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
if (Objects.equal(actualEntry.getValue(), value)) {
keys.add(actualEntry.getKey());
}
}
failWithoutActual(
fact("expected to contain entry", entry),
simpleFact("but did not"),
fact("though it did contain keys with that value", keys),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (hasMatchingToStringPair(actual.values(), valueList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain values",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else {
failWithActual("expected to contain entry", entry);
}
}
} | @Test
public void failMapContainsKeyWithValue() {
ImmutableMap<String, String> actual = ImmutableMap.of("a", "A");
expectFailureWhenTestingThat(actual).containsEntry("a", "a");
assertFailureValue("value of", "map.get(a)");
assertFailureValue("expected", "a");
assertFailureValue("but was", "A");
assertFailureValue("map was", "{a=A}");
assertThat(expectFailure.getFailure())
.hasMessageThat()
.doesNotContain(KEY_IS_PRESENT_WITH_DIFFERENT_VALUE);
} |
public static Future<Void> maybeUpdateMetadataVersion(
Reconciliation reconciliation,
Vertx vertx,
TlsPemIdentity coTlsPemIdentity,
AdminClientProvider adminClientProvider,
String desiredMetadataVersion,
KafkaStatus status
) {
String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT;
LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace());
Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());
Promise<Void> updatePromise = Promise.promise();
maybeUpdateMetadataVersion(reconciliation, vertx, kafkaAdmin, desiredMetadataVersion, status)
.onComplete(res -> {
// Close the Admin client and return the original result
LOGGER.debugCr(reconciliation, "Closing the Kafka Admin API connection");
kafkaAdmin.close();
updatePromise.handle(res);
});
return updatePromise.future();
} | @Test
public void testSuccessfulMetadataVersionUpgrade(VertxTestContext context) {
// Mock the Admin client
Admin mockAdminClient = mock(Admin.class);
// Mock describing the current metadata version
mockDescribeVersion(mockAdminClient);
// Mock updating metadata version
UpdateFeaturesResult ufr = mock(UpdateFeaturesResult.class);
when(ufr.values()).thenReturn(Map.of(KRaftMetadataManager.METADATA_VERSION_KEY, KafkaFuture.completedFuture(null)));
@SuppressWarnings(value = "unchecked")
ArgumentCaptor<Map<String, FeatureUpdate>> updateCaptor = ArgumentCaptor.forClass(Map.class);
when(mockAdminClient.updateFeatures(updateCaptor.capture(), any())).thenReturn(ufr);
// Mock the Admin client provider
AdminClientProvider mockAdminClientProvider = mockAdminClientProvider(mockAdminClient);
// Dummy KafkaStatus to check the values from
KafkaStatus status = new KafkaStatus();
Checkpoint checkpoint = context.checkpoint();
KRaftMetadataManager.maybeUpdateMetadataVersion(Reconciliation.DUMMY_RECONCILIATION, vertx, DUMMY_IDENTITY, mockAdminClientProvider, "3.6", status)
.onComplete(context.succeeding(s -> {
assertThat(status.getKafkaMetadataVersion(), is("3.6-IV2"));
verify(mockAdminClient, times(1)).updateFeatures(any(), any());
verify(mockAdminClient, times(1)).describeFeatures();
assertThat(updateCaptor.getAllValues().size(), is(1));
assertThat(updateCaptor.getValue().get(KRaftMetadataManager.METADATA_VERSION_KEY).upgradeType(), is(FeatureUpdate.UpgradeType.UPGRADE));
assertThat(updateCaptor.getValue().get(KRaftMetadataManager.METADATA_VERSION_KEY).maxVersionLevel(), is((short) 14));
checkpoint.flag();
}));
} |
@Override
public List<String> choices() {
if (commandLine.getArguments() == null) {
return Collections.emptyList();
}
List<String> argList = Lists.newArrayList();
String argOne = null;
if (argList.size() > 1) {
argOne = argList.get(1);
}
VplsCommandEnum vplsCommandEnum = VplsCommandEnum.enumFromString(argOne);
if (vplsCommandEnum != null) {
switch (vplsCommandEnum) {
case CREATE:
case LIST:
return Collections.emptyList();
default:
return VplsCommandEnum.toStringList();
}
}
return VplsCommandEnum.toStringList();
} | @Test
public void testNameCompleter() {
VplsNameCompleter vplsNameCompleter = new VplsNameCompleter();
vplsNameCompleter.vpls = new TestVpls();
((TestVpls) vplsNameCompleter.vpls).initSampleData();
List<String> choices = vplsNameCompleter.choices();
List<String> expected = ImmutableList.of(VPLS1, VPLS2);
// Can not ensure the order, use contains all instead of equals
assertEquals(choices.size(), expected.size());
assertTrue(choices.containsAll(expected));
} |
@Override
public Clob getClob(final int columnIndex) throws SQLException {
return (Clob) mergeResultSet.getValue(columnIndex, Clob.class);
} | @Test
void assertGetClobWithColumnLabel() throws SQLException {
Clob clob = mock(Clob.class);
when(mergeResultSet.getValue(1, Clob.class)).thenReturn(clob);
assertThat(shardingSphereResultSet.getClob("label"), is(clob));
} |
@Override
public long read() {
return gaugeSource.read();
} | @Test
public void whenCreatedForDynamicDoubleMetricWithProvidedValue() {
SomeObject someObject = new SomeObject();
someObject.longField = 42;
metricsRegistry.registerDynamicMetricsProvider((tagger, context) -> context
.collect(tagger.withPrefix("foo"), "doubleField", INFO, BYTES, 41.65D));
LongGaugeImpl longGauge = metricsRegistry.newLongGauge("foo.doubleField");
// needed to collect dynamic metrics and update the gauge created from them
metricsRegistry.collect(mock(MetricsCollector.class));
assertEquals(42, longGauge.read());
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void externalFunctionDefinition() {
String inputExpression = "{ trigonometric cosine : function( angle ) external {"
+ " java : {"
+ " class : \"java.lang.Math\","
+ " method signature : \"cos(double)\""
+ " }"
+ "}}";
BaseNode ctxbase = parse( inputExpression );
assertThat( ctxbase).isInstanceOf(ContextNode.class);
assertThat( ctxbase.getText()).isEqualTo(inputExpression);
ContextNode ctx = (ContextNode) ctxbase;
assertThat( ctx.getEntries()).hasSize(1);
ContextEntryNode entry = ctx.getEntries().get( 0 );
assertThat( entry.getName()).isInstanceOf(NameDefNode.class);
NameDefNode name = (NameDefNode) entry.getName();
assertThat( name.getText()).isEqualTo("trigonometric cosine");
assertThat( entry.getValue()).isInstanceOf(FunctionDefNode.class);
assertThat( entry.getValue().getText()).isEqualTo("function( angle ) external {"
+ " java : {"
+ " class : \"java.lang.Math\","
+ " method signature : \"cos(double)\""
+ " }"
+ "}");
FunctionDefNode cos = (FunctionDefNode) entry.getValue();
assertThat( cos.getFormalParameters()).hasSize(1);
assertThat( cos.getFormalParameters().get( 0 ).getText()).isEqualTo("angle");
assertThat( cos.isExternal()).isEqualTo(true);
assertThat( cos.getBody()).isInstanceOf(ContextNode.class);
ContextNode body = (ContextNode) cos.getBody();
assertThat( body.getEntries()).hasSize(1);
ContextEntryNode java = body.getEntries().get( 0 );
assertThat( java.getName().getText()).isEqualTo("java");
assertThat( java.getValue()).isInstanceOf(ContextNode.class);
ContextNode def = (ContextNode) java.getValue();
assertThat( def.getEntries()).hasSize(2);
assertThat( def.getEntries().get( 0 ).getName().getText()).isEqualTo("class");
assertThat( def.getEntries().get( 0 ).getValue()).isInstanceOf(StringNode.class);
assertThat( def.getEntries().get( 0 ).getValue().getText()).isEqualTo( "\"java.lang.Math\"");
assertThat( def.getEntries().get( 1 ).getName().getText()).isEqualTo( "method signature");
assertThat( def.getEntries().get( 1 ).getValue()).isInstanceOf(StringNode.class);
assertThat( def.getEntries().get( 1 ).getValue().getText()).isEqualTo( "\"cos(double)\"");
} |
@Override
void toHtml() throws IOException {
writeHtmlHeader();
htmlCoreReport.toHtml();
writeHtmlFooter();
} | @Test
public void testWithCollectorServer() throws IOException {
final CollectorServer collectorServer = new CollectorServer();
try {
final HtmlReport htmlReport = new HtmlReport(collector, collectorServer,
javaInformationsList, Period.TOUT, writer);
htmlReport.toHtml(null, null);
assertNotEmptyAndClear(writer);
Utils.setProperty(Parameters.PARAMETER_SYSTEM_PREFIX + "mockLabradorRetriever", "true");
collectorServer.collectWithoutErrors();
htmlReport.toHtml(null, null);
assertNotEmptyAndClear(writer);
} finally {
collectorServer.stop();
}
} |
public static List<String> getJavaOpts(Configuration conf) {
String adminOpts = conf.get(YarnConfiguration.NM_CONTAINER_LOCALIZER_ADMIN_JAVA_OPTS_KEY,
YarnConfiguration.NM_CONTAINER_LOCALIZER_ADMIN_JAVA_OPTS_DEFAULT);
String userOpts = conf.get(YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY,
YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT);
boolean isExtraJDK17OptionsConfigured =
conf.getBoolean(YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_ADD_EXPORTS_KEY,
YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_ADD_EXPORTS_DEFAULT);
if (Shell.isJavaVersionAtLeast(17) && isExtraJDK17OptionsConfigured) {
userOpts = userOpts.trim().concat(" " + ADDITIONAL_JDK17_PLUS_OPTIONS);
}
List<String> adminOptionList = Arrays.asList(adminOpts.split("\\s+"));
List<String> userOptionList = Arrays.asList(userOpts.split("\\s+"));
return Stream.concat(adminOptionList.stream(), userOptionList.stream())
.filter(s -> !s.isEmpty())
.collect(Collectors.toList());
} | @Test
public void testAdminOptionsPrecedeUserDefinedJavaOptions() throws Exception {
ContainerLocalizerWrapper wrapper = new ContainerLocalizerWrapper();
ContainerLocalizer localizer = wrapper.setupContainerLocalizerForTest();
Configuration conf = new Configuration();
conf.setStrings(YarnConfiguration.NM_CONTAINER_LOCALIZER_ADMIN_JAVA_OPTS_KEY,
"adminOption1 adminOption2");
conf.setStrings(YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY,
" userOption1 userOption2");
List<String> javaOpts = localizer.getJavaOpts(conf);
Assert.assertEquals(4, javaOpts.size());
Assert.assertTrue(javaOpts.get(0).equals("adminOption1"));
Assert.assertTrue(javaOpts.get(1).equals("adminOption2"));
Assert.assertTrue(javaOpts.get(2).equals("userOption1"));
Assert.assertTrue(javaOpts.get(3).equals("userOption2"));
} |
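
The precedence rule the test pins down, admin options ahead of user options, falls out of Stream.concat, which emits all elements of its first stream before the second. A stripped-down sketch of the same merge-and-filter step:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JavaOptsSketch {
    public static void main(String[] args) {
        List<String> admin = Arrays.asList("adminOption1 adminOption2".split("\\s+"));
        List<String> user = Arrays.asList(" userOption1 userOption2".split("\\s+"));
        // Stream.concat keeps admin entries first; the filter drops the empty
        // token produced by the leading space in the user string.
        List<String> merged = Stream.concat(admin.stream(), user.stream())
                .filter(s -> !s.isEmpty())
                .collect(Collectors.toList());
        System.out.println(merged); // [adminOption1, adminOption2, userOption1, userOption2]
    }
}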
@Override
public boolean contains(Object objectToCheck) {
return contains(objectToCheck, objectToCheck.hashCode());
} | @Test(expected = NullPointerException.class)
public void testContainsNull() {
final OAHashSet<Integer> set = new OAHashSet<>(8);
set.contains(null);
} |
@Override
public Result apply(ApplyNode applyNode, Captures captures, Context context)
{
return Result.ofPlanNode(applyNode.getInput());
} | @Test
public void testEmptyAssignments()
{
tester().assertThat(new RemoveUnreferencedScalarApplyNodes())
.on(p -> p.apply(
Assignments.of(),
ImmutableList.of(),
p.values(p.variable("x")),
p.values(p.variable("y"))))
.matches(values("x"));
} |
public static LookupDefaultMultiValue create(String valueString, LookupDefaultValue.Type valueType) {
requireNonNull(valueString, "valueString cannot be null");
requireNonNull(valueType, "valueType cannot be null");
Map<Object, Object> value;
try {
switch (valueType) {
case OBJECT:
value = OBJECT_MAPPER.readValue(valueString, TypeReferences.MAP_OBJECT_OBJECT);
break;
case NULL:
value = null;
break;
default:
throw new IllegalArgumentException("Could not convert <" + valueString + "> to multi value type <" + valueType + ">");
}
} catch (IllegalArgumentException e) {
throw e;
} catch (Exception e) {
throw new IllegalArgumentException("Could not parse JSON "+ valueType.toString().toLowerCase(Locale.ENGLISH) + " value <" + valueString + ">", e);
}
return builder()
.valueString(valueString)
.valueType(valueType)
.value(value)
.build();
} | @Test
public void createSingleBoolean() throws Exception {
expectedException.expect(IllegalArgumentException.class);
LookupDefaultMultiValue.create("true", LookupDefaultMultiValue.Type.BOOLEAN);
} |
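
The OBJECT branch relies on Jackson's TypeReference to carry the generic Map type through erasure; TypeReferences.MAP_OBJECT_OBJECT is presumably such a constant. A self-contained sketch of the same parse (the JSON payload is illustrative):

import java.util.Map;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MultiValueParseSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // The anonymous TypeReference subclass captures Map<Object, Object>,
        // so Jackson returns a typed map instead of a plain Object.
        Map<Object, Object> value = mapper.readValue(
                "{\"user\": \"jane\", \"id\": 42}",
                new TypeReference<Map<Object, Object>>() {});
        System.out.println(value); // {user=jane, id=42}
    }
}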
public static DataList convertListRetainingNulls(List<?> input)
{
return convertList(input, true, false);
} | @Test
void testConvertListWithRetainingNull()
{
List<String> listWithNull = Arrays.asList("element1", null, "element2");
DataList dataList = DataComplexUtil.convertListRetainingNulls(listWithNull);
Assert.assertNotNull(dataList);
Assert.assertEquals(dataList.size(), 3);
Assert.assertEquals(dataList.get(0), "element1");
Assert.assertEquals(dataList.get(1), Data.NULL);
Assert.assertEquals(dataList.get(2), "element2");
} |
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
if (statsEnabled) {
stats.log(deviceStateServiceMsg);
}
stateService.onQueueMsg(deviceStateServiceMsg, callback);
} | @Test
public void givenProcessingFailure_whenForwardingActivityMsgToStateService_thenOnFailureCallbackIsCalled() {
// GIVEN
var activityMsg = TransportProtos.DeviceActivityProto.newBuilder()
.setTenantIdMSB(tenantId.getId().getMostSignificantBits())
.setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
.setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
.setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
.setLastActivityTime(time)
.build();
doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(activityMsg, tbCallbackMock);
var runtimeException = new RuntimeException("Something bad happened!");
doThrow(runtimeException).when(stateServiceMock).onDeviceActivity(tenantId, deviceId, time);
// WHEN
defaultTbCoreConsumerServiceMock.forwardToStateService(activityMsg, tbCallbackMock);
// THEN
then(tbCallbackMock).should(never()).onSuccess();
var exceptionCaptor = ArgumentCaptor.forClass(Throwable.class);
then(tbCallbackMock).should().onFailure(exceptionCaptor.capture());
assertThat(exceptionCaptor.getValue())
.isInstanceOf(RuntimeException.class)
.hasMessage("Failed to update device activity for device [" + deviceId.getId() + "]!")
.hasCause(runtimeException);
} |
@Override
public SyntheticSourceReader createReader(PipelineOptions pipelineOptions) {
return new SyntheticSourceReader(this);
} | @Test
public void testRegressingProgress() throws Exception {
PipelineOptions options = PipelineOptionsFactory.create();
testSourceOptions.progressShape = ProgressShape.LINEAR_REGRESSING;
SyntheticBoundedSource source = new SyntheticBoundedSource(testSourceOptions);
BoundedSource.BoundedReader<KV<byte[], byte[]>> reader = source.createReader(options);
double lastFractionConsumed = reader.getFractionConsumed();
for (boolean more = reader.start(); more; more = reader.advance()) {
assertTrue(reader.getFractionConsumed() <= lastFractionConsumed);
lastFractionConsumed = reader.getFractionConsumed();
}
} |
@Override
public SchemaAndValue get(final ProcessingLogConfig config) {
final Struct struct = new Struct(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA)
.put(ProcessingLogMessageSchema.TYPE, MessageType.SERIALIZATION_ERROR.getTypeId())
.put(ProcessingLogMessageSchema.SERIALIZATION_ERROR, serializationError(config));
return new SchemaAndValue(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA, struct);
} | @Test
public void shouldBuildSerializationError() {
// Given:
final SerializationError<GenericRow> serError = new SerializationError<>(
ERROR,
Optional.of(RECORD),
TOPIC,
false
);
// When:
final SchemaAndValue msg = serError.get(LOGGING_CONFIG);
// Then:
final Schema schema = msg.schema();
assertThat(schema, equalTo(PROCESSING_LOG_SCHEMA));
final Struct struct = (Struct) msg.value();
assertThat(
struct.get(ProcessingLogMessageSchema.TYPE),
equalTo(ProcessingLogMessageSchema.MessageType.SERIALIZATION_ERROR.getTypeId()));
final Struct serializationError = struct.getStruct(SERIALIZATION_ERROR);
assertThat(
serializationError.get(SERIALIZATION_ERROR_FIELD_TARGET),
equalTo("value")
);
assertThat(
serializationError.get(SERIALIZATION_ERROR_FIELD_MESSAGE),
equalTo(ERROR.getMessage())
);
assertThat(
serializationError.get(SERIALIZATION_ERROR_FIELD_CAUSE),
equalTo(CAUSE_LIST)
);
assertThat(
serializationError.get(SERIALIZATION_ERROR_FIELD_RECORD),
equalTo(RECORD.toString())
);
assertThat(
serializationError.get(SERIALIZATION_ERROR_FIELD_TOPIC),
equalTo(TOPIC)
);
schema.fields().forEach(
f -> {
if (!ImmutableList.of(TYPE, SERIALIZATION_ERROR).contains(f.name())) {
assertThat(struct.get(f), is(nullValue()));
}
}
);
} |
@Override
public int offer(E e) {
@SuppressWarnings("deprecation")
long z = mix64(Thread.currentThread().getId());
int increment = ((int) (z >>> 32)) | 1;
int h = (int) z;
int mask;
int result;
Buffer<E> buffer;
boolean uncontended = true;
Buffer<E>[] buffers = table;
if ((buffers == null)
|| ((mask = buffers.length - 1) < 0)
|| ((buffer = buffers[h & mask]) == null)
|| !(uncontended = ((result = buffer.offer(e)) != Buffer.FAILED))) {
return expandOrRetry(e, h, increment, uncontended);
}
return result;
} | @Test(dataProvider = "buffers")
public void init(FakeBuffer<Integer> buffer) {
assertThat(buffer.table).isNull();
var result = buffer.offer(ELEMENT);
assertThat(buffer.table).hasLength(1);
assertThat(result).isEqualTo(Buffer.SUCCESS);
} |
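
The stripe selection in offer mixes the thread id into a well-spread hash, then masks it into a power-of-two table, so contending threads usually land on different buffers. A toy version of just that selection step (the mix shown is Murmur3's fmix64 finalizer; whether it matches this class's mix64 exactly is an assumption):

public class StripeSketch {
    // Murmur3 fmix64: spreads the low-entropy thread id across all 64 bits.
    static long mix64(long z) {
        z = (z ^ (z >>> 33)) * 0xff51afd7ed558ccdL;
        z = (z ^ (z >>> 33)) * 0xc4ceb9fe1a85ec53L;
        return z ^ (z >>> 33);
    }

    public static void main(String[] args) {
        int mask = 8 - 1; // table length is a power of two, so & is a cheap modulo
        for (long threadId = 1; threadId <= 4; threadId++) {
            long z = mix64(threadId);
            System.out.println("thread " + threadId + " -> stripe " + ((int) z & mask));
        }
    }
}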
String getBcp47Locale(boolean canonicalize) {
StringBuilder str = new StringBuilder();
// This represents the "any" locale value, which has traditionally been
// represented by the empty string.
if (language[0] == '\0' && country[0] == '\0') {
return "";
}
if (language[0] != '\0') {
if (canonicalize && areIdentical(language, kTagalog)) {
// Replace Tagalog with Filipino if we are canonicalizing
str.setLength(0);
str.append("fil"); // 3-letter code for Filipino
} else {
str.append(unpackLanguage());
}
}
if (isTruthy(localeScript[0]) && !localeScriptWasComputed) {
if (str.length() > 0) {
str.append('-');
}
for (byte aLocaleScript : localeScript) {
str.append((char) aLocaleScript);
}
}
if (country[0] != '\0') {
if (str.length() > 0) {
str.append('-');
}
String regionStr = unpackRegion();
str.append(regionStr);
}
if (isTruthy(localeVariant[0])) {
if (str.length() > 0) {
str.append('-');
}
for (byte aLocaleScript : localeVariant) {
str.append((char) aLocaleScript);
}
}
// Add Unicode extension only if at least one other locale component is present
if (localeNumberingSystem[0] != '\0' && str.length() > 0) {
String NU_PREFIX = "-u-nu-";
str.append(NU_PREFIX);
str.append(new String(localeNumberingSystem, UTF_8));
}
return str.toString();
} | @Test
public void getBcp47Locale_philippines_shouldReturnFil() {
ResTable_config resTable_config = new ResTable_config();
resTable_config.language[0] = 't';
resTable_config.language[1] = 'l';
resTable_config.country[0] = 'p';
resTable_config.country[1] = 'h';
assertThat(resTable_config.getBcp47Locale(/* canonicalize= */ true)).isEqualTo("fil-ph");
} |
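
For contrast, the JDK's Locale API assembles the same flavor of BCP-47 tag (language, then script, then region) that the method above builds by hand; the tags below are illustrative:

import java.util.Locale;

public class Bcp47Sketch {
    public static void main(String[] args) {
        // Language "fil" (Filipino) plus region "PH" round-trips unchanged.
        System.out.println(Locale.forLanguageTag("fil-PH").toLanguageTag()); // fil-PH
        // Script slots in between language and region, matching the method's ordering.
        Locale sr = new Locale.Builder()
                .setLanguage("sr").setScript("Latn").setRegion("RS").build();
        System.out.println(sr.toLanguageTag()); // sr-Latn-RS
    }
}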
public static boolean isAnyNullOrEmpty(String... args) {
for (String arg : args)
if (StringUtil.isNullOrEmpty(arg))
return true;
return false;
} | @Test
void testIsAnyNullOrEmpty() {
assertTrue(StringUtil.isAnyNullOrEmpty("", "a", null));
assertTrue(StringUtil.isAnyNullOrEmpty("", "a"));
assertTrue(StringUtil.isAnyNullOrEmpty("a", null));
assertTrue(StringUtil.isAnyNullOrEmpty(""));
assertTrue(StringUtil.isAnyNullOrEmpty((String) null));
assertFalse(StringUtil.isAnyNullOrEmpty("a", "b", "c"));
} |
public Map<Object, Double> getTopValues(int number) {
AssertUtil.isTrue(number > 0, "number must be positive");
metric.currentWindow();
List<CacheMap<Object, LongAdder>> buckets = metric.values();
Map<Object, Long> result = new HashMap<>(buckets.size());
for (CacheMap<Object, LongAdder> b : buckets) {
Set<Object> subSet = b.keySet(true);
for (Object o : subSet) {
Long count = result.get(o);
if (count == null) {
count = getCount(b.get(o));
} else {
count += getCount(b.get(o));
}
result.put(o, count);
}
}
// After merge, get the top set one.
Set<Entry<Object, Long>> set = result.entrySet();
List<Entry<Object, Long>> list = new ArrayList<>(set);
Collections.sort(list, new Comparator<Entry<Object, Long>>() {
@Override
public int compare(Entry<Object, Long> a,
Entry<Object, Long> b) {
return (int) (b.getValue() == null ? 0 : b.getValue()) - (int) (a.getValue() == null ? 0 : a.getValue());
}
});
Map<Object, Double> doubleResult = new HashMap<Object, Double>();
int size = list.size() > number ? number : list.size();
for (int i = 0; i < size; i++) {
Map.Entry<Object, Long> x = list.get(i);
if (x.getValue() == 0) {
break;
}
doubleResult.put(x.getKey(), ((double) x.getValue()) / metric.getIntervalInSecond());
}
return doubleResult;
} | @Test(expected = IllegalArgumentException.class)
public void testIllegalArgument() {
ClusterParamMetric metric = new ClusterParamMetric(5, 25, 100);
metric.getTopValues(-1);
} |
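
The merge step in getTopValues is the standard sliding-window top-k shape: fold per-bucket counts into one map, then sort entries by count. The same fold in miniature, with Map.merge replacing the explicit null check (bucket contents are illustrative):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TopValuesSketch {
    public static void main(String[] args) {
        List<Map<String, Long>> buckets = List.of(
                Map.of("a", 3L, "b", 1L),
                Map.of("a", 2L, "c", 5L));
        Map<String, Long> merged = new HashMap<>();
        for (Map<String, Long> bucket : buckets) {
            bucket.forEach((k, v) -> merged.merge(k, v, Long::sum)); // sum across windows
        }
        List<Map.Entry<String, Long>> entries = new ArrayList<>(merged.entrySet());
        entries.sort(Map.Entry.<String, Long>comparingByValue(Comparator.reverseOrder()));
        System.out.println(entries.subList(0, 2)); // the two hottest keys: a=5 and c=5
    }
}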
@Override
public void validTenant(Long id) {
TenantDO tenant = getTenant(id);
if (tenant == null) {
throw exception(TENANT_NOT_EXISTS);
}
if (tenant.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
throw exception(TENANT_DISABLE, tenant.getName());
}
if (DateUtils.isExpired(tenant.getExpireTime())) {
throw exception(TENANT_EXPIRE, tenant.getName());
}
} | @Test
public void testValidTenant_success() {
// mock data
TenantDO tenant = randomPojo(TenantDO.class, o -> o.setId(1L).setStatus(CommonStatusEnum.ENABLE.getStatus())
.setExpireTime(LocalDateTime.now().plusDays(1)));
tenantMapper.insert(tenant);
// invoke; success means no exception is thrown
tenantService.validTenant(1L);
} |
public static Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> filterAndGenerateCheckpointBasedOnSourceLimit(Dataset<Row> sourceData,
long sourceLimit, QueryInfo queryInfo,
CloudObjectIncrCheckpoint cloudObjectIncrCheckpoint) {
if (sourceData.isEmpty()) {
// There is no file matching the prefix.
CloudObjectIncrCheckpoint updatedCheckpoint =
queryInfo.getEndInstant().equals(cloudObjectIncrCheckpoint.getCommit())
? cloudObjectIncrCheckpoint
: new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), null);
return Pair.of(updatedCheckpoint, Option.empty());
}
// Let's persist the dataset to avoid triggering the dag repeatedly
sourceData.persist(StorageLevel.MEMORY_AND_DISK());
// Set ordering in query to enable batching
Dataset<Row> orderedDf = QueryRunner.applyOrdering(sourceData, queryInfo.getOrderByColumns());
Option<String> lastCheckpoint = Option.of(cloudObjectIncrCheckpoint.getCommit());
Option<String> lastCheckpointKey = Option.ofNullable(cloudObjectIncrCheckpoint.getKey());
Option<String> concatenatedKey = lastCheckpoint.flatMap(checkpoint -> lastCheckpointKey.map(key -> checkpoint + key));
// Filter until last checkpoint key
if (concatenatedKey.isPresent()) {
orderedDf = orderedDf.withColumn("commit_key",
functions.concat(functions.col(queryInfo.getOrderColumn()), functions.col(queryInfo.getKeyColumn())));
// Apply incremental filter
orderedDf = orderedDf.filter(functions.col("commit_key").gt(concatenatedKey.get())).drop("commit_key");
// If there are no more files where commit_key is greater than lastCheckpointCommit#lastCheckpointKey
if (orderedDf.isEmpty()) {
LOG.info("Empty ordered source, returning endpoint:" + queryInfo.getEndInstant());
sourceData.unpersist();
// queryInfo.getEndInstant() represents source table's last completed instant
// If current checkpoint is c1#abc and queryInfo.getEndInstant() is c1, return c1#abc.
// If current checkpoint is c1#abc and queryInfo.getEndInstant() is c2, return c2.
CloudObjectIncrCheckpoint updatedCheckpoint =
queryInfo.getEndInstant().equals(cloudObjectIncrCheckpoint.getCommit())
? cloudObjectIncrCheckpoint
: new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), null);
return Pair.of(updatedCheckpoint, Option.empty());
}
}
// Limit based on sourceLimit
WindowSpec windowSpec = Window.orderBy(col(queryInfo.getOrderColumn()), col(queryInfo.getKeyColumn()));
// Add the 'cumulativeSize' column with running sum of 'limitColumn'
Dataset<Row> aggregatedData = orderedDf.withColumn(CUMULATIVE_COLUMN_NAME,
sum(col(queryInfo.getLimitColumn())).over(windowSpec));
Dataset<Row> collectedRows = aggregatedData.filter(col(CUMULATIVE_COLUMN_NAME).leq(sourceLimit));
Row row = null;
if (collectedRows.isEmpty()) {
// If the first element itself exceeds limits then return first element
LOG.info("First object exceeding source limit: " + sourceLimit + " bytes");
row = aggregatedData.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).first();
collectedRows = aggregatedData.limit(1);
} else {
// Get the last row and form composite key
row = collectedRows.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).orderBy(
col(queryInfo.getOrderColumn()).desc(), col(queryInfo.getKeyColumn()).desc()).first();
}
LOG.info("Processed batch size: " + row.get(row.fieldIndex(CUMULATIVE_COLUMN_NAME)) + " bytes");
sourceData.unpersist();
return Pair.of(new CloudObjectIncrCheckpoint(row.getString(0), row.getString(1)), Option.of(collectedRows));
} | @Test
void testEmptySource() {
StructType schema = new StructType();
Dataset<Row> emptyDataset = spark().createDataFrame(new ArrayList<Row>(), schema);
QueryInfo queryInfo = new QueryInfo(
QUERY_TYPE_INCREMENTAL_OPT_VAL(), "commit1", "commit1",
"commit2", "_hoodie_commit_time",
"s3.object.key", "s3.object.size");
Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> result = IncrSourceHelper.filterAndGenerateCheckpointBasedOnSourceLimit(
emptyDataset, 50L, queryInfo, new CloudObjectIncrCheckpoint(null, null));
assertEquals("commit2", result.getKey().toString());
assertTrue(!result.getRight().isPresent());
} |
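
Stripped of Spark, the batching rule is a running sum over ordered objects: keep rows while the cumulative size fits sourceLimit, but always keep at least the first one (the "first object exceeding source limit" branch). A plain-Java sketch of that rule; the S3Object shape is illustrative:

import java.util.ArrayList;
import java.util.List;

public class SourceLimitSketch {
    record S3Object(String key, long size) {}

    static List<S3Object> takeWithinLimit(List<S3Object> ordered, long sourceLimit) {
        List<S3Object> batch = new ArrayList<>();
        long cumulative = 0;
        for (S3Object o : ordered) {
            cumulative += o.size();
            if (cumulative > sourceLimit && !batch.isEmpty()) {
                break; // limit hit; the last kept key becomes the checkpoint
            }
            batch.add(o); // the first object is always taken, even if oversized
        }
        return batch;
    }

    public static void main(String[] args) {
        List<S3Object> files = List.of(
                new S3Object("a", 30), new S3Object("b", 15), new S3Object("c", 20));
        System.out.println(takeWithinLimit(files, 50)); // keeps a and b; c would reach 65
    }
}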
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
final long timestamp = TimeUnit.MILLISECONDS.toSeconds(clock.getTime());
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
reportGauge(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
reportCounter(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
reportHistogram(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
reportMeter(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
reportTimer(timestamp, entry.getKey(), entry.getValue());
}
} | @Test
public void reportsTimerValues() throws Exception {
final Timer timer = mock(Timer.class);
when(timer.getCount()).thenReturn(1L);
when(timer.getMeanRate()).thenReturn(2.0);
when(timer.getOneMinuteRate()).thenReturn(3.0);
when(timer.getFiveMinuteRate()).thenReturn(4.0);
when(timer.getFifteenMinuteRate()).thenReturn(5.0);
final Snapshot snapshot = mock(Snapshot.class);
when(snapshot.getMax()).thenReturn(TimeUnit.MILLISECONDS.toNanos(100));
when(snapshot.getMean()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(200));
when(snapshot.getMin()).thenReturn(TimeUnit.MILLISECONDS.toNanos(300));
when(snapshot.getStdDev()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(400));
when(snapshot.getMedian()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(500));
when(snapshot.get75thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(600));
when(snapshot.get95thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(700));
when(snapshot.get98thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(800));
when(snapshot.get99thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(900));
when(snapshot.get999thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(1000));
when(timer.getSnapshot()).thenReturn(snapshot);
reporter.report(map(),
map(),
map(),
map(),
map("test.another.timer", timer));
assertThat(fileContents("test.another.timer.csv"))
.isEqualTo(csv(
"t,count,max,mean,min,stddev,p50,p75,p95,p98,p99,p999,mean_rate,m1_rate,m5_rate,m15_rate,rate_unit,duration_unit",
"19910191,1,100.000000,200.000000,300.000000,400.000000,500.000000,600.000000,700.000000,800.000000,900.000000,1000.000000,2.000000,3.000000,4.000000,5.000000,calls/second,milliseconds"
));
} |
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
} | @Test
public void kGroupedStreamNamedMaterializedCountShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count-store")
.withStoreType(Materialized.StoreType.IN_MEMORY));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000001\n" +
" Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
} |
public static String normalizeUri(String uri) throws URISyntaxException {
// try to parse using the simpler and faster Camel URI parser
String[] parts = CamelURIParser.fastParseUri(uri);
if (parts != null) {
// we optimized specially if an empty array is returned
if (parts == URI_ALREADY_NORMALIZED) {
return uri;
}
// use the faster and more simple normalizer
return doFastNormalizeUri(parts);
} else {
// use the legacy normalizer as the uri is complex and may have unsafe URL characters
return doComplexNormalizeUri(uri);
}
} | @Test
public void testNormalizeHttpEndpointUnicodedParameter() throws Exception {
String out = URISupport.normalizeUri("http://www.google.com?q=S\u00F8ren");
assertEquals("http://www.google.com?q=S%C3%B8ren", out);
} |
public List<ExpressionElement> getExpressionElements() {
return expressionElements;
} | @Test
public void getExpressionElementsWithoutClass_missingExpression() {
original = new FactMapping("FACT_ALIAS", FactIdentifier.create("FI_TEST", "com.test.Foo"), new ExpressionIdentifier("EI_TEST", GIVEN));
assertThatThrownBy(original::getExpressionElementsWithoutClass)
.isInstanceOf(IllegalStateException.class)
.hasMessage("ExpressionElements malformed");
assertThat(original.getExpressionElements()).hasSize(0);
} |
public void execute() throws DdlException {
Map<String, UserVariable> clonedUserVars = new ConcurrentHashMap<>();
boolean hasUserVar = stmt.getSetListItems().stream().anyMatch(var -> var instanceof UserVariable);
boolean executeSuccess = true;
if (hasUserVar) {
clonedUserVars.putAll(ctx.getUserVariables());
ctx.modifyUserVariablesCopyInWrite(clonedUserVars);
}
try {
for (SetListItem var : stmt.getSetListItems()) {
setVariablesOfAllType(var);
}
} catch (Throwable e) {
if (hasUserVar) {
executeSuccess = false;
}
throw e;
} finally {
// If the SET statement contains more than one user variable,
// the whole batch of modifications must be applied atomically.
if (hasUserVar) {
ctx.resetUserVariableCopyInWrite();
if (executeSuccess) {
ctx.modifyUserVariables(clonedUserVars);
}
}
}
} | @Test
public void testNormal() throws UserException {
List<SetListItem> vars = Lists.newArrayList();
vars.add(new SetPassVar(new UserIdentity("testUser", "%"),
"*88EEBA7D913688E7278E2AD071FDB5E76D76D34B"));
vars.add(new SetNamesVar("utf8"));
vars.add(new SystemVariable("query_timeout", new IntLiteral(10L)));
SetStmt stmt = new SetStmt(vars);
ctxToTestUser();
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, starRocksAssert.getCtx());
SetExecutor executor = new SetExecutor(starRocksAssert.getCtx(), stmt);
executor.execute();
} |
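
The user-variable handling in execute is a copy-on-write snapshot: clone the map, apply every change to the clone, and publish it only if nothing threw. The same shape in miniature, with the validation stand-in clearly marked:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class AtomicSetSketch {
    private volatile Map<String, String> userVars = new ConcurrentHashMap<>();

    public void setAll(Map<String, String> updates) {
        // Work on a private copy so readers never observe a half-applied batch.
        Map<String, String> copy = new ConcurrentHashMap<>(userVars);
        for (Map.Entry<String, String> e : updates.entrySet()) {
            if (e.getValue() == null) { // stand-in for per-item validation
                throw new IllegalArgumentException("invalid value for " + e.getKey());
            }
            copy.put(e.getKey(), e.getValue());
        }
        userVars = copy; // publish only after every item succeeded
    }
}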
@Override
protected String createRegistryCacheKey(URL url) {
String namespace = url.getParameter(CONFIG_NAMESPACE_KEY);
url = URL.valueOf(url.toServiceStringWithoutResolving());
if (StringUtils.isNotEmpty(namespace)) {
url = url.addParameter(CONFIG_NAMESPACE_KEY, namespace);
}
return url.toFullString();
} | @Test
void testCreateRegistryCacheKey() {
URL url = URL.valueOf("dubbo://" + NetUtils.getLocalAddress().getHostAddress() + ":8080?nacos.check=false");
String registryCacheKey1 = nacosRegistryFactory.createRegistryCacheKey(url);
String registryCacheKey2 = nacosRegistryFactory.createRegistryCacheKey(url);
Assertions.assertEquals(registryCacheKey1, registryCacheKey2);
} |
public NearCacheConfig setPreloaderConfig(NearCachePreloaderConfig preloaderConfig) {
this.preloaderConfig = checkNotNull(preloaderConfig, "NearCachePreloaderConfig cannot be null!");
return this;
} | @Test(expected = NullPointerException.class)
public void testSetNearCachePreloaderConfig_whenNull_thenThrowException() {
config.setPreloaderConfig(null);
} |
@Override
public void deleteTenantPackage(Long id) {
// validate existence
validateTenantPackageExists(id);
// validate that it is not in use
validateTenantUsed(id);
// delete
tenantPackageMapper.deleteById(id);
} | @Test
public void testDeleteTenantPackage_used() {
// mock data
TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class);
tenantPackageMapper.insert(dbTenantPackage); // @Sql: insert an existing record first
// prepare parameters
Long id = dbTenantPackage.getId();
// mock a tenant that is using this package
when(tenantService.getTenantCountByPackageId(eq(id))).thenReturn(1L);
// invoke and assert the expected exception
assertServiceException(() -> tenantPackageService.deleteTenantPackage(id), TENANT_PACKAGE_USED);
} |
public CounterMessageFlyweight keyBuffer(final DirectBuffer keyBuffer, final int keyOffset, final int keyLength)
{
buffer.putInt(offset + KEY_LENGTH_OFFSET, keyLength);
if (null != keyBuffer && keyLength > 0)
{
buffer.putBytes(offset + KEY_BUFFER_OFFSET, keyBuffer, keyOffset, keyLength);
}
return this;
} | @Test
void keyBuffer()
{
final int offset = 24;
buffer.setMemory(0, offset, (byte)15);
flyweight.wrap(buffer, offset);
flyweight.keyBuffer(newBuffer(16), 4, 8);
assertEquals(8, flyweight.keyBufferLength());
assertEquals(KEY_BUFFER_OFFSET, flyweight.keyBufferOffset());
} |
@Override
public State cancel() throws IOException {
// Enforce that a cancel() call on the job is done at most once - as
// a workaround for Dataflow service's current bugs with multiple
// cancellation, where it may sometimes return an error when cancelling
// a job that was already cancelled, but still report the job state as
// RUNNING.
// To partially work around these issues, we absorb duplicate cancel()
// calls. This, of course, doesn't address the case when the job terminates
// externally almost concurrently to calling cancel(), but at least it
// makes it possible to safely call cancel() multiple times and from
// multiple threads in one program.
FutureTask<State> tentativeCancelTask =
new FutureTask<>(
() -> {
Job content = new Job();
content.setProjectId(getProjectId());
String currentJobId = getJobId();
content.setId(currentJobId);
content.setRequestedState("JOB_STATE_CANCELLED");
try {
Job job = dataflowClient.updateJob(currentJobId, content);
return MonitoringUtil.toState(job.getCurrentState());
} catch (IOException e) {
State state = getState();
if (state.isTerminal()) {
LOG.warn("Cancel failed because job is already terminated. State is {}", state);
return state;
} else if (e.getMessage().contains("has terminated")) {
// This handles the case where the getState() call above returns RUNNING but the
// cancel was rejected because the job is in fact done. Hopefully, someday we can
// delete this code if there is better consistency between the State and whether
// Cancel succeeds.
//
// Example message:
// Workflow modification failed. Causes: (7603adc9e9bff51e): Cannot perform
// operation 'cancel' on Job: 2017-04-01_22_50_59-9269855660514862348. Job has
// terminated in state SUCCESS: Workflow job:
// 2017-04-01_22_50_59-9269855660514862348 succeeded.
LOG.warn("Cancel failed because job is already terminated.", e);
return state;
} else {
String errorMsg =
String.format(
"Failed to cancel job in state %s, "
+ "please go to the Developers Console to cancel it manually: %s",
state,
MonitoringUtil.getJobMonitoringPageURL(
getProjectId(), getRegion(), getJobId()));
LOG.warn(errorMsg);
throw new IOException(errorMsg, e);
}
}
});
if (cancelState.compareAndSet(null, tentativeCancelTask)) {
// This thread should perform cancellation, while others will
// only wait for the result.
cancelState.get().run();
}
try {
return cancelState.get().get();
} catch (InterruptedException | ExecutionException e) {
throw new IOException(e);
}
} | @Test
public void testCancelUnterminatedJobThatSucceeds() throws IOException {
Dataflow.Projects.Locations.Jobs.Update update =
mock(Dataflow.Projects.Locations.Jobs.Update.class);
when(mockJobs.update(eq(PROJECT_ID), eq(REGION_ID), eq(JOB_ID), any(Job.class)))
.thenReturn(update);
when(update.execute()).thenReturn(new Job().setCurrentState("JOB_STATE_CANCELLED"));
DataflowPipelineJob job =
new DataflowPipelineJob(DataflowClient.create(options), JOB_ID, options, null);
assertEquals(State.CANCELLED, job.cancel());
Job content = new Job();
content.setProjectId(PROJECT_ID);
content.setId(JOB_ID);
content.setRequestedState("JOB_STATE_CANCELLED");
verify(mockJobs).update(eq(PROJECT_ID), eq(REGION_ID), eq(JOB_ID), eq(content));
verifyNoMoreInteractions(mockJobs);
} |
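
The duplicate-cancel absorption hinges on an AtomicReference<FutureTask>: whichever thread wins compareAndSet runs the task, and every other caller blocks on the same result. A minimal sketch of that one-shot pattern (the RPC body is a placeholder):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.atomic.AtomicReference;

public class OneShotSketch {
    private final AtomicReference<FutureTask<String>> cancelState = new AtomicReference<>();

    public String cancelOnce() throws ExecutionException, InterruptedException {
        FutureTask<String> task = new FutureTask<>(() -> {
            System.out.println("issuing cancel RPC exactly once"); // placeholder body
            return "CANCELLED";
        });
        if (cancelState.compareAndSet(null, task)) {
            cancelState.get().run(); // only the winning thread executes the body
        }
        return cancelState.get().get(); // losers wait on the same FutureTask
    }
}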
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testAppNameIsSet() {
ApplicationNameOptions options = PipelineOptionsFactory.as(ApplicationNameOptions.class);
assertEquals(PipelineOptionsFactoryTest.class.getSimpleName(), options.getAppName());
} |
public void transitionTo(ClassicGroupState groupState) {
assertValidTransition(groupState);
previousState = state;
state = groupState;
currentStateTimestamp = Optional.of(time.milliseconds());
metrics.onClassicGroupStateTransition(previousState, state);
} | @Test
public void testPreparingRebalanceToStableIllegalTransition() {
group.transitionTo(PREPARING_REBALANCE);
assertThrows(IllegalStateException.class, () -> group.transitionTo(STABLE));
} |
public long getMappedMemorySize() {
long size = 0;
Object[] mfs = this.copyMappedFiles(0);
if (mfs != null) {
for (Object mf : mfs) {
if (((ReferenceResource) mf).isAvailable()) {
size += this.mappedFileSize;
}
}
}
return size;
} | @Test
public void testGetMappedMemorySize() {
final String fixedMsg = "abcd";
MappedFileQueue mappedFileQueue =
new MappedFileQueue(storePath + File.separator + "d/", 1024, null);
for (int i = 0; i < 1024; i++) {
MappedFile mappedFile = mappedFileQueue.getLastMappedFile(0);
assertThat(mappedFile).isNotNull();
assertThat(mappedFile.appendMessage(fixedMsg.getBytes())).isTrue();
}
assertThat(mappedFileQueue.getMappedMemorySize()).isEqualTo(fixedMsg.length() * 1024);
mappedFileQueue.shutdown(1000);
mappedFileQueue.destroy();
} |
public static boolean isHostName(String hostName) {
return VALID_HOSTNAME_PATTERN.matcher(hostName).matches();
} | @Test
public void testValidHostname() {
assertTrue(DnsClient.isHostName("google.com"));
assertTrue(DnsClient.isHostName("api.graylog.com"));
assertFalse(DnsClient.isHostName("http://api.graylog.com")); // Prefix not allowed
assertFalse(DnsClient.isHostName("api.graylog.com/10")); // URL params not allowed
assertFalse(DnsClient.isHostName("api.graylog.com?name=dano")); // Query strings not allowed.
} |
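
The behavior the test pins down (dot-separated labels only; no scheme, path, or query string) is expressible as a per-label regex. The pattern below is an illustrative stand-in, not Graylog's actual VALID_HOSTNAME_PATTERN:

import java.util.regex.Pattern;

public class HostnameSketch {
    // Dot-separated labels of letters, digits and inner hyphens, ending in an alphabetic TLD.
    private static final Pattern HOSTNAME = Pattern.compile(
            "^(?:[A-Za-z0-9](?:[A-Za-z0-9-]{0,61}[A-Za-z0-9])?\\.)+[A-Za-z]{2,}$");

    public static void main(String[] args) {
        System.out.println(HOSTNAME.matcher("api.graylog.com").matches());        // true
        System.out.println(HOSTNAME.matcher("http://api.graylog.com").matches()); // false: scheme
        System.out.println(HOSTNAME.matcher("api.graylog.com/10").matches());     // false: path
    }
}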
@Deprecated
@Override public void toXML(Object obj, OutputStream out) {
super.toXML(obj, out);
} | @Issue("JENKINS-71139")
@Test
public void nullsWithoutEncodingDeclaration() throws Exception {
Bar b = new Bar();
b.s = "x\u0000y";
try {
new XStream2().toXML(b, new StringWriter());
fail("expected to fail fast; not supported to read either");
} catch (RuntimeException x) {
assertThat("cause is com.thoughtworks.xstream.io.StreamException: Invalid character 0x0 in XML stream", Functions.printThrowable(x), containsString("0x0"));
}
} |
String getLayerFilename(DescriptorDigest layerDiffId) {
return layerDiffId.getHash();
} | @Test
public void testGetLayerFilename() throws DigestException {
DescriptorDigest diffId =
DescriptorDigest.fromHash(
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb");
Assert.assertEquals(
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
TEST_CACHE_STORAGE_FILES.getLayerFilename(diffId));
} |
@Override
synchronized public void close() {
if (stream != null) {
IOUtils.cleanupWithLogger(LOG, stream);
stream = null;
}
} | @Test(timeout=120000)
public void testRandomFloat() throws Exception {
OsSecureRandom random = getOsSecureRandom();
float rand1 = random.nextFloat();
float rand2 = random.nextFloat();
while (rand1 == rand2) {
rand2 = random.nextFloat();
}
random.close();
} |
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
checkNotNull(actual);
if (!actual.entrySet().contains(entry)) {
List<@Nullable Object> keyList = singletonList(key);
List<@Nullable Object> valueList = singletonList(value);
if (actual.containsKey(key)) {
Object actualValue = actual.get(key);
/*
* In the case of a null expected or actual value, clarify that the key *is* present and
* *is* expected to be present. That is, get() isn't returning null to indicate that the key
* is missing, and the user isn't making an assertion that the key is missing.
*/
StandardSubjectBuilder check = check("get(%s)", key);
if (value == null || actualValue == null) {
check = check.withMessage("key is present but with a different value");
}
// See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
} else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain keys",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (actual.containsValue(value)) {
Set<@Nullable Object> keys = new LinkedHashSet<>();
for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
if (Objects.equal(actualEntry.getValue(), value)) {
keys.add(actualEntry.getKey());
}
}
failWithoutActual(
fact("expected to contain entry", entry),
simpleFact("but did not"),
fact("though it did contain keys with that value", keys),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (hasMatchingToStringPair(actual.values(), valueList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain values",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else {
failWithActual("expected to contain entry", entry);
}
}
} | @Test
public void containsEntry() {
ImmutableMap<String, String> actual = ImmutableMap.of("kurt", "kluever");
assertThat(actual).containsEntry("kurt", "kluever");
} |
public static String getPartitionRangeString(DynamicPartitionProperty property, ZonedDateTime current,
int offset, String format) {
String timeUnit = property.getTimeUnit();
if (timeUnit.equalsIgnoreCase(TimeUnit.HOUR.toString())) {
return getPartitionRangeOfHour(current, offset, format);
} else if (timeUnit.equalsIgnoreCase(TimeUnit.DAY.toString())) {
return getPartitionRangeOfDay(current, offset, format);
} else if (timeUnit.equalsIgnoreCase(TimeUnit.WEEK.toString())) {
return getPartitionRangeOfWeek(current, offset, property.getStartOfWeek(), format);
} else if (timeUnit.equalsIgnoreCase(TimeUnit.MONTH.toString())) {
return getPartitionRangeOfMonth(current, offset, property.getStartOfMonth(), format);
} else if (timeUnit.equalsIgnoreCase(TimeUnit.YEAR.toString())) {
return getPartitionRangeOfYear(current, offset, 1, format);
}
return null;
} | @Test
public void testGetPartitionRangeString() throws DateTimeException {
// TimeUnit: DAY
// 1. 2020-05-25, offset -7
DynamicPartitionProperty property = new DynamicPartitionProperty(getDynamProp("DAY", -3, 3, -1, -1));
String res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), -7,
FORMAT);
Assert.assertEquals("2020-05-18", res);
String partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "DAY");
Assert.assertEquals("20200518", partName);
// 2. 2020-05-25, offset 0
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), 0,
FORMAT);
Assert.assertEquals("2020-05-25", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "DAY");
Assert.assertEquals("20200525", partName);
// 3. 2020-05-25, offset 7
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), 7,
FORMAT);
Assert.assertEquals("2020-06-01", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "DAY");
Assert.assertEquals("20200601", partName);
// 4. 2020-02-28, offset 3
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-28"), 3,
FORMAT);
Assert.assertEquals("2020-03-02", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "DAY");
Assert.assertEquals("20200302", partName);
// TimeUnit: WEEK
// 0. 2023-01-31 start day: WEDNESDAY, offset 0
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 3, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2023-01-31"), 0,
FORMAT);
Assert.assertEquals("2023-01-25", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2023_04", partName);
// 1. 2020-05-25, start day: MONDAY, offset 0
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), 0,
FORMAT);
Assert.assertEquals("2020-05-25", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2020_22", partName);
// 2. 2020-05-28, start day: MONDAY, offset 0
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-28"), 0,
FORMAT);
Assert.assertEquals("2020-05-25", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2020_22", partName);
// 3. 2020-05-25, start day: SUNDAY, offset 0
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 7, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), 0,
FORMAT);
Assert.assertEquals("2020-05-24", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2020_22", partName);
// 4. 2020-05-25, start day: MONDAY, offset -2
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), -2,
FORMAT);
Assert.assertEquals("2020-05-11", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2020_20", partName);
// 5. 2020-02-29, start day: WED, offset 0
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 3, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), 0,
FORMAT);
Assert.assertEquals("2020-02-26", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2020_09", partName);
        // 6. 2020-02-29, start day: TUE, offset 1
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 2, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), 1,
FORMAT);
Assert.assertEquals("2020-03-03", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2020_10", partName);
        // 7. 2020-01-01, start day: MONDAY, offset -1
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-01-01"), -1,
FORMAT);
Assert.assertEquals("2019-12-23", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2019_52", partName);
        // 8. 2020-01-01, start day: MONDAY, offset 0
property = new DynamicPartitionProperty(getDynamProp("WEEK", -3, 3, 1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-01-01"), 0,
FORMAT);
Assert.assertEquals("2019-12-30", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "WEEK");
Assert.assertEquals("2019_53", partName);
// TimeUnit: MONTH
// 1. 2020-05-25, start day: 1, offset 0
property = new DynamicPartitionProperty(getDynamProp("MONTH", -3, 3, -1, 1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), 0,
FORMAT);
Assert.assertEquals("2020-05-01", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "MONTH");
Assert.assertEquals("202005", partName);
// 2. 2020-05-25, start day: 26, offset 0
property = new DynamicPartitionProperty(getDynamProp("MONTH", -3, 3, -1, 26));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), 0,
FORMAT);
Assert.assertEquals("2020-04-26", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "MONTH");
Assert.assertEquals("202004", partName);
// 3. 2020-05-25, start day: 26, offset -1
property = new DynamicPartitionProperty(getDynamProp("MONTH", -3, 3, -1, 26));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-05-25"), -1,
FORMAT);
Assert.assertEquals("2020-03-26", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "MONTH");
Assert.assertEquals("202003", partName);
// 4. 2020-02-29, start day: 26, offset 3
property = new DynamicPartitionProperty(getDynamProp("MONTH", -3, 3, -1, 26));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), 3,
FORMAT);
Assert.assertEquals("2020-05-26", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "MONTH");
Assert.assertEquals("202005", partName);
// 5. 2020-02-29, start day: 27, offset 0
property = new DynamicPartitionProperty(getDynamProp("MONTH", -3, 3, -1, 27));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), 0,
FORMAT);
Assert.assertEquals("2020-02-27", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "MONTH");
Assert.assertEquals("202002", partName);
// 6. 2020-02-29, start day: 27, offset -3
property = new DynamicPartitionProperty(getDynamProp("MONTH", -3, 3, -1, 27));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), -3,
FORMAT);
Assert.assertEquals("2019-11-27", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "MONTH");
Assert.assertEquals("201911", partName);
        // TimeUnit: YEAR
        // 1. 2020-02-29, offset -3
property = new DynamicPartitionProperty(getDynamProp("YEAR", -3, 3, -1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), -3,
FORMAT);
Assert.assertEquals("2017-01-01", res);
partName = DynamicPartitionUtil.getFormattedPartitionName(getCTSTimeZone(), res, "YEAR");
Assert.assertEquals("2017", partName);
        // TimeUnit: MICROSECOND is unsupported, so a null range is expected
        property = new DynamicPartitionProperty(getDynamProp("MICROSECOND", -3, 3, -1, -1));
res = DynamicPartitionUtil.getPartitionRangeString(property, getZonedDateTimeFromStr("2020-02-29"), -3,
FORMAT);
Assert.assertNull(res);
} |
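
Because the focal method returns null for any unit it does not recognize (as the MICROSECOND case at the end of the test shows), callers must guard the result themselves; a hedged sketch of such a guard:

    String range = DynamicPartitionUtil.getPartitionRangeString(property, current, offset, FORMAT);
    if (range == null) {
        // Only HOUR/DAY/WEEK/MONTH/YEAR are dispatched above; anything else falls through.
        throw new IllegalArgumentException(
                "unsupported dynamic partition time unit: " + property.getTimeUnit());
    }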
public static void getSemanticPropsDualFromString(
DualInputSemanticProperties result,
String[] forwardedFirst,
String[] forwardedSecond,
String[] nonForwardedFirst,
String[] nonForwardedSecond,
String[] readFieldsFirst,
String[] readFieldsSecond,
TypeInformation<?> inType1,
TypeInformation<?> inType2,
TypeInformation<?> outType) {
getSemanticPropsDualFromString(
result,
forwardedFirst,
forwardedSecond,
nonForwardedFirst,
nonForwardedSecond,
readFieldsFirst,
readFieldsSecond,
inType1,
inType2,
outType,
false);
} | @Test
void testNonForwardedDualInvalidTypes1() {
String[] nonForwardedFieldsFirst = {"f1"};
DualInputSemanticProperties dsp = new DualInputSemanticProperties();
assertThatThrownBy(
() ->
SemanticPropUtil.getSemanticPropsDualFromString(
dsp,
null,
null,
nonForwardedFieldsFirst,
null,
null,
null,
fiveIntTupleType,
threeIntTupleType,
threeIntTupleType))
.isInstanceOf(InvalidSemanticAnnotationException.class);
} |
public static String createQueryString(Map<String, Object> options) {
final Set<String> keySet = options.keySet();
return createQueryString(keySet.toArray(new String[0]), options, true);
} | @Test
public void testPlusInQuery() {
Map<String, Object> map = new HashMap<>();
map.put("param1", "+447777111222");
String q = URISupport.createQueryString(map);
assertEquals("param1=%2B447777111222", q);
        // an already-encoded value will be double encoded, however
map.put("param1", "%2B447777111222");
q = URISupport.createQueryString(map);
assertEquals("param1=%252B447777111222", q);
} |
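
A quick round-trip with java.net.URLDecoder shows why the '+' must be percent-encoded: a literal '+' in a query string decodes to a space, while %2B decodes back to '+':

    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;

    // "%2B" survives decoding as a literal plus sign...
    System.out.println(URLDecoder.decode("param1=%2B447777111222", StandardCharsets.UTF_8));
    // -> param1=+447777111222

    // ...whereas a raw '+' would have been decoded as a space:
    System.out.println(URLDecoder.decode("param1=+447777111222", StandardCharsets.UTF_8));
    // -> param1= 447777111222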
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
if (list == null) {
return FEELFnResult.ofResult(true);
}
boolean result = true;
for (final Object element : list) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
" a Boolean"));
} else {
if (element != null) {
result &= (Boolean) element;
}
}
}
return FEELFnResult.ofResult(result);
} | @Test
void invokeBooleanParamTrue() {
FunctionTestUtil.assertResult(nnAllFunction.invoke(true), true);
} |
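
The test above goes through a single-boolean overload that is not shown; hypothetical calls against the list overload itself, one per branch of the focal method:

    // null list -> true (the early return)
    nnAllFunction.invoke((List) null);                      // result: true
    // null elements are skipped, so they cannot flip the conjunction
    nnAllFunction.invoke(Arrays.asList(true, null, true));  // result: true
    nnAllFunction.invoke(Arrays.asList(true, false, null)); // result: false
    // any non-null, non-Boolean element short-circuits into an error
    nnAllFunction.invoke(Arrays.asList(true, "oops"));      // error: "an element in the list is not a Boolean"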
@Override
public Optional<DevOpsProjectCreator> getDevOpsProjectCreator(DbSession dbSession, Map<String, String> characteristics) {
return Optional.empty();
} | @Test
void getDevOpsProjectCreator_whenAlmIsAzureDevOps_shouldReturnProjectCreator() {
AlmSettingDto almSettingDto = mock(AlmSettingDto.class);
when(almSettingDto.getAlm()).thenReturn(ALM.AZURE_DEVOPS);
DevOpsProjectDescriptor devOpsProjectDescriptor = new DevOpsProjectDescriptor(ALM.AZURE_DEVOPS, null, "project-identifier", "bitbucket_project");
DevOpsProjectCreator expectedProjectCreator = new AzureDevOpsProjectCreator(dbClient, almSettingDto, devOpsProjectDescriptor, userSession, azureDevOpsHttpClient,
projectCreator, projectKeyGenerator);
DevOpsProjectCreator devOpsProjectCreator = underTest.getDevOpsProjectCreator(almSettingDto, devOpsProjectDescriptor).orElseThrow();
assertThat(devOpsProjectCreator).usingRecursiveComparison().isEqualTo(expectedProjectCreator);
} |
public static NormalKey createRandom(EncryptionAlgorithmPB algorithm) {
byte[] plainKey;
if (algorithm == EncryptionAlgorithmPB.AES_128) {
plainKey = EncryptionUtil.genRandomKey(16);
} else {
throw new IllegalArgumentException("Unsupported algorithm: " + algorithm);
}
return new NormalKey(algorithm, plainKey, null);
} | @Test
public void testCreateRandom() {
NormalKey key = NormalKey.createRandom();
assertNotNull(key);
assertNotNull(key.getPlainKey());
assertEquals(EncryptionAlgorithmPB.AES_128, key.getAlgorithm());
} |
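
The test invokes a no-argument createRandom() that is not shown; presumably it is a convenience overload defaulting to AES_128, the only algorithm the parameterized version accepts. A sketch under that assumption:

    // Assumed convenience overload: AES_128 is the only supported algorithm,
    // so it is the natural default for the no-argument form used in the test.
    public static NormalKey createRandom() {
        return createRandom(EncryptionAlgorithmPB.AES_128);
    }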
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void getUpdates() {
GetUpdates getUpdates = new GetUpdates()
.offset(874227176)
.allowedUpdates("")
.timeout(0)
.limit(100);
assertEquals(100, getUpdates.getLimit());
GetUpdatesResponse response = bot.execute(getUpdates);
UpdateTest.check(response.updates());
} |
public float getProgress() {
return total > 0 ? (float) completed / total : 0;
} | @Test
public void testGetProgress() {
assertEquals(1.0f, new BackupTaskStatus(BackupTaskState.SUCCESS, 10, 10).getProgress(), 0.1);
assertEquals(0.5f, new BackupTaskStatus(BackupTaskState.SUCCESS, 5, 10).getProgress(), 0.1);
assertEquals(0.0f, new BackupTaskStatus(BackupTaskState.SUCCESS, 0, 10).getProgress(), 0.1);
} |
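
The `total > 0` guard matters because float division never throws: with completed == total == 0, the unguarded expression would quietly yield NaN rather than the 0 a caller expects. A two-line illustration:

    float unguarded = (float) 0 / 0;            // NaN: float division by zero, no ArithmeticException
    float guarded = 0 > 0 ? (float) 0 / 0 : 0;  // 0.0f, the behavior the focal method encodes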
public static Map<String, Map<String, String>> splitAll(Map<String, List<String>> list, String separator) {
if (list == null) {
return null;
}
Map<String, Map<String, String>> result = new HashMap<>();
for (Map.Entry<String, List<String>> entry : list.entrySet()) {
result.put(entry.getKey(), split(entry.getValue(), separator));
}
return result;
} | @Test
void testSplitAll() {
assertNull(CollectionUtils.splitAll(null, null));
assertNull(CollectionUtils.splitAll(null, "-"));
assertTrue(CollectionUtils.splitAll(new HashMap<String, List<String>>(), "-")
.isEmpty());
Map<String, List<String>> input = new HashMap<String, List<String>>();
input.put("key1", Arrays.asList("1:a", "2:b", "3:c"));
input.put("key2", Arrays.asList("1:a", "2:b"));
input.put("key3", null);
input.put("key4", new ArrayList<String>());
Map<String, Map<String, String>> expected = new HashMap<String, Map<String, String>>();
expected.put("key1", CollectionUtils.toStringMap("1", "a", "2", "b", "3", "c"));
expected.put("key2", CollectionUtils.toStringMap("1", "a", "2", "b"));
expected.put("key3", null);
expected.put("key4", new HashMap<String, String>());
assertEquals(expected, CollectionUtils.splitAll(input, ":"));
} |
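
splitAll delegates each entry to a single-list split(List, separator) that is not shown here; judging from the expected map in the test, it presumably splits each item on the first separator, roughly like this sketch:

    // Assumed shape of the per-list helper: "1:a" -> ("1", "a") on the first ':'.
    public static Map<String, String> split(List<String> list, String separator) {
        if (list == null) {
            return null;
        }
        Map<String, String> map = new HashMap<>();
        for (String item : list) {
            int index = item.indexOf(separator);
            if (index == -1) {
                map.put(item, "");
            } else {
                map.put(item.substring(0, index), item.substring(index + separator.length()));
            }
        }
        return map;
    }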
@VisibleForTesting
RegistryErrorException newRegistryErrorException(ResponseException responseException) {
RegistryErrorExceptionBuilder registryErrorExceptionBuilder =
new RegistryErrorExceptionBuilder(
registryEndpointProvider.getActionDescription(), responseException);
if (responseException.getContent() != null) {
try {
ErrorResponseTemplate errorResponse =
JsonTemplateMapper.readJson(
responseException.getContent(), ErrorResponseTemplate.class);
for (ErrorEntryTemplate errorEntry : errorResponse.getErrors()) {
registryErrorExceptionBuilder.addReason(errorEntry);
}
} catch (IOException ex) {
registryErrorExceptionBuilder.addReason(
"registry returned error code "
+ responseException.getStatusCode()
+ "; possible causes include invalid or wrong reference. Actual error output follows:\n"
+ responseException.getContent()
+ "\n");
}
} else {
registryErrorExceptionBuilder.addReason(
"registry returned error code "
+ responseException.getStatusCode()
+ " but did not return any details; possible causes include invalid or wrong reference, or proxy/firewall/VPN interfering \n");
}
return registryErrorExceptionBuilder.build();
} | @Test
public void testNewRegistryErrorException_noOutputFromRegistry() {
ResponseException httpException = Mockito.mock(ResponseException.class);
// Registry returning null error output
Mockito.when(httpException.getContent()).thenReturn(null);
Mockito.when(httpException.getStatusCode()).thenReturn(404);
RegistryErrorException registryException =
endpointCaller.newRegistryErrorException(httpException);
Assert.assertSame(httpException, registryException.getCause());
Assert.assertEquals(
"Tried to actionDescription but failed because: registry returned error code 404 "
+ "but did not return any details; possible causes include invalid or wrong reference, or proxy/firewall/VPN interfering \n",
registryException.getMessage());
} |
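
A hypothetical companion test for the malformed-JSON branch, in the same mocking style: a non-JSON body makes JsonTemplateMapper.readJson throw, so the raw registry output is echoed into the reason string (message text taken from the focal method):

    ResponseException httpException = Mockito.mock(ResponseException.class);
    Mockito.when(httpException.getContent()).thenReturn("some non-JSON garbage");
    Mockito.when(httpException.getStatusCode()).thenReturn(500);

    RegistryErrorException registryException =
        endpointCaller.newRegistryErrorException(httpException);
    Assert.assertTrue(
        registryException
            .getMessage()
            .contains(
                "registry returned error code 500; possible causes include invalid or wrong "
                    + "reference. Actual error output follows:\nsome non-JSON garbage"));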
public static String getMD5Checksum(File file) throws IOException, NoSuchAlgorithmException {
return getChecksum(MD5, file);
} | @Test
public void testGetMD5Checksum_String() {
String text = "test string";
String expResult = "6f8db599de986fab7a21625b7916589c";
String result = Checksum.getMD5Checksum(text);
assertEquals(expResult, result);
} |
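
The test exercises a String overload rather than the File variant shown as the focal method. The expected digest itself can be reproduced with nothing but java.security.MessageDigest:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.HexFormat;

    MessageDigest md5 = MessageDigest.getInstance("MD5");
    byte[] digest = md5.digest("test string".getBytes(StandardCharsets.UTF_8));
    // Prints 6f8db599de986fab7a21625b7916589c, the expected value asserted above
    // (HexFormat requires Java 17+; String.format per byte works on older JDKs).
    System.out.println(HexFormat.of().formatHex(digest));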