focal_method | test_case |
---|---|
@Override
public Optional<AuthProperty> inferAuth(String registry) throws InferredAuthException {
Server server = getServerFromMavenSettings(registry);
if (server == null) {
return Optional.empty();
}
SettingsDecryptionRequest request = new DefaultSettingsDecryptionRequest(server);
SettingsDecryptionResult result = decrypter.decrypt(request);
// Un-encrypted passwords are passed through, so a problem indicates a real issue.
// If there are any ERROR or FATAL problems reported, then decryption failed.
for (SettingsProblem problem : result.getProblems()) {
if (problem.getSeverity() == SettingsProblem.Severity.ERROR
|| problem.getSeverity() == SettingsProblem.Severity.FATAL) {
throw new InferredAuthException(
"Unable to decrypt server(" + registry + ") info from settings.xml: " + problem);
}
}
Server resultServer = result.getServer();
String username = resultServer.getUsername();
String password = resultServer.getPassword();
return Optional.of(
new AuthProperty() {
@Override
public String getUsername() {
return username;
}
@Override
public String getPassword() {
return password;
}
@Override
public String getAuthDescriptor() {
return CREDENTIAL_SOURCE;
}
@Override
public String getUsernameDescriptor() {
return CREDENTIAL_SOURCE;
}
@Override
public String getPasswordDescriptor() {
return CREDENTIAL_SOURCE;
}
});
} | @Test
public void testInferredAuth_registryWithHostWithoutPort() throws InferredAuthException {
Optional<AuthProperty> auth =
mavenSettingsServerCredentialsNoMasterPassword.inferAuth("docker.example.com");
Assert.assertTrue(auth.isPresent());
Assert.assertEquals("registryUser", auth.get().getUsername());
Assert.assertEquals("registryPassword", auth.get().getPassword());
} |
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final CreateFileUploadResponse uploadResponse = upload.start(file, status);
final String uploadUrl = uploadResponse.getUploadUrl();
if(StringUtils.isBlank(uploadUrl)) {
throw new InteroperabilityException("Missing upload URL in server response");
}
final String uploadToken = uploadResponse.getToken();
if(StringUtils.isBlank(uploadToken)) {
throw new InteroperabilityException("Missing upload token in server response");
}
final MultipartUploadTokenOutputStream proxy = new MultipartUploadTokenOutputStream(session, nodeid, file, status, uploadUrl);
return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy, new HostPreferences(session.getHost()).getInteger("sds.upload.multipart.chunksize")),
new SDSAttributesAdapter(session), status) {
private final AtomicBoolean close = new AtomicBoolean();
private final AtomicReference<Node> node = new AtomicReference<>();
@Override
public Node getStatus() {
return node.get();
}
@Override
public void close() throws IOException {
try {
if(close.get()) {
log.warn(String.format("Skip double close of stream %s", this));
return;
}
super.close();
node.set(upload.complete(file, uploadToken, status));
}
catch(BackgroundException e) {
throw new IOException(e);
}
finally {
close.set(true);
}
}
@Override
protected void handleIOException(final IOException e) throws IOException {
// Cancel upload on error reply
try {
upload.cancel(file, uploadToken);
}
catch(BackgroundException f) {
log.warn(String.format("Failure %s cancelling upload for file %s with upload token %s after failure %s", f, file, uploadToken, e));
}
throw e;
}
};
} | @Test
public void testWriteZeroSingleByte() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final byte[] content = RandomUtils.nextBytes(2);
final TransferStatus status = new TransferStatus().withLength(content.length);
final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid);
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
assertNotNull(test.attributes().getVersionId());
assertTrue(new DefaultFindFeature(session).find(test));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public Configuration getConfiguration() {
return configuration;
} | @Test
void validate_shouldValidateFetchPluggableArtifactConfigurationUniqueness() {
FetchPluggableArtifactTask task = new FetchPluggableArtifactTask(new CaseInsensitiveString("dummy"), new CaseInsensitiveString("stage"), new CaseInsensitiveString("job"), "s3", create("Foo", false, "Bar"), create("Foo", false, "Bar"));
task.validate(ConfigSaveValidationContext.forChain(config, new TemplatesConfig(), downstream.getStage(new CaseInsensitiveString("stage"))));
assertThat(task.getConfiguration().hasErrors()).isTrue();
assertThat(task.getConfiguration().get(0).errors().on("configurationKey")).isEqualTo("Duplicate key 'Foo' found for Fetch pluggable artifact");
assertThat(task.getConfiguration().get(1).errors().on("configurationKey")).isEqualTo("Duplicate key 'Foo' found for Fetch pluggable artifact");
} |
@Override
public void open() throws Exception {
super.open();
final String operatorID = getRuntimeContext().getOperatorUniqueID();
this.workerPool =
ThreadPools.newWorkerPool("iceberg-worker-pool-" + operatorID, workerPoolSize);
} | @TestTemplate
public void testMaxContinuousEmptyCommits() throws Exception {
table.updateProperties().set(MAX_CONTINUOUS_EMPTY_COMMITS, "3").commit();
JobID jobId = new JobID();
long checkpointId = 0;
long timestamp = 0;
try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
harness.setup();
harness.open();
assertSnapshotSize(0);
for (int i = 1; i <= 9; i++) {
harness.snapshot(++checkpointId, ++timestamp);
harness.notifyOfCompletedCheckpoint(checkpointId);
assertSnapshotSize(i / 3);
}
}
} |
public static Builder builder() {
return new Builder();
} | @Test
void testFilterInstantsWithRange() throws IOException {
Configuration conf = TestConfigurations.getDefaultConf(basePath);
conf.set(FlinkOptions.READ_STREAMING_SKIP_CLUSTERING, true);
conf.set(FlinkOptions.TABLE_TYPE, FlinkOptions.TABLE_TYPE_MERGE_ON_READ);
metaClient = HoodieTestUtils.init(basePath, HoodieTableType.MERGE_ON_READ);
HoodieActiveTimeline timeline = metaClient.getActiveTimeline();
HoodieInstant commit1 = new HoodieInstant(HoodieInstant.State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, "1");
HoodieInstant commit2 = new HoodieInstant(HoodieInstant.State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, "2");
HoodieInstant commit3 = new HoodieInstant(HoodieInstant.State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, "3");
timeline.createCompleteInstant(commit1);
timeline.createCompleteInstant(commit2);
timeline.createCompleteInstant(commit3);
timeline = metaClient.reloadActiveTimeline();
Map<String, String> completionTimeMap = timeline.filterCompletedInstants().getInstantsAsStream()
.collect(Collectors.toMap(HoodieInstant::getTimestamp, HoodieInstant::getCompletionTime));
IncrementalQueryAnalyzer analyzer1 = IncrementalQueryAnalyzer.builder()
.metaClient(metaClient)
.rangeType(InstantRange.RangeType.OPEN_CLOSED)
.startTime(completionTimeMap.get("1"))
.skipClustering(true)
.build();
// previous read iteration read till instant time "1", next read iteration should return ["2", "3"]
List<HoodieInstant> activeInstants1 = analyzer1.analyze().getActiveInstants();
assertEquals(2, activeInstants1.size());
assertIterableEquals(Arrays.asList(commit2, commit3), activeInstants1);
// simulate first iteration cycle with read from the LATEST commit
IncrementalQueryAnalyzer analyzer2 = IncrementalQueryAnalyzer.builder()
.metaClient(metaClient)
.rangeType(InstantRange.RangeType.CLOSED_CLOSED)
.skipClustering(true)
.build();
List<HoodieInstant> activeInstants2 = analyzer2.analyze().getActiveInstants();
assertEquals(1, activeInstants2.size());
assertIterableEquals(Collections.singletonList(commit3), activeInstants2);
// specifying a start and end commit
IncrementalQueryAnalyzer analyzer3 = IncrementalQueryAnalyzer.builder()
.metaClient(metaClient)
.rangeType(InstantRange.RangeType.CLOSED_CLOSED)
.startTime(completionTimeMap.get("1"))
.endTime(completionTimeMap.get("3"))
.skipClustering(true)
.build();
List<HoodieInstant> activeInstants3 = analyzer3.analyze().getActiveInstants();
assertEquals(3, activeInstants3.size());
assertIterableEquals(Arrays.asList(commit1, commit2, commit3), activeInstants3);
// add an inflight instant which should be excluded
HoodieInstant commit4 = new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, "4");
timeline.createNewInstant(commit4);
timeline = metaClient.reloadActiveTimeline();
assertEquals(4, timeline.getInstants().size());
List<HoodieInstant> activeInstants4 = analyzer3.analyze().getActiveInstants();
assertEquals(3, activeInstants4.size());
} |
public static String getAbsolutePath(String path, Class<?> baseClass) {
String normalPath;
if (path == null) {
normalPath = StrUtil.EMPTY;
} else {
normalPath = normalize(path);
if (isAbsolutePath(normalPath)) {
// The given path is already absolute
return normalPath;
}
}
// Resolve relative to the classpath
final URL url = ResourceUtil.getResource(normalPath, baseClass);
if (null != url) {
// Files inside a jar carry a "file:" prefix that must be stripped; normalize here. Since 3.0.8: fixes paths with Chinese characters or spaces being URL-encoded
return FileUtil.normalize(URLUtil.getDecodedPath(url));
}
// If the resource does not exist, return a concatenated absolute path
final String classPath = ClassUtil.getClassPath();
if (null == classPath) {
// throw new NullPointerException("ClassPath is null !");
// When running from a jar the classpath may be unavailable; in that case return the original relative path (the file is then resolved against the working directory)
return path;
}
// Normalizing first is problematic when the resource does not exist; concatenate the original path, then normalize
return normalize(classPath.concat(Objects.requireNonNull(path)));
} | @Test
public void getAbsolutePathTest() {
final String absolutePath = FileUtil.getAbsolutePath("LICENSE-junit.txt");
assertNotNull(absolutePath);
final String absolutePath2 = FileUtil.getAbsolutePath(absolutePath);
assertNotNull(absolutePath2);
assertEquals(absolutePath, absolutePath2);
String path = FileUtil.getAbsolutePath("中文.xml");
assertTrue(path.contains("中文.xml"));
path = FileUtil.getAbsolutePath("d:");
assertEquals("d:", path);
} |
int alignment()
{
return alignment;
} | @Test
void shouldUse1KBAlignmentWhenReadingFromOldCatalogFile() throws IOException
{
final int oldRecordLength = 1024;
final File catalogFile = new File(archiveDir, CATALOG_FILE_NAME);
IoUtil.deleteIfExists(catalogFile);
Files.write(catalogFile.toPath(), new byte[oldRecordLength], CREATE_NEW);
try (Catalog catalog = new Catalog(archiveDir, clock, MIN_CAPACITY, true, null, (version) -> {}))
{
assertEquals(oldRecordLength, catalog.alignment());
}
} |
@Deprecated
@Nonnull
public static Builder newBuilder(@Nonnull Time ttl) {
return new Builder(ttl);
} | @Test
void testStateTtlConfigBuildWithNonPositiveCleanupIncrementalSize() {
List<Integer> illegalCleanUpSizes = Arrays.asList(0, -2);
for (Integer illegalCleanUpSize : illegalCleanUpSizes) {
assertThatThrownBy(
() ->
StateTtlConfig.newBuilder(Time.seconds(1))
.cleanupIncrementally(illegalCleanUpSize, false)
.build())
.isInstanceOf(IllegalArgumentException.class);
}
} |
@Override
public List<TransferItem> normalize(final List<TransferItem> roots) {
final List<TransferItem> normalized = new ArrayList<>();
for(TransferItem upload : roots) {
boolean duplicate = false;
for(Iterator<TransferItem> iter = normalized.iterator(); iter.hasNext(); ) {
TransferItem n = iter.next();
if(upload.local.isChild(n.local)) {
// The selected file is a child of a directory already included
duplicate = true;
break;
}
if(n.local.isChild(upload.local)) {
iter.remove();
}
if(upload.remote.equals(n.remote)) {
// The selected file has the same name; if uploaded as a root element
// it would overwrite the one uploaded earlier
final Path parent = upload.remote.getParent();
final String filename = upload.remote.getName();
String proposal;
int no = 0;
int index = filename.lastIndexOf('.');
Path remote;
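// Propose "name-N" (keeping any file extension) until a free name is found;
// the existence check is currently disabled, so exactly one proposal is made.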
do {
if(index != -1 && index != 0) {
proposal = String.format("%s-%d%s", filename.substring(0, index), ++no, filename.substring(index));
}
else {
proposal = String.format("%s-%d", filename, ++no);
}
remote = new Path(parent, proposal, upload.remote.getType());
}
while(false);//(upload.exists());
if(log.isInfoEnabled()) {
log.info(String.format("Changed name from %s to %s", filename, remote.getName()));
}
upload.remote = remote;
}
}
// Prunes the list of selected files. Files which are a child of an already included directory
// are removed from the returned list.
if(!duplicate) {
normalized.add(new TransferItem(upload.remote, upload.local));
}
}
return normalized;
} | @Test
public void testNameClash() {
UploadRootPathsNormalizer n = new UploadRootPathsNormalizer();
final List<TransferItem> list = new ArrayList<>();
list.add(new TransferItem(new Path("/a", EnumSet.of(Path.Type.file)), new NullLocal("/f/a")));
list.add(new TransferItem(new Path("/a", EnumSet.of(Path.Type.file)), new NullLocal("/g/a")));
final List<TransferItem> normalized = n.normalize(list);
assertEquals(2, normalized.size());
final Iterator<TransferItem> iterator = normalized.iterator();
assertEquals(new Path("/a", EnumSet.of(Path.Type.file)), iterator.next().remote);
assertEquals(new Path("/a-1", EnumSet.of(Path.Type.file)), iterator.next().remote);
} |
public <T> T parse(String input, Class<T> cls) {
return readFlow(input, cls, type(cls));
} | @Test
void invalidPropertyOk() throws IOException {
URL resource = TestsUtils.class.getClassLoader().getResource("flows/invalids/invalid-property.yaml");
assert resource != null;
File file = new File(resource.getFile());
String flowSource = Files.readString(file.toPath(), Charset.defaultCharset());
TypeReference<Map<String, Object>> TYPE_REFERENCE = new TypeReference<>() {};
Map<String, Object> flow = JacksonMapper.ofYaml().readValue(flowSource, TYPE_REFERENCE);
Flow parse = yamlFlowParser.parse(flow, Flow.class, false);
assertThat(parse.getId(), is("duplicate"));
} |
@Override
public void deletePost(Long id) {
// Verify the post exists
validatePostExists(id);
// Delete the post
postMapper.deleteById(id);
} | @Test
public void testValidatePost_notFoundForDelete() {
// Prepare parameters
Long id = randomLongId();
// Invoke and assert the exception
assertServiceException(() -> postService.deletePost(id), POST_NOT_FOUND);
} |
@Override
public void deleteDiscountActivity(Long id) {
// Verify the activity exists
DiscountActivityDO activity = validateDiscountActivityExists(id);
if (CommonStatusEnum.isEnable(activity.getStatus())) { // An activity that is not yet closed cannot be deleted
throw exception(DISCOUNT_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED);
}
// Delete it
discountActivityMapper.deleteById(id);
} | @Test
public void testDeleteDiscountActivity_success() {
// Mock data
DiscountActivityDO dbDiscountActivity = randomPojo(DiscountActivityDO.class,
o -> o.setStatus(PromotionActivityStatusEnum.CLOSE.getStatus()));
discountActivityMapper.insert(dbDiscountActivity); // @Sql: first insert an existing record
// Prepare parameters
Long id = dbDiscountActivity.getId();
// Invoke
discountActivityService.deleteDiscountActivity(id);
// Verify the data no longer exists
assertNull(discountActivityMapper.selectById(id));
} |
@Override
public Output run(RunContext runContext) throws Exception {
String renderedNamespace = runContext.render(this.namespace);
FlowService flowService = ((DefaultRunContext) runContext).getApplicationContext().getBean(FlowService.class);
flowService.checkAllowedNamespace(runContext.tenantId(), renderedNamespace, runContext.tenantId(), runContext.flowInfo().namespace());
String renderedKey = runContext.render(this.key);
boolean deleted = runContext.namespaceKv(renderedNamespace).delete(renderedKey);
if (this.errorOnMissing && !deleted) {
throw new NoSuchElementException("No value found for key '" + renderedKey + "' in namespace '" + renderedNamespace + "' and `errorOnMissing` is set to true");
}
return Output.builder().deleted(deleted).build();
} | @Test
void shouldOutputFalseGivenNonExistingKey() throws Exception {
// Given
String namespaceId = "io.kestra." + IdUtils.create();
RunContext runContext = this.runContextFactory.of(Map.of(
"flow", Map.of("namespace", namespaceId),
"inputs", Map.of(
"key", TEST_KV_KEY,
"namespace", namespaceId
)
));
Delete delete = Delete.builder()
.id(Delete.class.getSimpleName())
.type(Delete.class.getName())
.namespace(namespaceId)
.key("my-key")
.build();
// When
Delete.Output run = delete.run(runContext);
assertThat(run.isDeleted(), is(false));
NoSuchElementException noSuchElementException = Assertions.assertThrows(NoSuchElementException.class, () -> delete.toBuilder().errorOnMissing(true).build().run(runContext));
assertThat(noSuchElementException.getMessage(), is("No value found for key 'my-key' in namespace '" + namespaceId + "' and `errorOnMissing` is set to true"));
} |
public List<String> getBackends() {
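// Collect the hostnames of all backends currently holding a replica of this
// tablet; replica ids that no longer resolve to a backend are skipped.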
List<String> backends = new ArrayList<>();
SystemInfoService infoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo();
try (CloseableLock ignored = CloseableLock.lock(this.rwLock.readLock())) {
for (Replica replica : replicas) {
Backend backend = infoService.getBackend(replica.getBackendId());
if (backend == null) {
continue;
}
backends.add(backend.getHost());
}
}
return backends;
} | @Test
public void testGetBackends() throws Exception {
LocalTablet tablet = new LocalTablet();
Replica replica1 = new Replica(1L, 10001L, 8,
-1, 10, 10, ReplicaState.NORMAL, 9, 8);
Replica replica2 = new Replica(1L, 10002L, 9,
-1, 10, 10, ReplicaState.NORMAL, -1, 9);
tablet.addReplica(replica1, false);
tablet.addReplica(replica2, false);
Assert.assertEquals(2, tablet.getBackends().size());
} |
public static String extractAttributeNameNameWithoutArguments(String attributeNameWithArguments) {
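// Strip a trailing "[...]" argument list, e.g. "car.wheel[0]" -> "car.wheel";
// unbalanced brackets are rejected with an IllegalArgumentException.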
int start = StringUtil.lastIndexOf(attributeNameWithArguments, '[');
int end = StringUtil.lastIndexOf(attributeNameWithArguments, ']');
if (start > 0 && end > 0 && end > start) {
return attributeNameWithArguments.substring(0, start);
}
if (start < 0 && end < 0) {
return attributeNameWithArguments;
}
throw new IllegalArgumentException("Wrong argument input passed " + attributeNameWithArguments);
} | @Test(expected = IllegalArgumentException.class)
public void extractAttributeName_wrongArguments_noArgument() {
extractAttributeNameNameWithoutArguments("car.wheel[");
} |
public static <T> NullableCoder<T> of(Coder<T> valueCoder) {
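// Avoid double-wrapping a coder that is already nullable.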
if (valueCoder instanceof NullableCoder) {
return (NullableCoder<T>) valueCoder;
}
return new NullableCoder<>(valueCoder);
} | @Test
public void testCoderIsSerializableWithWellKnownCoderType() throws Exception {
CoderProperties.coderSerializable(NullableCoder.of(GlobalWindow.Coder.INSTANCE));
} |
public static String execCommand(String... cmd) throws IOException {
return execCommand(cmd, -1);
} | @Test
public void testEchoHello() throws Exception {
String output = Shell.execCommand("echo", "hello");
assertEquals("hello\n", output);
} |
@Override
public Response updateSchedulerConfiguration(SchedConfUpdateInfo mutationInfo,
HttpServletRequest hsr) throws AuthorizationException, InterruptedException {
// Make Sure mutationInfo is not null.
if (mutationInfo == null) {
routerMetrics.incrUpdateSchedulerConfigurationFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), UPDATE_SCHEDULER_CONFIGURATION,
UNKNOWN, TARGET_WEB_SERVICE,
"Parameter error, the schedConfUpdateInfo is empty or null.");
throw new IllegalArgumentException(
"Parameter error, the schedConfUpdateInfo is empty or null.");
}
// In federated mode, we may have a mix of multiple schedulers.
// In order to ensure accurate update scheduler configuration,
// we need users to explicitly set subClusterId.
String pSubClusterId = mutationInfo.getSubClusterId();
if (StringUtils.isBlank(pSubClusterId)) {
routerMetrics.incrUpdateSchedulerConfigurationFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), UPDATE_SCHEDULER_CONFIGURATION,
UNKNOWN, TARGET_WEB_SERVICE,
"Parameter error, the subClusterId is empty or null.");
throw new IllegalArgumentException("Parameter error, " +
"the subClusterId is empty or null.");
}
// Get the subClusterInfo, then update the scheduler configuration.
try {
long startTime = clock.getTime();
SubClusterInfo subClusterInfo = getActiveSubCluster(pSubClusterId);
DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorForSubCluster(
subClusterInfo.getSubClusterId(), subClusterInfo.getRMWebServiceAddress());
Response response = interceptor.updateSchedulerConfiguration(mutationInfo, hsr);
if (response != null) {
long endTime = clock.getTime();
routerMetrics.succeededUpdateSchedulerConfigurationRetrieved(endTime - startTime);
RouterAuditLogger.logSuccess(getUser().getShortUserName(), UPDATE_SCHEDULER_CONFIGURATION,
TARGET_WEB_SERVICE);
return Response.status(response.getStatus()).entity(response.getEntity()).build();
}
} catch (NotFoundException e) {
routerMetrics.incrUpdateSchedulerConfigurationFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), UPDATE_SCHEDULER_CONFIGURATION,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException(e,
"Get subCluster error. subClusterId = %s", pSubClusterId);
} catch (Exception e) {
routerMetrics.incrUpdateSchedulerConfigurationFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), UPDATE_SCHEDULER_CONFIGURATION,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException(e,
"UpdateSchedulerConfiguration error. subClusterId = %s", pSubClusterId);
}
routerMetrics.incrUpdateSchedulerConfigurationFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), UPDATE_SCHEDULER_CONFIGURATION,
UNKNOWN, TARGET_WEB_SERVICE, "UpdateSchedulerConfiguration Failed.");
throw new RuntimeException("UpdateSchedulerConfiguration error. subClusterId = "
+ pSubClusterId);
} | @Test
public void testUpdateSchedulerConfiguration()
throws AuthorizationException, InterruptedException {
SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.setSubClusterId("1");
Map<String, String> goodUpdateMap = new HashMap<>();
goodUpdateMap.put("goodKey", "goodVal");
QueueConfigInfo goodUpdateInfo = new
QueueConfigInfo("root.default", goodUpdateMap);
updateInfo.getUpdateQueueInfo().add(goodUpdateInfo);
Response response = interceptor.updateSchedulerConfiguration(updateInfo, null);
Assert.assertNotNull(response);
Assert.assertEquals(OK, response.getStatus());
String expectMsg = "Configuration change successfully applied.";
Object entity = response.getEntity();
Assert.assertNotNull(entity);
String entityMsg = String.valueOf(entity);
Assert.assertEquals(expectMsg, entityMsg);
} |
@Override
public boolean isEmpty() {
return items.isEmpty();
} | @Test
public void testIsEmpty() throws Exception {
//Checks empty condition
//asserts that the map starts out empty
assertTrue("Map should be empty", map.isEmpty());
map.put(1, 1);
assertFalse("Map shouldn't be empty.", map.isEmpty());
map.remove(1);
assertTrue("Map should be empty", map.isEmpty());
} |
static boolean containsIn(CloneGroup first, CloneGroup second) {
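// "first" is contained in "second" only if it is no longer (in clone units)
// and every part of "first" is covered by a part of "second".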
if (first.getCloneUnitLength() > second.getCloneUnitLength()) {
return false;
}
List<ClonePart> firstParts = first.getCloneParts();
List<ClonePart> secondParts = second.getCloneParts();
return SortedListsUtils.contains(secondParts, firstParts, new ContainsInComparator(second.getCloneUnitLength(), first.getCloneUnitLength()))
&& SortedListsUtils.contains(firstParts, secondParts, ContainsInComparator.RESOURCE_ID_COMPARATOR);
} | @Test
public void start_index_in_C1_less_than_in_C2() {
CloneGroup c1 = newCloneGroup(1,
newClonePart("a", 1));
CloneGroup c2 = newCloneGroup(1,
newClonePart("a", 2));
assertThat(Filter.containsIn(c1, c2), is(false));
} |
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(HANA_BOOLEAN);
builder.dataType(HANA_BOOLEAN);
builder.length(2L);
break;
case TINYINT:
builder.columnType(HANA_TINYINT);
builder.dataType(HANA_TINYINT);
break;
case SMALLINT:
builder.columnType(HANA_SMALLINT);
builder.dataType(HANA_SMALLINT);
break;
case INT:
builder.columnType(HANA_INTEGER);
builder.dataType(HANA_INTEGER);
break;
case BIGINT:
builder.columnType(HANA_BIGINT);
builder.dataType(HANA_BIGINT);
break;
case FLOAT:
builder.columnType(HANA_REAL);
builder.dataType(HANA_REAL);
break;
case DOUBLE:
builder.columnType(HANA_DOUBLE);
builder.dataType(HANA_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", HANA_DECIMAL, precision, scale));
builder.dataType(HANA_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
builder.columnType(HANA_BLOB);
builder.dataType(HANA_BLOB);
break;
case STRING:
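// Strings up to the NVARCHAR limit map to NVARCHAR (defaulting to the
// maximum length when unspecified); longer strings fall back to CLOB.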
if (column.getColumnLength() == null
|| column.getColumnLength() <= MAX_NVARCHAR_LENGTH) {
builder.columnType(HANA_NVARCHAR);
builder.dataType(HANA_NVARCHAR);
builder.length(
column.getColumnLength() == null
? MAX_NVARCHAR_LENGTH
: column.getColumnLength());
} else {
builder.columnType(HANA_CLOB);
builder.dataType(HANA_CLOB);
}
break;
case DATE:
builder.columnType(HANA_DATE);
builder.dataType(HANA_DATE);
break;
case TIME:
builder.columnType(HANA_TIME);
builder.dataType(HANA_TIME);
break;
case TIMESTAMP:
if (column.getScale() == null || column.getScale() <= 0) {
builder.columnType(HANA_SECONDDATE);
builder.dataType(HANA_SECONDDATE);
} else {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(HANA_TIMESTAMP);
builder.dataType(HANA_TIMESTAMP);
builder.scale(timestampScale);
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.SAP_HANA,
column.getDataType().getSqlType().name(),
column.getName());
}
BasicTypeDefine typeDefine = builder.build();
typeDefine.setColumnType(
appendColumnSizeIfNeed(
typeDefine.getColumnType(), typeDefine.getLength(), typeDefine.getScale()));
return typeDefine;
} | @Test
public void testReconvertString() {
Column column =
PhysicalColumn.builder()
.name("test")
.dataType(BasicType.STRING_TYPE)
.columnLength(null)
.build();
BasicTypeDefine typeDefine = SapHanaTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals("NVARCHAR(5000)", typeDefine.getColumnType());
Assertions.assertEquals(SapHanaTypeConverter.HANA_NVARCHAR, typeDefine.getDataType());
column =
PhysicalColumn.builder()
.name("test")
.dataType(BasicType.STRING_TYPE)
.columnLength(20000L)
.build();
typeDefine = SapHanaTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(SapHanaTypeConverter.HANA_CLOB, typeDefine.getColumnType());
Assertions.assertEquals(SapHanaTypeConverter.HANA_CLOB, typeDefine.getDataType());
} |
@NonNull
@Override
protected AbstractFileObject<?> requireResolvedFileObject() {
return resolvedFileObject;
} | @Test
public void testRequireResolvedFileObject() {
assertEquals( resolvedFileObject, fileObject.requireResolvedFileObject() );
} |
@Override
public Page download(Request request, Task task) {
if (task == null || task.getSite() == null) {
throw new NullPointerException("task or site can not be null");
}
CloseableHttpResponse httpResponse = null;
CloseableHttpClient httpClient = getHttpClient(task.getSite());
Proxy proxy = proxyProvider != null ? proxyProvider.getProxy(request, task) : null;
HttpClientRequestContext requestContext = httpUriRequestConverter.convert(request, task.getSite(), proxy);
Page page = Page.fail(request);
try {
httpResponse = httpClient.execute(requestContext.getHttpUriRequest(), requestContext.getHttpClientContext());
page = handleResponse(request, request.getCharset() != null ? request.getCharset() : task.getSite().getCharset(), httpResponse, task);
onSuccess(page, task);
return page;
} catch (IOException e) {
onError(page, task, e);
return page;
} finally {
if (httpResponse != null) {
//ensure the connection is released back to pool
EntityUtils.consumeQuietly(httpResponse.getEntity());
}
if (proxyProvider != null && proxy != null) {
proxyProvider.returnProxy(proxy, page, task);
}
}
} | @Test
public void testDownloader() {
HttpClientDownloader httpClientDownloader = new HttpClientDownloader();
Html html = httpClientDownloader.download("https://www.baidu.com/");
assertTrue(!html.getFirstSourceText().isEmpty());
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() == 1) {
final int batteryLevel = data.getIntValue(Data.FORMAT_UINT8, 0);
if (batteryLevel >= 0 && batteryLevel <= 100) {
onBatteryLevelChanged(device, batteryLevel);
return;
}
}
onInvalidDataReceived(device, data);
} | @Test
public void onInvalidDataReceived_batteryLevelOutOfRange() {
final DataReceivedCallback callback = new BatteryLevelDataCallback() {
@Override
public void onBatteryLevelChanged(@NonNull final BluetoothDevice device, final int batteryLevel) {
assertEquals("Invalid date returned Battery Level", 1, 2);
}
@Override
public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
assertEquals("Invalid data", data.size(), 1);
}
};
final Data data = new Data(new byte[] { 0x65 });
callback.onDataReceived(null, data);
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType = null;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
StringColumnStatsDataInspector stringColumnStatsData = stringInspectorFromStats(cso);
if (stringColumnStatsData.getNdvEstimator() == null) {
ndvEstimator = null;
break;
} else {
// check if all of the bit vectors can merge
NumDistinctValueEstimator estimator = stringColumnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (ndvEstimator.canMerge(estimator)) {
continue;
} else {
ndvEstimator = null;
break;
}
}
}
}
if (ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null));
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
StringColumnStatsDataInspector newData = stringInspectorFromStats(cso);
if (ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData
.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen()));
aggregateData
.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
// aggregateData already holds the max ndv across all partitions
}
columnStatisticsData.setStringStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for " + colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
if (ndvEstimator == null) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
StringColumnStatsDataInspector newData =
stringInspectorFromStats(cso);
// newData.isSetBitVectors() is guaranteed to be true here because it
// was already checked above.
if (indexMap.get(partName) != curIndex) {
// There is a bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(),
newData.getAvgColLen()));
aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(),
newData.getMaxColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, -1);
}
LOG.debug(
"Ndv estimatation for {} is {} # of partitions requested: {} # of partitions found: {}",
colName, columnStatisticsData.getStringStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateSingleStat() throws MetaException {
List<String> partitions = Collections.singletonList("part1");
ColumnStatisticsData data1 = new ColStatsBuilder<>(String.class).numNulls(1).numDVs(2).avgColLen(8.5).maxColLen(13)
.hll(S_1, S_3).build();
List<ColStatsObjWithSourceInfo> statsList =
Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));
StringColumnStatsAggregator aggregator = new StringColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
Assert.assertEquals(data1, computedStatsObj.getStatsData());
} |
@Description("compute md5 hash")
@ScalarFunction
@SqlType(StandardTypes.VARBINARY)
public static Slice md5(@SqlType(StandardTypes.VARBINARY) Slice slice)
{
return computeHash(Hashing.md5(), slice);
} | @Test
public void testMd5()
{
assertFunction("md5(CAST('' AS VARBINARY))", VARBINARY, sqlVarbinaryHex("D41D8CD98F00B204E9800998ECF8427E"));
assertFunction("md5(CAST('hashme' AS VARBINARY))", VARBINARY, sqlVarbinaryHex("533F6357E0210E67D91F651BC49E1278"));
} |
public int getSchedulerConfigurationFailedRetrieved() {
return numGetSchedulerConfigurationFailedRetrieved.value();
} | @Test
public void testGetSchedulerConfigurationRetrievedFailed() {
long totalBadBefore = metrics.getSchedulerConfigurationFailedRetrieved();
badSubCluster.getSchedulerConfigurationFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getSchedulerConfigurationFailedRetrieved());
} |
@Override
public int hashCode() {
return Objects.hash( name, path, provider );
} | @Test
public void testHashCode() {
Element element2 = new Element( NAME, TYPE, PATH, LOCAL_PROVIDER );
assertEquals( element1.hashCode(), element2.hashCode() );
element2 = new Element( "diffname", TYPE, "/tmp/diffname", LOCAL_PROVIDER );
assertNotEquals( element1.hashCode(), element2.hashCode() );
element2 = new Element( NAME, TYPE, PATH, "diffProvider" );
assertNotEquals( element1.hashCode(), element2.hashCode() );
element2 = new Element( NAME, EntityType.REPOSITORY_FILE, PATH, LOCAL_PROVIDER );
// Changing the file type does not affect equals because in a map, if the path and provider are the same then
// the files would live in the same physical space.
assertEquals( element1.hashCode(), element2.hashCode() );
// future proofing for unexpected null values
assertNotEquals( new Element( null, null, null, null ).hashCode(),
element2.hashCode() );
} |
public void write(final Map<TopicPartition, Long> offsets) throws IOException {
// if there are no offsets, skip writing the file to save disk IOs
// but make sure to delete the existing file if one exists
if (offsets.isEmpty()) {
Utils.delete(file);
return;
}
synchronized (lock) {
// write to temp file and then swap with the existing file
final File temp = new File(file.getAbsolutePath() + ".tmp");
LOG.trace("Writing tmp checkpoint file {}", temp.getAbsolutePath());
final FileOutputStream fileOutputStream = new FileOutputStream(temp);
try (final BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(fileOutputStream, StandardCharsets.UTF_8))) {
writeIntLine(writer, VERSION);
writeIntLine(writer, offsets.size());
for (final Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
final TopicPartition tp = entry.getKey();
final Long offset = entry.getValue();
if (isValid(offset)) {
writeEntry(writer, tp, offset);
} else {
LOG.error("Received offset={} to write to checkpoint file for {}", offset, tp);
throw new IllegalStateException("Attempted to write a negative offset to the checkpoint file");
}
}
writer.flush();
fileOutputStream.getFD().sync();
}
LOG.trace("Swapping tmp checkpoint file {} {}", temp.toPath(), file.toPath());
Utils.atomicMoveWithFallback(temp.toPath(), file.toPath());
}
} | @Test
public void shouldThrowIOExceptionWhenWritingToNotExistedFile() {
final Map<TopicPartition, Long> offsetsToWrite = Collections.singletonMap(new TopicPartition(topic, 0), 0L);
final File notExistedFile = new File("/not_existed_dir/not_existed_file");
final OffsetCheckpoint checkpoint = new OffsetCheckpoint(notExistedFile);
final IOException e = assertThrows(IOException.class, () -> checkpoint.write(offsetsToWrite));
assertThat(e.getMessage(), containsString("No such file or directory"));
} |
public static void applyLocaleToContext(@NonNull Context context, @Nullable String localeString) {
final Locale forceLocale = LocaleTools.getLocaleForLocaleString(localeString);
final Configuration configuration = context.getResources().getConfiguration();
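// Configuration#setLocale(Locale) is available from API 17 (JELLY_BEAN_MR1);
// older platforms mutate the deprecated public locale field directly.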
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
configuration.setLocale(forceLocale);
} else {
//noinspection deprecation
configuration.locale = forceLocale;
}
context.getResources().updateConfiguration(configuration, null);
} | @Test
@Config(sdk = Build.VERSION_CODES.JELLY_BEAN_MR1)
@Ignore("Robolectric does not support this API")
public void testSetAndResetValueAPI17WithKnownLocale() {
Assert.assertEquals(
"English (United States)",
mContext.getResources().getConfiguration().locale.getDisplayName());
LocaleTools.applyLocaleToContext(mContext, "de");
Assert.assertEquals("de", mContext.getResources().getConfiguration().locale.getLanguage());
Assert.assertTrue(
mContext.getResources().getConfiguration().locale.getDisplayName().contains("German"));
LocaleTools.applyLocaleToContext(mContext, "");
Assert.assertSame(Locale.getDefault(), mContext.getResources().getConfiguration().locale);
LocaleTools.applyLocaleToContext(mContext, "NONE_EXISTING");
Assert.assertEquals(
"none_existing", mContext.getResources().getConfiguration().locale.getLanguage());
} |
@Override
@SuppressWarnings("nullness")
public List<Map<String, Object>> readTable(String tableName) {
LOG.info("Reading all rows from {}.{}", databaseName, tableName);
List<Map<String, Object>> result = runSQLQuery(String.format("SELECT * FROM %s", tableName));
LOG.info("Successfully loaded rows from {}.{}", databaseName, tableName);
return result;
} | @Test
public void testReadTableShouldNotThrowErrorIfJDBCDoesNotThrowAnyError() throws SQLException {
when(container.getHost()).thenReturn(HOST);
when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);
testManager.readTable(TABLE_NAME);
verify(driver.getConnection(any(), any(), any()).createStatement()).executeQuery(anyString());
} |
public static ScalarOperator translate(Expr expression, ExpressionMapping expressionMapping,
ColumnRefFactory columnRefFactory) {
return translate(expression, expressionMapping, null, columnRefFactory);
} | @Test
public void testTranslateComplexFunction() {
StringLiteral test = new StringLiteral("test");
StringLiteral defaultStr = new StringLiteral("default");
BinaryPredicate predicate = new BinaryPredicate(BinaryType.EQ, defaultStr, test);
FunctionCallExpr baseFunc = new FunctionCallExpr("if", ImmutableList.of(predicate, test, defaultStr));
FunctionCallExpr complexFunc = baseFunc;
for (int i = 0; i < 100; i++) {
complexFunc = new FunctionCallExpr("if", ImmutableList.of(predicate, test, complexFunc));
}
CallOperator so = (CallOperator) SqlToScalarOperatorTranslator.translate(complexFunc,
new ExpressionMapping(null, Collections.emptyList()), new ColumnRefFactory());
assertEquals("if", so.getFnName());
} |
public static <T> T[] checkNonEmpty(T[] array, String name) {
//No String concatenation for check
if (checkNotNull(array, name).length == 0) {
throw new IllegalArgumentException("Param '" + name + "' must not be empty");
}
return array;
} | @Test
public void testCheckNonEmptyTArrayString() {
Exception actualEx = null;
try {
ObjectUtil.checkNonEmpty((Object[]) NULL_OBJECT, NULL_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof NullPointerException, TEST_RESULT_EXTYPE_NOK);
actualEx = null;
try {
ObjectUtil.checkNonEmpty((Object[]) NON_NULL_FILLED_OBJECT_ARRAY, NON_NULL_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNull(actualEx, TEST_RESULT_NULLEX_NOK);
actualEx = null;
try {
ObjectUtil.checkNonEmpty((Object[]) NON_NULL_EMPTY_OBJECT_ARRAY, NON_NULL_EMPTY_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
} |
@Override
public int hashCode() {
return Objects.hashCode(dataSourceName.toUpperCase(), tableName.toUpperCase(), null == schemaName ? null : schemaName.toUpperCase());
} | @Test
void assertHashCodeIncludeInstance() {
assertThat(new DataNode("ds_0.db_0.tbl_0").hashCode(), is(new DataNode("ds_0.db_0.tbl_0").hashCode()));
} |
@NonNull public static synchronized String getAllLogLines() {
ArrayList<String> lines = getAllLogLinesList();
// now to build the string
StringBuilder sb = new StringBuilder("Log contains " + lines.size() + " lines:");
while (lines.size() > 0) {
String line = lines.remove(lines.size() - 1);
sb.append(NEW_LINE);
sb.append(line);
}
return sb.toString();
} | @Test
public void testGetAllLogLines() throws Exception {
Logger.d("mTag", "Text1");
final String expectedFirstLine = "-D-[mTag] Text1";
Assert.assertTrue(Logger.getAllLogLines().endsWith(expectedFirstLine));
} |
public static KeyStore newStoreCopyContent(KeyStore originalKeyStore,
char[] currentPassword,
final char[] newPassword) throws GeneralSecurityException, IOException {
if (newPassword == null) {
throw new IllegalArgumentException("new password cannot be null");
}
KeyStore newKeyStore = KeyStore.getInstance(PKCS12);
newKeyStore.load(null, newPassword);
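// Copy every entry into the new store: private and secret keys are unlocked
// with the current password and re-protected with the new one; trusted
// certificates need no password.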
final Enumeration<String> aliases = originalKeyStore.aliases();
while (aliases.hasMoreElements()) {
String alias = aliases.nextElement();
if (originalKeyStore.entryInstanceOf(alias, KeyStore.PrivateKeyEntry.class)) {
newKeyStore.setKeyEntry(
alias,
originalKeyStore.getKey(alias, currentPassword),
newPassword,
originalKeyStore.getCertificateChain(alias)
);
} else if (originalKeyStore.entryInstanceOf(alias, KeyStore.TrustedCertificateEntry.class)) {
newKeyStore.setCertificateEntry(alias, originalKeyStore.getCertificate(alias));
} else if (originalKeyStore.entryInstanceOf(alias, KeyStore.SecretKeyEntry.class)) {
newKeyStore.setEntry(alias,
originalKeyStore.getEntry(alias, new KeyStore.PasswordProtection(currentPassword)),
new KeyStore.PasswordProtection(newPassword)
);
}
}
return newKeyStore;
} | @Test
void testDifferentEntriesMoving() throws Exception {
final char[] oldPassword = "oldPass".toCharArray();
final char[] newPassword = "newPass".toCharArray();
KeyStore originalKeyStore = KeyStore.getInstance(PKCS12);
originalKeyStore.load(null, oldPassword);
final byte[] originalSecretKey = {0x54, 0x68, 0x61, 0x74, 0x73, 0x20, 0x6D, 0x79, 0x20, 0x4B, 0x75, 0x6E, 0x67, 0x20, 0x46, 0x75};
KeyStore.SecretKeyEntry secret
= new KeyStore.SecretKeyEntry(new SecretKeySpec(originalSecretKey,
"AES"));
originalKeyStore.setEntry("secretEntry", secret, new KeyStore.PasswordProtection(oldPassword));
CertRequest req = CertRequest.selfSigned("darknet.net")
.validity(Duration.ZERO);
final X509Certificate trustedCA = CertificateGenerator.generate(req).certificate();
originalKeyStore.setCertificateEntry("trusted-certificate", trustedCA);
req = CertRequest.selfSigned("localhost")
.validity(Duration.ZERO);
final KeyPair keyPair = CertificateGenerator.generate(req);
originalKeyStore.setKeyEntry("privkey", keyPair.privateKey(), oldPassword, new Certificate[]{keyPair.certificate()});
final KeyStore newKeyStore = KeystoreUtils.newStoreCopyContent(originalKeyStore, oldPassword, newPassword);
final KeyStore.Entry secretRetrieved = newKeyStore.getEntry("secretEntry", new KeyStore.PasswordProtection(newPassword));
final Certificate certificateRetrieved = newKeyStore.getCertificate("trusted-certificate");
final Key privkeyRetrieved = newKeyStore.getKey("privkey", newPassword);
//Verify if all 3 entry types have been moved without any changes
assertArrayEquals(originalSecretKey, ((KeyStore.SecretKeyEntry) secretRetrieved).getSecretKey().getEncoded());
assertEquals(trustedCA, certificateRetrieved);
assertEquals(keyPair.privateKey(), privkeyRetrieved);
} |
public static CodecFactory fromHadoopString(String hadoopCodecClass) {
CodecFactory o = null;
try {
String avroCodec = HADOOP_AVRO_NAME_MAP.get(hadoopCodecClass);
if (avroCodec != null) {
o = CodecFactory.fromString(avroCodec);
}
} catch (Exception e) {
throw new AvroRuntimeException("Unrecognized hadoop codec: " + hadoopCodecClass, e);
}
return o;
} | @Test
void hadoopCodecFactorySnappy() {
CodecFactory hadoopSnappyCodec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.SnappyCodec");
CodecFactory avroSnappyCodec = CodecFactory.fromString("snappy");
assertEquals(hadoopSnappyCodec.getClass(), avroSnappyCodec.getClass());
} |
@Override
public int rpcPortOffset() {
return Integer.parseInt(System.getProperty(GrpcConstants.NACOS_SERVER_GRPC_PORT_OFFSET_KEY,
String.valueOf(Constants.CLUSTER_GRPC_PORT_DEFAULT_OFFSET)));
} | @Test
void testRpcPortOffsetFromSystemProperty() {
System.setProperty(GrpcConstants.NACOS_SERVER_GRPC_PORT_OFFSET_KEY, "10001");
grpcClusterClient = new GrpcClusterClient("test", 8, 8, Collections.emptyMap());
assertEquals(10001, grpcClusterClient.rpcPortOffset());
} |
public static String get(String urlString, Charset customCharset) {
return HttpRequest.get(urlString).charset(customCharset).execute().body();
} | @Test
@Disabled
public void get12306Test() {
HttpRequest.get("https://kyfw.12306.cn/otn/")
.setFollowRedirects(true)
.then(response -> Console.log(response.body()));
} |
static void obtainTokensForNamenodesInternal(Credentials credentials,
Path[] ps, Configuration conf) throws IOException {
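// Deduplicate the filesystems behind the given paths, then fetch a
// delegation token from each on behalf of the master principal.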
Set<FileSystem> fsSet = new HashSet<FileSystem>();
for(Path p: ps) {
fsSet.add(p.getFileSystem(conf));
}
String masterPrincipal = Master.getMasterPrincipal(conf);
for (FileSystem fs : fsSet) {
obtainTokensForNamenodesInternal(fs, credentials, conf, masterPrincipal);
}
} | @Test
public void testSingleTokenFetch() throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
String renewer = Master.getMasterPrincipal(conf);
Credentials credentials = new Credentials();
final MockFileSystem fs = new MockFileSystem();
final MockFileSystem mockFs = (MockFileSystem) fs.getRawFileSystem();
when(mockFs.getCanonicalServiceName()).thenReturn("host:0");
when(mockFs.getUri()).thenReturn(new URI("mockfs://host:0"));
Path mockPath = mock(Path.class);
when(mockPath.getFileSystem(conf)).thenReturn(mockFs);
Path[] paths = new Path[]{ mockPath, mockPath };
when(mockFs.addDelegationTokens("me", credentials)).thenReturn(null);
TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
verify(mockFs, times(1)).addDelegationTokens(renewer, credentials);
} |
@ProcessElement
public void processElement(OutputReceiver<InitialPipelineState> receiver) throws IOException {
LOG.info(daoFactory.getStreamTableDebugString());
LOG.info(daoFactory.getMetadataTableDebugString());
LOG.info("ChangeStreamName: " + daoFactory.getChangeStreamName());
boolean resume = false;
DetectNewPartitionsState detectNewPartitionsState =
daoFactory.getMetadataTableDao().readDetectNewPartitionsState();
switch (existingPipelineOptions) {
case RESUME_OR_NEW:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.info(
"Attempted to resume, but previous watermark does not exist, starting at {}",
startTime);
}
break;
case RESUME_OR_FAIL:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.error("Previous pipeline with the same change stream name doesn't exist, stopping");
return;
}
break;
case FAIL_IF_EXISTS:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to FAIL_IF_EXISTS.");
return;
}
break;
case SKIP_CLEANUP:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to SKIP_CLEANUP. This option should only be used in tests.");
return;
}
break;
default:
LOG.error("Unexpected existingPipelineOptions option.");
// terminate pipeline
return;
}
daoFactory.getMetadataTableDao().writeDetectNewPartitionVersion();
receiver.output(new InitialPipelineState(startTime, resume));
} | @Test
public void testInitializeStopWithExistingPipeline() throws IOException {
metadataTableDao.updateDetectNewPartitionWatermark(Instant.now());
Instant startTime = Instant.now();
InitializeDoFn initializeDoFn =
new InitializeDoFn(
daoFactory, startTime, BigtableIO.ExistingPipelineOptions.FAIL_IF_EXISTS);
initializeDoFn.processElement(outputReceiver);
verify(outputReceiver, never()).output(any());
} |
public boolean put(Key key, Value value) {
// retain value so that it's not released before we put it in the cache and calculate the weight
value.retain();
try {
if (!value.matchesKey(key)) {
throw new IllegalArgumentException("Value '" + value + "' does not match key '" + key + "'");
}
long entrySize = weighter.getSize(value);
EntryWrapper<Key, Value> newWrapper = EntryWrapper.create(key, value, entrySize);
if (entries.putIfAbsent(key, newWrapper) == null) {
this.size.addAndGet(entrySize);
return true;
} else {
// recycle the new wrapper as it was not used
newWrapper.recycle();
return false;
}
} finally {
value.release();
}
} | @Test
public void testPutSameObj() {
RangeCache<Integer, RefString> cache = new RangeCache<>(value -> value.s.length(), x -> 0);
RefString s0 = new RefString("zero", 0);
assertEquals(s0.refCnt(), 1);
assertTrue(cache.put(0, s0));
assertFalse(cache.put(0, s0));
} |
@Override
public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
CheckForDecommissioningNodesRequest request) throws YarnException, IOException {
// Parameter check
if (request == null) {
RouterServerUtil.logAndThrowException("Missing checkForDecommissioningNodes request.", null);
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
}
String subClusterId = request.getSubClusterId();
if (StringUtils.isBlank(subClusterId)) {
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing checkForDecommissioningNodes SubClusterId.",
null);
}
try {
long startTime = clock.getTime();
RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
new Class[]{CheckForDecommissioningNodesRequest.class}, new Object[]{request});
Collection<CheckForDecommissioningNodesResponse> responses =
remoteMethod.invokeConcurrent(this, CheckForDecommissioningNodesResponse.class,
subClusterId);
if (CollectionUtils.isNotEmpty(responses)) {
// A subCluster was selected; the list is non-empty with size 1.
List<CheckForDecommissioningNodesResponse> collects =
responses.stream().collect(Collectors.toList());
if (!collects.isEmpty() && collects.size() == 1) {
CheckForDecommissioningNodesResponse response = collects.get(0);
long stopTime = clock.getTime();
routerMetrics.succeededCheckForDecommissioningNodesRetrieved((stopTime - startTime));
Set<NodeId> nodes = response.getDecommissioningNodes();
return CheckForDecommissioningNodesResponse.newInstance(nodes);
}
}
} catch (YarnException e) {
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to checkForDecommissioningNodes due to exception " + e.getMessage());
}
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
throw new YarnException("Unable to checkForDecommissioningNodes.");
} | @Test
public void testCheckForDecommissioningNodesNormalRequest() throws Exception {
CheckForDecommissioningNodesRequest request =
CheckForDecommissioningNodesRequest.newInstance("SC-1");
CheckForDecommissioningNodesResponse response =
interceptor.checkForDecommissioningNodes(request);
assertNotNull(response);
Set<NodeId> nodeIds = response.getDecommissioningNodes();
assertNotNull(nodeIds);
assertEquals(0, nodeIds.size());
} |
public static String substringTrimmed( String str, int beginIndex ) {
return substringTrimmed( str, beginIndex, str.length() );
} | @Test
void substringTrimmed() {
testSubstringTrimmed( "", 0 );
testSubstringTrimmed( "a", 0 );
testSubstringTrimmed( "a", 1 );
testSubstringTrimmed( "a ", 0 );
testSubstringTrimmed( " a", 0 );
testSubstringTrimmed( " a ", 0 );
testSubstringTrimmed( " a ", 1 );
testSubstringTrimmed( " a ", 0, 3 );
testSubstringTrimmed( " a ", 1, 4 );
} |
public List<BlameLine> blame(Git git, String filename) {
BlameResult blameResult;
try {
blameResult = git.blame()
// Equivalent to -w command line option
.setTextComparator(RawTextComparator.WS_IGNORE_ALL)
.setFilePath(filename).call();
} catch (Exception e) {
throw new IllegalStateException("Unable to blame file " + filename, e);
}
List<BlameLine> lines = new ArrayList<>();
if (blameResult == null) {
LOG.debug("Unable to blame file {}. It is probably a symlink.", filename);
return emptyList();
}
for (int i = 0; i < blameResult.getResultContents().size(); i++) {
if (blameResult.getSourceAuthor(i) == null || blameResult.getSourceCommit(i) == null) {
LOG.debug("Unable to blame file {}. No blame info at line {}. Is file committed? [Author: {} Source commit: {}]", filename, i + 1,
blameResult.getSourceAuthor(i), blameResult.getSourceCommit(i));
return emptyList();
}
lines.add(new BlameLine()
.date(blameResult.getSourceCommitter(i).getWhen())
.revision(blameResult.getSourceCommit(i).getName())
.author(blameResult.getSourceAuthor(i).getEmailAddress()));
}
return lines;
} | @Test
public void blame_returns_all_lines() {
try (Git git = loadRepository(baseDir)) {
List<BlameLine> blameLines = jGitBlameCommand.blame(git, DUMMY_JAVA);
Date revisionDate1 = DateUtils.parseDateTime("2012-07-17T16:12:48+0200");
String revision1 = "6b3aab35a3ea32c1636fee56f996e677653c48ea";
String author1 = "david@gageot.net";
// second commit, which has a commit date different than the author date
Date revisionDate2 = DateUtils.parseDateTime("2015-05-19T13:31:09+0200");
String revision2 = "0d269c1acfb8e6d4d33f3c43041eb87e0df0f5e7";
String author2 = "duarte.meneses@sonarsource.com";
List<BlameLine> expectedBlame = new LinkedList<>();
for (int i = 0; i < 25; i++) {
expectedBlame.add(new BlameLine().revision(revision1).date(revisionDate1).author(author1));
}
for (int i = 0; i < 3; i++) {
expectedBlame.add(new BlameLine().revision(revision2).date(revisionDate2).author(author2));
}
for (int i = 0; i < 1; i++) {
expectedBlame.add(new BlameLine().revision(revision1).date(revisionDate1).author(author1));
}
assertThat(blameLines).isEqualTo(expectedBlame);
}
} |
public Collection<String> getCandidateEIPs(String myInstanceId, String myZone) {
if (myZone == null) {
myZone = "us-east-1d";
}
Collection<String> eipCandidates = clientConfig.shouldUseDnsForFetchingServiceUrls()
? getEIPsForZoneFromDNS(myZone)
: getEIPsForZoneFromConfig(myZone);
        if (eipCandidates == null || eipCandidates.isEmpty()) {
throw new RuntimeException("Could not get any elastic ips from the EIP pool for zone :" + myZone);
}
return eipCandidates;
} | @Test
public void shouldFilterNonElasticNamesInOtherRegion() {
when(config.getRegion()).thenReturn("eu-west-1");
List<String> hosts = Lists.newArrayList("example.com", "ec2-1-2-3-4.eu-west-1.compute.amazonaws.com",
"5.6.7.8", "ec2-101-202-33-44.eu-west-1.compute.amazonaws.com");
when(config.getEurekaServerServiceUrls(any(String.class))).thenReturn(hosts);
Collection<String> returnValue = eipManager.getCandidateEIPs("i-123", "eu-west-1a");
assertEquals(2, returnValue.size());
assertTrue(returnValue.contains("1.2.3.4"));
assertTrue(returnValue.contains("101.202.33.44"));
} |
public <T> T parse(String input, Class<T> cls) {
return readFlow(input, cls, type(cls));
} | @Test
void inputsBadType() {
ConstraintViolationException exception = assertThrows(
ConstraintViolationException.class,
() -> this.parse("flows/invalids/inputs-bad-type.yaml")
);
assertThat(exception.getMessage(), containsString("Invalid type: FOO"));
} |
@Nullable
public Long getLongValue(@LongFormat final int formatType,
@IntRange(from = 0) final int offset) {
if ((offset + getTypeLen(formatType)) > size()) return null;
return switch (formatType) {
case FORMAT_UINT32_LE -> unsignedBytesToLong(
mValue[offset],
mValue[offset + 1],
mValue[offset + 2],
mValue[offset + 3]
);
case FORMAT_UINT32_BE -> unsignedBytesToLong(
mValue[offset + 3],
mValue[offset + 2],
mValue[offset + 1],
mValue[offset]
);
case FORMAT_SINT32_LE -> unsignedToSigned(unsignedBytesToLong(
mValue[offset],
mValue[offset + 1],
mValue[offset + 2],
mValue[offset + 3]
), 32);
case FORMAT_SINT32_BE -> unsignedToSigned(unsignedBytesToLong(
mValue[offset + 3],
mValue[offset + 2],
mValue[offset + 1],
mValue[offset]
), 32);
default -> null;
};
} | @Test
public void getValue_UINT32_BE_big() {
final Data data = new Data(new byte[] { (byte) 0xF0, 0x00, 0x00, 0x01 });
final long value = data.getLongValue(Data.FORMAT_UINT32_BE, 0);
assertEquals(0xF0000001L, value);
} |
public static String replaceAll(CharSequence content, String regex, String replacementTemplate) {
final Pattern pattern = Pattern.compile(regex, Pattern.DOTALL);
return replaceAll(content, pattern, replacementTemplate);
} | @Test
public void replaceAllTest() {
        // Find matches via the regex, then substitute each match into replacementTemplate; $1 refers to the string captured by group 1
        // Here 1234 is replaced with ->1234<-
final String replaceAll = ReUtil.replaceAll(content, "(\\d+)", "->$1<-");
assertEquals("ZZZaaabbbccc中文->1234<-", replaceAll);
} |
@Override
public ProcResult fetchResult() throws AnalysisException {
Preconditions.checkNotNull(db);
Preconditions.checkNotNull(schemaChangeHandler);
BaseProcResult result = new BaseProcResult();
result.setNames(TITLE_NAMES);
        List<List<Comparable>> optimizeJobInfos = getOptimizeJobInfos();
        for (List<Comparable> infoStr : optimizeJobInfos) {
List<String> oneInfo = new ArrayList<String>(TITLE_NAMES.size());
for (Comparable element : infoStr) {
oneInfo.add(element.toString());
}
result.addRow(oneInfo);
}
return result;
} | @Test
public void testFetchResult() throws AnalysisException {
BaseProcResult result = (BaseProcResult) optimizeProcDir.fetchResult();
List<List<String>> rows = result.getRows();
List<String> list1 = rows.get(0);
Assert.assertEquals(list1.size(), OptimizeProcDir.TITLE_NAMES.size());
// JobId
Assert.assertEquals("1", list1.get(0));
// TableName
Assert.assertEquals("tb1", list1.get(1));
// CreateTime
Assert.assertEquals("2020-01-01", list1.get(2));
// FinishTime
Assert.assertEquals("2020-01-01", list1.get(3));
// Operation
Assert.assertEquals("ALTER", list1.get(4));
// TransactionId
Assert.assertEquals("0", list1.get(5));
// State
Assert.assertEquals("FINISHED", list1.get(6));
// Msg
Assert.assertEquals("", list1.get(7));
// Progress
Assert.assertEquals("100", list1.get(8));
// Timeout
Assert.assertEquals("10000", list1.get(9));
List<String> list2 = rows.get(1);
Assert.assertEquals(list2.size(), OptimizeProcDir.TITLE_NAMES.size());
// JobId
Assert.assertEquals("1", list2.get(0));
// TableName
Assert.assertEquals("tb1", list2.get(1));
// CreateTime
Assert.assertEquals("2020-01-01", list2.get(2));
// FinishTime
Assert.assertEquals("2020-01-01", list2.get(3));
// Operation
Assert.assertEquals("ALTER", list2.get(4));
// TransactionId
Assert.assertEquals("0", list2.get(5));
// State
Assert.assertEquals("FINISHED", list2.get(6));
// Msg
Assert.assertEquals("", list2.get(7));
// Progress
Assert.assertEquals("100", list2.get(8));
// Timeout
Assert.assertEquals("10000", list2.get(9));
} |
public static Predicate parse(String expression)
{
final Stack<Predicate> predicateStack = new Stack<>();
final Stack<Character> operatorStack = new Stack<>();
final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
boolean isTokenMode = true;
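        // shunting-yard style parse: predicate instances accumulate on one stack, operators on the other;
        // isTokenMode == false makes the next iteration re-examine an operator popped from the stack
        // instead of reading a new token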
while (true)
{
final Character operator;
final String token;
if (isTokenMode)
{
if (tokenizer.hasMoreTokens())
{
token = tokenizer.nextToken();
}
else
{
break;
}
if (OPERATORS.contains(token))
{
operator = token.charAt(0);
}
else
{
operator = null;
}
}
else
{
operator = operatorStack.pop();
token = null;
}
isTokenMode = true;
if (operator == null)
{
try
{
predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
}
catch (ClassCastException e)
{
throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
else
{
if (operatorStack.empty() || operator == '(')
{
operatorStack.push(operator);
}
else if (operator == ')')
{
while (operatorStack.peek() != '(')
{
evaluate(predicateStack, operatorStack);
}
operatorStack.pop();
}
else
{
if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
{
evaluate(predicateStack, operatorStack);
isTokenMode = false;
}
operatorStack.push(operator);
}
}
}
while (!operatorStack.empty())
{
evaluate(predicateStack, operatorStack);
}
if (predicateStack.size() > 1)
{
throw new RuntimeException("Invalid logical expression");
}
return predicateStack.pop();
} | @Test
public void testOrParenAnd()
{
final Predicate parsed = PredicateExpressionParser.parse("(com.linkedin.data.it.AlwaysTruePredicate | com.linkedin.data.it.AlwaysTruePredicate) & com.linkedin.data.it.AlwaysFalsePredicate");
Assert.assertEquals(parsed.getClass(), AndPredicate.class);
final List<Predicate> andChildren = ((AndPredicate) parsed).getChildPredicates();
Assert.assertEquals(andChildren.get(0).getClass(), OrPredicate.class);
Assert.assertEquals(andChildren.get(1).getClass(), AlwaysFalsePredicate.class);
final List<Predicate> orChildren = ((OrPredicate) andChildren.get(0)).getChildPredicates();
Assert.assertEquals(orChildren.get(0).getClass(), AlwaysTruePredicate.class);
Assert.assertEquals(orChildren.get(1).getClass(), AlwaysTruePredicate.class);
} |
@Override
public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) {
Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:"
+ " URI path should not be null");
if (conf.getInt(PropertyKey.UNDERFS_GCS_VERSION) == GCS_VERSION_TWO) {
try {
return GCSV2UnderFileSystem.createInstance(new AlluxioURI(path), conf);
} catch (IOException e) {
LOG.error("Failed to create GCSV2UnderFileSystem.", e);
throw Throwables.propagate(e);
}
    } else {
if (checkGCSCredentials(conf)) {
try {
return GCSUnderFileSystem.createInstance(new AlluxioURI(path), conf);
} catch (ServiceException e) {
LOG.error("Failed to create GCSUnderFileSystem.", e);
throw Throwables.propagate(e);
}
}
}
String err = "GCS credentials or version not available, cannot create GCS Under File System.";
throw new InvalidArgumentRuntimeException(err);
} | @Test
public void createInstanceWithPath() {
UnderFileSystem ufs = mFactory.create(mPath, mConf);
Assert.assertNotNull(ufs);
Assert.assertTrue(ufs instanceof GCSUnderFileSystem);
} |
@CallSuper
protected void abortCorrectionAndResetPredictionState(boolean disabledUntilNextInputStart) {
mSuggest.resetNextWordSentence();
mLastSpaceTimeStamp = NEVER_TIME_STAMP;
mJustAutoAddedWord = false;
mKeyboardHandler.removeAllSuggestionMessages();
final InputConnection ic = getCurrentInputConnection();
markExpectingSelectionUpdate();
if (ic != null) ic.finishComposingText();
clearSuggestions();
mWord.reset();
mWordRevertLength = 0;
mJustAutoAddedWord = false;
if (disabledUntilNextInputStart) {
Logger.d(TAG, "abortCorrection will abort correct forever");
final KeyboardViewContainerView inputViewContainer = getInputViewContainer();
if (inputViewContainer != null) {
inputViewContainer.removeStripAction(mCancelSuggestionsAction);
}
mPredictionOn = false;
}
} | @Test
public void testStripActionNotRemovedWhenAbortingPredictionNotForever() {
Assert.assertNotNull(
mAnySoftKeyboardUnderTest
.getInputViewContainer()
.findViewById(R.id.close_suggestions_strip_text));
mAnySoftKeyboardUnderTest.abortCorrectionAndResetPredictionState(false);
Assert.assertNotNull(
mAnySoftKeyboardUnderTest
.getInputViewContainer()
.findViewById(R.id.close_suggestions_strip_text));
} |
@Override
public String toString() {
if (!hasParameters()) return typeSubtype;
StringBuilder builder = new StringBuilder().append(typeSubtype);
String strParams = this.params.entrySet().stream()
.map(e -> e.getKey() + "=" + e.getValue())
.collect(Collectors.joining("; "));
return builder.append("; ").append(strParams).toString();
} | @Test
public void testToString() {
assertEquals("application/xml; q=0.9",
new MediaType("application", "xml", Map.of("q", "0.9")).toString());
assertEquals("text/csv", new MediaType("text", "csv").toString());
assertEquals("foo/bar; a=2", new MediaType("foo", "bar", Map.of("a", "2")).toString());
String type = new MediaType("foo", "bar", Map.of("a", "2", "b", "1", "c", "2")).toString();
assertTrue(type.startsWith("foo/bar; "));
assertTrue(type.contains("; a=2"));
assertTrue(type.contains("; b=1"));
assertTrue(type.contains("; c=2"));
assertEquals("a/b; p=1", MediaType.fromString("a/b; p=1; q=2").toStringExcludingParam("q"));
} |
@Override
public List<ServiceDefinition> apply(Exchange exchange, List<ServiceDefinition> services) {
for (int i = 0; i < delegatesSize; i++) {
services = delegates.get(i).apply(exchange, services);
}
return services;
} | @Test
public void testMultiServiceFilter() throws Exception {
CombinedServiceCallServiceFilterConfiguration conf = new CombinedServiceCallServiceFilterConfiguration()
.healthy()
.custom((exchange, services) -> services.stream().filter(s -> s.getPort() < 2000).toList());
Exchange exchange = new DefaultExchange(context);
List<ServiceDefinition> services = conf.newInstance(context).apply(exchange, Arrays.asList(
new DefaultServiceDefinition("no-name", "127.0.0.1", 1000),
new DefaultServiceDefinition("no-name", "127.0.0.1", 1001, new DefaultServiceHealth(false)),
new DefaultServiceDefinition("no-name", "127.0.0.1", 1002, new DefaultServiceHealth(true)),
new DefaultServiceDefinition("no-name", "127.0.0.1", 2001, new DefaultServiceHealth(true)),
new DefaultServiceDefinition("no-name", "127.0.0.1", 2001, new DefaultServiceHealth(false))));
assertEquals(2, services.size());
assertFalse(services.stream().anyMatch(s -> !s.getHealth().isHealthy()));
assertFalse(services.stream().anyMatch(s -> s.getPort() > 2000));
assertTrue(services.stream().anyMatch(s -> s.getPort() == 1000));
assertTrue(services.stream().anyMatch(s -> s.getPort() == 1002));
} |
public EndpointResponse getStatus(final String type, final String entity, final String action) {
final CommandId commandId = new CommandId(type, entity, action);
final Optional<CommandStatus> commandStatus = statementExecutor.getStatus(commandId);
return commandStatus.map(EndpointResponse::ok)
.orElseGet(() -> Errors.notFound("Command not found"));
} | @Test
public void testGetStatus() throws Exception {
final StatusResource testResource = getTestStatusResource();
for (final Map.Entry<CommandId, CommandStatus> commandEntry : mockCommandStatuses.entrySet()) {
final CommandId commandId = commandEntry.getKey();
final CommandStatus expectedCommandStatus = commandEntry.getValue();
final Object statusEntity = testResource.getStatus(commandId.getType().name(), commandId.getEntity(), commandId.getAction().name()).getEntity();
assertThat(statusEntity, instanceOf(CommandStatus.class));
final CommandStatus testCommandStatus = (CommandStatus) statusEntity;
assertEquals(expectedCommandStatus, testCommandStatus);
}
} |
@SuppressWarnings("FieldAccessNotGuarded")
    // Note that callWithLock ensures the code block is guarded by both resultPartitionReadLock and
    // subpartitionLock.
public List<BufferWithIdentity> spillSubpartitionBuffers(
List<BufferIndexAndChannel> toSpill, CompletableFuture<Void> spillDoneFuture) {
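        // buffers for which startSpillingBuffer returns an empty Optional are filtered out of the result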
return callWithLock(
() ->
toSpill.stream()
.map(
indexAndChannel -> {
int bufferIndex = indexAndChannel.getBufferIndex();
return startSpillingBuffer(bufferIndex, spillDoneFuture)
.map(
(context) ->
new BufferWithIdentity(
context.getBuffer(),
bufferIndex,
targetChannel));
})
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toList()));
} | @Test
void testSpillSubpartitionBuffers() throws Exception {
CompletableFuture<Void> spilledDoneFuture = new CompletableFuture<>();
TestingMemoryDataManagerOperation memoryDataManagerOperation =
TestingMemoryDataManagerOperation.builder()
.setRequestBufferFromPoolSupplier(() -> createBufferBuilder(RECORD_SIZE))
.build();
HsSubpartitionMemoryDataManager subpartitionMemoryDataManager =
createSubpartitionMemoryDataManager(memoryDataManagerOperation);
final int numBuffers = 3;
for (int i = 0; i < numBuffers; i++) {
subpartitionMemoryDataManager.append(createRecord(i), DataType.DATA_BUFFER);
}
List<BufferIndexAndChannel> toStartSpilling =
HybridShuffleTestUtils.createBufferIndexAndChannelsList(0, 0, 1, 2);
List<BufferWithIdentity> buffers =
subpartitionMemoryDataManager.spillSubpartitionBuffers(
toStartSpilling, spilledDoneFuture);
assertThat(toStartSpilling)
.zipSatisfy(
buffers,
(expected, spilled) -> {
assertThat(expected.getBufferIndex())
.isEqualTo(spilled.getBufferIndex());
assertThat(expected.getChannel()).isEqualTo(spilled.getChannelIndex());
});
List<Integer> expectedValues = Arrays.asList(0, 1, 2);
checkBuffersRefCountAndValue(buffers, Arrays.asList(2, 2, 2), expectedValues);
spilledDoneFuture.complete(null);
checkBuffersRefCountAndValue(buffers, Arrays.asList(1, 1, 1), expectedValues);
} |
public boolean containsValue(final int value)
{
boolean found = false;
final int missingValue = this.missingValue;
if (missingValue != value)
{
final int[] entries = this.entries;
@DoNotSub final int length = entries.length;
@DoNotSub int remaining = size;
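            // keys and values are stored interleaved, so values sit at the odd indices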
for (@DoNotSub int valueIndex = 1; remaining > 0 && valueIndex < length; valueIndex += 2)
{
final int existingValue = entries[valueIndex];
if (missingValue != existingValue)
{
if (existingValue == value)
{
found = true;
break;
}
--remaining;
}
}
}
return found;
} | @Test
void shouldNotContainValueForAMissingEntry()
{
assertFalse(map.containsValue(1));
} |
public void handleLoss(String loss) {
lossHandler.accept(new UnwritableMetadataException(requestedMetadataVersion, loss));
} | @Test
public void testHandleLoss() {
String expectedMessage = "stuff";
for (int i = MetadataVersion.MINIMUM_KRAFT_VERSION.ordinal();
i < MetadataVersion.VERSIONS.length;
i++) {
MetadataVersion version = MetadataVersion.VERSIONS[i];
String formattedMessage = String.format("Metadata has been lost because the following could not be represented in metadata.version %s: %s", version, expectedMessage);
Consumer<UnwritableMetadataException> customLossHandler = e -> assertEquals(formattedMessage, e.getMessage());
ImageWriterOptions options = new ImageWriterOptions.Builder()
.setMetadataVersion(version)
.setLossHandler(customLossHandler)
.build();
options.handleLoss(expectedMessage);
}
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (final Thread.State state : Thread.State.values()) {
gauges.put(name(state.toString().toLowerCase(), "count"),
(Gauge<Object>) () -> getThreadCount(state));
}
gauges.put("count", (Gauge<Integer>) threads::getThreadCount);
gauges.put("daemon.count", (Gauge<Integer>) threads::getDaemonThreadCount);
gauges.put("peak.count", (Gauge<Integer>) threads::getPeakThreadCount);
gauges.put("total_started.count", (Gauge<Long>) threads::getTotalStartedThreadCount);
gauges.put("deadlock.count", (Gauge<Integer>) () -> deadlockDetector.getDeadlockedThreads().size());
gauges.put("deadlocks", (Gauge<Set<String>>) deadlockDetector::getDeadlockedThreads);
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasAGaugeForTheNumberOfDaemonThreads() {
assertThat(((Gauge<?>) gauges.getMetrics().get("daemon.count")).getValue())
.isEqualTo(10);
} |
@Override
public Map<String, Boolean> getProjectUuidToManaged(DbSession dbSession, Set<String> projectUuids) {
return findManagedProjectService()
.map(managedProjectService -> managedProjectService.getProjectUuidToManaged(dbSession, projectUuids))
.orElse(returnNonManagedForAll(projectUuids));
} | @Test
public void getProjectUuidToManaged_delegatesToRightService_andPropagateAnswer() {
Set<String> projectUuids = Set.of("a", "b");
Map<String, Boolean> serviceResponse = Map.of("a", false, "b", true);
ManagedInstanceService anotherManagedProjectService = getManagedProjectService(projectUuids, serviceResponse);
DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), anotherManagedProjectService));
Map<String, Boolean> projectUuidToManaged = managedInstanceService.getProjectUuidToManaged(dbSession, projectUuids);
assertThat(projectUuidToManaged).containsExactlyInAnyOrderEntriesOf(serviceResponse);
} |
public static FormattingTuple arrayFormat(final String messagePattern,
final Object[] argArray) {
if (argArray == null || argArray.length == 0) {
return new FormattingTuple(messagePattern, null);
}
int lastArrIdx = argArray.length - 1;
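        // the last array element is only treated as the tuple's throwable if it is not consumed as a formatting argument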
Object lastEntry = argArray[lastArrIdx];
        Throwable throwable = lastEntry instanceof Throwable ? (Throwable) lastEntry : null;
if (messagePattern == null) {
return new FormattingTuple(null, throwable);
}
int j = messagePattern.indexOf(DELIM_STR);
if (j == -1) {
// this is a simple string
return new FormattingTuple(messagePattern, throwable);
}
StringBuilder sbuf = new StringBuilder(messagePattern.length() + 50);
int i = 0;
int L = 0;
do {
boolean notEscaped = j == 0 || messagePattern.charAt(j - 1) != ESCAPE_CHAR;
if (notEscaped) {
// normal case
sbuf.append(messagePattern, i, j);
} else {
sbuf.append(messagePattern, i, j - 1);
                // check that the escape char is not itself escaped: "abc x:\\{}"
notEscaped = j >= 2 && messagePattern.charAt(j - 2) == ESCAPE_CHAR;
}
i = j + 2;
if (notEscaped) {
deeplyAppendParameter(sbuf, argArray[L], null);
L++;
if (L > lastArrIdx) {
break;
}
} else {
sbuf.append(DELIM_STR);
}
j = messagePattern.indexOf(DELIM_STR, i);
} while (j != -1);
// append the characters following the last {} pair.
sbuf.append(messagePattern, i, messagePattern.length());
        return new FormattingTuple(sbuf.toString(), L <= lastArrIdx ? throwable : null);
} | @Test
public void testArrayThrowable() {
FormattingTuple ft;
Throwable t = new Throwable();
Object[] ia = { 1, 2, 3, t };
ft = MessageFormatter.arrayFormat("Value {} is smaller than {} and {}.", ia);
assertEquals("Value 1 is smaller than 2 and 3.", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("{}{}{}", ia);
assertEquals("123", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("Value {} is smaller than {}.", ia);
assertEquals("Value 1 is smaller than 2.", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("Value {} is smaller than {}", ia);
assertEquals("Value 1 is smaller than 2", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("Val={}, {, Val={}", ia);
assertEquals("Val=1, {, Val=2", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("Val={}, \\{, Val={}", ia);
assertEquals("Val=1, \\{, Val=2", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("Val1={}, Val2={", ia);
assertEquals("Val1=1, Val2={", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("Value {} is smaller than {} and {}.", ia);
assertEquals("Value 1 is smaller than 2 and 3.", ft.getMessage());
assertEquals(t, ft.getThrowable());
ft = MessageFormatter.arrayFormat("{}{}{}{}", ia);
assertEquals("123java.lang.Throwable", ft.getMessage());
assertNull(ft.getThrowable());
} |
public static int[] toArray(List<Integer> list) {
if (list == null) return null;
int[] array = new int[list.size()];
for (int i = 0; i < list.size(); i++) {
array[i] = list.get(i);
}
return array;
} | @Test
public void testToArray() {
assertArrayEquals(new int[] {3, 2, 1}, Replicas.toArray(Arrays.asList(3, 2, 1)));
assertArrayEquals(new int[] {}, Replicas.toArray(Collections.emptyList()));
assertArrayEquals(new int[] {2}, Replicas.toArray(Collections.singletonList(2)));
} |
public boolean isNew(Component component, DefaultIssue issue) {
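    // every issue on a pull request is considered new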
if (analysisMetadataHolder.isPullRequest()) {
return true;
}
if (periodHolder.hasPeriod()) {
if (periodHolder.hasPeriodDate()) {
return periodHolder.getPeriod().isOnPeriod(issue.creationDate());
}
if (isOnBranchUsingReferenceBranch()) {
return hasAtLeastOneLocationOnChangedLines(component, issue);
}
}
return false;
} | @Test
public void isNew_returns_true_for_any_issue_if_pull_request() {
periodHolder.setPeriod(null);
analysisMetadataHolder.setBranch(newPr());
assertThat(newIssueClassifier.isNew(mock(Component.class), mock(DefaultIssue.class))).isTrue();
} |
@Override public HashSlotCursor8byteKey cursor() {
return new Cursor();
} | @Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testCursor_key_withoutAdvance() {
HashSlotCursor8byteKey cursor = hsa.cursor();
cursor.key();
} |
@TargetApi(21)
public String getNoBackupFilesDirectoryPath() {
return Build.VERSION.SDK_INT >= 21 &&
this.context != null
? absPath(this.context.getNoBackupFilesDir())
: "";
} | @Test
public void getNoBackupFilesDirectoryPathIsNotEmpty() {
assertThat(contextUtil.getNoBackupFilesDirectoryPath(), endsWith("/no_backup"));
} |
@Override
protected FieldValue doGet(String fieldName, EventWithContext eventWithContext) {
final ImmutableMap.Builder<String, Object> dataModelBuilder = ImmutableMap.builder();
if (eventWithContext.messageContext().isPresent()) {
dataModelBuilder.put("source", eventWithContext.messageContext().get().getFields());
} else if (eventWithContext.eventContext().isPresent()) {
dataModelBuilder.put("source", eventWithContext.eventContext().get().toDto().fields());
}
final ImmutableMap<String, Object> dataModel = dataModelBuilder.build();
if (!isValidTemplate(config.template(), dataModel)) {
return FieldValue.error();
}
try {
return FieldValue.string(templateEngine.transform(config.template(), dataModel));
} catch (Exception e) {
LOG.error("Couldn't render field template \"{}\"", config.template(), e);
return FieldValue.error();
}
} | @Test
public void templateWithSyntaxError() {
final TestEvent event = new TestEvent();
final EventWithContext eventWithContext = EventWithContext.create(event, newMessage(ImmutableMap.of("hello", "world")));
final FieldValue fieldValue = newTemplate("hello: ${source.hello").doGet("test", eventWithContext);
assertThat(fieldValue.dataType()).isEqualTo(FieldValueType.ERROR);
} |
public MultiMap<Value, T, List<T>> search() {
if (matcher.isNegate()) {
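            // negated match: return everything except entries with the matched key by merging the submaps on either side of it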
if (map.containsKey(matcher.getValue())) {
return MultiMap.merge(map.subMap(map.firstKey(), true,
matcher.getValue(), false),
map.subMap(matcher.getValue(), false,
map.lastKey(), true));
} else {
return map;
}
} else {
return map.subMap(matcher.getValue(), true,
matcher.getValue(), true);
}
} | @Test
void testNullSearch() throws Exception {
search = new ExactMatcherSearch<>(new ExactMatcher(KeyDefinition.newKeyDefinition().withId("value").build(),
null),
map);
MultiMap<Value, Object, List<Object>> search1 = search.search();
assertThat(search1.get(new Value(null)).get(0)).isEqualTo("I am null");
} |
public static String name(final String path) {
if(String.valueOf(Path.DELIMITER).equals(path)) {
return path;
}
if(!StringUtils.contains(path, Path.DELIMITER)) {
return path;
}
if(StringUtils.endsWith(path, String.valueOf(Path.DELIMITER))) {
return StringUtils.substringAfterLast(normalize(path), String.valueOf(Path.DELIMITER));
}
return StringUtils.substringAfterLast(path, String.valueOf(Path.DELIMITER));
} | @Test
public void testNormalizeNameWithBackslash() {
assertEquals("file\\name", PathNormalizer.name("/path/to/file\\name"));
} |
@Override
public void close() throws IOException {
if (mClosed.getAndSet(true)) {
LOG.warn("OBSOutputStream is already closed");
return;
}
mLocalOutputStream.close();
try {
BufferedInputStream in = new BufferedInputStream(
new FileInputStream(mFile));
ObjectMetadata objMeta = new ObjectMetadata();
objMeta.setContentLength(mFile.length());
if (mHash != null) {
byte[] hashBytes = mHash.digest();
objMeta.setContentMd5(new String(Base64.encodeBase64(hashBytes)));
}
mContentHash = mObsClient.putObject(mBucketName, mKey, in, objMeta).getEtag();
} catch (ObsException e) {
LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath());
throw new IOException(e);
} finally {
      // Delete the temporary file on the local machine if the OBS client completed the
      // upload or if the upload failed.
if (!mFile.delete()) {
LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
}
}
} | @Test
@PrepareForTest(OBSOutputStream.class)
public void testCloseError() throws Exception {
String errorMessage = "Invoke the createEmptyObject method error.";
BufferedInputStream inputStream = PowerMockito.mock(BufferedInputStream.class);
PowerMockito.whenNew(BufferedInputStream.class)
.withArguments(any(FileInputStream.class)).thenReturn(inputStream);
PowerMockito
.when(mObsClient.putObject(Mockito.anyString(), Mockito.anyString(),
any(InputStream.class), any(ObjectMetadata.class)))
.thenThrow(new ObsException(errorMessage));
OBSOutputStream stream = new OBSOutputStream("testBucketName", "testKey", mObsClient,
sConf.getList(PropertyKey.TMP_DIRS));
mThrown.expect(IOException.class);
mThrown.expectMessage(errorMessage);
stream.close();
} |
public static <T> Collection<T> intersection(Collection<T> coll1, Collection<T> coll2) {
if (isNotEmpty(coll1) && isNotEmpty(coll2)) {
final ArrayList<T> list = new ArrayList<>(Math.min(coll1.size(), coll2.size()));
final Map<T, Integer> map1 = countMap(coll1);
final Map<T, Integer> map2 = countMap(coll2);
final Set<T> elts = newHashSet(coll2);
int m;
for (T t : elts) {
m = Math.min(Convert.toInt(map1.get(t), 0), Convert.toInt(map2.get(t), 0));
for (int i = 0; i < m; i++) {
list.add(t);
}
}
return list;
}
return new ArrayList<>();
} | @SuppressWarnings("ConstantValue")
@Test
public void intersectionNullTest() {
final List<String> list1 = new ArrayList<>();
list1.add("aa");
final List<String> list2 = new ArrayList<>();
list2.add("aa");
final List<String> list3 = null;
final Collection<String> collection = CollUtil.intersection(list1, list2, list3);
assertNotNull(collection);
} |
@VisibleForTesting
String decorateTarget(String oldTarget, PlayerIndicatorsService.Decorations decorations)
{
String newTarget = oldTarget;
if (decorations.getColor() != null && config.colorPlayerMenu())
{
String prefix = "";
int idx = oldTarget.indexOf("->");
if (idx != -1)
{
prefix = oldTarget.substring(0, idx + 3); // <col=ff9040>Earth rune</col><col=ff> ->
oldTarget = oldTarget.substring(idx + 3);
}
// <col=ff0000>title0RuneLitetitle1<col=ff> (level-126)title2
idx = oldTarget.indexOf('>');
// remove leading <col>
oldTarget = oldTarget.substring(idx + 1);
newTarget = prefix + ColorUtil.prependColorTag(oldTarget, decorations.getColor());
}
FriendsChatRank rank = decorations.getFriendsChatRank();
int image = -1;
if (rank != null && rank != UNRANKED && config.showFriendsChatRanks())
{
image = chatIconManager.getIconNumber(rank);
}
else if (decorations.getClanTitle() != null && config.showClanChatRanks())
{
image = chatIconManager.getIconNumber(decorations.getClanTitle());
}
if (image != -1)
{
newTarget = "<img=" + image + ">" + newTarget;
}
return newTarget;
} | @Test
public void testDecorateTarget()
{
when(playerIndicatorsConfig.colorPlayerMenu()).thenReturn(true);
String t0 = "title0";
String name = "RuneLite";
String t1 = "title1";
String t2 = "title2";
String col = "<col=ff>";
String cmbLevel = col + t0 + name + t1 + col + " (level-126)" + t2;
String skillTotal = col + t0 + name + t1 + " (skill-1234)" + t2;
// widget names contains the col tags
String useCmb = "<col=ff9040>Earth rune</col>" + col + " -> " + cmbLevel;
String useSkill = "<col=ff9040>Earth rune</col>" + col + " -> " + skillTotal;
var deco = new PlayerIndicatorsService.Decorations(null, null, Color.RED);
assertEquals("<col=ff0000>title0RuneLitetitle1<col=ff> (level-126)title2", plugin.decorateTarget(cmbLevel, deco));
assertEquals("<col=ff0000>title0RuneLitetitle1 (skill-1234)title2", plugin.decorateTarget(skillTotal, deco));
assertEquals("<col=ff9040>Earth rune</col><col=ff> -> <col=ff0000>title0RuneLitetitle1<col=ff> (level-126)title2", plugin.decorateTarget(useCmb, deco));
assertEquals("<col=ff9040>Earth rune</col><col=ff> -> <col=ff0000>title0RuneLitetitle1 (skill-1234)title2", plugin.decorateTarget(useSkill, deco));
} |
public Iterator<Entry<String, Optional<MetaProperties>>> nonFailedDirectoryProps() {
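        // yields empty log dirs first (paired with Optional.empty()), then dirs whose MetaProperties loaded; failed dirs are omitted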
return new Iterator<Entry<String, Optional<MetaProperties>>>() {
private final Iterator<String> emptyLogDirsIterator = emptyLogDirs.iterator();
private final Iterator<Entry<String, MetaProperties>> logDirsIterator =
logDirProps.entrySet().iterator();
@Override
public boolean hasNext() {
return emptyLogDirsIterator.hasNext() || logDirsIterator.hasNext();
}
@Override
public Entry<String, Optional<MetaProperties>> next() {
if (emptyLogDirsIterator.hasNext()) {
return new SimpleImmutableEntry<>(emptyLogDirsIterator.next(), Optional.empty());
}
Entry<String, MetaProperties> entry = logDirsIterator.next();
return new SimpleImmutableEntry<>(entry.getKey(), Optional.of(entry.getValue()));
}
};
} | @Test
public void testNonFailedDirectoryPropsForFoo() {
Map<String, Optional<MetaProperties>> results = new HashMap<>();
FOO.nonFailedDirectoryProps().forEachRemaining(entry ->
results.put(entry.getKey(), entry.getValue())
);
assertEquals(Optional.empty(), results.get("/tmp/empty1"));
assertEquals(Optional.empty(), results.get("/tmp/empty2"));
assertNull(results.get("/tmp/error3"));
assertEquals(Optional.of(new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("fooClusterId").
setNodeId(2).
build()), results.get("/tmp/dir4"));
assertEquals(Optional.of(new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("fooClusterId").
setNodeId(2).
build()), results.get("/tmp/dir5"));
assertEquals(4, results.size());
} |
public void incTopicPutNums(final String topic) {
this.statsTable.get(Stats.TOPIC_PUT_NUMS).addValue(topic, 1, 1);
} | @Test
public void testIncTopicPutNums() {
brokerStatsManager.incTopicPutNums(TOPIC);
assertThat(brokerStatsManager.getStatsItem(TOPIC_PUT_NUMS, TOPIC).getTimes().doubleValue()).isEqualTo(1L);
brokerStatsManager.incTopicPutNums(TOPIC, 2, 2);
assertThat(brokerStatsManager.getStatsItem(TOPIC_PUT_NUMS, TOPIC).getValue().doubleValue()).isEqualTo(3L);
} |
public static java.util.Date toLogical(Schema schema, long value) {
if (!(LOGICAL_NAME.equals(schema.name())))
throw new DataException("Requested conversion of Timestamp object but the schema does not match.");
return new java.util.Date(value);
} | @Test
public void testToLogical() {
assertEquals(EPOCH.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, 0L));
assertEquals(EPOCH_PLUS_MILLIS.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, TOTAL_MILLIS));
} |
@NotNull
@Override
public Response intercept(@NotNull Chain chain) throws IOException {
Request request = chain.request().newBuilder().removeHeader("Accept-Encoding").build();
Response response = chain.proceed(request);
if (response.headers("Content-Encoding").contains("gzip")) {
response.close();
}
return response;
} | @Test
public void intercept_whenGzipContentEncodingIncluded_shouldCloseTheResponse() throws IOException {
when(response.headers("Content-Encoding")).thenReturn(List.of("gzip"));
underTest.intercept(chain);
verify(response, times(1)).close();
} |
public boolean get() {
return value;
} | @Test
public void testCompareUnequalWritables() throws Exception {
DataOutputBuffer bTrue = writeWritable(new BooleanWritable(true));
DataOutputBuffer bFalse = writeWritable(new BooleanWritable(false));
WritableComparator writableComparator =
WritableComparator.get(BooleanWritable.class);
assertEquals(0, compare(writableComparator, bTrue, bTrue));
assertEquals(0, compare(writableComparator, bFalse, bFalse));
assertEquals(1, compare(writableComparator, bTrue, bFalse));
assertEquals(-1, compare(writableComparator, bFalse, bTrue));
} |
@Override
protected CouchDbEndpoint createEndpoint(String uri, String remaining, Map<String, Object> params) throws Exception {
CouchDbEndpoint endpoint = new CouchDbEndpoint(uri, remaining, this);
setProperties(endpoint, params);
return endpoint;
} | @Test
void testPropertiesSet() throws Exception {
Map<String, Object> params = new HashMap<>();
params.put("createDatabase", true);
params.put("username", "coldplay");
params.put("password", "chrism");
params.put("heartbeat", "1000");
params.put("style", "gothic");
params.put("deletes", false);
params.put("updates", false);
String uri = "couchdb:http://localhost:14/db";
String remaining = "http://localhost:14/db";
CouchDbEndpoint endpoint
= context.getComponent("couchdb", CouchDbComponent.class).createEndpoint(uri, remaining, params);
assertEquals("http", endpoint.getProtocol());
assertEquals("localhost", endpoint.getHostname());
assertEquals("db", endpoint.getDatabase());
assertEquals("coldplay", endpoint.getUsername());
assertEquals("gothic", endpoint.getStyle());
assertEquals("chrism", endpoint.getPassword());
assertTrue(endpoint.isCreateDatabase());
assertFalse(endpoint.isDeletes());
assertFalse(endpoint.isUpdates());
assertEquals(14, endpoint.getPort());
assertEquals(1000, endpoint.getHeartbeat());
} |
@Override
public T get() throws PromiseException {
return _delegate.get();
} | @Test
public void testGet() {
final Promise<String> delegate = Promises.value("value");
final Promise<String> promise = new DelegatingPromise<String>(delegate);
assertEquals(delegate.get(), promise.get());
assertEquals(delegate.isDone(), promise.isDone());
} |
public JSONObject accumulate(String key, Object value) throws JSONException {
InternalJSONUtil.testValidity(value);
Object object = this.getObj(key);
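        // a first value is stored directly; accumulating onto an existing key promotes the entry to a JSONArray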
if (object == null) {
this.set(key, value);
} else if (object instanceof JSONArray) {
((JSONArray) object).set(value);
} else {
this.set(key, JSONUtil.createArray(this.config).set(object).set(value));
}
return this;
} | @Test
public void accumulateTest() {
final JSONObject jsonObject = JSONUtil.createObj().accumulate("key1", "value1");
assertEquals("{\"key1\":\"value1\"}", jsonObject.toString());
jsonObject.accumulate("key1", "value2");
assertEquals("{\"key1\":[\"value1\",\"value2\"]}", jsonObject.toString());
jsonObject.accumulate("key1", "value3");
assertEquals("{\"key1\":[\"value1\",\"value2\",\"value3\"]}", jsonObject.toString());
} |
@VisibleForTesting
void parseWorkflowParameter(
Map<String, Parameter> workflowParams, Parameter param, String workflowId) {
parseWorkflowParameter(workflowParams, param, workflowId, new HashSet<>());
} | @Test
public void testParseLiteralWorkflowParameter() {
StringParameter bar = StringParameter.builder().name("bar").value("test $foo-1").build();
paramEvaluator.parseWorkflowParameter(
Collections.singletonMap("foo", LongParameter.builder().expression("1+2+3;").build()),
bar,
"test-workflow");
assertEquals("test 6-1", bar.getEvaluatedResult());
bar = StringParameter.builder().name("bar").value("test $foo-1").build();
paramEvaluator.parseWorkflowParameter(
Collections.singletonMap(
"foo", LongParameter.builder().evaluatedResult(6L).evaluatedTime(123L).build()),
bar,
"test-workflow");
assertEquals("test 6-1", bar.getEvaluatedResult());
} |
public Input saveInput(AWSInputCreateRequest request, User user) throws Exception {
    // Transpose the AWSInputCreateRequest to the needed InputCreateRequest
final HashMap<String, Object> configuration = new HashMap<>();
configuration.put(AWSCodec.CK_AWS_MESSAGE_TYPE, request.awsMessageType());
configuration.put(ThrottleableTransport.CK_THROTTLING_ALLOWED, request.throttlingAllowed());
configuration.put(AWSCodec.CK_FLOW_LOG_PREFIX, request.addFlowLogPrefix());
configuration.put(AWSInput.CK_AWS_REGION, request.region());
configuration.put(AWSInput.CK_ACCESS_KEY, request.awsAccessKeyId());
configuration.put(AWSInput.CK_SECRET_KEY, request.awsSecretAccessKey());
configuration.put(AWSInput.CK_ASSUME_ROLE_ARN, request.assumeRoleArn());
configuration.put(AWSInput.CK_CLOUDWATCH_ENDPOINT, request.cloudwatchEndpoint());
configuration.put(AWSInput.CK_DYNAMODB_ENDPOINT, request.dynamodbEndpoint());
configuration.put(AWSInput.CK_IAM_ENDPOINT, request.iamEndpoint());
configuration.put(AWSInput.CK_KINESIS_ENDPOINT, request.kinesisEndpoint());
AWSMessageType inputType = AWSMessageType.valueOf(request.awsMessageType());
if (inputType.isKinesis()) {
configuration.put(KinesisTransport.CK_KINESIS_STREAM_NAME, request.streamName());
configuration.put(KinesisTransport.CK_KINESIS_RECORD_BATCH_SIZE, request.batchSize());
} else {
throw new Exception("The specified input type is not supported.");
}
// Create and save the input.
final InputCreateRequest inputCreateRequest = InputCreateRequest.create(request.name(),
AWSInput.TYPE,
true,
configuration,
nodeId.getNodeId());
try {
final MessageInput messageInput = messageInputFactory.create(inputCreateRequest, user.getName(), nodeId.getNodeId());
messageInput.checkConfiguration();
final Input input = this.inputService.create(messageInput.asMap());
final String newInputId = inputService.save(input);
LOG.debug("New AWS input created. id [{}] request [{}]", newInputId, request);
return input;
} catch (NoSuchInputTypeException e) {
LOG.error("There is no such input type registered.", e);
throw new NotFoundException("There is no such input type registered.", e);
} catch (ConfigurationException e) {
LOG.error("Missing or invalid input configuration.", e);
throw new BadRequestException("Missing or invalid input configuration.", e);
}
} | @Test
public void testSaveInput() throws Exception {
when(inputService.create(isA(HashMap.class))).thenCallRealMethod();
when(inputService.save(isA(Input.class))).thenReturn("input-id");
when(user.getName()).thenReturn("a-user-name");
when(messageInputFactory.create(isA(InputCreateRequest.class), isA(String.class), isA(String.class))).thenReturn(messageInput);
AWSInputCreateRequest request =
AWSInputCreateRequest.builder().region(Region.US_EAST_1.id())
.awsAccessKeyId("a-key")
.awsSecretAccessKey(encryptedValue)
.name("AWS Input")
.awsMessageType(AWSMessageType.KINESIS_CLOUDWATCH_FLOW_LOGS.toString())
.streamName("a-stream")
.batchSize(10000)
.addFlowLogPrefix(true)
.throttlingAllowed(true)
.build();
awsService.saveInput(request, user);
// Verify that inputService received a valid input to save.
final ArgumentCaptor<InputCreateRequest> argumentCaptor = ArgumentCaptor.forClass(InputCreateRequest.class);
verify(messageInputFactory, times(1)).create(argumentCaptor.capture(), eq("a-user-name"), eq("5ca1ab1e-0000-4000-a000-000000000000"));
// Just verify that the input create request was prepared correctly. This verifies the important argument
// transposition logic.
// It's too hard to mock the full inputService.save process, so we are not going to check the final resulting input.
InputCreateRequest input = argumentCaptor.getValue();
assertEquals("AWS Input", input.title());
assertEquals(AWSInput.TYPE, input.type());
assertTrue(input.global());
assertEquals("us-east-1", input.configuration().get(AWSInput.CK_AWS_REGION));
assertEquals("KINESIS_CLOUDWATCH_FLOW_LOGS", input.configuration().get(AWSCodec.CK_AWS_MESSAGE_TYPE));
assertEquals("a-key", input.configuration().get(AWSInput.CK_ACCESS_KEY));
assertEquals(encryptedValue, input.configuration().get(AWSInput.CK_SECRET_KEY));
assertEquals("us-east-1", input.configuration().get(AWSInput.CK_AWS_REGION));
assertEquals("a-stream", input.configuration().get(KinesisTransport.CK_KINESIS_STREAM_NAME));
assertEquals(10000, input.configuration().get(KinesisTransport.CK_KINESIS_RECORD_BATCH_SIZE));
} |
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
} | @Test
public void testShowComputeNodesSharedData(@Mocked StarOSAgent starosAgent) throws AnalysisException, DdlException {
SystemInfoService clusterInfo = AccessTestUtil.fetchSystemInfoService();
ComputeNode node = new ComputeNode(1L, "127.0.0.1", 80);
node.setCpuCores(16);
node.setMemLimitBytes(100L);
node.updateResourceUsage(10, 1L, 30);
TDataCacheMetrics tDataCacheMetrics = new TDataCacheMetrics();
tDataCacheMetrics.setStatus(TDataCacheStatus.NORMAL);
tDataCacheMetrics.setDisk_quota_bytes(1024 * 1024 * 1024);
tDataCacheMetrics.setMem_quota_bytes(1024 * 1024 * 1024);
node.updateDataCacheMetrics(DataCacheMetrics.buildFromThrift(tDataCacheMetrics));
node.setAlive(true);
clusterInfo.addComputeNode(node);
NodeMgr nodeMgr = new NodeMgr();
new Expectations(nodeMgr) {
{
nodeMgr.getClusterInfo();
minTimes = 0;
result = clusterInfo;
}
};
WarehouseManager warehouseManager = new WarehouseManager();
warehouseManager.initDefaultWarehouse();
new Expectations(globalStateMgr) {
{
globalStateMgr.getNodeMgr();
minTimes = 0;
result = nodeMgr;
globalStateMgr.getStarOSAgent();
minTimes = 0;
result = starosAgent;
globalStateMgr.getWarehouseMgr();
minTimes = 0;
result = warehouseManager;
}
};
new MockUp<RunMode>() {
@Mock
RunMode getCurrentRunMode() {
return RunMode.SHARED_DATA;
}
};
long tabletNum = 1024;
new Expectations() {
{
starosAgent.getWorkerTabletNum(anyString);
minTimes = 0;
result = tabletNum;
}
};
ShowComputeNodesStmt stmt = new ShowComputeNodesStmt();
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertEquals(ComputeNodeProcDir.TITLE_NAMES_SHARED_DATA.size(),
resultSet.getMetaData().getColumnCount());
for (int i = 0; i < ComputeNodeProcDir.TITLE_NAMES_SHARED_DATA.size(); ++i) {
Assert.assertEquals(ComputeNodeProcDir.TITLE_NAMES_SHARED_DATA.get(i),
resultSet.getMetaData().getColumn(i).getName());
}
Assert.assertTrue(resultSet.next());
Assert.assertEquals("16", resultSet.getString(13)); // CpuCores
Assert.assertEquals("100.000B", resultSet.getString(14)); // MemLimit
Assert.assertEquals("10", resultSet.getString(15));
Assert.assertEquals("1.00 %", resultSet.getString(16));
Assert.assertEquals("3.0 %", resultSet.getString(17));
Assert.assertEquals("Status: Normal, DiskUsage: 0B/1GB, MemUsage: 0B/1GB", resultSet.getString(18));
Assert.assertEquals("OK", resultSet.getString(20));
Assert.assertEquals(String.valueOf(tabletNum), resultSet.getString(24));
} |
public long incrementAndGet() {
return getAndAddVal(1L) + 1L;
} | @Test
public void testIncrementAndGet() {
PaddedAtomicLong counter = new PaddedAtomicLong();
long value = counter.incrementAndGet();
assertEquals(1L, value);
assertEquals(1L, counter.get());
} |
public Exchange createDbzExchange(DebeziumConsumer consumer, final SourceRecord sourceRecord) {
final Exchange exchange;
if (consumer != null) {
exchange = consumer.createExchange(false);
} else {
exchange = super.createExchange();
}
final Message message = exchange.getIn();
final Schema valueSchema = sourceRecord.valueSchema();
final Object value = sourceRecord.value();
// extract values from SourceRecord
final Map<String, Object> sourceMetadata = extractSourceMetadataValueFromValueStruct(valueSchema, value);
final Object operation = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.OPERATION);
final Object before = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.BEFORE);
final Object body = extractBodyValueFromValueStruct(valueSchema, value);
final Object timestamp = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.TIMESTAMP);
final Object ddl = extractValueFromValueStruct(valueSchema, value, HistoryRecord.Fields.DDL_STATEMENTS);
// set message headers
message.setHeader(DebeziumConstants.HEADER_IDENTIFIER, sourceRecord.topic());
message.setHeader(DebeziumConstants.HEADER_KEY, sourceRecord.key());
message.setHeader(DebeziumConstants.HEADER_SOURCE_METADATA, sourceMetadata);
message.setHeader(DebeziumConstants.HEADER_OPERATION, operation);
message.setHeader(DebeziumConstants.HEADER_BEFORE, before);
message.setHeader(DebeziumConstants.HEADER_TIMESTAMP, timestamp);
message.setHeader(DebeziumConstants.HEADER_DDL_SQL, ddl);
message.setHeader(Exchange.MESSAGE_TIMESTAMP, timestamp);
message.setBody(body);
return exchange;
} | @Test
void testIfCreatesExchangeFromSourceDeleteRecordWithNull() {
final SourceRecord sourceRecord = createDeleteRecordWithNull();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
final Struct key = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_KEY);
assertEquals(12345, key.getInt32("id").intValue());
// assert value
final Struct body = (Struct) inMessage.getBody();
assertNull(body);
} |
public Data getKeyData() {
if (keyData == null && serializationService != null) {
keyData = serializationService.toData(key);
}
return keyData;
} | @Test
public void testGetKeyData_withDataKey() {
assertEquals(toData("key"), dataEvent.getKeyData());
} |
public static ItemSpec<String> item(String key, @Nullable String value) {
return item(key, Type.STRING, value);
} | @Test
public void testCanSerializeItemSpecReference() {
DisplayData.ItemSpec<?> spec = DisplayData.item("clazz", DisplayDataTest.class);
SerializableUtils.ensureSerializable(new HoldsItemSpecReference(spec));
} |
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
} | @Test
public void testShowColumn() throws AnalysisException, DdlException {
ctx.setGlobalStateMgr(globalStateMgr);
ctx.setQualifiedUser("testUser");
ShowColumnStmt stmt = (ShowColumnStmt) com.starrocks.sql.parser.SqlParser.parse("show columns from testTbl in testDb",
ctx.getSessionVariable()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, ctx);
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col1", resultSet.getString(0));
Assert.assertEquals("NO", resultSet.getString(2));
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col2", resultSet.getString(0));
Assert.assertFalse(resultSet.next());
// verbose
stmt = (ShowColumnStmt) com.starrocks.sql.parser.SqlParser.parse("show full columns from testTbl in testDb",
ctx.getSessionVariable()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, ctx);
resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col1", resultSet.getString(0));
Assert.assertEquals("NO", resultSet.getString(3));
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col2", resultSet.getString(0));
Assert.assertEquals("NO", resultSet.getString(3));
Assert.assertFalse(resultSet.next());
// show full fields
stmt = (ShowColumnStmt) com.starrocks.sql.parser.SqlParser.parse("show full fields from testTbl in testDb",
ctx.getSessionVariable()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, ctx);
resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col1", resultSet.getString(0));
Assert.assertEquals("NO", resultSet.getString(3));
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col2", resultSet.getString(0));
Assert.assertEquals("NO", resultSet.getString(3));
Assert.assertFalse(resultSet.next());
// pattern
stmt = (ShowColumnStmt) com.starrocks.sql.parser.SqlParser.parse("show full columns from testTbl in testDb like \"%1\"",
ctx.getSessionVariable().getSqlMode()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, ctx);
resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertTrue(resultSet.next());
Assert.assertEquals("col1", resultSet.getString(0));
Assert.assertEquals("NO", resultSet.getString(3));
Assert.assertFalse(resultSet.next());
} |
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
return createStreamExecutionEnvironment(
options,
MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
options.getFlinkConfDir());
} | @Test
public void shouldInferParallelismFromEnvironmentStreaming() throws IOException {
String confDir = extractFlinkConfig();
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setRunner(TestFlinkRunner.class);
options.setFlinkMaster("host:80");
StreamExecutionEnvironment sev =
FlinkExecutionEnvironments.createStreamExecutionEnvironment(
options, Collections.emptyList(), confDir);
assertThat(options.getParallelism(), is(23));
assertThat(sev.getParallelism(), is(23));
} |
void add(final long recordingId, final long recordingDescriptorOffset)
{
ensurePositive(recordingId, "recordingId");
ensurePositive(recordingDescriptorOffset, "recordingDescriptorOffset");
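        // each index entry spans two slots: the recordingId followed by its recordingDescriptorOffset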
final int nextPosition = count << 1;
long[] index = this.index;
if (nextPosition > 0)
{
if (recordingId <= index[nextPosition - 2])
{
throw new IllegalArgumentException("recordingId " + recordingId +
" is less than or equal to the last recordingId " + index[nextPosition - 2]);
}
if (nextPosition == index.length)
{
index = expand(index);
this.index = index;
}
}
index[nextPosition] = recordingId;
index[nextPosition + 1] = recordingDescriptorOffset;
count++;
} | @Test
void addThrowsIllegalArgumentExceptionIfRecordingIdIsNegative()
{
assertThrows(IllegalArgumentException.class, () -> catalogIndex.add(-1, 0));
} |
public HollowOrdinalIterator findKeysWithPrefix(String prefix) {
TST current;
HollowOrdinalIterator it;
do {
current = prefixIndexVolatile;
it = current.findKeysWithPrefix(prefix);
} while (current != this.prefixIndexVolatile);
return it;
} | @Test
public void testListReference() throws Exception {
MovieListReference movieListReference = new MovieListReference(1, 1999, "The Matrix", Arrays.asList("Keanu Reeves", "Laurence Fishburne", "Carrie-Anne Moss"));
objectMapper.add(movieListReference);
StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
HollowPrefixIndex prefixIndex = new HollowPrefixIndex(readStateEngine, "MovieListReference", "actors.element.value");
Set<Integer> ordinals = toSet(prefixIndex.findKeysWithPrefix("kea"));
        Assert.assertEquals(1, ordinals.size());
} |
@Override
public <K, V> ICache<K, V> getCache(String name) {
checkNotNull(name, "Retrieving a cache instance with a null name is not allowed!");
return getCacheByFullName(HazelcastCacheManager.CACHE_MANAGER_PREFIX + name);
} | @Test
public void getCache_when_clientServiceNotFoundExceptionIsThrown_then_illegalStateExceptionIsThrown() {
// when ClientServiceNotFoundException was thrown by hzInstance.getDistributedObject
// (i.e. cache support is not available on the client-side)
HazelcastInstance hzInstance = mock(HazelcastInstance.class);
when(hzInstance.getDistributedObject(anyString(), anyString()))
.thenThrow(new ClientServiceNotFoundException("mock exception"));
ClientICacheManager clientCacheManager = new ClientICacheManager(hzInstance);
// then an IllegalStateException will be thrown by getCache
assertThrows(IllegalStateException.class, () -> clientCacheManager.getCache("any-cache"));
} |
public Promise<Void> gracefullyShutdownClientChannels() {
return gracefullyShutdownClientChannels(ShutdownType.SHUTDOWN);
} | @Test
void connectionsNotForceClosed() throws Exception {
String configName = "server.outofservice.close.timeout";
AbstractConfiguration configuration = ConfigurationManager.getConfigInstance();
DefaultEventLoop eventLoop = spy(EVENT_LOOP);
shutdown = new ClientConnectionsShutdown(channels, eventLoop, null);
try {
configuration.setProperty(configName, "0");
createChannels(10);
Promise<Void> promise = shutdown.gracefullyShutdownClientChannels(ShutdownType.OUT_OF_SERVICE);
verify(eventLoop, never()).schedule(isA(Runnable.class), anyLong(), isA(TimeUnit.class));
channels.forEach(Channel::close);
promise.await(10, TimeUnit.SECONDS);
assertTrue(channels.isEmpty(), "All channels in group should have been closed");
} finally {
configuration.setProperty(configName, "30");
}
} |
@Override
public void check(Thread currentThread) throws CeTaskInterruptedException {
super.check(currentThread);
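        // After the base interruption check, fail the task if its configured timeout has elapsed.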
computeTimeOutOf(taskOf(currentThread))
.ifPresent(timeout -> {
throw new CeTaskTimeoutException(format("Execution of task timed out after %s ms", timeout));
});
} | @Test
public void check_fails_with_ISE_if_thread_is_not_running_a_CeWorker() {
Thread t = newThreadWithRandomName();
assertThatThrownBy(() -> underTest.check(t))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Could not find the CeTask being executed in thread '" + t.getName() + "'");
} |
public JdbcUrl parse(final String jdbcUrl) {
Matcher matcher = CONNECTION_URL_PATTERN.matcher(jdbcUrl);
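        // '%' is escaped to '%%' so the pattern can be safely embedded in the formatted error message.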
ShardingSpherePreconditions.checkState(matcher.matches(), () -> new UnrecognizedDatabaseURLException(jdbcUrl, CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%")));
String authority = matcher.group(AUTHORITY_GROUP_KEY);
ShardingSpherePreconditions.checkNotNull(authority, () -> new UnrecognizedDatabaseURLException(jdbcUrl, CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%")));
return new JdbcUrl(parseHostname(authority), parsePort(authority), matcher.group(PATH_GROUP_KEY), parseQueryProperties(matcher.group(QUERY_GROUP_KEY)));
} | @Test
void assertParseTestContainersJDBCUrl() {
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:mysql:5.7.34:///demo_ds").getDatabase(), is("demo_ds"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:postgresql:9.6.8:///demo_ds").getDatabase(), is("demo_ds"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:postgis:9.6-2.5:///demo_ds").getDatabase(), is("demo_ds"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:timescaledb:2.1.0-pg13:///demo_ds").getDatabase(), is("demo_ds"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:trino:352://localhost/memory/default").getDatabase(), is("memory/default"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:cockroach:v21.2.3:///demo_ds").getDatabase(), is("demo_ds"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:tidb:v6.1.0:///demo_ds").getDatabase(), is("demo_ds"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:mysql:5.7.34:///demo_ds?TC_INITSCRIPT=somepath/init_mysql.sql")
.getQueryProperties().getProperty("TC_INITSCRIPT"), is("somepath/init_mysql.sql"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:mysql:5.7.34:///demo_ds?TC_INITSCRIPT=file:src/main/resources/init_mysql.sql")
.getQueryProperties().getProperty("TC_INITSCRIPT"), is("file:src/main/resources/init_mysql.sql"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:mysql:5.7.34:///demo_ds?TC_INITFUNCTION=org.testcontainers.jdbc.JDBCDriverTest::sampleInitFunction")
.getQueryProperties().getProperty("TC_INITFUNCTION"), is("org.testcontainers.jdbc.JDBCDriverTest::sampleInitFunction"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:mysql:5.7.34:///demo_ds?TC_DAEMON=true").getQueryProperties().getProperty("TC_DAEMON"), is("true"));
assertThat(new StandardJdbcUrlParser().parse("jdbc:tc:postgresql:9.6.8:///demo_ds?TC_TMPFS=/testtmpfs:rw").getQueryProperties().getProperty("TC_TMPFS"), is("/testtmpfs:rw"));
} |
@Override
public int compareTo(COSObjectKey other)
{
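        // Object number and generation are packed into a single long, so one
        // comparison yields a total order over keys.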
return Long.compare(numberAndGeneration, other.numberAndGeneration);
} | @Test
void compareToInputNotNullOutputZero()
{
// Arrange
final COSObjectKey objectUnderTest = new COSObjectKey(1L, 0);
final COSObjectKey other = new COSObjectKey(1L, 0);
// Act
final int retval = objectUnderTest.compareTo(other);
// Assert result
assertEquals(0, retval);
} |
@Override
public AuditReplayCommand parse(Text inputLine,
Function<Long, Long> relativeToAbsolute) throws IOException {
Matcher m = logLineParseRegex.matcher(inputLine.toString());
if (!m.find()) {
throw new IOException(
"Unable to find valid message pattern from audit log line: `"
+ inputLine + "` using regex `" + logLineParseRegex + "`");
}
long relativeTimestamp;
try {
relativeTimestamp = dateFormat.parse(m.group("timestamp")).getTime()
- startTimestamp;
} catch (ParseException p) {
throw new IOException(
"Exception while parsing timestamp from audit log line: `"
+ inputLine + "`", p);
}
// Sanitize the = in the rename options field into a : so we can split on =
String auditMessageSanitized =
m.group("message").replace("(options=", "(options:");
    Map<String, String> parameterMap = new HashMap<>();
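    // Audit messages are tab-separated key=value fields (e.g. ugi, cmd, src, dst, ip).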
String[] auditMessageSanitizedList = auditMessageSanitized.split("\t");
for (String auditMessage : auditMessageSanitizedList) {
String[] splitMessage = auditMessage.split("=", 2);
try {
parameterMap.put(splitMessage[0], splitMessage[1]);
} catch (ArrayIndexOutOfBoundsException e) {
throw new IOException(
"Exception while parsing a message from audit log line: `"
+ inputLine + "`", e);
}
}
return new AuditReplayCommand(relativeToAbsolute.apply(relativeTimestamp),
// Split the UGI on space to remove the auth and proxy portions of it
SPACE_SPLITTER.split(parameterMap.get("ugi")).iterator().next(),
parameterMap.get("cmd").replace("(options:", "(options="),
parameterMap.get("src"), parameterMap.get("dst"),
parameterMap.get("ip"));
} | @Test
public void testInputWithRenameOptions() throws Exception {
Text in = getAuditString("1970-01-01 00:00:11,000", "fakeUser",
"rename (options=[TO_TRASH])", "sourcePath", "destPath");
AuditReplayCommand expected = new AuditReplayCommand(1000, "fakeUser",
"rename (options=[TO_TRASH])", "sourcePath", "destPath", "0.0.0.0");
assertEquals(expected, parser.parse(in, Function.identity()));
} |
@Override
public long getCleanTasksDelay() {
return CANCEL_WORN_OUTS_DELAY;
} | @Test
public void getCleanCeTasksDelay_returns_2() {
assertThat(new CeConfigurationImpl(EMPTY_CONFIGURATION).getCleanTasksDelay())
.isEqualTo(2L);
workerCountProvider.set(1);
assertThat(new CeConfigurationImpl(EMPTY_CONFIGURATION, workerCountProvider).getCleanTasksDelay())
.isEqualTo(2L);
} |
@Override
public List<DeptDO> getDeptList(Collection<Long> ids) {
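        // Short-circuit on an empty id collection instead of issuing a pointless batch query.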
if (CollUtil.isEmpty(ids)) {
return Collections.emptyList();
}
return deptMapper.selectBatchIds(ids);
} | @Test
public void testGetDeptList_reqVO() {
    // mock data
    DeptDO dept = randomPojo(DeptDO.class, o -> { // will be matched by the query below
        o.setName("开发部");
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    deptMapper.insert(dept);
    // this record's name does not match
    deptMapper.insert(ObjectUtils.cloneIgnoreId(dept, o -> o.setName("发")));
    // this record's status does not match
    deptMapper.insert(ObjectUtils.cloneIgnoreId(dept, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // prepare parameters
    DeptListReqVO reqVO = new DeptListReqVO();
    reqVO.setName("开");
    reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
    // invoke
    List<DeptDO> sysDeptDOS = deptService.getDeptList(reqVO);
    // assert
    assertEquals(1, sysDeptDOS.size());
    assertPojoEquals(dept, sysDeptDOS.get(0));
}
@Override
void handle(Connection connection, DatabaseCharsetChecker.State state) throws SQLException {
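        // The default collation must always be case- and accent-sensitive; column
        // repair additionally runs on upgrade and startup.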
expectCaseSensitiveDefaultCollation(connection);
if (state == DatabaseCharsetChecker.State.UPGRADE || state == DatabaseCharsetChecker.State.STARTUP) {
repairColumns(connection);
}
} | @Test
public void upgrade_repairs_indexed_CI_AI_columns() throws SQLException {
answerDefaultCollation("Latin1_General_CS_AS");
answerColumnDefs(
new ColumnDef(TABLE_ISSUES, COLUMN_KEE, "Latin1_General", "Latin1_General_CS_AS", "varchar", 10, false),
new ColumnDef(TABLE_PROJECTS, COLUMN_NAME, "Latin1_General", "Latin1_General_CI_AI", "varchar", 10, false));
answerIndices(
new MssqlCharsetHandler.ColumnIndex("projects_name", false, "name"),
        // This index covers two columns. Note that it would not actually make sense on the "projects" table!
new MssqlCharsetHandler.ColumnIndex("projects_login_and_name", true, "login,name"));
underTest.handle(connection, DatabaseCharsetChecker.State.UPGRADE);
verify(sqlExecutor).executeDdl(connection, "DROP INDEX projects.projects_name");
verify(sqlExecutor).executeDdl(connection, "DROP INDEX projects.projects_login_and_name");
verify(sqlExecutor).executeDdl(connection, "ALTER TABLE projects ALTER COLUMN name varchar(10) COLLATE Latin1_General_CS_AS NOT NULL");
verify(sqlExecutor).executeDdl(connection, "CREATE INDEX projects_name ON projects (name)");
verify(sqlExecutor).executeDdl(connection, "CREATE UNIQUE INDEX projects_login_and_name ON projects (login,name)");
} |