focal_method | test_case
---|---
@Entrance
public final void combine(@SourceFrom long count) {
if (count < this.value) {
this.value = count;
}
} | @Test
public void testSelfCombine() {
MinLongMetricsImpl impl = new MinLongMetricsImpl();
impl.combine(10);
impl.combine(5);
MinLongMetricsImpl impl2 = new MinLongMetricsImpl();
impl2.combine(2);
impl2.combine(6);
impl.combine(impl2);
Assertions.assertEquals(2, impl.getValue());
} |
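A minimal standalone sketch (a hypothetical class, not part of the dataset) of the same pattern: `combine()` folds a new observation into the running minimum, and an instance-level overload like the one exercised by `impl.combine(impl2)` can reuse the scalar one.

```java
// Hypothetical MinLongMetric: merging keeps the smaller value, so combining
// per-partition minimums yields the global minimum.
public class MinLongMetric {
    private long value = Long.MAX_VALUE;

    public void combine(long count) {
        if (count < value) {
            value = count;
        }
    }

    // Instance overload, analogous to the impl.combine(impl2) call in the test.
    public void combine(MinLongMetric other) {
        combine(other.value);
    }

    public long getValue() {
        return value;
    }
}
```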
public static Function getFunctionOfRound(FunctionCallExpr node, Function fn, List<Type> argumentTypes) {
return getFunctionOfRound(node.getParams(), fn, argumentTypes);
} | @Test
public void testGetFnOfTruncateForDecimalAndIntLiteral() {
List<Expr> params = Lists.newArrayList();
params.add(new DecimalLiteral(new BigDecimal(new BigInteger("1845076"), 2)));
params.add(new IntLiteral(1));
FunctionCallExpr node = new FunctionCallExpr(FunctionSet.TRUNCATE, params);
List<Type> paramTypes = Lists.newArrayList();
paramTypes.add(ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL32, 7, 2));
paramTypes.add(Type.TINYINT);
Function function = Expr.getBuiltinFunction(FunctionSet.TRUNCATE, paramTypes.toArray(new Type[0]),
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
Assert.assertNotNull(function);
Function newFn = DecimalV3FunctionAnalyzer.getFunctionOfRound(node, function, paramTypes);
Type returnType = newFn.getReturnType();
Assert.assertTrue(returnType.isDecimalV3());
Assert.assertEquals(Integer.valueOf(38), returnType.getPrecision());
} |
public Optional<ByteBuffer> dequeue() throws QueueException {
if (!currentHeadPtr.isGreaterThan(currentTailPtr)) {
if (currentTailPtr.isGreaterThan(currentHeadPtr)) {
// sanity check
throw new QueueException("Current tail " + currentTailPtr + " is ahead of head " + currentHeadPtr);
}
// head and tail pointers are the same, the queue is empty
return Optional.empty();
}
if (tailSegment == null) {
tailSegment = queuePool.openNextTailSegment(name).get();
}
LOG.debug("currentTail is {}", currentTailPtr);
if (containsHeader(tailSegment, currentTailPtr)) {
// currentSegment contains at least the header (payload length)
final VirtualPointer existingTail;
if (isTailFirstUsage(currentTailPtr)) {
// move to the first readable byte
existingTail = currentTailPtr.plus(1);
} else {
existingTail = currentTailPtr.copy();
}
final int payloadLength = tailSegment.readHeader(existingTail);
// tail must be moved to the next byte to read, so has to move to
// header size + payload size + 1
final int fullMessageSize = payloadLength + LENGTH_HEADER_SIZE;
long remainingInSegment = tailSegment.bytesAfter(existingTail) + 1;
if (remainingInSegment > fullMessageSize) {
// tail segment fully contains the payload with space left over
currentTailPtr = existingTail.moveForward(fullMessageSize);
// read data from currentTail + 4 bytes(the length)
final VirtualPointer dataStart = existingTail.moveForward(LENGTH_HEADER_SIZE);
return Optional.of(readData(tailSegment, dataStart, payloadLength));
} else {
// payload is split across currentSegment and next ones
VirtualPointer dataStart = existingTail.moveForward(LENGTH_HEADER_SIZE);
if (remainingInSegment - LENGTH_HEADER_SIZE == 0) {
queuePool.consumedTailSegment(name);
if (QueuePool.queueDebug) {
tailSegment.fillWith((byte) 'D');
}
tailSegment = queuePool.openNextTailSegment(name).get();
}
LOG.debug("Loading payload size {}", payloadLength);
return Optional.of(loadPayloadFromSegments(payloadLength, tailSegment, dataStart));
}
} else {
// header is split across 2 segments
// the currentSegment is still the tailSegment
// read the length header that's crossing 2 segments
final CrossSegmentHeaderResult result = decodeCrossHeader(tailSegment, currentTailPtr);
// load all payload parts from the segments
LOG.debug("Loading payload size {}", result.payloadLength);
return Optional.of(loadPayloadFromSegments(result.payloadLength, result.segment, result.pointer));
}
} | @Test
public void readFromEmptyQueue() throws QueueException {
final QueuePool queuePool = QueuePool.loadQueues(tempQueueFolder, PAGE_SIZE, SEGMENT_SIZE);
final Queue queue = queuePool.getOrCreate("test");
assertFalse(queue.dequeue().isPresent(), "Pulling from empty queue MUST return empty Optional");
} |
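The pointer arithmetic in `dequeue()` follows standard length-prefixed framing: each message is a 4-byte length header followed by the payload, and the read pointer advances by header plus payload. A self-contained sketch of that framing (all names hypothetical, not the Moquette types):

```java
import java.nio.ByteBuffer;

// Length-prefixed framing: write a 4-byte length header, then the payload;
// the next read position is header size + payload size further along.
public class FramingSketch {
    static final int LENGTH_HEADER_SIZE = 4;

    public static void main(String[] args) {
        ByteBuffer segment = ByteBuffer.allocate(64);
        byte[] payload = "hello".getBytes();
        segment.putInt(payload.length);   // header: payload length
        segment.put(payload);             // payload bytes
        segment.flip();

        int payloadLength = segment.getInt();   // read header
        byte[] out = new byte[payloadLength];
        segment.get(out);                       // read payload
        int fullMessageSize = payloadLength + LENGTH_HEADER_SIZE;
        System.out.println(new String(out) + ", tail advances by " + fullMessageSize);
    }
}
```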
@Override
public OUT nextRecord(OUT record) throws IOException {
OUT returnRecord = null;
do {
returnRecord = super.nextRecord(record);
} while (returnRecord == null && !reachedEnd());
return returnRecord;
} | @Test
void testIntegerFields() {
try {
final String fileContent = "111|222|333|444|555\n666|777|888|999|000|\n";
final FileInputSplit split = createTempFile(fileContent);
final TupleTypeInfo<Tuple5<Integer, Integer, Integer, Integer, Integer>> typeInfo =
TupleTypeInfo.getBasicTupleTypeInfo(
Integer.class,
Integer.class,
Integer.class,
Integer.class,
Integer.class);
final CsvInputFormat<Tuple5<Integer, Integer, Integer, Integer, Integer>> format =
new TupleCsvInputFormat<Tuple5<Integer, Integer, Integer, Integer, Integer>>(
PATH, typeInfo);
format.setFieldDelimiter("|");
format.configure(new Configuration());
format.open(split);
Tuple5<Integer, Integer, Integer, Integer, Integer> result = new Tuple5<>();
result = format.nextRecord(result);
assertThat(result).isNotNull();
assertThat(result.f0).isEqualTo(Integer.valueOf(111));
assertThat(result.f1).isEqualTo(Integer.valueOf(222));
assertThat(result.f2).isEqualTo(Integer.valueOf(333));
assertThat(result.f3).isEqualTo(Integer.valueOf(444));
assertThat(result.f4).isEqualTo(Integer.valueOf(555));
result = format.nextRecord(result);
assertThat(result).isNotNull();
assertThat(result.f0).isEqualTo(Integer.valueOf(666));
assertThat(result.f1).isEqualTo(Integer.valueOf(777));
assertThat(result.f2).isEqualTo(Integer.valueOf(888));
assertThat(result.f3).isEqualTo(Integer.valueOf(999));
assertThat(result.f4).isEqualTo(Integer.valueOf(000));
result = format.nextRecord(result);
assertThat(result).isNull();
assertThat(format.reachedEnd()).isTrue();
} catch (Exception ex) {
fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage());
}
} |
Set<String> pickMatchingApiDefinitions(ServerWebExchange exchange) {
return GatewayApiMatcherManager.getApiMatcherMap().values()
.stream()
.filter(m -> m.test(exchange))
.map(WebExchangeApiMatcher::getApiName)
.collect(Collectors.toSet());
} | @Test
public void testPickMatchingApiDefinitions() {
// Mock a request.
ServerWebExchange exchange = mock(ServerWebExchange.class);
ServerHttpRequest request = mock(ServerHttpRequest.class);
when(exchange.getRequest()).thenReturn(request);
RequestPath requestPath = mock(RequestPath.class);
when(request.getPath()).thenReturn(requestPath);
// Prepare API definitions.
Set<ApiDefinition> apiDefinitions = new HashSet<>();
String apiName1 = "some_customized_api";
ApiDefinition api1 = new ApiDefinition(apiName1)
.setPredicateItems(Collections.singleton(
new ApiPathPredicateItem().setPattern("/product/**")
.setMatchStrategy(SentinelGatewayConstants.URL_MATCH_STRATEGY_PREFIX)
));
String apiName2 = "another_customized_api";
ApiDefinition api2 = new ApiDefinition(apiName2)
.setPredicateItems(new HashSet<ApiPredicateItem>() {{
add(new ApiPathPredicateItem().setPattern("/something"));
add(new ApiPathPredicateItem().setPattern("/other/**")
.setMatchStrategy(SentinelGatewayConstants.URL_MATCH_STRATEGY_PREFIX));
}});
apiDefinitions.add(api1);
apiDefinitions.add(api2);
GatewayApiDefinitionManager.loadApiDefinitions(apiDefinitions);
SentinelGatewayFilter filter = new SentinelGatewayFilter();
when(requestPath.value()).thenReturn("/product/123");
Set<String> matchingApis = filter.pickMatchingApiDefinitions(exchange);
assertThat(matchingApis.size()).isEqualTo(1);
assertThat(matchingApis.contains(apiName1)).isTrue();
when(requestPath.value()).thenReturn("/products");
assertThat(filter.pickMatchingApiDefinitions(exchange).size()).isZero();
when(requestPath.value()).thenReturn("/something");
matchingApis = filter.pickMatchingApiDefinitions(exchange);
assertThat(matchingApis.size()).isEqualTo(1);
assertThat(matchingApis.contains(apiName2)).isTrue();
when(requestPath.value()).thenReturn("/other/foo/3");
matchingApis = filter.pickMatchingApiDefinitions(exchange);
assertThat(matchingApis.size()).isEqualTo(1);
assertThat(matchingApis.contains(apiName2)).isTrue();
} |
public int remove(long k)
{
if (k == 0L) {
return containsNullKey ? removeNullEntry() : defRetValue;
}
else {
long[] key = this.key;
int pos = (int) mix(k) & mask;
long curr = key[pos];
if (curr == 0L) {
return defRetValue;
}
else if (k == curr) {
return removeEntry(pos);
}
else {
while (curr != 0L) {
if (k == curr) {
return removeEntry(pos);
}
pos = pos + 1 & mask;
curr = key[pos];
}
return defRetValue;
}
}
} | @Test
public void testRemove()
{
// Test remove() and get() without rehashing
testRemove(10);
// Test remove() and get() with rehashing
testRemove(1000);
} |
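The probe loop in `remove()` relies on the table capacity being a power of two, so `pos + 1 & mask` wraps around cheaply instead of taking a modulo. A toy sketch of the same linear-probing lookup (the hash placement is faked for illustration):

```java
// Linear probing in an open-addressed table; 0 marks an empty slot,
// matching the sentinel convention in the focal method.
public class ProbeSketch {
    public static void main(String[] args) {
        int capacity = 8;                 // power of two
        int mask = capacity - 1;
        long[] key = new long[capacity];
        key[5] = 42L;
        key[6] = 99L;                     // 99 collided and was probed forward

        long k = 99L;
        int pos = 5 & mask;               // pretend mix(99) hashes to slot 5
        while (key[pos] != 0L) {
            if (key[pos] == k) {
                System.out.println("found " + k + " at slot " + pos);
                break;
            }
            pos = (pos + 1) & mask;       // wrap around the table
        }
    }
}
```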
@Override
public void execute(SensorContext context) {
for (InputFile file : context.fileSystem().inputFiles(context.fileSystem().predicates().hasLanguages(Xoo.KEY, Xoo2.KEY))) {
File ioFile = file.file();
File measureFile = new File(ioFile.getParentFile(), ioFile.getName() + MEASURES_EXTENSION);
processFileMeasures(file, measureFile, context);
InputDir inputDir = context.fileSystem().inputDir(ioFile.getParentFile());
if (inputDir != null) {
processFileMeasures(inputDir, new File(ioFile.getParentFile(), "folder" + MEASURES_EXTENSION), context);
}
}
processFileMeasures(context.module(), new File(context.fileSystem().baseDir(), "module" + MEASURES_EXTENSION), context);
} | @Test
public void failIfMetricNotFound() throws IOException {
File measures = new File(baseDir, "src/foo.xoo.measures");
FileUtils.write(measures, "unknow:12\n\n#comment");
InputFile inputFile = new TestInputFileBuilder("foo", "src/foo.xoo").setLanguage("xoo").setModuleBaseDir(baseDir.toPath()).build();
context.fileSystem().add(inputFile);
assertThatThrownBy(() -> sensor.execute(context))
.isInstanceOf(IllegalStateException.class);
} |
public static Ip6Address makeMaskPrefix(int prefixLength) {
byte[] mask = IpAddress.makeMaskPrefixArray(VERSION, prefixLength);
return new Ip6Address(mask);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidMakeNegativeMaskPrefixIPv6() {
Ip6Address ipAddress;
ipAddress = Ip6Address.makeMaskPrefix(-1);
} |
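For context, a plausible implementation of the mask-array helper (an assumption, not the ONOS source) sets the top `prefixLength` bits of a 16-byte IPv6 address and rejects out-of-range lengths, which is what makes the negative test throw:

```java
// Assumed sketch of makeMaskPrefixArray for IPv6: set the high prefixLength bits.
public class MaskSketch {
    static byte[] makeMask(int prefixLength) {
        if (prefixLength < 0 || prefixLength > 128) {
            throw new IllegalArgumentException("Invalid prefix length: " + prefixLength);
        }
        byte[] mask = new byte[16];
        for (int i = 0; i < prefixLength; i++) {
            mask[i / 8] |= (byte) (0x80 >> (i % 8));
        }
        return mask;
    }

    public static void main(String[] args) {
        byte[] m = makeMask(64);
        // first 8 bytes are 0xff, the rest 0x00 for a /64
        System.out.printf("first byte 0x%02x, ninth byte 0x%02x%n", m[0] & 0xff, m[8] & 0xff);
    }
}
```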
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
var tbMsg = ackIfNeeded(ctx, msg);
httpClient.processMessage(ctx, tbMsg,
m -> tellSuccess(ctx, m),
(m, t) -> tellFailure(ctx, m, t));
} | @Test
public void deleteRequestWithoutBody() throws IOException, InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final String path = "/path/to/delete";
setupServer("*", new HttpRequestHandler() {
@Override
public void handle(HttpRequest request, HttpResponse response, HttpContext context)
throws HttpException, IOException {
try {
assertEquals(request.getRequestLine().getUri(), path, "Request path matches");
assertTrue(request.containsHeader("Foo"), "Custom header included");
assertEquals("Bar", request.getFirstHeader("Foo").getValue(), "Custom header value");
response.setStatusCode(200);
new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(1000L);
} catch (InterruptedException e) {
// ignore
} finally {
latch.countDown();
}
}
}).start();
} catch (Exception e) {
System.out.println("Exception handling request: " + e.toString());
e.printStackTrace();
latch.countDown();
}
}
});
TbRestApiCallNodeConfiguration config = new TbRestApiCallNodeConfiguration().defaultConfiguration();
config.setRequestMethod("DELETE");
config.setHeaders(Collections.singletonMap("Foo", "Bar"));
config.setIgnoreRequestBody(true);
config.setRestEndpointUrlPattern(String.format("http://localhost:%d%s", server.getLocalPort(), path));
initWithConfig(config);
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, metaData, TbMsgDataType.JSON, TbMsg.EMPTY_JSON_OBJECT, ruleChainId, ruleNodeId);
restNode.onMsg(ctx, msg);
assertTrue(latch.await(10, TimeUnit.SECONDS), "Server handled request");
ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class);
ArgumentCaptor<TbMsgMetaData> metadataCaptor = ArgumentCaptor.forClass(TbMsgMetaData.class);
ArgumentCaptor<String> dataCaptor = ArgumentCaptor.forClass(String.class);
verify(ctx).transformMsg(msgCaptor.capture(), metadataCaptor.capture(), dataCaptor.capture());
assertNotSame(metaData, metadataCaptor.getValue());
assertEquals(TbMsg.EMPTY_JSON_OBJECT, dataCaptor.getValue());
} |
public Duration queryTimeout() {
return queryTimeout;
} | @Test
void queryTimeoutBadValues() {
assertThatExceptionOfType(NullPointerException.class)
.isThrownBy(() -> builder.queryTimeout(null));
} |
public boolean validate(
Policies policies, TimePartitionSpec timePartitioning, TableUri tableUri, String schema) {
if (policies != null && policies.getRetention() != null) {
// Two invalid cases for a time-partitioned table
if (timePartitioning != null) {
if (policies.getRetention().getColumnPattern() != null) {
failureMessage =
String.format(
"You can only specify retention column pattern on non-timestampPartitioned table (table[%s] is time-partitioned by[%s])",
tableUri, timePartitioning.getColumnName());
return false;
}
if (!policies.getRetention().getGranularity().equals(timePartitioning.getGranularity())) {
failureMessage =
String.format(
"invalid policies retention granularity format for table %s. Policies granularity must be equal to or lesser than"
+ " time partition spec granularity",
tableUri);
errorField = "retention";
return false;
}
}
// invalid cases regarding the integrity of retention object.
if (!validateGranularityWithPattern(policies.getRetention())) {
failureMessage =
String.format(
"Provided Retention Granularity[%s] is not supported with default pattern. "
+ "Please define pattern in retention config or use one of supported granularity: %s",
policies.getRetention().getGranularity().name(),
Arrays.toString(DefaultColumnPattern.values()));
return false;
}
if (!validatePatternIfPresent(policies.getRetention(), tableUri, schema)) {
failureMessage =
String.format(
"Provided pattern[%s] is not recognizable by OpenHouse for the table[%s]; Also please make sure the declared column is part of table schema.",
policies.getRetention().getColumnPattern(), tableUri);
return false;
}
if (timePartitioning == null && policies.getRetention().getColumnPattern() == null) {
failureMessage =
String.format(
"For non timestamp-partitioned table %s, column pattern in retention policy is mandatory",
tableUri);
return false;
}
}
return true;
} | @Test
void testValidate() {
// Negative: declared retention column not exists
RetentionColumnPattern pattern0 =
RetentionColumnPattern.builder()
.pattern("yyyy-mm-dd-hh")
.columnName("bb")
.build(); /* dummySchema doesn't have bb*/
Retention retention0 =
Retention.builder()
.count(1)
.granularity(TimePartitionSpec.Granularity.DAY)
.columnPattern(pattern0)
.build();
Policies policies0 = Policies.builder().retention(retention0).build();
Assertions.assertFalse(
validator.validate(
policies0, null, TableUri.builder().build(), getSchemaJsonFromSchema(dummySchema)));
pattern0 =
RetentionColumnPattern.builder()
.pattern("yyyy-mm-dd-hh")
.columnName("Aa") /* casing matters*/
.build();
retention0 =
Retention.builder()
.count(1)
.granularity(TimePartitionSpec.Granularity.DAY)
.columnPattern(pattern0)
.build();
policies0 = Policies.builder().retention(retention0).build();
Assertions.assertFalse(
validator.validate(
policies0, null, TableUri.builder().build(), getSchemaJsonFromSchema(dummySchema)));
pattern0 =
RetentionColumnPattern.builder()
.pattern("yyyy-mm-dd-hh")
.columnName("top1.aaa") /* negative case for nested*/
.build();
retention0 =
Retention.builder()
.count(1)
.granularity(TimePartitionSpec.Granularity.DAY)
.columnPattern(pattern0)
.build();
policies0 = Policies.builder().retention(retention0).build();
Assertions.assertFalse(
validator.validate(
policies0, null, TableUri.builder().build(), getSchemaJsonFromSchema(nestedSchema)));
// Negative: Missing timepartitionspec AND pattern
Retention retention1 =
Retention.builder().count(1).granularity(TimePartitionSpec.Granularity.DAY).build();
Policies policies1 = Policies.builder().retention(retention1).build();
Assertions.assertFalse(
validator.validate(
policies1, null, TableUri.builder().build(), getSchemaJsonFromSchema(dummySchema)));
// Positive: Only have pattern but no timepartitionSpec
RetentionColumnPattern pattern =
RetentionColumnPattern.builder().pattern("yyyy-mm-dd-hh").build();
Retention retention2 = retention1.toBuilder().columnPattern(pattern).build();
Policies policies2 = Policies.builder().retention(retention2).build();
Assertions.assertTrue(
validator.validate(
policies2, null, TableUri.builder().build(), getSchemaJsonFromSchema(dummySchema)));
// Negative: Having both timepartitionspec AND pattern
Retention retention3 =
Retention.builder()
.count(1)
.granularity(TimePartitionSpec.Granularity.DAY)
.columnPattern(pattern)
.build();
Policies policies3 = Policies.builder().retention(retention3).build();
Assertions.assertFalse(
validator.validate(
policies3,
TimePartitionSpec.builder()
.columnName("ts")
.granularity(TimePartitionSpec.Granularity.DAY)
.build(),
TableUri.builder().build(),
getSchemaJsonFromSchema(dummySchema)));
// Negative: Having both timepartitionspec AND invalid-pattern
RetentionColumnPattern malformedPattern =
RetentionColumnPattern.builder().pattern("random_pattern").columnName("aa").build();
Retention retention4 =
Retention.builder()
.count(1)
.granularity(TimePartitionSpec.Granularity.DAY)
.columnPattern(malformedPattern)
.build();
Policies policies4 = Policies.builder().retention(retention4).build();
Assertions.assertFalse(
validator.validate(
policies4,
TimePartitionSpec.builder()
.columnName("ts")
.granularity(TimePartitionSpec.Granularity.DAY)
.build(),
TableUri.builder().build(),
getSchemaJsonFromSchema(dummySchema)));
Field failedMsg =
org.springframework.util.ReflectionUtils.findField(
PoliciesSpecValidator.class, "failureMessage");
Assertions.assertNotNull(failedMsg);
org.springframework.util.ReflectionUtils.makeAccessible(failedMsg);
Assertions.assertTrue(
((String) org.springframework.util.ReflectionUtils.getField(failedMsg, validator))
.contains("You can only specify retention column pattern on non-timestampPartitioned"));
// Negative: having granularity not supported by defaultColumPattern
RetentionColumnPattern defaultPattern =
RetentionColumnPattern.builder().columnName("aa").pattern("").build();
Retention retention5 =
Retention.builder()
.count(1)
.granularity(TimePartitionSpec.Granularity.MONTH)
.columnPattern(defaultPattern)
.build();
Policies policies5 = Policies.builder().retention(retention5).build();
Assertions.assertFalse(
validator.validate(
policies5, null, TableUri.builder().build(), getSchemaJsonFromSchema(dummySchema)));
failedMsg =
org.springframework.util.ReflectionUtils.findField(
PoliciesSpecValidator.class, "failureMessage");
Assertions.assertNotNull(failedMsg);
org.springframework.util.ReflectionUtils.makeAccessible(failedMsg);
Assertions.assertTrue(
((String) org.springframework.util.ReflectionUtils.getField(failedMsg, validator))
.contains("Please define pattern in retention config"));
// The granularity mismatch is covered in
// com.linkedin.openhouse.tables.e2e.h2.TablesControllerTest.testCreateRequestFailsForWithGranularityDifferentFromTimePartitionSpec
// with error message validation
} |
@Deprecated
public static Method findMethodByMethodSignature(Class<?> clazz, String methodName, String[] parameterTypes)
throws NoSuchMethodException, ClassNotFoundException {
Method method;
if (parameterTypes == null) {
List<Method> finded = new ArrayList<>();
for (Method m : clazz.getMethods()) {
if (m.getName().equals(methodName)) {
finded.add(m);
}
}
if (finded.isEmpty()) {
throw new NoSuchMethodException("No such method " + methodName + " in class " + clazz);
}
if (finded.size() > 1) {
String msg = String.format(
"Not unique method for method name(%s) in class(%s), find %d methods.",
methodName, clazz.getName(), finded.size());
throw new IllegalStateException(msg);
}
method = finded.get(0);
} else {
Class<?>[] types = new Class<?>[parameterTypes.length];
for (int i = 0; i < parameterTypes.length; i++) {
types[i] = ReflectUtils.name2class(parameterTypes[i]);
}
method = clazz.getMethod(methodName, types);
}
return method;
} | @Test
void testFindMethodByMethodSignatureOverrideMoreThan1() throws Exception {
try {
ReflectUtils.findMethodByMethodSignature(TestedClass.class, "overrideMethod", null);
fail();
} catch (IllegalStateException expected) {
assertThat(expected.getMessage(), containsString("Not unique method for method name("));
}
} |
@Override
public int compareTo(DateTimeStamp dateTimeStamp) {
return comparator.compare(this,dateTimeStamp);
} | @Test
void testCompareEquals() {
DateTimeStamp object1 = new DateTimeStamp("2018-04-04T10:10:00.586-0100");
DateTimeStamp object2 = new DateTimeStamp("2018-04-04T10:10:00.586-0100");
assertEquals(0, object1.compareTo(object2));
} |
@Override
public void destroy() {
if (this.producer != null) {
try {
this.producer.close();
} catch (Exception e) {
log.error("Failed to close producer during destroy()", e);
}
}
} | @Test
public void givenProducerIsNotNull_whenDestroy_thenShouldClose() {
ReflectionTestUtils.setField(node, "producer", producerMock);
node.destroy();
then(producerMock).should().close();
} |
@Override
public TransferStatus prepare(final Path file, final Local local, final TransferStatus parent, final ProgressListener progress) throws BackgroundException {
final TransferStatus status = super.prepare(file, local, parent, progress);
if(status.isExists()) {
final String filename = file.getName();
int no = 0;
do {
String proposal = String.format("%s-%d", FilenameUtils.getBaseName(filename), ++no);
if(StringUtils.isNotBlank(Path.getExtension(filename))) {
proposal += String.format(".%s", Path.getExtension(filename));
}
status.withRename(LocalFactory.get(local.getParent(), proposal));
}
while(status.getRename().local.exists());
if(log.isInfoEnabled()) {
log.info(String.format("Changed download target from %s to %s", local, status.getRename().local));
}
if(log.isDebugEnabled()) {
log.debug(String.format("Clear exist flag for file %s", local));
}
status.setExists(false);
}
else {
if(parent.getRename().local != null) {
status.withRename(LocalFactory.get(parent.getRename().local, file.getName()));
}
if(log.isInfoEnabled()) {
log.info(String.format("Changed download target from %s to %s", local, status.getRename().local));
}
}
return status;
} | @Test
public void testPrepare() throws Exception {
RenameFilter f = new RenameFilter(new DisabledDownloadSymlinkResolver(), new NullTransferSession(new Host(new TestProtocol())));
final String name = new AsciiRandomStringService().random();
final NullLocal local = new NullLocal("/tmp", name) {
@Override
public boolean exists() {
return name.equals(this.getName());
}
};
final Path t = new Path(name, EnumSet.of(Path.Type.file));
final TransferStatus status = f.prepare(t, local, new TransferStatus().exists(true), new DisabledProgressListener());
assertNotNull(status.getRename().local);
assertEquals(String.format("%s-1", name), status.getRename().local.getName());
} |
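The filter's renaming strategy is a classic conflict-avoidance loop: keep proposing `base-1.ext`, `base-2.ext`, ... until a name that does not exist is found. A dependency-free standalone sketch of the same idea (not the Cyberduck types):

```java
import java.nio.file.Files;
import java.nio.file.Path;

// Propose numbered alternatives until an unused filename is found.
public class RenameSketch {
    static Path propose(Path dir, String filename) {
        int dot = filename.lastIndexOf('.');
        String base = dot > 0 ? filename.substring(0, dot) : filename;
        String ext = dot > 0 ? filename.substring(dot) : "";
        int no = 0;
        Path candidate;
        do {
            candidate = dir.resolve(String.format("%s-%d%s", base, ++no, ext));
        } while (Files.exists(candidate));
        return candidate;
    }

    public static void main(String[] args) {
        System.out.println(propose(Path.of("/tmp"), "report.csv")); // e.g. /tmp/report-1.csv
    }
}
```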
@Override
public void doRun() {
if (versionOverride.isPresent()) {
LOG.debug("Elasticsearch version is set manually. Not running check.");
return;
}
final Optional<SearchVersion> probedVersion = this.versionProbe.probe(this.elasticsearchHosts);
probedVersion.ifPresent(version -> {
if (compatible(this.initialElasticsearchVersion, version)) {
notificationService.fixed(Notification.Type.ES_VERSION_MISMATCH);
} else {
LOG.warn("Elasticsearch version currently running ({}) is incompatible with the one Graylog was started " +
"with ({}) - a restart is required!", version, initialElasticsearchVersion);
final Notification notification = notificationService.buildNow()
.addType(Notification.Type.ES_VERSION_MISMATCH)
.addSeverity(Notification.Severity.URGENT)
.addDetail("initial_version", initialElasticsearchVersion.toString())
.addDetail("current_version", version.toString());
notificationService.publishIfFirst(notification);
}
});
} | @Test
void createsNotificationIfCurrentVersionIncompatiblyOlderThanInitialOne() {
returnProbedVersion(Version.of(6, 8, 1));
createPeriodical(SearchVersion.elasticsearch(8, 1, 2)).doRun();
assertNotificationWasRaised();
} |
@Override
@MethodNotAvailable
public CompletionStage<Boolean> deleteAsync(K key) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testDeleteAsync() {
adapter.deleteAsync(23);
} |
@Override
public Set<ConfigOption<?>> requiredOptions() {
Set<ConfigOption<?>> options = new HashSet<>();
options.add(USERNAME);
options.add(PASSWORD);
options.add(DATABASE_NAME);
options.add(TABLE_NAME);
options.add(SCHEMA_NAME);
return options;
} | @Test
public void testValidation() {
// validate illegal port
try {
Map<String, String> properties = getAllRequiredOptionsWithHost();
properties.put("port", "123b");
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
t, "Could not parse value '123b' for key 'port'.")
.isPresent());
}
// validate missing required
Factory factory = new OracleTableSourceFactory();
for (ConfigOption<?> requiredOption : factory.requiredOptions()) {
Map<String, String> properties = getAllRequiredOptionsWithHost();
properties.remove(requiredOption.key());
try {
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
t,
"Missing required options are:\n\n" + requiredOption.key())
.isPresent());
}
}
// validate unsupported option
try {
Map<String, String> properties = getAllRequiredOptionsWithHost();
properties.put("unknown", "abc");
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(t, "Unsupported options:\n\nunknown")
.isPresent());
}
// validate unsupported option
try {
Map<String, String> properties = getAllRequiredOptionsWithHost();
properties.put("scan.startup.mode", "abc");
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
String msg =
"Invalid value for option 'scan.startup.mode'. Supported values are "
+ "[initial, snapshot, latest-offset], "
+ "but was: abc";
assertTrue(ExceptionUtils.findThrowableWithMessage(t, msg).isPresent());
}
} |
public Type getType() {
return type;
} | @Test
public void shouldReturnType() {
// Given:
final TableElement element = new TableElement(NAME, new Type(SqlTypes.STRING));
// Then:
assertThat(element.getType(), is(new Type(SqlTypes.STRING)));
} |
public final String getName() {
return getEnvironment().getTaskInfo().getTaskNameWithSubtasks();
} | @Test
void testStateBackendClosingOnFailure() throws Exception {
Configuration taskManagerConfig = new Configuration();
taskManagerConfig.set(STATE_BACKEND, TestMemoryStateBackendFactory.class.getName());
StreamConfig cfg = new StreamConfig(new Configuration());
cfg.setStateKeySerializer(mock(TypeSerializer.class));
cfg.setOperatorID(new OperatorID(4711L, 42L));
TestStreamSource<Long, MockSourceFunction> streamSource =
new TestStreamSource<>(new MockSourceFunction());
cfg.setStreamOperator(streamSource);
cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
try (NettyShuffleEnvironment shuffleEnvironment =
new NettyShuffleEnvironmentBuilder().build()) {
Task task =
createTask(
StateBackendTestSource.class,
shuffleEnvironment,
cfg,
taskManagerConfig,
EXECUTOR_EXTENSION.getExecutor());
StateBackendTestSource.fail = true;
task.startTaskThread();
// wait for clean termination
task.getExecutingThread().join();
// ensure that the state backends and stream iterables are closed ...
verify(TestStreamSource.operatorStateBackend).close();
verify(TestStreamSource.keyedStateBackend).close();
verify(TestStreamSource.rawOperatorStateInputs).close();
verify(TestStreamSource.rawKeyedStateInputs).close();
// ... and disposed
verify(TestStreamSource.operatorStateBackend).dispose();
verify(TestStreamSource.keyedStateBackend).dispose();
assertThat(task.getExecutionState()).isEqualTo(ExecutionState.FAILED);
}
} |
@Override
public SparkTable loadTable(Identifier ident) throws NoSuchTableException {
Pair<Table, Long> table = load(ident);
return new SparkTable(table.first(), table.second(), false /* refresh eagerly */);
} | @Test
public void testTimeTravel() {
sql("CREATE TABLE %s (id INT, dep STRING) USING iceberg", tableName);
Table table = validationCatalog.loadTable(tableIdent);
sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
table.refresh();
Snapshot firstSnapshot = table.currentSnapshot();
waitUntilAfter(firstSnapshot.timestampMillis());
sql("INSERT INTO TABLE %s VALUES (2, 'hr')", tableName);
table.refresh();
Snapshot secondSnapshot = table.currentSnapshot();
waitUntilAfter(secondSnapshot.timestampMillis());
sql("INSERT INTO TABLE %s VALUES (3, 'hr')", tableName);
table.refresh();
try {
TABLE_CACHE.add("key", table);
assertEquals(
"Should have expected rows in 3rd snapshot",
ImmutableList.of(row(1, "hr"), row(2, "hr"), row(3, "hr")),
sql("SELECT * FROM testcache.key ORDER BY id"));
assertEquals(
"Should have expected rows in 2nd snapshot",
ImmutableList.of(row(1, "hr"), row(2, "hr")),
sql(
"SELECT * FROM testcache.`key#at_timestamp_%s` ORDER BY id",
secondSnapshot.timestampMillis()));
assertEquals(
"Should have expected rows in 1st snapshot",
ImmutableList.of(row(1, "hr")),
sql(
"SELECT * FROM testcache.`key#snapshot_id_%d` ORDER BY id",
firstSnapshot.snapshotId()));
} finally {
TABLE_CACHE.remove("key");
}
} |
@Override
public boolean add(String e) {
return get(addAsync(e));
} | @Test
public void testRandom() {
RLexSortedSet al = redisson.getLexSortedSet("test");
for (int i = 0; i < 100; i++) {
al.add("" + i);
}
Set<String> values = new HashSet<>();
for (int i = 0; i < 3; i++) {
String v = al.random();
values.add(v);
}
assertThat(values).hasSize(3);
Collection<String> range = al.random(10);
assertThat(range).hasSize(10);
} |
@Override
public Optional<FunctionDefinition> getFunctionDefinition(String name) {
if (BUILT_IN_FUNC_BLACKLIST.contains(name)) {
return Optional.empty();
}
FunctionDefinitionFactory.Context context = () -> classLoader;
// We override some of Hive's functions with native implementations to support hash-agg
if (isNativeAggFunctionEnabled() && BUILTIN_NATIVE_AGG_FUNC.contains(name.toLowerCase())) {
return getBuiltInNativeAggFunction(name.toLowerCase());
}
// We override Hive's grouping function. Refer to the implementation for more details.
if (name.equalsIgnoreCase("grouping")) {
return Optional.of(
factory.createFunctionDefinitionFromHiveFunction(
name, HiveGenericUDFGrouping.class.getName(), context));
}
// this function is used to generate legacy GROUPING__ID value for old hive versions
if (name.equalsIgnoreCase(GenericUDFLegacyGroupingID.NAME)) {
return Optional.of(
factory.createFunctionDefinitionFromHiveFunction(
name, GenericUDFLegacyGroupingID.class.getName(), context));
}
// We override Hive's internal_interval. Refer to the implementation for more details
if (name.equalsIgnoreCase("internal_interval")) {
return Optional.of(
factory.createFunctionDefinitionFromHiveFunction(
name, HiveGenericUDFInternalInterval.class.getName(), context));
}
// used to access the field of struct in array
if (name.equalsIgnoreCase(HiveGenericUDFArrayAccessStructField.NAME)) {
return Optional.of(
factory.createFunctionDefinitionFromHiveFunction(
name, HiveGenericUDFArrayAccessStructField.class.getName(), context));
}
// We add a custom to_decimal function. Refer to the implementation for more details.
if (name.equalsIgnoreCase(HiveGenericUDFToDecimal.NAME)) {
return Optional.of(
factory.createFunctionDefinitionFromHiveFunction(
name, HiveGenericUDFToDecimal.class.getName(), context));
}
Optional<FunctionInfo> info = hiveShim.getBuiltInFunctionInfo(name);
return info.map(
functionInfo ->
factory.createFunctionDefinitionFromHiveFunction(
name, functionInfo.getFunctionClass().getName(), context));
} | @Test
public void testHiveBuiltInFunction() {
FunctionDefinition fd = new HiveModule().getFunctionDefinition("reverse").get();
HiveSimpleUDF udf = (HiveSimpleUDF) fd;
DataType[] inputType = new DataType[] {DataTypes.STRING()};
CallContextMock callContext = new CallContextMock();
callContext.argumentDataTypes = Arrays.asList(inputType);
callContext.argumentLiterals = Arrays.asList(new Boolean[inputType.length]);
Collections.fill(callContext.argumentLiterals, false);
udf.getTypeInference(null).getOutputTypeStrategy().inferType(callContext);
udf.open(null);
assertThat(udf.eval("abc")).isEqualTo("cba");
} |
@Override
public boolean containsAll(IntSet set) {
return set.isEmpty();
} | @Test
public void testContainsAll() throws Exception {
IntSet sis2 = new SingletonIntSet(3);
assertFalse(es.containsAll(sis2));
assertTrue(sis2.containsAll(es));
IntSet sis3 = new RangeSet(0);
assertTrue(sis3.containsAll(es));
assertTrue(es.containsAll(sis3));
} |
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
return helper.interpret(session, st, context);
} | @Test
void should_describe_all_tables() {
// Given
String query = "DESCRIBE TABLES;";
final String expected = reformatHtml(readTestResource(
"/scalate/DescribeTables.html"));
// When
final InterpreterResult actual = interpreter.interpret(query, intrContext);
// Then
assertEquals(Code.SUCCESS, actual.code());
assertEquals(expected, reformatHtml(actual.message().get(0).getData()));
} |
public static MetricsSource makeSource(Object source) {
return new MetricsSourceBuilder(source,
DefaultMetricsFactory.getAnnotatedMetricsFactory()).build();
} | @Test public void testMethods() {
MyMetrics2 metrics = new MyMetrics2();
MetricsSource source = MetricsAnnotations.makeSource(metrics);
MetricsRecordBuilder rb = getMetrics(source);
verify(rb).addGauge(info("G1", "G1"), 1);
verify(rb).addGauge(info("G2", "G2"), 2L);
verify(rb).addGauge(info("G3", "G3"), 3.0f);
verify(rb).addGauge(info("G4", "G4"), 4.0);
verify(rb).addCounter(info("C1", "C1"), 1);
verify(rb).addCounter(info("C2", "C2"), 2L);
verify(rb).tag(info("T1", "T1"), "t1");
} |
@Override
public Result run(GoPluginDescriptor pluginDescriptor, Map<String, List<String>> extensionsInfoOfPlugin) {
final ValidationResult validationResult = validate(pluginDescriptor, extensionsInfoOfPlugin);
return new Result(validationResult.hasError(), validationResult.toErrorMessage());
} | @Test
void shouldNotAddErrorOnSuccessfulValidation() {
final PluginPostLoadHook.Result validationResult = pluginExtensionsAndVersionValidator.run(descriptor, Map.of(ELASTIC_AGENT_EXTENSION, List.of("2.0")));
assertThat(validationResult.isAFailure()).isFalse();
assertThat(validationResult.getMessage()).isNull();
} |
public synchronized void rewind() {
this.readPosition = 0;
this.curReadBufferIndex = 0;
this.readPosInCurBuffer = 0;
if (CollectionUtils.isNotEmpty(bufferList)) {
this.curBuffer = bufferList.get(0);
for (ByteBuffer buffer : bufferList) {
buffer.rewind();
}
}
} | @Test
public void testCommitLogTypeInputStreamWithCoda() {
List<ByteBuffer> uploadBufferList = new ArrayList<>();
int bufferSize = 0;
for (int i = 0; i < MSG_NUM; i++) {
ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer();
uploadBufferList.add(byteBuffer);
bufferSize += byteBuffer.remaining();
}
ByteBuffer codaBuffer = ByteBuffer.allocate(MessageFormatUtil.COMMIT_LOG_CODA_SIZE);
codaBuffer.putInt(MessageFormatUtil.COMMIT_LOG_CODA_SIZE);
codaBuffer.putInt(MessageFormatUtil.BLANK_MAGIC_CODE);
long timeMillis = System.currentTimeMillis();
codaBuffer.putLong(timeMillis);
codaBuffer.flip();
int codaBufferSize = codaBuffer.remaining();
bufferSize += codaBufferSize;
// build expected byte buffer for verifying the FileSegmentInputStream
ByteBuffer expectedByteBuffer = ByteBuffer.allocate(bufferSize);
for (ByteBuffer byteBuffer : uploadBufferList) {
expectedByteBuffer.put(byteBuffer);
byteBuffer.rewind();
}
expectedByteBuffer.put(codaBuffer);
codaBuffer.rewind();
// set real physical offset
for (int i = 0; i < MSG_NUM; i++) {
long physicalOffset = COMMIT_LOG_START_OFFSET + i * MSG_LEN;
int position = i * MSG_LEN + MessageFormatUtil.PHYSICAL_OFFSET_POSITION;
expectedByteBuffer.putLong(position, physicalOffset);
}
int finalBufferSize = bufferSize;
int[] batchReadSizeTestSet = {
MessageFormatUtil.PHYSICAL_OFFSET_POSITION - 1, MessageFormatUtil.PHYSICAL_OFFSET_POSITION, MessageFormatUtil.PHYSICAL_OFFSET_POSITION + 1,
MSG_LEN - 1, MSG_LEN, MSG_LEN + 1,
bufferSize - 1, bufferSize, bufferSize + 1
};
verifyReadAndReset(expectedByteBuffer, () -> FileSegmentInputStreamFactory.build(
FileSegmentType.COMMIT_LOG, COMMIT_LOG_START_OFFSET, uploadBufferList, codaBuffer, finalBufferSize), finalBufferSize, batchReadSizeTestSet);
} |
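`rewind()` resets every buffer because `ByteBuffer.rewind()` moves the position back to 0 without touching the limit, so previously read data becomes readable again. A quick illustration:

```java
import java.nio.ByteBuffer;

// rewind() resets position to 0 and keeps the limit, allowing a re-read.
public class RewindSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.putInt(7).flip();
        System.out.println(buf.getInt()); // 7, position is now 4
        buf.rewind();                     // position back to 0, limit unchanged
        System.out.println(buf.getInt()); // 7 again
    }
}
```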
public String[] getTypes() {
List<String> types = new ArrayList<>();
for ( RunConfigurationProvider runConfigurationProvider : getRunConfigurationProviders() ) {
types.add( runConfigurationProvider.getType() );
}
return types.toArray( new String[ 0 ] );
} | @Test
public void testGetTypes() {
String[] types = executionConfigurationManager.getTypes();
assertTrue( Arrays.asList( types ).contains( DefaultRunConfiguration.TYPE ) );
} |
@Override
public LeaderElection createLeaderElection(String componentId) {
synchronized (lock) {
Preconditions.checkState(
!leadershipOperationExecutor.isShutdown(),
"The service was already closed and cannot be reused.");
Preconditions.checkState(
!leaderContenderRegistry.containsKey(componentId),
"There shouldn't be any contender registered under the passed component '%s'.",
componentId);
return new DefaultLeaderElection(this, componentId);
}
} | @Test
void testMultipleDriverCreations() throws Exception {
final AtomicInteger closeCount = new AtomicInteger(0);
final TestingLeaderElectionDriver.Factory driverFactory =
new TestingLeaderElectionDriver.Factory(
TestingLeaderElectionDriver.newNoOpBuilder()
.setCloseConsumer(ignoredLock -> closeCount.incrementAndGet()));
try (final DefaultLeaderElectionService testInstance =
new DefaultLeaderElectionService(driverFactory)) {
final String componentId = "component_id";
final int numberOfStartCloseSessions = 2;
for (int i = 1; i <= numberOfStartCloseSessions; i++) {
assertThat(driverFactory.getCreatedDriverCount()).isEqualTo(i - 1);
assertThat(closeCount).hasValue(i - 1);
try (final LeaderElection leaderElection =
testInstance.createLeaderElection(componentId)) {
leaderElection.startLeaderElection(
TestingGenericLeaderContender.newBuilder().build());
}
assertThat(driverFactory.getCreatedDriverCount()).isEqualTo(i);
assertThat(closeCount).hasValue(i);
}
}
} |
public TagList getStepInstanceTags(
String workflowId,
long workflowInstanceId,
long workflowRunId,
String stepId,
String stepAttempt) {
return getStepInstanceFieldByIds(
StepInstanceField.INSTANCE,
workflowId,
workflowInstanceId,
workflowRunId,
stepId,
stepAttempt,
rs -> ObjectHelper.valueOrDefault(getInstance(rs).getTags(), Defaults.DEFAULT_TAG_LIST));
} | @Test
public void testGetStepInstanceTags() {
TagList tags = stepDao.getStepInstanceTags(TEST_WORKFLOW_ID, 1, 1, "job1", "1");
assertEquals(si.getTags(), tags);
TagList latest = stepDao.getStepInstanceTags(TEST_WORKFLOW_ID, 1, 1, "job1", "latest");
assertEquals(tags, latest);
} |
@Nullable public Span next(TraceContextOrSamplingFlags extracted) {
Tracer tracer = tracer();
if (tracer == null) return null;
Span next = tracer.nextSpan(extracted);
SpanAndScope spanAndScope = new SpanAndScope(next, tracer.withSpanInScope(next));
getCurrentSpanInScopeStack().addFirst(spanAndScope);
return next;
} | @Test void next() {
assertThat(threadLocalSpan.next())
.isEqualTo(threadLocalSpan.remove());
} |
public PostgreSQLPacket describe() {
if (responseHeader instanceof QueryResponseHeader) {
return createRowDescriptionPacket((QueryResponseHeader) responseHeader);
}
if (responseHeader instanceof UpdateResponseHeader) {
return PostgreSQLNoDataPacket.getInstance();
}
throw new IllegalStateException(String.format("Can not describe portal `%s` before bind", name));
} | @Test
void assertDescribeBeforeBind() {
PostgreSQLServerPreparedStatement preparedStatement = mock(PostgreSQLServerPreparedStatement.class);
when(preparedStatement.getSqlStatementContext()).thenReturn(mock(SQLStatementContext.class));
assertThrows(IllegalStateException.class, () -> new Portal("", preparedStatement, Collections.emptyList(), Collections.emptyList(), databaseConnectionManager).describe());
} |
@Deprecated
@Override public void toXML(Object obj, OutputStream out) {
super.toXML(obj, out);
} | @Test
public void marshalValue() {
Foo f = new Foo();
f.r1 = f.r2 = Result.FAILURE;
String xml = Run.XSTREAM.toXML(f);
// we should find two "FAILURE"s as they should be written out twice
assertEquals(xml, 3, xml.split("FAILURE").length);
} |
private List<Integer> getBeSeqIndexes(List<Long> flatBackendsPerBucketSeq, long beId) {
return IntStream.range(0, flatBackendsPerBucketSeq.size()).boxed().filter(
idx -> flatBackendsPerBucketSeq.get(idx).equals(beId)).collect(Collectors.toList());
} | @Test
public void testGetBeSeqIndexes() {
List<Long> flatBackendsPerBucketSeq = Lists.newArrayList(1L, 2L, 2L, 3L, 4L, 2L);
List<Integer> indexes = Deencapsulation.invoke(balancer,
"getBeSeqIndexes", flatBackendsPerBucketSeq, 2L);
Assert.assertArrayEquals(new int[] {1, 2, 5}, indexes.stream().mapToInt(i -> i).toArray());
System.out.println("backend1 id is " + backend1.getId());
} |
@Override
public void run() {
if (processor != null) {
processor.execute();
} else {
if (!beforeHook()) {
logger.info("before-feature hook returned [false], aborting: {}", this);
} else {
scenarios.forEachRemaining(this::processScenario);
}
afterFeature();
}
} | @Test
void testCsv() {
run("csv.feature");
} |
public void untrackAll(JobID jobId) {
checkNotNull(jobId);
synchronized (lock) {
Set<BlobKey> keysToRemove = blobKeyByJob.remove(jobId);
if (keysToRemove != null) {
for (BlobKey key : keysToRemove) {
untrack(jobId, key);
}
}
}
} | @Test
void testUntrackAll() {
tracker.track(jobId, BlobKey.createKey(BlobType.PERMANENT_BLOB), 1L);
JobID anotherJobId = new JobID();
tracker.track(anotherJobId, BlobKey.createKey(BlobType.PERMANENT_BLOB), 1L);
assertThat(tracker.getBlobKeysByJobId(jobId)).hasSize(2);
tracker.untrackAll(jobId);
assertThat(tracker.getBlobKeysByJobId(jobId)).isEmpty();
assertThat(tracker.getBlobKeysByJobId(anotherJobId)).hasSize(1);
} |
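A hand-rolled sketch of the structure `untrackAll` implies (field and type names assumed, not Flink's): a job-to-keys index whose entry is removed under the lock before each key is individually cleaned up.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Job-to-keys index: untrackAll removes the whole entry atomically,
// then runs per-key cleanup for each removed key.
public class TrackerSketch {
    private final Object lock = new Object();
    private final Map<String, Set<String>> blobKeyByJob = new HashMap<>();

    public void track(String jobId, String key) {
        synchronized (lock) {
            blobKeyByJob.computeIfAbsent(jobId, j -> new HashSet<>()).add(key);
        }
    }

    public void untrackAll(String jobId) {
        synchronized (lock) {
            Set<String> keysToRemove = blobKeyByJob.remove(jobId);
            if (keysToRemove != null) {
                for (String key : keysToRemove) {
                    System.out.println("untracking " + key); // per-key cleanup hook
                }
            }
        }
    }
}
```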
public static DataMap convertMap(Map<String, ?> input, boolean stringify)
{
return convertMap(input, false, stringify);
} | @Test
void testConvertDroppingNulls()
{
DataMap parent = DataComplexUtil.convertMap(inputMap());
Assert.assertNotNull(parent);
Assert.assertEquals(parent.size(), 2);
Assert.assertFalse(parent.containsKey("child1"));
Assert.assertTrue(parent.containsKey("child2"));
DataMap child2 = parent.getDataMap("child2");
Assert.assertNotNull(child2);
Assert.assertEquals(child2.size(), 1);
Assert.assertTrue(child2.containsKey("gchild1"));
Assert.assertEquals(child2.get("gchild1"), 123);
Assert.assertFalse(child2.containsKey("gchild2"));
Assert.assertTrue(parent.containsKey("child3"));
DataList child3 = parent.getDataList("child3");
Assert.assertNotNull(child3);
Assert.assertEquals(child3.size(), 1);
Assert.assertEquals(child3.get(0), "gchild3");
} |
public static UParens create(UExpression expression) {
return new AutoValue_UParens(expression);
} | @Test
public void match() {
assertUnifies("(5L)", UParens.create(ULiteral.longLit(5L)));
} |
public SpillSpaceTracker(DataSize maxSize)
{
requireNonNull(maxSize, "maxSize is null");
maxBytes = maxSize.toBytes();
currentBytes = 0;
} | @Test
public void testSpillSpaceTracker()
{
assertEquals(spillSpaceTracker.getCurrentBytes(), 0);
assertEquals(spillSpaceTracker.getMaxBytes(), MAX_DATA_SIZE.toBytes());
long reservedBytes = new DataSize(5, MEGABYTE).toBytes();
spillSpaceTracker.reserve(reservedBytes);
assertEquals(spillSpaceTracker.getCurrentBytes(), reservedBytes);
long otherReservedBytes = new DataSize(2, MEGABYTE).toBytes();
spillSpaceTracker.reserve(otherReservedBytes);
assertEquals(spillSpaceTracker.getCurrentBytes(), (reservedBytes + otherReservedBytes));
spillSpaceTracker.reserve(otherReservedBytes);
assertEquals(spillSpaceTracker.getCurrentBytes(), (reservedBytes + 2 * otherReservedBytes));
spillSpaceTracker.free(otherReservedBytes);
spillSpaceTracker.free(otherReservedBytes);
assertEquals(spillSpaceTracker.getCurrentBytes(), reservedBytes);
spillSpaceTracker.free(reservedBytes);
assertEquals(spillSpaceTracker.getCurrentBytes(), 0);
} |
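A guess at the tracker's internals consistent with the test: `reserve()` adds to a running byte count (presumably bounded by `maxBytes`) and `free()` subtracts. A minimal sketch under those assumptions:

```java
// Minimal byte-accounting sketch matching the reserve/free expectations;
// the real Presto class likely enforces maxBytes the same way.
public class SpillTrackerSketch {
    private final long maxBytes;
    private long currentBytes;

    SpillTrackerSketch(long maxBytes) {
        this.maxBytes = maxBytes;
    }

    synchronized void reserve(long bytes) {
        if (currentBytes + bytes > maxBytes) {
            throw new IllegalStateException("spill space exhausted");
        }
        currentBytes += bytes;
    }

    synchronized void free(long bytes) {
        currentBytes -= bytes;
    }

    synchronized long getCurrentBytes() {
        return currentBytes;
    }
}
```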
@Override
public boolean isEmpty() {
return commandTopic.getEndOffset() == 0;
} | @Test
public void shouldComputeEmptyCorrectly() {
// Given:
when(commandTopic.getEndOffset()).thenReturn(0L);
// When/Then:
assertThat(commandStore.isEmpty(), is(true));
} |
@Override
public void unbindSocialUser(Long userId, Integer userType, Integer socialType, String openid) {
// Obtain the SocialUserDO social user corresponding to the openid
SocialUserDO socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, openid);
if (socialUser == null) {
throw exception(SOCIAL_USER_NOT_FOUND);
}
// Delete the corresponding social binding relation
socialUserBindMapper.deleteByUserTypeAndUserIdAndSocialType(userType, userId, socialUser.getType());
} | @Test
public void testUnbindSocialUser_notFound() {
// Call the method and assert the exception
assertServiceException(
() -> socialUserService.unbindSocialUser(randomLong(), UserTypeEnum.ADMIN.getValue(),
SocialTypeEnum.GITEE.getType(), "test_openid"),
SOCIAL_USER_NOT_FOUND);
} |
public static <T> VerboseCondition<T> verboseCondition(Predicate<T> predicate, String description,
Function<T, String> objectUnderTestDescriptor) {
return new VerboseCondition<>(predicate, description, objectUnderTestDescriptor);
} | @Test
public void should_fail_and_display_actual_description_as_per_transformation_function_with_hasCondition() {
// GIVEN
Condition<String> shortLength = verboseCondition(actual -> actual.length() < 4,
"length shorter than 4",
s -> format(" but length was %s", s.length()));
// WHEN
AssertionError assertionError = expectAssertionError(() -> assertThat("foooo").has(shortLength));
// THEN
then(assertionError).hasMessage(format("%nExpecting actual:%n" +
" \"foooo\"%n" +
"to have length shorter than 4 but length was 5"));
} |
@Override
public boolean isUserManaged(DbSession dbSession, String userUuid) {
return findManagedInstanceService()
.map(managedInstanceService -> managedInstanceService.isUserManaged(dbSession, userUuid))
.orElse(false);
} | @Test
public void isUserManaged_whenNoDelegates_returnsFalse() {
DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(Set.of());
assertThat(managedInstanceService.isUserManaged(dbSession, "whatever")).isFalse();
} |
@Override
public String name() {
return "ControllerRegistrationsPublisher";
} | @Test
public void testName() {
ControllerRegistrationsPublisher publisher = new ControllerRegistrationsPublisher();
assertEquals("ControllerRegistrationsPublisher", publisher.name());
} |
public void publishUpdated(Cache<K, V> cache, K key, V oldValue, V newValue) {
publish(cache, EventType.UPDATED, key, /* hasOldValue */ true,
oldValue, newValue, /* quiet */ false);
} | @Test
public void publishUpdated() {
var dispatcher = new EventDispatcher<Integer, Integer>(Runnable::run);
registerAll(dispatcher);
dispatcher.publishUpdated(cache, 1, 2, 3);
verify(updatedListener, times(4)).onUpdated(any());
assertThat(dispatcher.pending.get()).hasSize(2);
assertThat(dispatcher.dispatchQueues.values().stream()
.flatMap(queue -> queue.entrySet().stream())).isEmpty();
} |
public static void notBlank(String str, String message) {
if (StringUtil.isBlank(str)) {
throw new IllegalArgumentException(message);
}
} | @Test(expected = IllegalArgumentException.class)
public void assertNotBlankByStringAndMessageIsNull() {
Assert.notBlank(" ");
} |
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
// OK, perfom additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
} | @Test
public void testValidator464() {
String[] schemes = {"file"};
UrlValidator urlValidator = new UrlValidator(schemes);
String fileNAK = "file://bad ^ domain.com/label/test";
assertFalse(fileNAK, urlValidator.isValid(fileNAK));
} |
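A short usage sketch of the file-scheme special cases in `isValid`: an absent authority is accepted for `file:` URLs, while an authority containing `:` (e.g. a Windows drive letter) is rejected.

```java
import org.apache.commons.validator.routines.UrlValidator;

// file:// handling: empty authority is allowed, ':' in the authority is not.
public class FileUrlSketch {
    public static void main(String[] args) {
        UrlValidator v = new UrlValidator(new String[] {"file"});
        System.out.println(v.isValid("file:///etc/hosts"));       // true: empty authority
        System.out.println(v.isValid("file://C:/autoexec.bat"));  // false: ':' in authority
    }
}
```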
@Override
public Num getValue(int index) {
return getBarSeries().getBar(index).getLowPrice();
} | @Test
public void indicatorShouldRetrieveBarLowPrice() {
for (int i = 0; i < 10; i++) {
assertEquals(lowPriceIndicator.getValue(i), barSeries.getBar(i).getLowPrice());
}
} |
@Nonnull
static String shortenPathComponent(@Nonnull String pathComponent, int bytesToRemove) {
// We replace the removed part with a #, so we need to remove 1 extra char
bytesToRemove++;
int[] codePoints;
try {
IntBuffer intBuffer = ByteBuffer.wrap(pathComponent.getBytes("UTF-32BE")).asIntBuffer();
codePoints = new int[intBuffer.limit()];
intBuffer.get(codePoints);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
int midPoint = codePoints.length/2;
int firstEnd = midPoint; // exclusive
int secondStart = midPoint+1; // inclusive
int bytesRemoved = utf8Length(codePoints[midPoint]);
// if we have an even number of codepoints, start by removing both middle characters,
// unless just removing the first already removes enough bytes
if (((codePoints.length % 2) == 0) && bytesRemoved < bytesToRemove) {
bytesRemoved += utf8Length(codePoints[secondStart]);
secondStart++;
}
while ((bytesRemoved < bytesToRemove) &&
(firstEnd > 0 || secondStart < codePoints.length)) {
if (firstEnd > 0) {
firstEnd--;
bytesRemoved += utf8Length(codePoints[firstEnd]);
}
if (bytesRemoved < bytesToRemove && secondStart < codePoints.length) {
bytesRemoved += utf8Length(codePoints[secondStart]);
secondStart++;
}
}
StringBuilder sb = new StringBuilder();
for (int i=0; i<firstEnd; i++) {
sb.appendCodePoint(codePoints[i]);
}
sb.append('#');
for (int i=secondStart; i<codePoints.length; i++) {
sb.appendCodePoint(codePoints[i]);
}
return sb.toString();
} | @Test
public void test2ByteEncodings() {
StringBuilder sb = new StringBuilder();
for (int i=0x80; i<0x80+100; i++) {
sb.append((char)i);
}
// remove a total of 3 2-byte characters, and then add back in the 1-byte '#'
String result = ClassFileNameHandler.shortenPathComponent(sb.toString(), 4);
Assert.assertEquals(200, sb.toString().getBytes(UTF8).length);
Assert.assertEquals(195, result.getBytes(UTF8).length);
Assert.assertEquals(98, result.length());
// remove a total of 3 2-byte characters, and then add back in the 1-byte '#'
result = ClassFileNameHandler.shortenPathComponent(sb.toString(), 5);
Assert.assertEquals(200, sb.toString().getBytes(UTF8).length);
Assert.assertEquals(195, result.getBytes(UTF8).length);
Assert.assertEquals(98, result.length());
} |
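The byte accounting in `shortenPathComponent` hinges on a `utf8Length` helper that is not shown; a likely implementation (an assumption based on UTF-8's encoding ranges, not the smali source) is:

```java
// UTF-8 encoded length per code point: 1 byte below U+0080, 2 below U+0800,
// 3 for the rest of the BMP, 4 for supplementary planes.
public class Utf8LengthSketch {
    static int utf8Length(int codePoint) {
        if (codePoint < 0x80) return 1;
        if (codePoint < 0x800) return 2;
        if (codePoint < 0x10000) return 3;
        return 4;
    }

    public static void main(String[] args) {
        System.out.println(utf8Length('a'));     // 1
        System.out.println(utf8Length(0x80));    // 2, as in the test's characters
        System.out.println(utf8Length(0x20AC));  // 3 (euro sign)
        System.out.println(utf8Length(0x1F600)); // 4 (emoji)
    }
}
```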
@Override
public Output run(RunContext runContext) throws Exception {
URI from = new URI(runContext.render(this.from));
final PebbleExpressionPredicate predicate = getExpressionPredication(runContext);
final Path path = runContext.workingDir().createTempFile(".ion");
long processedItemsTotal = 0L;
long droppedItemsTotal = 0L;
try (final BufferedWriter writer = Files.newBufferedWriter(path);
final BufferedReader reader = newBufferedReader(runContext, from)) {
String item;
while ((item = reader.readLine()) != null) {
IllegalVariableEvaluationException exception = null;
Boolean match = null;
try {
match = predicate.apply(item);
} catch (IllegalVariableEvaluationException e) {
exception = e;
}
FilterType action = this.filterType;
if (match == null) {
switch (errorOrNullBehavior) {
case FAIL -> {
if (exception != null) {
throw exception;
} else {
throw new IllegalVariableEvaluationException(String.format(
"Expression `%s` return `null` on item `%s`",
filterCondition,
item
));
}
}
case INCLUDE -> action = FilterType.INCLUDE;
case EXCLUDE -> action = FilterType.EXCLUDE;
}
match = true;
}
if (!match) {
action = action.reverse();
}
switch (action) {
case INCLUDE -> {
writer.write(item);
writer.newLine();
}
case EXCLUDE -> droppedItemsTotal++;
}
processedItemsTotal++;
}
}
URI uri = runContext.storage().putFile(path.toFile());
return Output.builder()
.uri(uri)
.processedItemsTotal(processedItemsTotal)
.droppedItemsTotal(droppedItemsTotal)
.build();
} | @Test
void shouldFilterGivenInvalidRecordsForExclude() throws Exception {
// Given
RunContext runContext = runContextFactory.of();
FilterItems task = FilterItems
.builder()
.from(generateKeyValueFile(TEST_INVALID_ITEMS, runContext).toString())
.filterCondition(" {{ value % 2 == 0 }}")
.filterType(FilterItems.FilterType.INCLUDE)
.errorOrNullBehavior(FilterItems.ErrorOrNullBehavior.EXCLUDE)
.build();
// When
FilterItems.Output output = task.run(runContext);
// Then
Assertions.assertNotNull(output);
Assertions.assertNotNull(output.getUri());
Assertions.assertEquals(3, output.getDroppedItemsTotal());
Assertions.assertEquals(4, output.getProcessedItemsTotal());
assertFile(runContext, output, List.of(new KeyValue("k4", 4)), KeyValue.class);
} |
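The include/exclude decision in `run()` is easiest to see in isolation. This standalone rewrite (hypothetical names, not the Kestra types) coerces a null or failed predicate result through the configured behavior, then inverts the action for non-matching items:

```java
// Decision logic: null match -> apply the configured null behavior directly;
// a false match flips INCLUDE/EXCLUDE; items ending on INCLUDE are kept.
public class FilterDecisionSketch {
    enum FilterType {
        INCLUDE, EXCLUDE;
        FilterType reverse() { return this == INCLUDE ? EXCLUDE : INCLUDE; }
    }

    static boolean keep(Boolean match, FilterType filterType, FilterType nullBehavior) {
        FilterType action = filterType;
        if (match == null) {        // predicate failed or returned null
            action = nullBehavior;  // INCLUDE or EXCLUDE the item outright
            match = true;
        }
        if (!match) {
            action = action.reverse();
        }
        return action == FilterType.INCLUDE;
    }

    public static void main(String[] args) {
        System.out.println(keep(true, FilterType.INCLUDE, FilterType.EXCLUDE));  // true: kept
        System.out.println(keep(false, FilterType.INCLUDE, FilterType.EXCLUDE)); // false: dropped
        System.out.println(keep(null, FilterType.INCLUDE, FilterType.EXCLUDE));  // false: dropped, as in the test
    }
}
```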
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
Preconditions.checkArgument(
namespace.levels().length >= 1, "Missing database in table identifier: %s", namespace);
Path nsPath = new Path(warehouseLocation, SLASH.join(namespace.levels()));
Set<TableIdentifier> tblIdents = Sets.newHashSet();
try {
if (!isDirectory(nsPath)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
RemoteIterator<FileStatus> it = fs.listStatusIterator(nsPath);
while (it.hasNext()) {
FileStatus status = it.next();
if (!status.isDirectory()) {
// Ignore paths that are not directories.
continue;
}
Path path = status.getPath();
if (isTableDir(path)) {
TableIdentifier tblIdent = TableIdentifier.of(namespace, path.getName());
tblIdents.add(tblIdent);
}
}
} catch (IOException ioe) {
throw new RuntimeIOException(ioe, "Failed to list tables under: %s", namespace);
}
return Lists.newArrayList(tblIdents);
} | @Test
public void testListTables() throws Exception {
HadoopCatalog catalog = hadoopCatalog();
TableIdentifier tbl1 = TableIdentifier.of("db", "tbl1");
TableIdentifier tbl2 = TableIdentifier.of("db", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "ns1", "tbl3");
TableIdentifier tbl4 = TableIdentifier.of("db", "metadata", "metadata");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4)
.forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
List<TableIdentifier> tbls1 = catalog.listTables(Namespace.of("db"));
Set<String> tblSet = Sets.newHashSet(tbls1.stream().map(t -> t.name()).iterator());
assertThat(tblSet).hasSize(2).contains("tbl1").contains("tbl2");
List<TableIdentifier> tbls2 = catalog.listTables(Namespace.of("db", "ns1"));
assertThat(tbls2).hasSize(1);
assertThat(tbls2.get(0).name()).isEqualTo("tbl3");
assertThatThrownBy(() -> catalog.listTables(Namespace.of("db", "ns1", "ns2")))
.isInstanceOf(NoSuchNamespaceException.class)
.hasMessage("Namespace does not exist: db.ns1.ns2");
} |
@Override
public BasicTypeDefine<MysqlType> reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.<MysqlType>builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case NULL:
builder.nativeType(MysqlType.NULL);
builder.columnType(MYSQL_NULL);
builder.dataType(MYSQL_NULL);
break;
case BOOLEAN:
builder.nativeType(MysqlType.BOOLEAN);
builder.columnType(String.format("%s(%s)", MYSQL_TINYINT, 1));
builder.dataType(MYSQL_TINYINT);
builder.length(1L);
break;
case TINYINT:
builder.nativeType(MysqlType.TINYINT);
builder.columnType(MYSQL_TINYINT);
builder.dataType(MYSQL_TINYINT);
break;
case SMALLINT:
builder.nativeType(MysqlType.SMALLINT);
builder.columnType(MYSQL_SMALLINT);
builder.dataType(MYSQL_SMALLINT);
break;
case INT:
builder.nativeType(MysqlType.INT);
builder.columnType(MYSQL_INT);
builder.dataType(MYSQL_INT);
break;
case BIGINT:
builder.nativeType(MysqlType.BIGINT);
builder.columnType(MYSQL_BIGINT);
builder.dataType(MYSQL_BIGINT);
break;
case FLOAT:
builder.nativeType(MysqlType.FLOAT);
builder.columnType(MYSQL_FLOAT);
builder.dataType(MYSQL_FLOAT);
break;
case DOUBLE:
builder.nativeType(MysqlType.DOUBLE);
builder.columnType(MYSQL_DOUBLE);
builder.dataType(MYSQL_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.nativeType(MysqlType.DECIMAL);
builder.columnType(String.format("%s(%s,%s)", MYSQL_DECIMAL, precision, scale));
builder.dataType(MYSQL_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.nativeType(MysqlType.VARBINARY);
builder.columnType(
String.format("%s(%s)", MYSQL_VARBINARY, MAX_VARBINARY_LENGTH / 2));
builder.dataType(MYSQL_VARBINARY);
} else if (column.getColumnLength() < MAX_VARBINARY_LENGTH) {
builder.nativeType(MysqlType.VARBINARY);
builder.columnType(
String.format("%s(%s)", MYSQL_VARBINARY, column.getColumnLength()));
builder.dataType(MYSQL_VARBINARY);
} else if (column.getColumnLength() < POWER_2_24) {
builder.nativeType(MysqlType.MEDIUMBLOB);
builder.columnType(MYSQL_MEDIUMBLOB);
builder.dataType(MYSQL_MEDIUMBLOB);
} else {
builder.nativeType(MysqlType.LONGBLOB);
builder.columnType(MYSQL_LONGBLOB);
builder.dataType(MYSQL_LONGBLOB);
}
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.nativeType(MysqlType.LONGTEXT);
builder.columnType(MYSQL_LONGTEXT);
builder.dataType(MYSQL_LONGTEXT);
} else if (column.getColumnLength() < POWER_2_8) {
builder.nativeType(MysqlType.VARCHAR);
builder.columnType(
String.format("%s(%s)", MYSQL_VARCHAR, column.getColumnLength()));
builder.dataType(MYSQL_VARCHAR);
} else if (column.getColumnLength() < POWER_2_16) {
builder.nativeType(MysqlType.TEXT);
builder.columnType(MYSQL_TEXT);
builder.dataType(MYSQL_TEXT);
} else if (column.getColumnLength() < POWER_2_24) {
builder.nativeType(MysqlType.MEDIUMTEXT);
builder.columnType(MYSQL_MEDIUMTEXT);
builder.dataType(MYSQL_MEDIUMTEXT);
} else {
builder.nativeType(MysqlType.LONGTEXT);
builder.columnType(MYSQL_LONGTEXT);
builder.dataType(MYSQL_LONGTEXT);
}
break;
case DATE:
builder.nativeType(MysqlType.DATE);
builder.columnType(MYSQL_DATE);
builder.dataType(MYSQL_DATE);
break;
case TIME:
builder.nativeType(MysqlType.TIME);
builder.dataType(MYSQL_TIME);
if (version.isAtOrBefore(MySqlVersion.V_5_5)) {
builder.columnType(MYSQL_TIME);
} else if (column.getScale() != null && column.getScale() > 0) {
int timeScale = column.getScale();
if (timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
MAX_SCALE,
timeScale);
}
builder.columnType(String.format("%s(%s)", MYSQL_TIME, timeScale));
builder.scale(timeScale);
} else {
builder.columnType(MYSQL_TIME);
}
break;
case TIMESTAMP:
builder.nativeType(MysqlType.DATETIME);
builder.dataType(MYSQL_DATETIME);
if (version.isAtOrBefore(MySqlVersion.V_5_5)) {
builder.columnType(MYSQL_DATETIME);
} else if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("%s(%s)", MYSQL_DATETIME, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(MYSQL_DATETIME);
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.MYSQL,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertNull() {
Column column =
PhysicalColumn.of("test", BasicType.VOID_TYPE, (Long) null, true, "null", "null");
BasicTypeDefine<MysqlType> typeDefine =
MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(MysqlType.NULL, typeDefine.getNativeType());
Assertions.assertEquals(MySqlTypeConverter.MYSQL_NULL, typeDefine.getColumnType());
Assertions.assertEquals(MySqlTypeConverter.MYSQL_NULL, typeDefine.getDataType());
Assertions.assertEquals(column.isNullable(), typeDefine.isNullable());
Assertions.assertEquals(column.getDefaultValue(), typeDefine.getDefaultValue());
Assertions.assertEquals(column.getComment(), typeDefine.getComment());
} |
public RetriableCommand(String description) {
this.description = description;
} | @Test
public void testRetriableCommand() {
    try {
      new MyRetriableCommand(5).execute(0);
      Assert.fail("Expected execution to fail under the default retry policy");
    }
    catch (Exception e) {
      // expected: the command needs more attempts than the default policy allows
    }
    try {
      new MyRetriableCommand(3).execute(0);
    }
    catch (Exception e) {
      Assert.fail("Execution should succeed under the default retry policy");
    }
    try {
      new MyRetriableCommand(5, RetryPolicies.
          retryUpToMaximumCountWithFixedSleep(5, 0, TimeUnit.MILLISECONDS)).execute(0);
    }
    catch (Exception e) {
      Assert.fail("Execution should succeed with five retries allowed");
    }
} |
public void displayGiant(GiantModel giant) {
LOGGER.info(giant.toString());
} | @Test
void testDisplayGiant() {
final var view = new GiantView();
final var model = mock(GiantModel.class);
view.displayGiant(model);
assertEquals(model.toString(), appender.getLastMessage());
assertEquals(1, appender.getLogSize());
} |
@Udf
public String ucase(
@UdfParameter(description = "The string to upper-case") final String input) {
if (input == null) {
return null;
}
return input.toUpperCase();
} | @Test
public void shouldRetainUpperCaseInput() {
final String result = udf.ucase("FOO");
assertThat(result, is("FOO"));
} |
public void resetPositionsIfNeeded() {
Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();
if (offsetResetTimestamps.isEmpty())
return;
resetPositionsAsync(offsetResetTimestamps);
} | @Test
public void testResetPositionsSkipsBlackedOutConnections() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST);
// Check that we skip sending the ListOffset request when the node is blacked out
client.updateMetadata(initialUpdateResponse);
Node node = initialUpdateResponse.brokers().iterator().next();
client.backoff(node, 500);
offsetFetcher.resetPositionsIfNeeded();
assertEquals(0, consumerClient.pendingRequestCount());
consumerClient.pollNoWakeup();
assertTrue(subscriptions.isOffsetResetNeeded(tp0));
assertEquals(OffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(tp0));
time.sleep(500);
client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.EARLIEST_TIMESTAMP),
listOffsetResponse(Errors.NONE, 1L, 5L));
offsetFetcher.resetPositionsIfNeeded();
consumerClient.pollNoWakeup();
assertFalse(subscriptions.isOffsetResetNeeded(tp0));
assertTrue(subscriptions.isFetchable(tp0));
assertEquals(5, subscriptions.position(tp0).offset);
} |
public static void replaceNodeText(Document document, String containerNodeName, String nodeName, String toReplace, String replacement) {
asStream(document.getElementsByTagName(containerNodeName))
.forEach(containerNode -> asStream(containerNode.getChildNodes())
.filter(childNode -> Objects.equals(nodeName, childNode.getNodeName()) && Objects.equals(toReplace, childNode.getTextContent()))
.forEach(childNode -> childNode.setTextContent(replacement)));
} | @Test
public void replaceNodeText() throws Exception {
final String replacement = "replacement";
Document document = DOMParserUtil.getDocument(XML);
DOMParserUtil.replaceNodeText(document, MAIN_NODE, TEST_NODE, TEST_NODE_CONTENT, replacement);
final Map<Node, List<Node>> retrieved = DOMParserUtil.getChildrenNodesMap(document, MAIN_NODE, TEST_NODE);
assertThat(retrieved).isNotNull();
assertThat(retrieved).hasSize(1);
List<Node> testNodes = retrieved.values().iterator().next();
assertThat(testNodes).isNotNull();
assertThat(testNodes).hasSize(1);
assertThat(testNodes.get(0).getTextContent()).isEqualTo(replacement);
} |
@Override
public Map<String, Integer> getRefByUuid() {
return refByUuid;
} | @Test
public void getRefByUuid_returns_empty_map() {
assertThat(underTest.getRefByUuid()).isEmpty();
} |
@VisibleForTesting
static boolean isReaperThreadRunning() {
synchronized (REAPER_THREAD_LOCK) {
return null != REAPER_THREAD && REAPER_THREAD.isAlive();
}
} | @Test
void testReaperThreadStartFailed() throws Exception {
try {
new SafetyNetCloseableRegistry(OutOfMemoryReaperThread::new);
} catch (java.lang.OutOfMemoryError error) {
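            // expected: the injected reaper thread factory throws OutOfMemoryError; swallow it so recovery can be verified below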
}
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isFalse();
// the OOM error will not lead to failure of subsequent constructor call.
SafetyNetCloseableRegistry closeableRegistry = new SafetyNetCloseableRegistry();
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isTrue();
closeableRegistry.close();
} |
@Override
public synchronized ListenableFuture<BufferResult> get(OutputBufferId bufferId, long startSequenceId, DataSize maxSize)
{
        requireNonNull(bufferId, "bufferId is null");
checkArgument(bufferId.getId() == outputBufferId.getId(), "Invalid buffer id");
checkArgument(maxSize.toBytes() > 0, "maxSize must be at least 1 byte");
acknowledge(bufferId, startSequenceId);
long currentSequenceId = this.currentSequenceId.get();
// process the request if we have no more data coming in, have data to read, or if this is an outdated request
if (noMorePages.get() || !handleInfoQueue.isEmpty() || !pages.isEmpty() || currentSequenceId != startSequenceId) {
return processRead(startSequenceId, maxSize);
}
        // create a new pending read and abort the previous one
PendingRead oldPendingRead = pendingRead;
pendingRead = new PendingRead(taskInstanceId, currentSequenceId, maxSize);
if (oldPendingRead != null) {
oldPendingRead.completeResultFutureWithEmpty();
}
return pendingRead.getResultFuture();
} | @Test
public void testMultiplePendingReads()
{
SpoolingOutputBuffer buffer = createSpoolingOutputBuffer();
// attempt to get a page
ListenableFuture<BufferResult> oldPendingRead = buffer.get(BUFFER_ID, 0, sizeOfPages(3));
assertFalse(oldPendingRead.isDone());
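        // issuing a second read at the same sequence id aborts the previous pending read with an empty result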
ListenableFuture<BufferResult> newPendingRead = buffer.get(BUFFER_ID, 0, sizeOfPages(3));
assertFalse(newPendingRead.isDone());
assertTrue(oldPendingRead.isDone());
// add three pages
List<Page> pages = new LinkedList<>();
for (int i = 0; i < 3; i++) {
pages.add(createPage(i));
}
addPages(buffer, pages);
assertBufferResultEquals(TYPES, getFuture(oldPendingRead, MAX_WAIT), emptyResults(TASK_INSTANCE_ID, 0, false));
assertBufferResultEquals(TYPES, getFuture(newPendingRead, MAX_WAIT), createBufferResult(TASK_INSTANCE_ID, 0, pages));
} |
@Override
public TopicAssignment place(
PlacementSpec placement,
ClusterDescriber cluster
) throws InvalidReplicationFactorException {
RackList rackList = new RackList(random, cluster.usableBrokers());
throwInvalidReplicationFactorIfNonPositive(placement.numReplicas());
throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers());
throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(),
rackList.numTotalBrokers());
List<List<Integer>> placements = new ArrayList<>(placement.numPartitions());
for (int partition = 0; partition < placement.numPartitions(); partition++) {
placements.add(rackList.place(placement.numReplicas()));
}
return new TopicAssignment(
placements.stream().map(replicas -> new PartitionAssignment(replicas, cluster)).collect(Collectors.toList())
);
} | @Test
public void testRackListNonPositiveReplicationFactor() {
MockRandom random = new MockRandom();
RackList rackList = new RackList(random, Arrays.asList(
new UsableBroker(11, Optional.of("1"), false),
new UsableBroker(10, Optional.of("1"), false)).iterator());
assertEquals("Invalid replication factor -1: the replication factor must be positive.",
assertThrows(InvalidReplicationFactorException.class,
() -> rackList.place(-1)).getMessage());
} |
public static List<String> filterTopics(List<String> original, String regex) {
Pattern topicsPattern = Pattern.compile(regex);
return filterTopics(original, topicsPattern);
} | @Test
public void testFilterTopics() {
String topicName1 = "persistent://my-property/my-ns/pattern-topic-1";
String topicName2 = "persistent://my-property/my-ns/pattern-topic-2";
String topicName3 = "persistent://my-property/my-ns/hello-3";
String topicName4 = "non-persistent://my-property/my-ns/hello-4";
List<String> topicsNames = Lists.newArrayList(topicName1, topicName2, topicName3, topicName4);
Pattern pattern1 = Pattern.compile("persistent://my-property/my-ns/pattern-topic.*");
List<String> result1 = TopicList.filterTopics(topicsNames, pattern1);
assertTrue(result1.size() == 2 && result1.contains(topicName1) && result1.contains(topicName2));
Pattern pattern2 = Pattern.compile("persistent://my-property/my-ns/.*");
List<String> result2 = TopicList.filterTopics(topicsNames, pattern2);
assertTrue(result2.size() == 4
&& Stream.of(topicName1, topicName2, topicName3, topicName4).allMatch(result2::contains));
} |
public TableRecords() {
} | @Test
public void testTableRecords() {
Assertions.assertThrows(ShouldNeverHappenException.class, () -> {
TableRecords tableRecords = new TableRecords(new TableMeta());
tableRecords.setTableMeta(new TableMeta());
});
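        // a freshly constructed TableRecords holds no rows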
TableRecords tableRecords = new TableRecords(new TableMeta());
Assertions.assertEquals(0, tableRecords.size());
} |
public abstract byte[] encode(MutableSpan input); | @Test void localSpan_JSON_V2() {
assertThat(new String(encoder.encode(localSpan), UTF_8))
.isEqualTo(
"{\"traceId\":\"dc955a1d4768875d\",\"id\":\"dc955a1d4768875d\",\"name\":\"encode\",\"timestamp\":1510256710021866,\"duration\":1117,\"localEndpoint\":{\"serviceName\":\"isao01\",\"ipv4\":\"10.23.14.72\"}}");
} |
public static List<Global> getVariableList( final List<String> variableCells ){
final List<Global> variableList = new ArrayList<>();
if ( variableCells == null ) {
return variableList;
}
for( String variableCell: variableCells ){
final StringTokenizer tokens = new StringTokenizer( variableCell, "," );
while ( tokens.hasMoreTokens() ) {
final String token = tokens.nextToken();
final Global vars = new Global();
final StringTokenizer paramTokens = new StringTokenizer( token, " " );
vars.setClassName( paramTokens.nextToken() );
if ( !paramTokens.hasMoreTokens() ) {
throw new DecisionTableParseException( "The format for global variables is incorrect. " + "It should be: [Class name, Class otherName]. But it was: [" + variableCell + "]" );
}
vars.setIdentifier( paramTokens.nextToken() );
variableList.add( vars );
}
}
return variableList;
} | @Test
public void testBadVariableFormat() {
List<String> varCells = List.of("class1, object2");
assertThatExceptionOfType(DecisionTableParseException.class).isThrownBy(() -> getVariableList(varCells));
} |
@VisibleForTesting
static CPUResource getDefaultCpus(final Configuration configuration) {
int fallback = configuration.get(YarnConfigOptions.VCORES);
double cpuCoresDouble =
TaskExecutorProcessUtils.getCpuCoresWithFallback(configuration, fallback)
.getValue()
.doubleValue();
@SuppressWarnings("NumericCastThatLosesPrecision")
long cpuCoresLong = Math.max((long) Math.ceil(cpuCoresDouble), 1L);
//noinspection FloatingPointEquality
if (cpuCoresLong != cpuCoresDouble) {
LOG.info(
"The amount of cpu cores must be a positive integer on Yarn. Rounding {} up to the closest positive integer {}.",
cpuCoresDouble,
cpuCoresLong);
}
if (cpuCoresLong > Integer.MAX_VALUE) {
throw new IllegalConfigurationException(
String.format(
"The amount of cpu cores %d cannot exceed Integer.MAX_VALUE: %d",
cpuCoresLong, Integer.MAX_VALUE));
}
//noinspection NumericCastThatLosesPrecision
return new CPUResource(cpuCoresLong);
} | @Test
void testGetCpuRoundUp() {
final Configuration configuration = new Configuration();
configuration.set(TaskManagerOptions.CPU_CORES, 0.5);
assertThat(YarnWorkerResourceSpecFactory.getDefaultCpus(configuration))
.isEqualTo(new CPUResource(1.0));
} |
public static void runner(Bank bank, CountDownLatch latch) {
try {
SecureRandom random = new SecureRandom();
Thread.sleep(random.nextInt(1000));
LOGGER.info("Start transferring...");
for (int i = 0; i < 1000000; i++) {
bank.transfer(random.nextInt(4), random.nextInt(4), random.nextInt(0, BASE_AMOUNT));
}
LOGGER.info("Finished transferring.");
latch.countDown();
} catch (InterruptedException e) {
LOGGER.error(e.getMessage());
Thread.currentThread().interrupt();
}
} | @Test
void runnerShouldExecuteWithoutException() {
var bank = new Bank(4, 1000);
var latch = new CountDownLatch(1);
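        // runner() performs the transfers and counts the latch down once on completion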
assertDoesNotThrow(() -> Main.runner(bank, latch));
assertEquals(0, latch.getCount());
} |
public Type parse(final String schema) {
try {
final TypeContext typeContext = parseTypeContext(schema);
return getType(typeContext);
} catch (final ParsingException e) {
throw new KsqlStatementException(
"Failed to parse schema",
"Failed to parse: " + schema,
schema,
KsqlStatementException.Problem.STATEMENT,
e
);
}
} | @Test
public void shouldGetTypeFromStruct() {
// Given:
final String schemaString = "STRUCT<A VARCHAR>";
// When:
final Type type = parser.parse(schemaString);
// Then:
assertThat(type, is(new Type(SqlTypes.struct().field("A", SqlTypes.STRING).build())));
} |
public FEELFnResult<TemporalAmount> invoke(@ParameterName( "from" ) String val) {
if ( val == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
}
try {
// try to parse as days/hours/minute/seconds
return FEELFnResult.ofResult( Duration.parse( val ) );
} catch( DateTimeParseException e ) {
// if it failed, try to parse as years/months
try {
return FEELFnResult.ofResult(ComparablePeriod.parse(val).normalized());
} catch( DateTimeParseException e2 ) {
// failed to parse, so return null according to the spec
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "date-parsing exception",
new RuntimeException(new Throwable() { public final List<Throwable> causes = Arrays.asList( new Throwable[]{e, e2} ); } )));
}
}
} | @Test
void invokeParamTemporalPeriod() {
FunctionTestUtil.assertResult(durationFunction.invoke(ComparablePeriod.parse("P2Y3M4D")), ComparablePeriod.of(2, 3, 4));
} |
void removeActivity(Activity activity) {
if (activity != null) {
hashSet.remove(activity.hashCode());
}
} | @Test
public void removeActivity() {
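        // smoke test: removing the activity must complete without throwing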
mActivityLifecycle.removeActivity(mActivity);
} |
@Override
public Schema getSourceSchema() {
if (schema == null) {
try {
Schema.Parser parser = new Schema.Parser();
schema = parser.parse(schemaString);
} catch (Exception e) {
throw new HoodieSchemaException("Failed to parse schema: " + schemaString, e);
}
}
return schema;
} | @Test
public void validateDefaultSchemaGeneration() throws IOException {
TypedProperties properties = new TypedProperties();
properties.setProperty(ProtoClassBasedSchemaProviderConfig.PROTO_SCHEMA_CLASS_NAME.key(), Sample.class.getName());
ProtoClassBasedSchemaProvider protoToAvroSchemaProvider = new ProtoClassBasedSchemaProvider(properties, null);
Schema convertedSchema = protoToAvroSchemaProvider.getSourceSchema();
Schema.Parser parser = new Schema.Parser();
Schema expectedSchema = parser.parse(getClass().getClassLoader().getResourceAsStream("schema-provider/proto/sample_schema_defaults.avsc"));
Assertions.assertEquals(expectedSchema, convertedSchema);
} |
String getProfileViewResponseFromBody(String responseBody) {
String template = (String) DEFAULT_GSON.fromJson(responseBody, Map.class).get("template");
if (StringUtils.isBlank(template)) {
throw new RuntimeException("Template was blank!");
}
return template;
} | @Test
public void shouldUnJSONizeGetProfileViewResponseFromBody() {
String template = new ElasticAgentExtensionConverterV5().getProfileViewResponseFromBody("{\"template\":\"foo\"}");
assertThat(template, is("foo"));
} |
@Override
public ProjectRepositories load(String projectKey, @Nullable String branchBase) {
GetRequest request = new GetRequest(getUrl(projectKey, branchBase));
try (WsResponse response = wsClient.call(request)) {
try (InputStream is = response.contentStream()) {
return processStream(is);
} catch (IOException e) {
throw new IllegalStateException("Couldn't load project repository for " + projectKey, e);
}
} catch (RuntimeException e) {
if (shouldThrow(e)) {
throw e;
}
LOG.debug("Project repository not available - continuing without it");
return new SingleProjectRepository();
}
} | @Test(expected = IllegalStateException.class)
public void failFastHttpError() {
HttpException http = new HttpException("url", 403, null);
IllegalStateException e = new IllegalStateException("http error", http);
WsTestUtil.mockException(wsClient, e);
loader.load(PROJECT_KEY, null);
} |
@Override
public SendResult send(final Message message) {
return send(message, this.rocketmqProducer.getSendMsgTimeout());
} | @Test
public void testSend_OK() throws InterruptedException, RemotingException, MQClientException, MQBrokerException {
SendResult sendResult = new SendResult();
sendResult.setMsgId("TestMsgID");
sendResult.setSendStatus(SendStatus.SEND_OK);
when(rocketmqProducer.send(any(Message.class), anyLong())).thenReturn(sendResult);
io.openmessaging.producer.SendResult omsResult =
producer.send(producer.createBytesMessage("HELLO_TOPIC", new byte[] {'a'}));
assertThat(omsResult.messageId()).isEqualTo("TestMsgID");
} |
public static String[] splitString( String string, String separator ) {
    /*
     * Example: "a;b;c;d" --> new String[] { "a", "b", "c", "d" }
     */
List<String> list = new ArrayList<>();
if ( string == null || string.length() == 0 ) {
return new String[] {};
}
int sepLen = separator.length();
int from = 0;
int end = string.length() - sepLen + 1;
for ( int i = from; i < end; i += sepLen ) {
if ( string.substring( i, i + sepLen ).equalsIgnoreCase( separator ) ) {
// OK, we found a separator, the string to add to the list
// is [from, i[
list.add( nullToEmpty( string.substring( from, i ) ) );
from = i + sepLen;
}
}
// Wait, if the string didn't end with a separator, we still have information at the end of the string...
// In our example that would be "d"...
if ( from + sepLen <= string.length() ) {
list.add( nullToEmpty( string.substring( from, string.length() ) ) );
}
return list.toArray( new String[list.size()] );
} | @Test
public void testSplitStringWithDelimiterNullAndEnclosureNull() {
String stringToSplit = "Hello, world";
String[] result = Const.splitString( stringToSplit, null, null );
assertSplit( result, stringToSplit );
} |
public static NetworkEndpoint forIp(String ipAddress) {
checkArgument(InetAddresses.isInetAddress(ipAddress), "'%s' is not an IP address.", ipAddress);
return NetworkEndpoint.newBuilder()
.setType(NetworkEndpoint.Type.IP)
.setIpAddress(
IpAddress.newBuilder()
.setAddressFamily(ipAddressFamily(ipAddress))
.setAddress(ipAddress))
.build();
} | @Test
public void forIp_withIpV6Address_returnsIpV6NetworkEndpoint() {
assertThat(NetworkEndpointUtils.forIp("3ffe::1"))
.isEqualTo(
NetworkEndpoint.newBuilder()
.setType(NetworkEndpoint.Type.IP)
.setIpAddress(
IpAddress.newBuilder()
.setAddressFamily(AddressFamily.IPV6)
.setAddress("3ffe::1"))
.build());
} |
public static TypeDescriptor javaTypeForFieldType(FieldType fieldType) {
switch (fieldType.getTypeName()) {
case LOGICAL_TYPE:
// TODO: shouldn't we handle this differently?
return javaTypeForFieldType(fieldType.getLogicalType().getBaseType());
case ARRAY:
return TypeDescriptors.lists(javaTypeForFieldType(fieldType.getCollectionElementType()));
case ITERABLE:
return TypeDescriptors.iterables(
javaTypeForFieldType(fieldType.getCollectionElementType()));
case MAP:
return TypeDescriptors.maps(
javaTypeForFieldType(fieldType.getMapKeyType()),
javaTypeForFieldType(fieldType.getMapValueType()));
case ROW:
return TypeDescriptors.rows();
default:
return PRIMITIVE_MAPPING.get(fieldType.getTypeName());
}
} | @Test
public void testRowTypeToJavaType() {
assertEquals(
TypeDescriptors.lists(TypeDescriptors.rows()),
FieldTypeDescriptors.javaTypeForFieldType(
FieldType.array(FieldType.row(Schema.builder().build()))));
} |
@Override
public FindCoordinatorRequest.Builder buildRequest(Set<CoordinatorKey> keys) {
unrepresentableKeys = keys.stream().filter(k -> k == null || !isRepresentableKey(k.idValue)).collect(Collectors.toSet());
Set<CoordinatorKey> representableKeys = keys.stream().filter(k -> k != null && isRepresentableKey(k.idValue)).collect(Collectors.toSet());
if (batch) {
ensureSameType(representableKeys);
FindCoordinatorRequestData data = new FindCoordinatorRequestData()
.setKeyType(type.id())
.setCoordinatorKeys(representableKeys.stream().map(k -> k.idValue).collect(Collectors.toList()));
return new FindCoordinatorRequest.Builder(data);
} else {
CoordinatorKey key = requireSingletonAndType(representableKeys);
return new FindCoordinatorRequest.Builder(
new FindCoordinatorRequestData()
.setKey(key.idValue)
.setKeyType(key.type.id())
);
}
} | @Test
public void testBuildLookupRequestRequiresKeySameType() {
CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext());
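        // mixing group and transactional-id keys in one batched lookup must be rejected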
assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest(
new HashSet<>(Arrays.asList(
CoordinatorKey.byGroupId("group"),
CoordinatorKey.byTransactionalId("txnid")))));
} |
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
try {
final BrickApiClient client = new BrickApiClient(session);
if(status.isExists()) {
if(!new CaseInsensitivePathPredicate(file).test(target)) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", target, file));
}
new BrickDeleteFeature(session).delete(Collections.singletonList(target), callback, delete);
}
}
final FileActionEntity entity = new FileActionsApi(client)
.move(new MovePathBody().destination(StringUtils.removeStart(target.getAbsolute(), String.valueOf(Path.DELIMITER))),
StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)));
if(entity.getFileMigrationId() != null) {
this.poll(client, entity);
}
return target.withAttributes(file.attributes());
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Cannot rename {0}", e, file);
}
} | @Test
public void testRename() throws Exception {
final Path test = new BrickTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
new AlphanumericRandomStringService().random().toLowerCase(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Path target = new BrickMoveFeature(session).move(test, new Path(new DefaultHomeFinderService(session).find(),
new AlphanumericRandomStringService().random().toLowerCase(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new BrickFindFeature(session).find(test));
assertTrue(new BrickFindFeature(session).find(target));
assertEquals(test.attributes(), target.attributes());
new BrickDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
@MethodNotAvailable
public CompletionStage<Void> setAsync(K key, V value) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testSetAsyncWithExpiryPolicy() {
ExpiryPolicy expiryPolicy = new HazelcastExpiryPolicy(1, 1, 1, TimeUnit.MILLISECONDS);
adapter.setAsync(42, "value", expiryPolicy);
} |
@Override
public String toString() {
return "SingleColumn{"
+ ", alias=" + alias
+ ", expression=" + expression
+ '}';
} | @Test
public void shouldCreateSingleColumn() {
// When:
SingleColumn col = new SingleColumn(A_LOCATION, AN_EXPRESSION, Optional.of(WINDOWSTART_NAME));
// Then:
    assertThat(col.toString(), containsString("SingleColumn{alias=Optional[`WINDOWSTART`], expression='foo'}"));
} |
public final void register(Class type, Serializer serializer) {
if (type == null) {
throw new IllegalArgumentException("type is required");
}
if (serializer.getTypeId() <= 0) {
throw new IllegalArgumentException(
"Type ID must be positive. Current: " + serializer.getTypeId() + ", Serializer: " + serializer);
}
safeRegister(type, createSerializerAdapter(serializer));
} | @Test(expected = IllegalArgumentException.class)
public void testRegister_typeIdNegative() {
StringBufferSerializer serializer = new StringBufferSerializer(true);
serializer.typeId = -10000;
abstractSerializationService.register(StringBuffer.class, serializer);
} |
public synchronized void write(Mutation tableRecord) throws IllegalStateException {
write(ImmutableList.of(tableRecord));
} | @Test
public void testWriteMultipleRecordsShouldThrowExceptionWhenSpannerWriteFails()
throws ExecutionException, InterruptedException {
// arrange
prepareTable();
when(spanner.getDatabaseClient(any()).write(any())).thenThrow(SpannerException.class);
ImmutableList<Mutation> testMutations =
ImmutableList.of(
Mutation.newInsertOrUpdateBuilder("SingerId")
.set("SingerId")
.to(1)
.set("FirstName")
.to("Marc")
.set("LastName")
.to("Richards")
.build(),
Mutation.newInsertOrUpdateBuilder("SingerId")
.set("SingerId")
.to(2)
.set("FirstName")
.to("Catalina")
.set("LastName")
.to("Smith")
.build());
// act & assert
assertThrows(SpannerResourceManagerException.class, () -> testManager.write(testMutations));
} |
public List<B2FileInfoResponse> find(final Path file) throws BackgroundException {
if(log.isDebugEnabled()) {
log.debug(String.format("Finding multipart uploads for %s", file));
}
try {
final List<B2FileInfoResponse> uploads = new ArrayList<B2FileInfoResponse>();
// This operation lists in-progress multipart uploads. An in-progress multipart upload is a
// multipart upload that has been initiated, using the Initiate Multipart Upload request, but has
// not yet been completed or aborted.
String startFileId = null;
do {
final B2ListFilesResponse chunk;
chunk = session.getClient().listUnfinishedLargeFiles(
fileid.getVersionId(containerService.getContainer(file)), startFileId, null);
for(B2FileInfoResponse upload : chunk.getFiles()) {
if(file.isDirectory()) {
final Path parent = new Path(containerService.getContainer(file), upload.getFileName(), EnumSet.of(Path.Type.file)).getParent();
if(new SimplePathPredicate(parent).test(file)) {
uploads.add(upload);
}
}
else {
if(StringUtils.equals(upload.getFileName(), containerService.getKey(file))) {
uploads.add(upload);
}
}
}
if(log.isInfoEnabled()) {
log.info(String.format("Found %d previous multipart uploads for %s", uploads.size(), file));
}
startFileId = chunk.getNextFileId();
}
while(startFileId != null);
if(log.isInfoEnabled()) {
for(B2FileInfoResponse upload : uploads) {
log.info(String.format("Found multipart upload %s for %s", upload, file));
}
}
// Uploads are listed in the order they were started, with the oldest one first
uploads.sort(new Comparator<B2FileInfoResponse>() {
@Override
public int compare(final B2FileInfoResponse o1, final B2FileInfoResponse o2) {
return o1.getUploadTimestamp().compareTo(o2.getUploadTimestamp());
}
});
Collections.reverse(uploads);
return uploads;
}
catch(B2ApiException e) {
throw new B2ExceptionMappingService(fileid).map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
} | @Test
public void testFindAllPendingInBucket() throws Exception {
final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path file = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
final B2StartLargeFileResponse start1Response = session.getClient().startLargeFileUpload(
fileid.getVersionId(bucket),
file.getName(), null, Collections.emptyMap());
final B2StartLargeFileResponse start2Response = session.getClient().startLargeFileUpload(
fileid.getVersionId(bucket),
file.getName(), null, Collections.emptyMap());
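        // the feature returns unfinished uploads most recent first, so the second upload is expected at index 0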
final List<B2FileInfoResponse> list = new B2LargeUploadPartService(session, fileid).find(file);
assertFalse(list.isEmpty());
assertEquals(start2Response.getFileId(), list.get(0).getFileId());
assertEquals(start1Response.getFileId(), list.get(1).getFileId());
session.getClient().cancelLargeFileUpload(start1Response.getFileId());
session.getClient().cancelLargeFileUpload(start2Response.getFileId());
} |
@Override
public String getMethod() {
return PATH;
} | @Test
public void testSetMyCommandsWithEmptyStringLanguageCode() {
SetMyCommands setMyCommands = SetMyCommands
.builder()
.command(BotCommand.builder().command("test").description("Test description").build())
.languageCode("")
.scope(BotCommandScopeDefault.builder().build())
.build();
assertEquals("setMyCommands", setMyCommands.getMethod());
Throwable thrown = assertThrows(TelegramApiValidationException.class, setMyCommands::validate);
assertEquals("LanguageCode parameter can't be empty string", thrown.getMessage());
} |
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof ImplicitLinkedHashCollection))
return false;
ImplicitLinkedHashCollection<?> ilhs = (ImplicitLinkedHashCollection<?>) o;
return this.valuesList().equals(ilhs.valuesList());
} | @Test
public void testEquals() {
ImplicitLinkedHashCollection<TestElement> coll1 = new ImplicitLinkedHashCollection<>();
coll1.add(new TestElement(1));
coll1.add(new TestElement(2));
coll1.add(new TestElement(3));
ImplicitLinkedHashCollection<TestElement> coll2 = new ImplicitLinkedHashCollection<>();
coll2.add(new TestElement(1));
coll2.add(new TestElement(2));
coll2.add(new TestElement(3));
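        // coll3 holds the same elements in a different insertion order, so equals() must return false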
ImplicitLinkedHashCollection<TestElement> coll3 = new ImplicitLinkedHashCollection<>();
coll3.add(new TestElement(1));
coll3.add(new TestElement(3));
coll3.add(new TestElement(2));
assertEquals(coll1, coll2);
assertNotEquals(coll1, coll3);
assertNotEquals(coll2, coll3);
} |
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
final MethodCallExpr methodExpression,
final MvelCompilerContext mvelCompilerContext,
final Optional<TypedExpression> scope,
List<TypedExpression> arguments,
List<Integer> emptyCollectionArgumentsIndexes) {
Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
+ "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
} else {
final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
return resolveMethodResult;
} else {
// Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
// This needs to go through all possible combinations.
final int indexesListSize = emptyCollectionArgumentsIndexes.size();
for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
return resolveMethodResult;
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
}
// No method found, return empty.
return new Pair<>(Optional.empty(), scope);
}
}
} | @Test
public void resolveMethodWithEmptyCollectionArgumentsMethodExpressionIsNull() {
Assertions.assertThatThrownBy(
() -> MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments(
null,
null,
Optional.empty(),
null,
null))
.isInstanceOf(NullPointerException.class);
} |
@VisibleForTesting
public void validateNoticeExists(Long id) {
if (id == null) {
return;
}
NoticeDO notice = noticeMapper.selectById(id);
if (notice == null) {
throw exception(NOTICE_NOT_FOUND);
}
} | @Test
public void testValidateNoticeExists_success() {
        // insert the prerequisite data
NoticeDO dbNotice = randomPojo(NoticeDO.class);
noticeMapper.insert(dbNotice);
        // the call must succeed
noticeService.validateNoticeExists(dbNotice.getId());
} |
public static Map<String, Collection<String>> caseInsensitiveCopyOf(Map<String, Collection<String>> map) {
if (map == null) {
return Collections.emptyMap();
}
Map<String, Collection<String>> result =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
for (Map.Entry<String, Collection<String>> entry : map.entrySet()) {
String key = entry.getKey();
if (!result.containsKey(key)) {
result.put(key.toLowerCase(Locale.ROOT), new LinkedList<>());
}
result.get(key).addAll(entry.getValue());
}
result.replaceAll((key, value) -> Collections.unmodifiableCollection(value));
return Collections.unmodifiableMap(result);
} | @Test
void caseInsensitiveCopyOfMap() {
// Arrange
Map<String, Collection<String>> sourceMap = new HashMap<>();
sourceMap.put("First", Arrays.asList("abc", "qwerty", "xyz"));
sourceMap.put("camelCase", Collections.singleton("123"));
// Act
Map<String, Collection<String>> actualMap = caseInsensitiveCopyOf(sourceMap);
// Assert result
assertThat(actualMap)
.hasEntrySatisfying("First", value -> {
assertThat(value).contains("xyz", "abc", "qwerty");
}).hasEntrySatisfying("first", value -> {
assertThat(value).contains("xyz", "abc", "qwerty");
}).hasEntrySatisfying("CAMELCASE", value -> {
assertThat(value).contains("123");
}).hasEntrySatisfying("camelcase", value -> {
assertThat(value).contains("123");
});
} |
public <T> void compareSupplierResult(final T expected, final Supplier<T> experimentSupplier) {
final Timer.Sample sample = Timer.start();
try {
final T result = experimentSupplier.get();
recordResult(expected, result, sample);
} catch (final Exception e) {
recordError(e, sample);
}
} | @Test
void compareSupplierResultMatch() {
experiment.compareSupplierResult(12, () -> 12);
verify(matchTimer).record(anyLong(), eq(TimeUnit.NANOSECONDS));
} |
@Nullable
@Override
public String getMainClassFromJarPlugin() {
Jar jarTask = (Jar) project.getTasks().findByName("jar");
if (jarTask == null) {
return null;
}
Object value = jarTask.getManifest().getAttributes().get("Main-Class");
if (value instanceof Provider) {
value = ((Provider<?>) value).getOrNull();
}
if (value instanceof String) {
return (String) value;
}
if (value == null) {
return null;
}
return String.valueOf(value);
} | @Test
public void testGetMainClassFromJarAsProperty_success() {
Property<String> mainClass =
project.getObjects().property(String.class).value("some.main.class");
Jar jar = project.getTasks().withType(Jar.class).getByName("jar");
jar.setManifest(new DefaultManifest(null).attributes(ImmutableMap.of("Main-Class", mainClass)));
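    // the Main-Class attribute is a lazy Provider here; getMainClassFromJarPlugin must unwrap it to the plain String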
assertThat(gradleProjectProperties.getMainClassFromJarPlugin()).isEqualTo("some.main.class");
} |
Integer getMaxSize( Collection<BeanInjectionInfo.Property> properties, Object obj ) {
int max = Integer.MIN_VALUE;
for ( BeanInjectionInfo.Property property: properties ) {
max = Math.max( max,
( isCollection( property )
? getCollectionSize( property, obj )
// if not collection then field of length one
: 1 ) );
}
return ( max != Integer.MIN_VALUE ) ? max : null;
} | @Test
public void getMaxSize_OnlyOneField() {
    BeanInjector bi = new BeanInjector( null );
BeanInjectionInfo bii = new BeanInjectionInfo( MetaBeanLevel1.class );
MetaBeanLevel1 mbl1 = new MetaBeanLevel1();
mbl1.setSub( new MetaBeanLevel2() );
mbl1.getSub().setSeparator( "/" );
    assertEquals( Integer.valueOf( 1 ), bi.getMaxSize( bii.getProperties().values(), mbl1.getSub() ) );
} |
public DropSourceCommand create(final DropStream statement) {
return create(
statement.getName(),
statement.getIfExists(),
statement.isDeleteTopic(),
DataSourceType.KSTREAM
);
} | @Test
public void shouldCreateDropSourceOnMissingSourceWithIfExistsForStream() {
// Given:
final DropStream dropStream = new DropStream(SOME_NAME, true, true);
when(metaStore.getSource(SOME_NAME)).thenReturn(null);
// When:
final DropSourceCommand cmd = dropSourceFactory.create(dropStream);
// Then:
assertThat(cmd.getSourceName(), equalTo(SourceName.of("bob")));
} |
public void run() {
try {
InputStreamReader isr = new InputStreamReader( this.is );
BufferedReader br = new BufferedReader( isr );
String line = null;
while ( ( line = br.readLine() ) != null ) {
String logEntry = this.type + " " + line;
switch ( this.logLevel ) {
case MINIMAL:
log.logMinimal( logEntry );
break;
case BASIC:
log.logBasic( logEntry );
break;
case DETAILED:
log.logDetailed( logEntry );
break;
case DEBUG:
log.logDebug( logEntry );
break;
case ROWLEVEL:
log.logRowlevel( logEntry );
break;
case ERROR:
log.logError( logEntry );
break;
default: // NONE
break;
}
}
} catch ( IOException ioe ) {
if ( log.isError() ) {
log.logError( this.type + " " + Const.getStackTracker( ioe ) );
}
}
} | @Test
public void testLogDebug() {
streamLogger = new ConfigurableStreamLogger( log, is, LogLevel.DEBUG, PREFIX );
streamLogger.run();
Mockito.verify( log ).logDebug( OUT1 );
Mockito.verify( log ).logDebug( OUT2 );
} |
@Override
protected int getJDBCPort() {
return DefaultMSSQLServerContainer.MS_SQL_SERVER_PORT;
} | @Test
public void testGetJDBCPortReturnsCorrectValue() {
assertThat(testManager.getJDBCPort())
.isEqualTo(MSSQLResourceManager.DefaultMSSQLServerContainer.MS_SQL_SERVER_PORT);
} |
@VisibleForTesting
KsqlConfig buildConfigWithPort() {
final Map<String, Object> props = ksqlConfigNoPort.originals();
// Wire up KS IQ so that pull queries work across KSQL nodes:
props.put(
KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.APPLICATION_SERVER_CONFIG,
restConfig.getInterNodeListener(this::resolvePort).toString()
);
return new KsqlConfig(props);
} | @Test
public void shouldConfigureIQWithInterNodeListenerIfSet() {
// Given:
givenAppWithRestConfig(
ImmutableMap.of(
KsqlRestConfig.LISTENERS_CONFIG, "http://localhost:0",
KsqlRestConfig.ADVERTISED_LISTENER_CONFIG, "https://some.host:12345"
),
new MetricCollectors()
);
// When:
final KsqlConfig ksqlConfig = app.buildConfigWithPort();
// Then:
assertThat(
ksqlConfig.getKsqlStreamConfigProps().get(StreamsConfig.APPLICATION_SERVER_CONFIG),
is("https://some.host:12345")
);
} |
public static boolean parse(final String str, ResTable_config out) {
return parse(str, out, true);
} | @Test
public void parse_density_xhdpi() {
ResTable_config config = new ResTable_config();
ConfigDescription.parse("xhdpi", config);
assertThat(config.density).isEqualTo(DENSITY_XHIGH);
} |
@Override
public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException {
final List<Path> segments;
long size = 0L;
try {
segments = new SwiftObjectListService(session, regionService).list(new SwiftSegmentService(session, regionService)
.getSegmentsDirectory(file), new DisabledListProgressListener()).toList();
if(segments.isEmpty()) {
return Write.override;
}
}
catch(NotfoundException e) {
return Write.override;
}
for(Path segment : segments) {
size += segment.attributes().getSize();
}
return new Write.Append(true).withStatus(status).withOffset(size);
} | @Test
public void testAppendNoSegmentFound() throws Exception {
final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
container.attributes().setRegion("IAD");
final SwiftRegionService regionService = new SwiftRegionService(session);
final Write.Append append = new SwiftLargeObjectUploadFeature(session, regionService,
new SwiftWriteFeature(session, regionService))
.append(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertFalse(append.append);
assertEquals(Write.override, append);
} |
public void doMonitorWork() throws RemotingException, MQClientException, InterruptedException {
long beginTime = System.currentTimeMillis();
this.monitorListener.beginRound();
TopicList topicList = defaultMQAdminExt.fetchAllTopicList();
for (String topic : topicList.getTopicList()) {
if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
String consumerGroup = KeyBuilder.parseGroup(topic);
try {
this.reportUndoneMsgs(consumerGroup);
} catch (Exception e) {
// log.error("reportUndoneMsgs Exception", e);
}
try {
this.reportConsumerRunningInfo(consumerGroup);
} catch (Exception e) {
// log.error("reportConsumerRunningInfo Exception", e);
}
}
}
this.monitorListener.endRound();
long spentTimeMills = System.currentTimeMillis() - beginTime;
logger.info("Execute one round monitor work, spent timemills: {}", spentTimeMills);
} | @Test
public void testDoMonitorWork() throws RemotingException, MQClientException, InterruptedException {
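        // smoke test: one monitoring round over all retry topics must complete without throwing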
monitorService.doMonitorWork();
} |