focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k)
---|---
public static long getPid()
{
return PID;
} | @Test
void shouldReturnPid()
{
assertNotEquals(PID_NOT_FOUND, getPid());
} |
@Override
protected void rename(
List<LocalResourceId> srcResourceIds,
List<LocalResourceId> destResourceIds,
MoveOptions... moveOptions)
throws IOException {
if (moveOptions.length > 0) {
throw new UnsupportedOperationException("Support for move options is not yet implemented.");
}
checkArgument(
srcResourceIds.size() == destResourceIds.size(),
"Number of source files %s must equal number of destination files %s",
srcResourceIds.size(),
destResourceIds.size());
int numFiles = srcResourceIds.size();
for (int i = 0; i < numFiles; i++) {
LocalResourceId src = srcResourceIds.get(i);
LocalResourceId dst = destResourceIds.get(i);
LOG.debug("Renaming {} to {}", src, dst);
File parent = dst.getCurrentDirectory().getPath().toFile();
if (!parent.exists()) {
checkArgument(
parent.mkdirs() || parent.exists(),
"Unable to make output directory %s in order to move into file %s",
parent,
dst.getPath());
}
// Rename the source file, replacing the existing destination.
Files.move(
src.getPath(),
dst.getPath(),
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.ATOMIC_MOVE);
}
} | @Test
public void testMoveWithExistingSrcFile() throws Exception {
Path srcPath1 = temporaryFolder.newFile().toPath();
Path srcPath2 = temporaryFolder.newFile().toPath();
Path destPath1 = temporaryFolder.getRoot().toPath().resolve("nonexistentdir").resolve("dest1");
Path destPath2 = srcPath2.resolveSibling("dest2");
createFileWithContent(srcPath1, "content1");
createFileWithContent(srcPath2, "content2");
localFileSystem.rename(
toLocalResourceIds(ImmutableList.of(srcPath1, srcPath2), false /* isDirectory */),
toLocalResourceIds(ImmutableList.of(destPath1, destPath2), false /* isDirectory */));
assertContents(
ImmutableList.of(destPath1, destPath2), ImmutableList.of("content1", "content2"));
assertFalse(srcPath1 + " exists", srcPath1.toFile().exists());
assertFalse(srcPath2 + " exists", srcPath2.toFile().exists());
} |
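A note on the move call in the focal method above: `Files.move` with `ATOMIC_MOVE` performs the rename as a single file-system operation, and on POSIX file systems an existing target is replaced. A minimal standalone sketch of the same NIO call (temp files stand in for the `LocalResourceId` paths):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicMoveSketch {
    public static void main(String[] args) throws IOException {
        Path src = Files.createTempFile("src", ".txt");
        Path dst = Files.createTempFile("dst", ".txt"); // pre-existing destination
        Files.writeString(src, "content");
        // Rename src over dst in one atomic operation; across file stores this
        // throws AtomicMoveNotSupportedException instead of falling back to copy.
        Files.move(src, dst, StandardCopyOption.REPLACE_EXISTING,
                StandardCopyOption.ATOMIC_MOVE);
        System.out.println(Files.readString(dst)); // prints "content"
    }
}
```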
@Override
protected void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception {
if (ApplicationProtocolNames.HTTP_2.equals(protocol)) {
ctx.channel().attr(PROTOCOL_NAME).set(PROTOCOL_HTTP_2);
configureHttp2(ctx.pipeline());
return;
}
if (ApplicationProtocolNames.HTTP_1_1.equals(protocol)) {
ctx.channel().attr(PROTOCOL_NAME).set(PROTOCOL_HTTP_1_1);
configureHttp1(ctx.pipeline());
return;
}
throw new IllegalStateException("unknown protocol: " + protocol);
} | @Test
void skipProtocolCloseHandler() throws Exception {
EmbeddedChannel channel = new EmbeddedChannel();
ChannelConfig channelConfig = new ChannelConfig();
channelConfig.add(new ChannelConfigValue<>(CommonChannelConfigKeys.http2CatchConnectionErrors, false));
channelConfig.add(new ChannelConfigValue<>(CommonChannelConfigKeys.maxHttp2HeaderListSize, 32768));
Http2OrHttpHandler http2OrHttpHandler =
new Http2OrHttpHandler(new ChannelInboundHandlerAdapter(), channelConfig, cp -> {});
channel.pipeline().addLast("codec_placeholder", new DummyChannelHandler());
channel.pipeline().addLast(Http2OrHttpHandler.class.getSimpleName(), http2OrHttpHandler);
http2OrHttpHandler.configurePipeline(channel.pipeline().lastContext(), ApplicationProtocolNames.HTTP_2);
assertNull(channel.pipeline().context(Http2ConnectionErrorHandler.class));
} |
public static boolean isEmpty(Collection coll) {
return (coll == null || coll.isEmpty());
} | @Test
void testIsEmpty() {
assertFalse(CollectionUtils.isEmpty(Collections.singletonList("target")));
assertTrue(CollectionUtils.isEmpty(Collections.emptyList()));
assertTrue(CollectionUtils.isEmpty(null));
} |
public AMRMClient.ContainerRequest getContainerRequest(
Resource containerResource, Priority priority, String nodeLabel) {
if (StringUtils.isNullOrWhitespaceOnly(nodeLabel) || defaultConstructor == null) {
return new AMRMClient.ContainerRequest(containerResource, null, null, priority);
}
try {
/*
 * Set relaxLocality to true, which tells the YARN ResourceManager that the
 * application allows locality to be loosened (i.e. may fall through to rack or any).
 */
return defaultConstructor.newInstance(
containerResource, null, null, priority, true, nodeLabel);
} catch (InvocationTargetException | InstantiationException | IllegalAccessException e) {
LOG.warn("Errors on creating Container Request.", e);
}
return new AMRMClient.ContainerRequest(containerResource, null, null, priority);
} | @Test
void testGetContainerRequestWithYarnSupport()
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
assumeThat(HadoopUtils.isMinHadoopVersion(2, 6)).isTrue();
Resource resource = Resource.newInstance(100, 1);
Priority priority = Priority.newInstance(1);
AMRMClient.ContainerRequest containerRequest =
ContainerRequestReflector.INSTANCE.getContainerRequest(resource, priority, "GPU");
assertThat(getNodeLabelExpressionWithReflector(containerRequest)).isEqualTo("GPU");
containerRequest =
ContainerRequestReflector.INSTANCE.getContainerRequest(resource, priority, null);
assertThat(getNodeLabelExpressionWithReflector(containerRequest)).isNull();
containerRequest =
ContainerRequestReflector.INSTANCE.getContainerRequest(resource, priority, "");
assertThat(getNodeLabelExpressionWithReflector(containerRequest)).isNull();
} |
public void cacheSubscriberForRedo(String serviceName, String groupName, String cluster) {
String key = ServiceInfo.getKey(NamingUtils.getGroupedName(serviceName, groupName), cluster);
SubscriberRedoData redoData = SubscriberRedoData.build(serviceName, groupName, cluster);
synchronized (subscribes) {
subscribes.put(key, redoData);
}
} | @Test
void testCacheSubscriberForRedo() {
ConcurrentMap<String, SubscriberRedoData> subscribes = getSubscriberRedoDataMap();
assertTrue(subscribes.isEmpty());
redoService.cacheSubscriberForRedo(SERVICE, GROUP, CLUSTER);
assertFalse(subscribes.isEmpty());
SubscriberRedoData actual = subscribes.entrySet().iterator().next().getValue();
assertEquals(SERVICE, actual.getServiceName());
assertEquals(GROUP, actual.getGroupName());
assertEquals(CLUSTER, actual.getCluster());
assertFalse(actual.isRegistered());
assertFalse(actual.isUnregistering());
} |
public String getContent(String path) {
String htmlPath = HTML_PATHS.contains(path) ? path : INDEX_HTML_PATH;
checkState(servletContext != null, "init has not been called");
// Optimization to not have to call platform.currentStatus on each call
if (Objects.equals(status, UP)) {
return indexHtmlByPath.get(htmlPath);
}
Status currentStatus = platform.status();
if (!Objects.equals(status, currentStatus)) {
generate(currentStatus);
}
return indexHtmlByPath.get(htmlPath);
} | @Test
public void contains_web_context() {
doInit();
assertThat(underTest.getContent("/foo"))
.contains(TEST_CONTEXT);
} |
public List<ScanFilterData> createScanFilterDataForBeaconParser(BeaconParser beaconParser, List<Identifier> identifiers) {
ArrayList<ScanFilterData> scanFilters = new ArrayList<ScanFilterData>();
long typeCode = beaconParser.getMatchingBeaconTypeCode();
int startOffset = beaconParser.getMatchingBeaconTypeCodeStartOffset();
int endOffset = beaconParser.getMatchingBeaconTypeCodeEndOffset();
byte[] typeCodeBytes = BeaconParser.longToByteArray(typeCode, endOffset-startOffset+1);
if (identifiers != null && identifiers.size() > 0 && identifiers.get(0) != null && beaconParser.getMatchingBeaconTypeCode() == 0x0215) {
// If the type code is 0x0215 (iBeacon), we also allow adding identifiers to the filter
for (int manufacturer : beaconParser.getHardwareAssistManufacturers()) {
ScanFilterData sfd = new ScanFilterData();
sfd.manufacturer = manufacturer;
int length = 18;
if (identifiers.size() == 2) {
length = 20;
}
if (identifiers.size() == 3) {
length = 22;
}
sfd.filter = new byte[length];
sfd.filter[0] = typeCodeBytes[0];
sfd.filter[1] = typeCodeBytes[1];
byte[] idBytes = identifiers.get(0).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+2] = idBytes[i];
}
if (identifiers.size() > 1 && identifiers.get(1) != null) {
idBytes = identifiers.get(1).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+18] = idBytes[i];
}
}
if (identifiers.size() > 2 && identifiers.get(2) != null) {
idBytes = identifiers.get(2).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+20] = idBytes[i];
}
}
sfd.mask = new byte[length];
for (int i = 0 ; i < length; i++) {
sfd.mask[i] = (byte) 0xff;
}
sfd.serviceUuid = null;
sfd.serviceUuid128Bit = new byte[0];
scanFilters.add(sfd);
return scanFilters;
}
}
for (int manufacturer : beaconParser.getHardwareAssistManufacturers()) {
ScanFilterData sfd = new ScanFilterData();
Long serviceUuid = beaconParser.getServiceUuid();
// Note: the -2 here is because we want the filter and mask to start after the
// two-byte manufacturer code, and the beacon parser expression is based on offsets
// from the start of that two-byte code
int length = endOffset + 1 - 2;
byte[] filter = new byte[0];
byte[] mask = new byte[0];
if (length > 0) {
filter = new byte[length];
mask = new byte[length];
for (int layoutIndex = 2; layoutIndex <= endOffset; layoutIndex++) {
int filterIndex = layoutIndex-2;
if (layoutIndex < startOffset) {
filter[filterIndex] = 0;
mask[filterIndex] = 0;
} else {
filter[filterIndex] = typeCodeBytes[layoutIndex-startOffset];
mask[filterIndex] = (byte) 0xff;
}
}
}
sfd.manufacturer = manufacturer;
sfd.filter = filter;
sfd.mask = mask;
sfd.serviceUuid = serviceUuid;
sfd.serviceUuid128Bit = beaconParser.getServiceUuid128Bit();
scanFilters.add(sfd);
}
return scanFilters;
} | @Test
public void testEddystoneScanFilterData() throws Exception {
org.robolectric.shadows.ShadowLog.stream = System.err;
BeaconParser parser = new BeaconParser();
parser.setBeaconLayout(BeaconParser.EDDYSTONE_UID_LAYOUT);
BeaconManager.setManifestCheckingDisabled(true); // no manifest available in robolectric
List<ScanFilterUtils.ScanFilterData> scanFilterDatas = new ScanFilterUtils().createScanFilterDataForBeaconParser(parser, null);
assertEquals("scanFilters should be of correct size", 1, scanFilterDatas.size());
ScanFilterUtils.ScanFilterData sfd = scanFilterDatas.get(0);
assertEquals("serviceUuid should be right", new Long(0xfeaa), sfd.serviceUuid);
} |
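The hardcoded lengths 18/20/22 in the iBeacon branch come straight from the manufacturer-data layout the hardware filter matches against; the offsets below are inferred from the array indexing in the focal method, not quoted from a spec. A toy sketch of that layout:

```java
import java.util.Arrays;

public class IBeaconFilterLayoutSketch {
    public static void main(String[] args) {
        // Layout after the 2-byte manufacturer code, as the indexing implies:
        // bytes 0-1  : beacon type code (0x02, 0x15)
        // bytes 2-17 : 16-byte proximity UUID (identifier 0)
        // bytes 18-19: 2-byte major           (identifier 1, when present)
        // bytes 20-21: 2-byte minor           (identifier 2, when present)
        int identifierCount = 3; // hypothetical: UUID + major + minor
        int length = 18;                       // type code + UUID
        if (identifierCount >= 2) length = 20; // + major
        if (identifierCount >= 3) length = 22; // + minor
        byte[] mask = new byte[length];
        Arrays.fill(mask, (byte) 0xff); // require an exact match on every byte
        System.out.println("filter length = " + length + ", mask bytes = " + mask.length);
    }
}
```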
protected String escape(final String path) {
final StringBuilder escaped = new StringBuilder();
for(char c : path.toCharArray()) {
if(StringUtils.isAlphanumeric(String.valueOf(c))
|| c == Path.DELIMITER) {
escaped.append(c);
}
else {
escaped.append("\\").append(c);
}
}
return escaped.toString();
} | @Test
public void testEscape() {
Archive a = Archive.TAR;
assertEquals("file\\ name", a.escape("file name"));
assertEquals("file\\(name", a.escape("file(name"));
assertEquals("\\$filename", a.escape("$filename"));
} |
void registerSessionIfNeeded(HttpSession session) {
if (session != null) {
synchronized (session) {
if (!SESSION_MAP_BY_ID.containsKey(session.getId())) {
sessionCreated(new HttpSessionEvent(session));
}
}
}
} | @Test
public void registerSessionIfNeeded() {
final HttpSession session = createSession();
sessionListener.registerSessionIfNeeded(session);
sessionListener.registerSessionIfNeeded(session);
sessionListener.registerSessionIfNeeded(null);
sessionListener.unregisterInvalidatedSessions();
sessionListener.unregisterSessionIfNeeded(session);
sessionListener.unregisterSessionIfNeeded(session);
sessionListener.unregisterSessionIfNeeded(null);
} |
public static <K> KTableHolder<K> build(
final KTableHolder<K> left,
final KTableHolder<K> right,
final TableTableJoin<K> join
) {
final LogicalSchema leftSchema;
final LogicalSchema rightSchema;
if (join.getJoinType().equals(RIGHT)) {
leftSchema = right.getSchema();
rightSchema = left.getSchema();
} else {
leftSchema = left.getSchema();
rightSchema = right.getSchema();
}
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
final KTable<K, GenericRow> result;
switch (join.getJoinType()) {
case INNER:
result = left.getTable().join(right.getTable(), joinParams.getJoiner());
break;
case LEFT:
result = left.getTable().leftJoin(right.getTable(), joinParams.getJoiner());
break;
case RIGHT:
result = right.getTable().leftJoin(left.getTable(), joinParams.getJoiner());
break;
case OUTER:
result = left.getTable().outerJoin(right.getTable(), joinParams.getJoiner());
break;
default:
throw new IllegalStateException("invalid join type: " + join.getJoinType());
}
return KTableHolder.unmaterialized(
result,
joinParams.getSchema(),
left.getExecutionKeyFactory());
} | @Test
public void shouldDoLeftJoin() {
// Given:
givenLeftJoin(L_KEY);
// When:
final KTableHolder<Struct> result = join.build(planBuilder, planInfo);
// Then:
verify(leftKTable).leftJoin(
same(rightKTable),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 0))
);
verifyNoMoreInteractions(leftKTable, rightKTable, resultKTable);
assertThat(result.getTable(), is(resultKTable));
assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
} |
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
return Optional.ofNullable(HANDLERS.get(step.getClass()))
.map(h -> h.handle(this, schema, step))
.orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
} | @Test
public void shouldResolveSchemaForStreamSelectWithoutColumnNames() {
// Given:
final StreamSelect<?> step = new StreamSelect<>(
PROPERTIES,
streamSource,
ImmutableList.of(ColumnName.of("NEW_KEY")),
Optional.empty(),
ImmutableList.of(
add("JUICE", "ORANGE", "APPLE"),
ref("PLANTAIN", "BANANA"),
ref("CITRUS", "ORANGE"))
);
// When:
final LogicalSchema result = resolver.resolve(step, SCHEMA);
// Then:
assertThat(result, is(
LogicalSchema.builder()
.keyColumn(ColumnName.of("NEW_KEY"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("JUICE"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of("PLANTAIN"), SqlTypes.STRING)
.valueColumn(ColumnName.of("CITRUS"), SqlTypes.INTEGER)
.build())
);
} |
static KiePMMLTargetValue getKiePMMLTargetValue(final TargetValue targetValue) {
final String value = targetValue.getValue() != null ? targetValue.getValue().toString() : null;
final String displayValue = targetValue.getDisplayValue() != null ? targetValue.getDisplayValue() : null;
final org.kie.pmml.api.models.TargetValue kieTargetValue = new org.kie.pmml.api.models.TargetValue(value,
displayValue,
targetValue.getPriorProbability(),
targetValue.getDefaultValue());
return KiePMMLTargetValue.builder(kieTargetValue.getName(),
Collections.emptyList(), kieTargetValue)
.build();
} | @Test
void getKiePMMLTargetValue() {
final TargetValue toConvert = getRandomTargetValue();
KiePMMLTargetValue retrieved = KiePMMLTargetValueInstanceFactory.getKiePMMLTargetValue(toConvert);
commonVerifyKiePMMLTargetValue(retrieved, toConvert);
} |
@Override
public OffsetFetchResponseData data() {
return data;
} | @Test
public void testNullableMetadataV0ToV7() {
PartitionData pd = new PartitionData(
offset,
leaderEpochOne,
null,
Errors.UNKNOWN_TOPIC_OR_PARTITION);
// test PartitionData.equals with null metadata
assertEquals(pd, pd);
partitionDataMap.clear();
partitionDataMap.put(new TopicPartition(topicOne, partitionOne), pd);
OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.GROUP_AUTHORIZATION_FAILED, partitionDataMap);
OffsetFetchResponseData expectedData =
new OffsetFetchResponseData()
.setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code())
.setThrottleTimeMs(throttleTimeMs)
.setTopics(Collections.singletonList(
new OffsetFetchResponseTopic()
.setName(topicOne)
.setPartitions(Collections.singletonList(
new OffsetFetchResponsePartition()
.setPartitionIndex(partitionOne)
.setCommittedOffset(offset)
.setCommittedLeaderEpoch(leaderEpochOne.orElse(-1))
.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
.setMetadata(null))
))
);
assertEquals(expectedData, response.data());
} |
public static Builder withSchema(Schema schema) {
return new Builder(schema);
} | @Test
public void testThrowsForIncorrectNumberOfFields() {
Schema type =
Stream.of(
Schema.Field.of("f_int", FieldType.INT32),
Schema.Field.of("f_str", FieldType.STRING),
Schema.Field.of("f_double", FieldType.DOUBLE))
.collect(toSchema());
thrown.expect(IllegalArgumentException.class);
Row.withSchema(type).addValues(1, "2").build();
} |
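For contrast with the mismatch asserted above, supplying one value per schema field builds cleanly; a small usage sketch against the same Beam `Row`/`Schema` API, reusing the test's field names:

```java
import org.apache.beam.sdk.schemas.Schema;
import org.apache.beam.sdk.schemas.Schema.FieldType;
import org.apache.beam.sdk.values.Row;

public class RowBuilderSketch {
    public static void main(String[] args) {
        Schema type = Schema.of(
                Schema.Field.of("f_int", FieldType.INT32),
                Schema.Field.of("f_str", FieldType.STRING),
                Schema.Field.of("f_double", FieldType.DOUBLE));
        // Three fields, three values: builds without throwing.
        Row row = Row.withSchema(type).addValues(1, "2", 3.0d).build();
        System.out.println(row);
    }
}
```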
static String formatDuration(long durationInMinutes) {
if (durationInMinutes == 0) {
return ZERO;
}
double days = (double) durationInMinutes / DURATION_HOURS_IN_DAY / DURATION_OF_ONE_HOUR_IN_MINUTES;
if (days > DURATION_ALMOST_ONE) {
return format(DURATION_DAYS_FORMAT, Math.round(days));
}
double remainingDuration = durationInMinutes - (Math.floor(days) * DURATION_HOURS_IN_DAY * DURATION_OF_ONE_HOUR_IN_MINUTES);
double hours = remainingDuration / DURATION_OF_ONE_HOUR_IN_MINUTES;
if (hours > DURATION_ALMOST_ONE) {
return format(DURATION_HOURS_FORMAT, Math.round(hours));
}
double minutes = remainingDuration - (Math.floor(hours) * DURATION_OF_ONE_HOUR_IN_MINUTES);
return format(DURATION_MINUTES_FORMAT, Math.round(minutes));
} | @Test
public void format_duration_is_rounding_result() {
// From 4 hours added onwards, the result is rounded up to the next day (as 4 hours is half a day)
assertThat(formatDuration(5 * ONE_DAY + 4 * ONE_HOUR)).isEqualTo("6d");
assertThat(formatDuration(5 * ONE_DAY + 5 * ONE_HOUR)).isEqualTo("6d");
// From 30 minutes added onwards, the result is rounded up to the next hour
assertThat(formatDuration(3 * ONE_HOUR + 30 * ONE_MINUTE)).isEqualTo("4h");
assertThat(formatDuration(3 * ONE_HOUR + 40 * ONE_MINUTE)).isEqualTo("4h");
// When the duration is close to the next unit (above 0.9), the result is rounded up to that unit
assertThat(formatDuration(7 * ONE_HOUR + 20 * ONE_MINUTE)).isEqualTo("1d");
assertThat(formatDuration(55 * ONE_MINUTE)).isEqualTo("1h");
assertThat(formatDuration(55 * ONE_MINUTE)).isEqualTo("1h");
} |
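The expected strings in these assertions follow directly from the rounding arithmetic in `formatDuration`; a self-contained sketch with assumed constants (an 8-hour working day, 60-minute hour, and a 0.9 "almost one" threshold — the excerpt doesn't show the real values, so treat them as illustrative):

```java
public class DurationRoundingSketch {
    // Assumed values; the real constants are not shown in the focal method above.
    static final int HOURS_IN_DAY = 8;
    static final int MINUTES_IN_HOUR = 60;
    static final double ALMOST_ONE = 0.9;

    static String format(long minutes) {
        if (minutes == 0) return "0";
        double days = (double) minutes / HOURS_IN_DAY / MINUTES_IN_HOUR;
        if (days > ALMOST_ONE) return Math.round(days) + "d";
        double remaining = minutes - Math.floor(days) * HOURS_IN_DAY * MINUTES_IN_HOUR;
        double hours = remaining / MINUTES_IN_HOUR;
        if (hours > ALMOST_ONE) return Math.round(hours) + "h";
        return Math.round(remaining - Math.floor(hours) * MINUTES_IN_HOUR) + "min";
    }

    public static void main(String[] args) {
        System.out.println(format(5 * 8 * 60 + 4 * 60)); // 5.5 days  -> "6d"
        System.out.println(format(3 * 60 + 40));         // 3.67 hours -> "4h"
        System.out.println(format(55));                  // 0.92 hours -> "1h"
    }
}
```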
public String doLayout(ILoggingEvent event) {
StringBuilder buf = new StringBuilder();
startNewTableIfLimitReached(buf);
boolean odd = true;
if (((counter++) & 1) == 0) {
odd = false;
}
String level = event.getLevel().toString().toLowerCase(Locale.US);
buf.append(LINE_SEPARATOR);
buf.append("<tr class=\"");
buf.append(level);
if (odd) {
buf.append(" odd\">");
} else {
buf.append(" even\">");
}
buf.append(LINE_SEPARATOR);
Converter<ILoggingEvent> c = head;
while (c != null) {
appendEventToBuffer(buf, c, event);
c = c.getNext();
}
buf.append("</tr>");
buf.append(LINE_SEPARATOR);
if (event.getThrowableProxy() != null) {
throwableRenderer.render(buf, event);
}
return buf.toString();
} | @SuppressWarnings("unchecked")
@Test
public void layoutWithException() throws Exception {
layout.setPattern("%level %thread %msg %ex");
LoggingEvent le = createLoggingEvent();
le.setThrowableProxy(new ThrowableProxy(new Exception("test Exception")));
String result = layout.doLayout(le);
String stringToParse = layout.getFileHeader();
stringToParse = stringToParse + layout.getPresentationHeader();
stringToParse += result;
stringToParse += "</table></body></html>";
// System.out.println(stringToParse);
Document doc = parseOutput(stringToParse);
Element rootElement = doc.getRootElement();
Element bodyElement = rootElement.element("body");
Element tableElement = bodyElement.element("table");
List<Element> trElementList = tableElement.elements();
Element exceptionRowElement = trElementList.get(2);
Element exceptionElement = exceptionRowElement.element("td");
assertEquals(3, tableElement.elements().size());
assertTrue(exceptionElement.getText().contains(
"java.lang.Exception: test Exception"));
} |
@Override
public Database getDb(String dbName) {
if (databases.containsKey(dbName)) {
return databases.get(dbName);
}
Database db;
try {
db = icebergCatalog.getDB(dbName);
} catch (NoSuchNamespaceException e) {
LOG.error("Database {} not found", dbName, e);
return null;
}
databases.put(dbName, db);
return db;
} | @Test
public void testGetDB(@Mocked IcebergHiveCatalog icebergHiveCatalog) {
String db = "db";
new Expectations() {
{
icebergHiveCatalog.getDB(db);
result = new Database(0, db);
minTimes = 0;
}
};
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
Database expectResult = new Database(0, db);
Assert.assertEquals(expectResult, metadata.getDb(db));
} |
public HollowHashIndexResult findMatches(Object... query) {
if (hashStateVolatile == null) {
throw new IllegalStateException(this + " wasn't initialized");
}
int hashCode = 0;
for(int i=0;i<query.length;i++) {
if(query[i] == null)
throw new IllegalArgumentException("querying by null unsupported; i=" + i);
hashCode ^= HashCodes.hashInt(keyHashCode(query[i], i));
}
HollowHashIndexResult result;
HollowHashIndexState hashState;
do {
result = null;
hashState = hashStateVolatile;
long bucket = hashCode & hashState.getMatchHashMask();
long hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
boolean bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
while (!bucketIsEmpty) {
if (matchIsEqual(hashState.getMatchHashTable(), hashBucketBit, query)) {
int selectSize = (int) hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey(), hashState.getBitsPerSelectTableSize());
long selectBucketPointer = hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey() + hashState.getBitsPerSelectTableSize(), hashState.getBitsPerSelectTablePointer());
result = new HollowHashIndexResult(hashState, selectBucketPointer, selectSize);
break;
}
bucket = (bucket + 1) & hashState.getMatchHashMask();
hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
}
} while (hashState != hashStateVolatile);
return result;
} | @Test(expected = IllegalArgumentException.class)
public void testFindingMatchForNullQueryValue() throws Exception {
mapper.add(new TypeB("one:"));
roundTripSnapshot();
HollowHashIndex index = new HollowHashIndex(readStateEngine, "TypeB", "", "b1.value");
index.findMatches(new Object[]{null});
fail("exception expected");
} |
public static String rangeKey(long value) {
return encodeBase62(value, true);
} | @Test
public void testLargeRangeKey() {
Assert.assertEquals("44C92", IdHelper.rangeKey(1000000L));
Assert.assertEquals("BAzL8n0Y58m7", IdHelper.rangeKey(Long.MAX_VALUE));
Assert.assertTrue(IdHelper.rangeKey(1000000L).compareTo(IdHelper.rangeKey(Long.MAX_VALUE)) < 0);
} |
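`encodeBase62` is not part of the excerpt. With the conventional `0-9A-Za-z` alphabet, both expected keys are consistent with prefixing the encoded string's base-62 length digit, so that shorter (smaller) numbers sort lexicographically before longer ones. A plausible reconstruction, offered as a sketch rather than the library's actual code:

```java
public class Base62Sketch {
    private static final String ALPHABET =
            "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

    static String encodeBase62(long value) {
        StringBuilder sb = new StringBuilder();
        do {
            sb.append(ALPHABET.charAt((int) (value % 62)));
            value /= 62;
        } while (value > 0);
        return sb.reverse().toString();
    }

    static String rangeKey(long value) {
        String encoded = encodeBase62(value);
        // Prefix the length digit so keys order lexicographically by magnitude.
        return ALPHABET.charAt(encoded.length()) + encoded;
    }

    public static void main(String[] args) {
        System.out.println(rangeKey(1_000_000L));     // 44C92
        System.out.println(rangeKey(Long.MAX_VALUE)); // BAzL8n0Y58m7
    }
}
```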
public StatisticRange intersect(StatisticRange other)
{
double newLow = max(low, other.low);
boolean newOpenLow = newLow == low ? openLow : other.openLow;
// epsilon is an arbitrary choice
newOpenLow = nearlyEqual(low, other.low, 1E-10) ? openLow || other.openLow : newOpenLow;
double newHigh = min(high, other.high);
boolean newOpenHigh = newHigh == high ? openHigh : other.openHigh;
newOpenHigh = nearlyEqual(high, other.high, 1E-10) ? openHigh || other.openHigh : newOpenHigh;
if (newLow <= newHigh) {
return new StatisticRange(newLow, newOpenLow, newHigh, newOpenHigh, overlappingDistinctValues(other));
}
return empty();
} | @Test
public void testIntersect()
{
StatisticRange zeroToTen = range(0, 10, 10);
StatisticRange fiveToFifteen = range(5, 15, 60);
assertEquals(zeroToTen.intersect(fiveToFifteen), range(5, 10, 10));
} |
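`nearlyEqual` is not shown; under the usual absolute-difference reading, the epsilon comparison decides whether two bounds count as "the same", in which case an open bound on either side keeps the merged bound open. A hypothetical sketch of that logic:

```java
public class IntersectBoundsSketch {
    // Hypothetical helper; the real nearlyEqual is not part of the excerpt.
    static boolean nearlyEqual(double a, double b, double epsilon) {
        return Math.abs(a - b) <= epsilon;
    }

    public static void main(String[] args) {
        // Intersect [0, 10] (closed low) with (0, 15) (open low): the lows
        // coincide within epsilon, so the merged low is open if either is open.
        double lowA = 0.0, lowB = 0.0;
        boolean openLowA = false, openLowB = true;
        double newLow = Math.max(lowA, lowB);
        boolean newOpenLow = newLow == lowA ? openLowA : openLowB;
        if (nearlyEqual(lowA, lowB, 1E-10)) {
            newOpenLow = openLowA || openLowB; // equal bounds: open wins
        }
        System.out.println(newLow + " open=" + newOpenLow); // 0.0 open=true
    }
}
```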
public static Map<String, Object> getResolvedPropertiesByPath(String pathSpec, DataSchema dataSchema)
{
if (dataSchema == null)
{
throw new IllegalArgumentException("Invalid data schema input");
}
if (pathSpec == null || (!pathSpec.isEmpty() && !PathSpec.validatePathSpecString(pathSpec)))
{
throw new IllegalArgumentException(String.format("Invalid inputs: PathSpec %s", pathSpec));
}
DataSchema dataSchemaToPath = findDataSchemaByPath(dataSchema, pathSpec);
return dataSchemaToPath.getResolvedProperties();
} | @Test
public void testGetResolvedPropertiesByPath() throws Exception
{
RecordDataSchema testSchema = (RecordDataSchema) TestUtil.dataSchemaFromPdlString(simpleTestSchema);
try
{
SchemaAnnotationProcessor.getResolvedPropertiesByPath("/f0/f3", testSchema);
}
catch (IllegalArgumentException e)
{
assert (e.getMessage().equals("Could not find path segment \"f3\" in PathSpec \"/f0/f3\""));
}
try
{
SchemaAnnotationProcessor.getResolvedPropertiesByPath("/f1/string", testSchema);
}
catch (IllegalArgumentException e)
{
assert (e.getMessage().equals("Could not find path segment \"string\" in PathSpec \"/f1/string\""));
}
} |
public CompletableFuture<Void> storeEcOneTimePreKeys(final UUID identifier, final byte deviceId,
final List<ECPreKey> preKeys) {
return ecPreKeys.store(identifier, deviceId, preKeys);
} | @Test
void storeEcOneTimePreKeys() {
assertEquals(0, keysManager.getEcCount(ACCOUNT_UUID, DEVICE_ID).join(),
"Initial pre-key count for an account should be zero");
keysManager.storeEcOneTimePreKeys(ACCOUNT_UUID, DEVICE_ID, List.of(generateTestPreKey(1))).join();
assertEquals(1, keysManager.getEcCount(ACCOUNT_UUID, DEVICE_ID).join());
keysManager.storeEcOneTimePreKeys(ACCOUNT_UUID, DEVICE_ID, List.of(generateTestPreKey(1))).join();
assertEquals(1, keysManager.getEcCount(ACCOUNT_UUID, DEVICE_ID).join(),
"Repeatedly storing same key should have no effect");
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testShhPost() throws Exception {
web3j.shhPost(
new ShhPost(
"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1",
"0x3e245533f97284d442460f2998cd41858798ddf04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a0d4d661997d3940272b717b1",
Arrays.asList(
"0x776869737065722d636861742d636c69656e74",
"0x4d5a695276454c39425154466b61693532"),
"0x7b2274797065223a226d6",
Numeric.toBigInt("0x64"),
Numeric.toBigInt("0x64")))
.send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"shh_post\",\"params\":[{\"from\":\"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1\",\"to\":\"0x3e245533f97284d442460f2998cd41858798ddf04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a0d4d661997d3940272b717b1\",\"topics\":[\"0x776869737065722d636861742d636c69656e74\",\"0x4d5a695276454c39425154466b61693532\"],\"payload\":\"0x7b2274797065223a226d6\",\"priority\":\"0x64\",\"ttl\":\"0x64\"}],\"id\":1}");
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testEthGasPrice() throws Exception {
web3j.ethGasPrice().send();
verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"eth_gasPrice\",\"params\":[],\"id\":1}");
} |
public boolean containsValue(final int value)
{
boolean found = false;
if (initialValue != value)
{
final int[] entries = this.entries;
@DoNotSub final int length = entries.length;
for (@DoNotSub int i = 1; i < length; i += 2)
{
if (value == entries[i])
{
found = true;
break;
}
}
}
return found;
} | @Test
void shouldNotContainValueForAMissingEntry()
{
assertFalse(map.containsValue(1));
} |
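The early `initialValue` guard makes sense given the flat storage this map appears to use (the `@DoNotSub` annotation suggests an Agrona-style primitive map): keys and values interleave in one `int[]`, and empty value slots hold `initialValue`, so the sentinel can never be reported as a stored value. A toy sketch of that assumed layout:

```java
import java.util.Arrays;

public class FlatMapStorageSketch {
    public static void main(String[] args) {
        final int initialValue = -1; // sentinel marking empty value slots
        int[] entries = new int[8];  // interleaved [key0, val0, key1, val1, ...]
        Arrays.fill(entries, initialValue);
        entries[2] = 42;             // a key...
        entries[3] = 7;              // ...and its value
        System.out.println(containsValue(entries, initialValue, 7));            // true
        System.out.println(containsValue(entries, initialValue, initialValue)); // false by design
    }

    static boolean containsValue(int[] entries, int initialValue, int value) {
        if (initialValue == value) {
            return false; // empty slots hold the sentinel, so it can never match
        }
        for (int i = 1; i < entries.length; i += 2) {
            if (entries[i] == value) {
                return true;
            }
        }
        return false;
    }
}
```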
boolean isWriteEnclosed( ValueMetaInterface v ) {
return meta.isEnclosureForced() && data.binaryEnclosure.length > 0 && v != null && v.isString();
} | @Test
public void testWriteEnclosed() {
TextFileOutputData data = new TextFileOutputData();
data.binaryEnclosure = new byte[1];
data.writer = new ByteArrayOutputStream();
TextFileOutputMeta meta = getTextFileOutputMeta();
meta.setEnclosureForced(true);
stepMockHelper.stepMeta.setStepMetaInterface( meta );
TextFileOutput textFileOutput = getTextFileOutput(data, meta);
ValueMetaBase valueMetaInterface = getValueMetaInterface();
valueMetaInterface.setType(ValueMetaInterface.TYPE_NUMBER);
assertFalse(textFileOutput.isWriteEnclosed(valueMetaInterface));
assertFalse(textFileOutput.isWriteEnclosed(null));
} |
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers,
final int maxRecords,
final boolean checkCrcs) {
// Creating an empty ShareInFlightBatch
ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition);
if (cachedBatchException != null) {
// In the event that a CRC check fails, reject the entire record batch because it is corrupt.
rejectRecordBatch(inFlightBatch, currentBatch);
inFlightBatch.setException(cachedBatchException);
cachedBatchException = null;
return inFlightBatch;
}
if (cachedRecordException != null) {
inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
inFlightBatch.setException(cachedRecordException);
cachedRecordException = null;
return inFlightBatch;
}
if (isConsumed)
return inFlightBatch;
initializeNextAcquired();
try {
int recordsInBatch = 0;
while (recordsInBatch < maxRecords) {
lastRecord = nextFetchedRecord(checkCrcs);
if (lastRecord == null) {
// Any remaining acquired records are gaps
while (nextAcquired != null) {
inFlightBatch.addGap(nextAcquired.offset);
nextAcquired = nextAcquiredRecord();
}
break;
}
while (nextAcquired != null) {
if (lastRecord.offset() == nextAcquired.offset) {
// It's acquired, so we parse it and add it to the batch
Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
TimestampType timestampType = currentBatch.timestampType();
ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch,
timestampType, lastRecord, nextAcquired.deliveryCount);
inFlightBatch.addRecord(record);
recordsRead++;
bytesRead += lastRecord.sizeInBytes();
recordsInBatch++;
nextAcquired = nextAcquiredRecord();
break;
} else if (lastRecord.offset() < nextAcquired.offset) {
// It's not acquired, so we skip it
break;
} else {
// It's acquired, but there's no non-control record at this offset, so it's a gap
inFlightBatch.addGap(nextAcquired.offset);
}
nextAcquired = nextAcquiredRecord();
}
}
} catch (SerializationException se) {
nextAcquired = nextAcquiredRecord();
if (inFlightBatch.isEmpty()) {
inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
inFlightBatch.setException(se);
} else {
cachedRecordException = se;
inFlightBatch.setHasCachedException(true);
}
} catch (CorruptRecordException e) {
if (inFlightBatch.isEmpty()) {
// In the event that a CRC check fails, reject the entire record batch because it is corrupt.
rejectRecordBatch(inFlightBatch, currentBatch);
inFlightBatch.setException(e);
} else {
cachedBatchException = e;
inFlightBatch.setHasCachedException(true);
}
}
return inFlightBatch;
} | @Test
public void testSimple() {
long firstMessageId = 5;
long startingOffset = 10L;
int numRecords = 11; // Records for 10-20
ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData()
.setRecords(newRecords(startingOffset, numRecords, firstMessageId))
.setAcquiredRecords(acquiredRecords(startingOffset, numRecords));
Deserializers<String, String> deserializers = newStringDeserializers();
ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData);
ShareInFlightBatch<String, String> batch = completedFetch.fetchRecords(deserializers, 10, true);
List<ConsumerRecord<String, String>> records = batch.getInFlightRecords();
assertEquals(10, records.size());
ConsumerRecord<String, String> record = records.get(0);
assertEquals(10L, record.offset());
assertEquals(Optional.of((short) 1), record.deliveryCount());
Acknowledgements acknowledgements = batch.getAcknowledgements();
assertEquals(0, acknowledgements.size());
batch = completedFetch.fetchRecords(deserializers, 10, true);
records = batch.getInFlightRecords();
assertEquals(1, records.size());
record = records.get(0);
assertEquals(20L, record.offset());
assertEquals(Optional.of((short) 1), record.deliveryCount());
acknowledgements = batch.getAcknowledgements();
assertEquals(0, acknowledgements.size());
batch = completedFetch.fetchRecords(deserializers, 10, true);
records = batch.getInFlightRecords();
assertEquals(0, records.size());
acknowledgements = batch.getAcknowledgements();
assertEquals(0, acknowledgements.size());
} |
@Override
public void close() {
close(Duration.ofMillis(0));
} | @Test
public void shouldThrowOnInitTransactionIfProducerIsClosed() {
buildMockProducer(true);
producer.close();
assertThrows(IllegalStateException.class, producer::initTransactions);
} |
@Override
public ObjectNode encode(MappingInstruction instruction, CodecContext context) {
checkNotNull(instruction, "Mapping instruction cannot be null");
return new EncodeMappingInstructionCodecHelper(instruction, context).encode();
} | @Test
public void unicastPriorityInstructionTest() {
final UnicastMappingInstruction.PriorityMappingInstruction instruction =
(UnicastMappingInstruction.PriorityMappingInstruction)
MappingInstructions.unicastPriority(UNICAST_PRIORITY);
final ObjectNode instructionJson =
instructionCodec.encode(instruction, context);
assertThat(instructionJson, matchesInstruction(instruction));
} |
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (SerializationException e) {
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
} | @Test
public void dateToJson() {
GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
calendar.add(Calendar.DATE, 10000);
java.util.Date date = calendar.getTime();
JsonNode converted = parse(converter.fromConnectData(TOPIC, Date.SCHEMA, date));
validateEnvelope(converted);
assertEquals(parse("{ \"type\": \"int32\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }"),
converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
assertTrue(payload.isInt());
assertEquals(10000, payload.intValue());
} |
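The payload of 10000 in this test is simply a day count: Connect's `Date` logical type carries a `java.util.Date` as whole days since the Unix epoch in an int32, as the asserted envelope schema (`type: int32, name: org.apache.kafka.connect.data.Date`) shows. A small sketch of that arithmetic (not the converter's actual code path):

```java
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;

public class DateLogicalTypeSketch {
    public static void main(String[] args) {
        GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
        calendar.add(Calendar.DATE, 10000);
        // Days since epoch, as the int32 payload would carry it.
        int days = (int) TimeUnit.MILLISECONDS.toDays(calendar.getTimeInMillis());
        System.out.println(days); // 10000
    }
}
```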
@Override
public void setMaxParallelism(int maxParallelism) {
maxParallelism = normalizeAndCheckMaxParallelism(maxParallelism);
Optional<String> validationResult = rescaleMaxValidator.apply(maxParallelism);
if (validationResult.isPresent()) {
throw new IllegalArgumentException(
String.format(
"Rescaling max parallelism from %s to %s is not allowed: %s",
this.maxParallelism, maxParallelism, validationResult.get()));
}
this.maxParallelism = maxParallelism;
} | @Test
void setMaxInvalid() {
DefaultVertexParallelismInfo info =
new DefaultVertexParallelismInfo(1, 1, (max) -> Optional.of("not valid"));
assertThatThrownBy(() -> info.setMaxParallelism(4))
.withFailMessage("not valid")
.isInstanceOf(IllegalArgumentException.class);
} |
void writeConfigToDisk() {
VespaTlsConfig config = VespaZookeeperTlsContextUtils.tlsContext()
.map(ctx -> new VespaTlsConfig(ctx, TransportSecurityUtils.getInsecureMixedMode()))
.orElse(VespaTlsConfig.tlsDisabled());
writeConfigToDisk(config);
} | @Test
public void config_is_written_correctly_with_tls_for_quorum_communication_tls_with_mixed_mode() {
ZookeeperServerConfig.Builder builder = createConfigBuilderForSingleHost(cfgFile, idFile);
TlsContext tlsContext = createTlsContext();
new Configurator(builder.build()).writeConfigToDisk(new VespaTlsConfig(tlsContext, MixedMode.TLS_CLIENT_MIXED_SERVER));
validateConfigFileTlsWithMixedMode(cfgFile, false);
} |
@Override
public List<Namespace> listNamespaces(Namespace namespace) {
SnowflakeIdentifier scope = NamespaceHelpers.toSnowflakeIdentifier(namespace);
List<SnowflakeIdentifier> results;
switch (scope.type()) {
case ROOT:
results = snowflakeClient.listDatabases();
break;
case DATABASE:
results = snowflakeClient.listSchemas(scope);
break;
default:
throw new IllegalArgumentException(
String.format(
"listNamespaces must be at either ROOT or DATABASE level; got %s from namespace %s",
scope, namespace));
}
return results.stream().map(NamespaceHelpers::toIcebergNamespace).collect(Collectors.toList());
} | @Test
public void testListNamespaceWithinDB() {
String dbName = "DB_1";
assertThat(catalog.listNamespaces(Namespace.of(dbName)))
.containsExactly(Namespace.of(dbName, "SCHEMA_1"));
} |
@VisibleForTesting
@Nullable
public UUID getLeaderSessionID(String componentId) {
synchronized (lock) {
return leaderContenderRegistry.containsKey(componentId)
? confirmedLeaderInformation
.forComponentIdOrEmpty(componentId)
.getLeaderSessionID()
: null;
}
} | @Test
void testOnGrantAndRevokeLeadership() throws Exception {
final AtomicReference<LeaderInformationRegister> storedLeaderInformation =
new AtomicReference<>(LeaderInformationRegister.empty());
new Context(storedLeaderInformation) {
{
runTestWithSynchronousEventHandling(
() -> {
// grant leadership
final UUID leaderSessionID = UUID.randomUUID();
grantLeadership(leaderSessionID);
applyToBothContenderContexts(
ctx -> {
ctx.contender.waitForLeader();
assertThat(ctx.contender.getLeaderSessionID())
.isEqualTo(
leaderElectionService.getLeaderSessionID(
ctx.componentId))
.isEqualTo(leaderSessionID);
final LeaderInformation
expectedLeaderInformationInHaBackend =
LeaderInformation.known(
leaderSessionID, ctx.address);
assertThat(
storedLeaderInformation
.get()
.forComponentId(ctx.componentId))
.as(
"The HA backend should have its leader information updated.")
.hasValue(expectedLeaderInformationInHaBackend);
});
revokeLeadership();
applyToBothContenderContexts(
ctx -> {
ctx.contender.waitForRevokeLeader();
assertThat(ctx.contender.getLeaderSessionID()).isNull();
assertThat(
leaderElectionService.getLeaderSessionID(
ctx.componentId))
.isNull();
final LeaderInformation
expectedLeaderInformationInHaBackend =
LeaderInformation.known(
leaderSessionID, ctx.address);
assertThat(
storedLeaderInformation
.get()
.forComponentId(ctx.componentId))
.as(
"External storage is not touched by the leader session because the leadership is already lost.")
.hasValue(expectedLeaderInformationInHaBackend);
});
});
}
};
} |
public static NamespaceName get(String tenant, String namespace) {
validateNamespaceName(tenant, namespace);
return get(tenant + '/' + namespace);
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void namespace_null() {
NamespaceName.get(null);
} |
public HttpResponse error(Throwable t) {
data(500, ContentType.TEXT_PLAIN, stream -> t.printStackTrace(new PrintStream(stream)));
return this;
} | @Test
void testError() throws IOException {
httpResponse.error(new Exception());
verify(httpExchange).sendResponseHeaders(500, 0);
verify(outputStream, atLeastOnce()).write(any(), anyInt(), anyInt());
} |
@SuppressWarnings("unchecked")
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
NodeId nodeId = request.getNodeId();
String host = nodeId.getHost();
int cmPort = nodeId.getPort();
int httpPort = request.getHttpPort();
Resource capability = request.getResource();
String nodeManagerVersion = request.getNMVersion();
Resource physicalResource = request.getPhysicalResource();
NodeStatus nodeStatus = request.getNodeStatus();
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
if (!minimumNodeManagerVersion.equals("NONE")) {
if (minimumNodeManagerVersion.equals("EqualToRM")) {
minimumNodeManagerVersion = YarnVersionInfo.getVersion();
}
if ((nodeManagerVersion == null) ||
(VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) {
String message =
"Disallowed NodeManager Version " + nodeManagerVersion
+ ", is less than the minimum version "
+ minimumNodeManagerVersion + " sending SHUTDOWN signal to "
+ "NodeManager.";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
}
if (checkIpHostnameInRegistration) {
InetSocketAddress nmAddress =
NetUtils.createSocketAddrForHost(host, cmPort);
InetAddress inetAddress = Server.getRemoteIp();
if (inetAddress != null && nmAddress.isUnresolved()) {
// Reject registration of unresolved nm to prevent resourcemanager
// getting stuck at allocations.
final String message =
"hostname cannot be resolved (ip=" + inetAddress.getHostAddress()
+ ", hostname=" + host + ")";
LOG.warn("Unresolved nodemanager registration: " + message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
}
// Check if this node is a 'valid' node
if (!this.nodesListManager.isValidNode(host) &&
!isNodeInDecommissioning(nodeId)) {
String message =
"Disallowed NodeManager from " + host
+ ", Sending SHUTDOWN signal to the NodeManager.";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
// check if node's capacity is load from dynamic-resources.xml
String nid = nodeId.toString();
Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
if (dynamicLoadCapability != null) {
LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to"
+ " settings in dynamic-resources.xml.", nid, capability,
dynamicLoadCapability);
capability = dynamicLoadCapability;
// sync back with new resource.
response.setResource(capability);
}
// Check if this node has minimum allocations
if (capability.getMemorySize() < minAllocMb
|| capability.getVirtualCores() < minAllocVcores) {
String message = "NodeManager from " + host
+ " doesn't satisfy minimum allocations, Sending SHUTDOWN"
+ " signal to the NodeManager. Node capabilities are " + capability
+ "; minimums are " + minAllocMb + "mb and " + minAllocVcores
+ " vcores";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
response.setContainerTokenMasterKey(containerTokenSecretManager
.getCurrentKey());
response.setNMTokenMasterKey(nmTokenSecretManager
.getCurrentKey());
RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort,
resolve(host), capability, nodeManagerVersion, physicalResource);
RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
if (oldNode == null) {
RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId,
request.getNMContainerStatuses(),
request.getRunningApplications(), nodeStatus);
if (request.getLogAggregationReportsForApps() != null
&& !request.getLogAggregationReportsForApps().isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found the number of previous cached log aggregation "
+ "status from nodemanager:" + nodeId + " is :"
+ request.getLogAggregationReportsForApps().size());
}
startEvent.setLogAggregationReportsForApps(request
.getLogAggregationReportsForApps());
}
this.rmContext.getDispatcher().getEventHandler().handle(
startEvent);
} else {
LOG.info("Reconnect from the node at: " + host);
this.nmLivelinessMonitor.unregister(nodeId);
if (CollectionUtils.isEmpty(request.getRunningApplications())
&& rmNode.getState() != NodeState.DECOMMISSIONING
&& rmNode.getHttpPort() != oldNode.getHttpPort()) {
// Reconnected node differs, so replace old node and start new node
switch (rmNode.getState()) {
case RUNNING:
ClusterMetrics.getMetrics().decrNumActiveNodes();
break;
case UNHEALTHY:
ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
break;
default:
LOG.debug("Unexpected Rmnode state");
}
this.rmContext.getDispatcher().getEventHandler()
.handle(new NodeRemovedSchedulerEvent(rmNode));
this.rmContext.getRMNodes().put(nodeId, rmNode);
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus));
} else {
// Reset heartbeat ID since node just restarted.
oldNode.resetLastNodeHeartBeatResponse();
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeReconnectEvent(nodeId, rmNode,
request.getRunningApplications(),
request.getNMContainerStatuses()));
}
}
// On every node manager register we will be clearing NMToken keys if
// present for any running application.
this.nmTokenSecretManager.removeNodeKey(nodeId);
this.nmLivelinessMonitor.register(nodeId);
// Handle received container status, this should be processed after new
// RMNode inserted
if (!rmContext.isWorkPreservingRecoveryEnabled()) {
if (!request.getNMContainerStatuses().isEmpty()) {
LOG.info("received container statuses on node manager register :"
+ request.getNMContainerStatuses());
for (NMContainerStatus status : request.getNMContainerStatuses()) {
handleNMContainerStatus(status, nodeId);
}
}
}
// Update node's labels to RM's NodeLabelManager.
Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(
request.getNodeLabels());
if (isDistributedNodeLabelsConf && nodeLabels != null) {
try {
updateNodeLabelsFromNMReport(nodeLabels, nodeId);
response.setAreNodeLabelsAcceptedByRM(true);
} catch (IOException ex) {
// Ensure the exception is captured in the response
response.setDiagnosticsMessage(ex.getMessage());
response.setAreNodeLabelsAcceptedByRM(false);
}
} else if (isDelegatedCentralizedNodeLabelsConf) {
this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
}
// Update node's attributes to RM's NodeAttributesManager.
if (request.getNodeAttributes() != null) {
try {
// update node attributes if necessary then update heartbeat response
updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
response.setAreNodeAttributesAcceptedByRM(true);
} catch (IOException ex) {
//ensure the error message is captured and sent across in response
String errorMsg = response.getDiagnosticsMessage() == null ?
ex.getMessage() :
response.getDiagnosticsMessage() + "\n" + ex.getMessage();
response.setDiagnosticsMessage(errorMsg);
response.setAreNodeAttributesAcceptedByRM(false);
}
}
StringBuilder message = new StringBuilder();
message.append("NodeManager from node ").append(host).append("(cmPort: ")
.append(cmPort).append(" httpPort: ");
message.append(httpPort).append(") ")
.append("registered with capability: ").append(capability);
message.append(", assigned nodeId ").append(nodeId);
if (response.getAreNodeLabelsAcceptedByRM()) {
message.append(", node labels { ").append(
StringUtils.join(",", nodeLabels) + " } ");
}
if (response.getAreNodeAttributesAcceptedByRM()) {
message.append(", node attributes { ")
.append(request.getNodeAttributes() + " } ");
}
LOG.info(message.toString());
response.setNodeAction(NodeAction.NORMAL);
response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
response.setRMVersion(YarnVersionInfo.getVersion());
return response;
} | @Test
public void testNodeRegistrationWithInvalidAttributes() throws Exception {
writeToHostsFile("host2");
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
hostFile.getAbsolutePath());
conf.setClass(YarnConfiguration.FS_NODE_ATTRIBUTE_STORE_IMPL_CLASS,
FileSystemNodeAttributeStore.class, NodeAttributeStore.class);
conf.set(YarnConfiguration.FS_NODE_ATTRIBUTE_STORE_ROOT_DIR,
TEMP_DIR.getAbsolutePath());
rm = new MockRM(conf);
rm.start();
ResourceTrackerService resourceTrackerService =
rm.getResourceTrackerService();
RegisterNodeManagerRequest req =
Records.newRecord(RegisterNodeManagerRequest.class);
NodeId nodeId = NodeId.newInstance("host2", 1234);
Resource capability = Resources.createResource(1024);
NodeAttribute validNodeAttribute = NodeAttribute
.newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "Attr1",
NodeAttributeType.STRING, "V1");
NodeAttribute invalidPrefixNodeAttribute = NodeAttribute
.newInstance("_P", "Attr1",
NodeAttributeType.STRING, "V2");
NodeAttribute invalidNameNodeAttribute = NodeAttribute
.newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "_N",
NodeAttributeType.STRING, "V2");
NodeAttribute invalidValueNodeAttribute = NodeAttribute
.newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "Attr2",
NodeAttributeType.STRING, "...");
req.setResource(capability);
req.setNodeId(nodeId);
req.setHttpPort(1234);
req.setNMVersion(YarnVersionInfo.getVersion());
// check invalid prefix
req.setNodeAttributes(
toSet(validNodeAttribute, invalidPrefixNodeAttribute));
RegisterNodeManagerResponse response =
resourceTrackerService.registerNodeManager(req);
Assert.assertEquals(0, rm.getRMContext().getNodeAttributesManager()
.getAttributesForNode(nodeId.getHost()).size());
assertRegisterResponseForInvalidAttributes(response);
Assert.assertTrue(response.getDiagnosticsMessage()
.endsWith("attributes in HB must have prefix nm.yarn.io"));
// check invalid name
req.setNodeAttributes(toSet(validNodeAttribute, invalidNameNodeAttribute));
response = resourceTrackerService.registerNodeManager(req);
Assert.assertEquals(0, rm.getRMContext().getNodeAttributesManager()
.getAttributesForNode(nodeId.getHost()).size());
assertRegisterResponseForInvalidAttributes(response);
Assert.assertTrue(response.getDiagnosticsMessage()
.startsWith("attribute name should only contains"));
// check invalid value
req.setNodeAttributes(toSet(validNodeAttribute, invalidValueNodeAttribute));
response = resourceTrackerService.registerNodeManager(req);
Assert.assertEquals(0, rm.getRMContext().getNodeAttributesManager()
.getAttributesForNode(nodeId.getHost()).size());
assertRegisterResponseForInvalidAttributes(response);
Assert.assertTrue(response.getDiagnosticsMessage()
.startsWith("attribute value should only contains"));
if (rm != null) {
rm.stop();
}
} |
@Nullable
@Override
public Object invoke(@Nonnull MethodInvocation invocation) throws Throwable {
AdapterInvocationWrapper adapterInvocationWrapper = new AdapterInvocationWrapper(invocation);
Object result = proxyInvocationHandler.invoke(adapterInvocationWrapper);
return result;
} | @Test
void should_success_when_call_prepare_with_ProxyInvocationHandler() throws Throwable {
MyMockMethodInvocation myMockMethodInvocation = new MyMockMethodInvocation(NormalTccAction.class.getMethod("prepare", BusinessActionContext.class), () -> normalTccAction.prepare(null));
//when then
Assertions.assertTrue((Boolean) adapterSpringSeataInterceptor.invoke(myMockMethodInvocation));
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(containerService.isContainer(file)) {
final PathAttributes attributes = new PathAttributes();
if(log.isDebugEnabled()) {
log.debug(String.format("Read location for bucket %s", file));
}
attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
return attributes;
}
if(file.getType().contains(Path.Type.upload)) {
final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
if(append.append) {
return new PathAttributes().withSize(append.offset);
}
throw new NotfoundException(file.getAbsolute());
}
try {
PathAttributes attr;
final Path bucket = containerService.getContainer(file);
try {
attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(ServiceException e) {
switch(e.getResponseCode()) {
case 405:
if(log.isDebugEnabled()) {
log.debug(String.format("Mark file %s as delete marker", file));
}
// Only DELETE method is allowed for delete markers
attr = new PathAttributes();
attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
attr.setDuplicate(true);
return attr;
}
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
if(StringUtils.isNotBlank(attr.getVersionId())) {
if(log.isDebugEnabled()) {
log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
}
// Determine if latest version
try {
final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
if(null != latest) {
if(log.isDebugEnabled()) {
log.debug(String.format("Found later version %s for %s", latest, file));
}
// Duplicate if not latest version
attr.setDuplicate(!latest.equals(attr.getVersionId()));
}
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(failure instanceof NotfoundException) {
attr.setDuplicate(true);
}
else {
throw failure;
}
}
}
return attr;
}
catch(NotfoundException e) {
if(file.isDirectory()) {
if(log.isDebugEnabled()) {
log.debug(String.format("Search for common prefix %s", file));
}
// File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
try {
new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
}
catch(ListCanceledException l) {
// Found common prefix
return PathAttributes.EMPTY;
}
catch(NotfoundException n) {
throw e;
}
// Found common prefix
return PathAttributes.EMPTY;
}
throw e;
}
} | @Test
public void testDeleted() throws Exception {
final Path bucket = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final Path test = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertNotNull(test.attributes().getVersionId());
assertNotEquals(PathAttributes.EMPTY, new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledPasswordCallback(), new Delete.DisabledCallback());
assertThrows(NotfoundException.class, () -> new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test));
} |
public static Subject.Factory<Re2jStringSubject, String> re2jString() {
return Re2jStringSubject.FACTORY;
} | @Test
public void doesNotContainMatch_pattern_succeeds() {
assertAbout(re2jString()).that("hello cruel world").doesNotContainMatch(PATTERN);
} |
public static FileLocationOptions defaults() {
return new FileLocationOptions();
} | @Test
public void defaults() throws IOException {
FileLocationOptions options = FileLocationOptions.defaults();
assertEquals(0, options.getOffset());
} |
@SuppressWarnings("unchecked")
public static void validateResponse(HttpURLConnection conn,
int expectedStatus) throws IOException {
if (conn.getResponseCode() != expectedStatus) {
Exception toThrow;
InputStream es = null;
try {
es = conn.getErrorStream();
Map json = JsonSerialization.mapReader().readValue(es);
json = (Map) json.get(ERROR_JSON);
String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
String exMsg = (String) json.get(ERROR_MESSAGE_JSON);
if (exClass != null) {
try {
ClassLoader cl = HttpExceptionUtils.class.getClassLoader();
Class klass = cl.loadClass(exClass);
Preconditions.checkState(Exception.class.isAssignableFrom(klass),
"Class [%s] is not a subclass of Exception", klass);
MethodHandle methodHandle = PUBLIC_LOOKUP.findConstructor(
klass, EXCEPTION_CONSTRUCTOR_TYPE);
toThrow = (Exception) methodHandle.invoke(exMsg);
} catch (Throwable t) {
toThrow = new IOException(String.format(
"HTTP status [%d], exception [%s], message [%s], URL [%s]",
conn.getResponseCode(), exClass, exMsg, conn.getURL()));
}
} else {
String msg = (exMsg != null) ? exMsg : conn.getResponseMessage();
toThrow = new IOException(String.format(
"HTTP status [%d], message [%s], URL [%s]",
conn.getResponseCode(), msg, conn.getURL()));
}
} catch (Exception ex) {
toThrow = new IOException(String.format(
"HTTP status [%d], message [%s], URL [%s], exception [%s]",
conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(),
ex.toString()), ex);
} finally {
if (es != null) {
try {
es.close();
} catch (IOException ex) {
//ignore
}
}
}
throwEx(toThrow);
}
} | @Test
public void testValidateResponseJsonErrorNonException() throws Exception {
Map<String, Object> json = new HashMap<String, Object>();
json.put(HttpExceptionUtils.ERROR_EXCEPTION_JSON, "invalid");
// test case where the exception classname is not a valid exception class
json.put(HttpExceptionUtils.ERROR_CLASSNAME_JSON, String.class.getName());
json.put(HttpExceptionUtils.ERROR_MESSAGE_JSON, "EX");
Map<String, Object> response = new HashMap<String, Object>();
response.put(HttpExceptionUtils.ERROR_JSON, json);
ObjectMapper jsonMapper = new ObjectMapper();
String msg = jsonMapper.writeValueAsString(response);
InputStream is = new ByteArrayInputStream(msg.getBytes(StandardCharsets.UTF_8));
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getErrorStream()).thenReturn(is);
Mockito.when(conn.getResponseMessage()).thenReturn("msg");
Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST);
LambdaTestUtils.interceptAndValidateMessageContains(IOException.class,
Arrays.asList(Integer.toString(HttpURLConnection.HTTP_BAD_REQUEST),
"java.lang.String", "EX"),
() -> HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED));
} |
@JsonProperty
public boolean isAppendRowNumberEnabled()
{
return appendRowNumberEnabled;
} | @Test
public void testIsAppendRowNumberEnabled()
{
List<Column> dataColumns = new ArrayList<>();
List<BaseHiveColumnHandle> partitionColumns = new ArrayList<>();
Map<String, String> tableParameters = Collections.emptyMap();
TupleDomain<Subfield> domainPredicate = TupleDomain.none();
boolean appendRowNumber = false;
RowExpression remainingPredicate = new ConstantExpression(null, CharType.createCharType(5));
Map<String, HiveColumnHandle> predicateColumns = Collections.emptyMap();
TupleDomain<ColumnHandle> partitionColumnPredicate = TupleDomain.none();
Optional<HiveBucketHandle> bucketHandle = Optional.empty();
Optional<HiveBucketing.HiveBucketFilter> bucketFilter = Optional.empty();
Optional<Set<HiveColumnHandle>> requestedColumns = Optional.empty();
SchemaTableName schemaTableName = SchemaTableName.valueOf("schema.TableName");
Optional<List<HivePartition>> partitions = Optional.empty();
Optional<HiveTableHandle> hiveTableHandle = Optional.empty();
HiveTableLayoutHandle handle = new HiveTableLayoutHandle(
schemaTableName,
"tablePath",
partitionColumns,
dataColumns,
tableParameters,
domainPredicate,
remainingPredicate,
predicateColumns,
partitionColumnPredicate,
bucketHandle,
bucketFilter,
false,
"layoutString",
requestedColumns,
false,
appendRowNumber,
partitions,
false,
hiveTableHandle);
assertFalse(handle.isAppendRowNumberEnabled());
} |
@Override
public Iterable<DefaultLogicalResult> getConsumedResults() {
return jobVertex.getInputs().stream()
.map(JobEdge::getSource)
.map(IntermediateDataSet::getId)
.map(resultRetriever)
.collect(Collectors.toList());
} | @Test
public void testGetConsumedResults() {
assertResultsEquals(results, downstreamLogicalVertex.getConsumedResults());
} |
public static ErrorProneOptions processArgs(Iterable<String> args) {
Preconditions.checkNotNull(args);
ImmutableList.Builder<String> remainingArgs = ImmutableList.builder();
/* By default, we throw an error when an unknown option is passed in, if for example you
* try to disable a check that doesn't match any of the known checks. This catches typos from
* the command line.
*
* You can pass the IGNORE_UNKNOWN_CHECKS_FLAG to opt-out of that checking. This allows you to
* use command lines from different versions of error-prone interchangeably.
*/
boolean patchLocationSet = false;
boolean patchCheckSet = false;
Builder builder = new Builder();
for (String arg : args) {
switch (arg) {
case IGNORE_SUPPRESSION_ANNOTATIONS:
builder.setIgnoreSuppressionAnnotations(true);
break;
case IGNORE_UNKNOWN_CHECKS_FLAG:
builder.setIgnoreUnknownChecks(true);
break;
case DISABLE_WARNINGS_IN_GENERATED_CODE_FLAG:
builder.setDisableWarningsInGeneratedCode(true);
break;
case ERRORS_AS_WARNINGS_FLAG:
builder.setDropErrorsToWarnings(true);
break;
case SUGGESTIONS_AS_WARNINGS_FLAG:
builder.setSuggestionsAsWarnings(true);
break;
case ENABLE_ALL_CHECKS:
builder.setEnableAllChecksAsWarnings(true);
break;
case DISABLE_ALL_CHECKS:
builder.setDisableAllChecks(true);
break;
case COMPILING_TEST_ONLY_CODE:
builder.setTestOnlyTarget(true);
break;
case COMPILING_PUBLICLY_VISIBLE_CODE:
builder.setPubliclyVisibleTarget(true);
break;
case DISABLE_ALL_WARNINGS:
builder.setDisableAllWarnings(true);
break;
default:
if (arg.startsWith(SEVERITY_PREFIX)) {
builder.parseSeverity(arg);
} else if (arg.startsWith(ErrorProneFlags.PREFIX)) {
builder.parseFlag(arg);
} else if (arg.startsWith(PATCH_OUTPUT_LOCATION)) {
patchLocationSet = true;
String remaining = arg.substring(PATCH_OUTPUT_LOCATION.length());
if (remaining.equals("IN_PLACE")) {
builder.patchingOptionsBuilder().inPlace(true);
} else {
if (remaining.isEmpty()) {
throw new InvalidCommandLineOptionException("invalid flag: " + arg);
}
builder.patchingOptionsBuilder().baseDirectory(remaining);
}
} else if (arg.startsWith(PATCH_CHECKS_PREFIX)) {
patchCheckSet = true;
String remaining = arg.substring(PATCH_CHECKS_PREFIX.length());
if (remaining.startsWith("refaster:")) {
            // Refaster rule: load the serialized CodeTransformer from the file at the given path
builder
.patchingOptionsBuilder()
.customRefactorer(
() -> {
String path = remaining.substring("refaster:".length());
try (InputStream in =
Files.newInputStream(FileSystems.getDefault().getPath(path));
ObjectInputStream ois = new ObjectInputStream(in)) {
return (CodeTransformer) ois.readObject();
} catch (IOException | ClassNotFoundException e) {
throw new RuntimeException("Can't load Refaster rule from " + path, e);
}
});
} else {
Iterable<String> checks =
Splitter.on(',').trimResults().omitEmptyStrings().split(remaining);
builder.patchingOptionsBuilder().namedCheckers(ImmutableSet.copyOf(checks));
}
} else if (arg.startsWith(PATCH_IMPORT_ORDER_PREFIX)) {
String remaining = arg.substring(PATCH_IMPORT_ORDER_PREFIX.length());
ImportOrganizer importOrganizer = ImportOrderParser.getImportOrganizer(remaining);
builder.patchingOptionsBuilder().importOrganizer(importOrganizer);
} else if (arg.startsWith(EXCLUDED_PATHS_PREFIX)) {
String pathRegex = arg.substring(EXCLUDED_PATHS_PREFIX.length());
builder.setExcludedPattern(Pattern.compile(pathRegex));
} else {
if (arg.startsWith(PREFIX)) {
throw new InvalidCommandLineOptionException("invalid flag: " + arg);
}
remainingArgs.add(arg);
}
}
}
if (patchCheckSet && !patchLocationSet) {
throw new InvalidCommandLineOptionException(
"-XepPatchLocation must be specified when -XepPatchChecks is");
}
return builder.build(remainingArgs.build());
} | @Test
public void noSuchXepFlag() {
assertThrows(
InvalidCommandLineOptionException.class,
() -> ErrorProneOptions.processArgs(new String[] {"-XepNoSuchFlag"}));
} |
protected static boolean isNewVersion(String latest, String current) {
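        // Strip build timestamps (12 digits) and alphabetic qualifiers from the version strings so ModuleDescriptor.Version can compare the numeric parts.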
String gephiVersionTst = current.replaceAll("[0-9]{12}", "").replaceAll("[a-zA-Z -]", "");
latest = latest.replaceAll("[a-zA-Z -]", "");
int res = ModuleDescriptor.Version.parse(gephiVersionTst)
.compareTo(ModuleDescriptor.Version.parse(latest));
return res < 0;
} | @Test
public void testNewVersion() {
Assert.assertFalse(Installer.isNewVersion("0.9.2", "0.9.2"));
Assert.assertTrue(Installer.isNewVersion("0.9.3", "0.9.2"));
Assert.assertTrue(Installer.isNewVersion("0.10.0", "0.9.2"));
Assert.assertTrue(Installer.isNewVersion("0.10.1", "0.10.0"));
Assert.assertTrue(Installer.isNewVersion("1.0.0", "0.10.0"));
} |
@Override
public NearCacheConfig setName(String name) {
this.name = isNotNull(name, "name");
return this;
} | @Test(expected = IllegalArgumentException.class)
public void test_null_name_throws_exception() {
config.setName(null);
} |
@EventListener
void updateTask(TaskChangeEvent event) {
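        // Always drop any existing schedule entry first; re-register only if the task was not removed and is still active.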
removeTaskFromScheduler(event.getTask().getId());
if (!event.isRemoved() && event.getTask().getActive()) {
addTaskToScheduler(event.getTask().getId(), new SimpleTaskRunnable(event.getTask(), clientFactory.getClientForApplication(event.getTask().getApplication())), event.getTask().getCron());
}
} | @Test
public void updatedTaskIsRemovedFromScheduleWhenInactive() {
Task taskA = new Task();
        taskA.setId(1L);
taskA.setName("old");
taskA.setCron("0 0 * * * *");
Map<Long, ScheduledFuture<?>> jobsMap = new HashMap<>();
jobsMap.put(taskA.getId(), mock(ScheduledFuture.class));
ReflectionTestUtils.setField(service, "jobsMap", jobsMap);
        assertNotNull(((Map<Long, ScheduledFuture<?>>) ReflectionTestUtils.getField(service, "jobsMap")).get(1L));
Task taskAUpdated = new Task();
        taskAUpdated.setId(1L);
taskAUpdated.setName("new");
taskAUpdated.setActive(false);
service.updateTask(new TaskChangeEvent(this, taskAUpdated, true));
assertEquals(1, ((Map<Long, ScheduledFuture<?>>) ReflectionTestUtils.getField(service, "jobsMap")).size());
        assertNull(((Map<Long, ScheduledFuture<?>>) ReflectionTestUtils.getField(service, "jobsMap")).get(1L));
} |
static VersionInfo parseVersionInfo(String str) throws ParseException {
Map<String, String> map = Util.parseMap(str);
VersionInfo.Builder vib = new VersionInfo.Builder();
for (Map.Entry<String, String> entry: map.entrySet()) {
switch (entry.getKey()) {
case "major" -> vib.withMajor(map.get(entry.getKey()));
case "minor" -> vib.withMinor(map.get(entry.getKey()));
case "gitVersion" -> vib.withGitVersion(map.get(entry.getKey()));
case "gitCommit" -> vib.withGitCommit(map.get(entry.getKey()));
case "gitTreeState" -> vib.withGitTreeState(map.get(entry.getKey()));
case "buildDate" -> vib.withBuildDate(map.get(entry.getKey()));
case "goVersion" -> vib.withGoVersion(map.get(entry.getKey()));
case "compiler" -> vib.withCompiler(map.get(entry.getKey()));
case "platform" -> vib.withPlatform(map.get(entry.getKey()));
default -> LOGGER.warn("Unknown key {} found", entry.getKey());
}
}
return vib.build();
} | @Test
public void versionInfoFromMap(VertxTestContext context) throws ParseException {
String version = """
major=1
minor=16
gitVersion=v1.16.2
gitCommit=c97fe5036ef3df2967d086711e6c0c405941e14b
gitTreeState=clean
buildDate=2019-10-15T19:09:08Z
goVersion=go1.12.10
compiler=gc
platform=linux/amd64""";
VersionInfo vi = PlatformFeaturesAvailability.parseVersionInfo(version);
context.verify(() -> {
assertThat(vi.getMajor(), is("1"));
assertThat(vi.getMinor(), is("16"));
});
context.completeNow();
} |
public static ExpressionEvaluator compileExpression(
String code,
List<String> argumentNames,
List<Class<?>> argumentClasses,
Class<?> returnClass) {
try {
ExpressionKey key =
new ExpressionKey(code, argumentNames, argumentClasses, returnClass);
return COMPILED_EXPRESSION_CACHE.get(
key,
() -> {
ExpressionEvaluator expressionEvaluator = new ExpressionEvaluator();
// Input args
expressionEvaluator.setParameters(
argumentNames.toArray(new String[0]),
argumentClasses.toArray(new Class[0]));
// Result type
expressionEvaluator.setExpressionType(returnClass);
try {
// Compile
expressionEvaluator.cook(code);
} catch (CompileException e) {
throw new InvalidProgramException(
"Table program cannot be compiled. This is a bug. Please file an issue.\nExpression: "
+ code,
e);
}
return expressionEvaluator;
});
} catch (Exception e) {
throw new FlinkRuntimeException(e.getMessage(), e);
}
} | @Test
public void testExpressionCacheReuse() {
String code = "a + b";
ExpressionEvaluator evaluator1 =
CompileUtils.compileExpression(
code,
Arrays.asList("a", "b"),
Arrays.asList(Integer.class, Integer.class),
Integer.class);
ExpressionEvaluator evaluator2 =
CompileUtils.compileExpression(
code,
Arrays.asList("a", "b"),
Arrays.asList(Integer.class, Integer.class),
Integer.class);
ExpressionEvaluator evaluator3 =
CompileUtils.compileExpression(
code,
Arrays.asList("a", "b"),
Arrays.asList(String.class, String.class),
String.class);
assertThat(evaluator2).isSameAs(evaluator1);
assertThat(evaluator3).isNotSameAs(evaluator1);
} |
UserDetails toUserDetails(UserCredentials userCredentials) {
return User.withUsername(userCredentials.getUsername())
.password(userCredentials.getPassword())
.roles(userCredentials.getRoles().toArray(String[]::new))
.build();
} | @Test
void toUserDetails() {
// given
UserCredentials userCredentials =
UserCredentials.builder()
.enabled(true)
.password("password")
.username("user")
.roles(Set.of("USER", "ADMIN"))
.build();
// when
UserDetails userDetails = userDetailsMapper.toUserDetails(userCredentials);
// then
assertThat(userDetails.getUsername()).isEqualTo("user");
assertThat(userDetails.getPassword()).isEqualTo("password");
assertThat(userDetails.isEnabled()).isTrue();
} |
@Override
public int hashCode() {
return Objects.hash(start, end);
} | @Test
  public void hashcode_is_based_on_start_and_end() {
assertThat(new TextBlock(15, 15)).hasSameHashCodeAs(new TextBlock(15, 15));
assertThat(new TextBlock(15, 300)).hasSameHashCodeAs(new TextBlock(15, 300));
assertThat(new TextBlock(15, 300).hashCode()).isNotEqualTo(new TextBlock(15, 15).hashCode());
} |
public void onBuffer(Buffer buffer, int sequenceNumber, int backlog, int subpartitionId)
throws IOException {
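        // The incoming buffer is recycled in the finally block unless ownership is transferred to the received/priority queues below.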
boolean recycleBuffer = true;
try {
if (expectedSequenceNumber != sequenceNumber) {
onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber));
return;
}
if (buffer.getDataType().isBlockingUpstream()) {
onBlockingUpstream();
checkArgument(backlog == 0, "Illegal number of backlog: %s, should be 0.", backlog);
}
final boolean wasEmpty;
boolean firstPriorityEvent = false;
synchronized (receivedBuffers) {
NetworkActionsLogger.traceInput(
"RemoteInputChannel#onBuffer",
buffer,
inputGate.getOwningTaskName(),
channelInfo,
channelStatePersister,
sequenceNumber);
// Similar to notifyBufferAvailable(), make sure that we never add a buffer
// after releaseAllResources() released all buffers from receivedBuffers
// (see above for details).
if (isReleased.get()) {
return;
}
wasEmpty = receivedBuffers.isEmpty();
SequenceBuffer sequenceBuffer =
new SequenceBuffer(buffer, sequenceNumber, subpartitionId);
DataType dataType = buffer.getDataType();
if (dataType.hasPriority()) {
firstPriorityEvent = addPriorityBuffer(sequenceBuffer);
recycleBuffer = false;
} else {
receivedBuffers.add(sequenceBuffer);
recycleBuffer = false;
if (dataType.requiresAnnouncement()) {
firstPriorityEvent = addPriorityBuffer(announce(sequenceBuffer));
}
}
totalQueueSizeInBytes += buffer.getSize();
final OptionalLong barrierId =
channelStatePersister.checkForBarrier(sequenceBuffer.buffer);
if (barrierId.isPresent() && barrierId.getAsLong() > lastBarrierId) {
// checkpoint was not yet started by task thread,
// so remember the numbers of buffers to spill for the time when
// it will be started
lastBarrierId = barrierId.getAsLong();
lastBarrierSequenceNumber = sequenceBuffer.sequenceNumber;
}
channelStatePersister.maybePersist(buffer);
++expectedSequenceNumber;
}
if (firstPriorityEvent) {
notifyPriorityEvent(sequenceNumber);
}
if (wasEmpty) {
notifyChannelNonEmpty();
}
if (backlog >= 0) {
onSenderBacklog(backlog);
}
} finally {
if (recycleBuffer) {
buffer.recycleBuffer();
}
}
} | @Test
void testConcurrentOnBufferAndRelease() throws Exception {
testConcurrentReleaseAndSomething(
8192,
(inputChannel, buffer, j) -> {
inputChannel.onBuffer(buffer, j, -1, 0);
return true;
});
} |
public XAQueueConnection xaQueueConnection(XAQueueConnection connection) {
return TracingXAConnection.create(connection, this);
} | @Test void xaQueueConnection_doesntDoubleWrap() {
XAQueueConnection wrapped = jmsTracing.xaQueueConnection(mock(XAQueueConnection.class));
assertThat(jmsTracing.xaQueueConnection(wrapped))
.isSameAs(wrapped);
} |
@Override
public Map<String, ByteBuffer> performAssignment(String leaderId, String protocol,
List<JoinGroupResponseMember> allMemberMetadata,
WorkerCoordinator coordinator) {
log.debug("Performing task assignment");
Map<String, ExtendedWorkerState> memberConfigs = new HashMap<>();
for (JoinGroupResponseMember member : allMemberMetadata) {
memberConfigs.put(
member.memberId(),
IncrementalCooperativeConnectProtocol.deserializeMetadata(ByteBuffer.wrap(member.metadata())));
}
log.debug("Member configs: {}", memberConfigs);
// The new config offset is the maximum seen by any member. We always perform assignment using this offset,
// even if some members have fallen behind. The config offset used to generate the assignment is included in
// the response so members that have fallen behind will not use the assignment until they have caught up.
long maxOffset = memberConfigs.values().stream().map(ExtendedWorkerState::offset).max(Long::compare).get();
log.debug("Max config offset root: {}, local snapshot config offsets root: {}",
maxOffset, coordinator.configSnapshot().offset());
short protocolVersion = ConnectProtocolCompatibility.fromProtocol(protocol).protocolVersion();
Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator);
if (leaderOffset == null) {
Map<String, ExtendedAssignment> assignments = fillAssignments(
memberConfigs.keySet(), Assignment.CONFIG_MISMATCH,
leaderId, memberConfigs.get(leaderId).url(), maxOffset,
ClusterAssignment.EMPTY, 0, protocolVersion);
return serializeAssignments(assignments, protocolVersion);
}
return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator, protocolVersion);
} | @Test
public void testProtocolV2() {
// Sanity test to make sure that the right protocol is chosen during the assignment
connectors.clear();
String leader = "followMe";
List<JoinGroupResponseData.JoinGroupResponseMember> memberMetadata = new ArrayList<>();
ExtendedAssignment leaderAssignment = new ExtendedAssignment(
IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2,
ConnectProtocol.Assignment.NO_ERROR,
leader,
"followMe:618",
CONFIG_OFFSET,
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
0
);
ExtendedWorkerState leaderState = new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, leaderAssignment);
JoinGroupResponseData.JoinGroupResponseMember leaderMetadata = new JoinGroupResponseData.JoinGroupResponseMember()
.setMemberId(leader)
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(leaderState, true).array());
memberMetadata.add(leaderMetadata);
WorkerCoordinator coordinator = mock(WorkerCoordinator.class);
when(coordinator.configSnapshot()).thenReturn(configState());
Map<String, ByteBuffer> serializedAssignments = assignor.performAssignment(
leader,
ConnectProtocolCompatibility.SESSIONED.protocol(),
memberMetadata,
coordinator
);
serializedAssignments.forEach((worker, serializedAssignment) -> {
ExtendedAssignment assignment = IncrementalCooperativeConnectProtocol.deserializeAssignment(serializedAssignment);
assertEquals(
IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2,
assignment.version(),
"Incorrect protocol version in assignment for worker " + worker
);
});
} |
@Override
@Transactional
public boolean checkForPreApproval(Long userId, Integer userType, String clientId, Collection<String> requestedScopes) {
        // Step 1: check the client's auto-approved scopes; if every requested scope is auto-approved, return true
        OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
        Assert.notNull(clientDO, "客户端不能为空"); // defensive programming
        if (CollUtil.containsAll(clientDO.getAutoApproveScopes(), requestedScopes)) {
            // gh-877 - if all scopes are auto approved, approvals still need to be added to the approval store.
            LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT);
            for (String scope : requestedScopes) {
                saveApprove(userId, userType, clientId, scope, true, expireTime);
            }
            return true;
        }
        // Step 2: also count the approvals the user has already granted; return true if all requested scopes are covered
        List<OAuth2ApproveDO> approveDOs = getApproveList(userId, userType, clientId);
        Set<String> scopes = convertSet(approveDOs, OAuth2ApproveDO::getScope,
                OAuth2ApproveDO::getApproved); // keep only unexpired + approved entries
return CollUtil.containsAll(scopes, requestedScopes);
} | @Test
public void checkForPreApproval_reject() {
        // prepare parameters
        Long userId = randomLongId();
        Integer userType = randomEle(UserTypeEnum.values()).getValue();
        String clientId = randomString();
        List<String> requestedScopes = Lists.newArrayList("read");
        // mock methods
        when(oauth2ClientService.validOAuthClientFromCache(eq(clientId)))
                .thenReturn(randomPojo(OAuth2ClientDO.class).setAutoApproveScopes(null));
        // mock data
        OAuth2ApproveDO approve = randomPojo(OAuth2ApproveDO.class).setUserId(userId)
                .setUserType(userType).setClientId(clientId).setScope("read")
                .setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 1L, ChronoUnit.DAYS)).setApproved(false); // rejected
        oauth2ApproveMapper.insert(approve);
        // invoke
        boolean success = oauth2ApproveService.checkForPreApproval(userId, userType,
                clientId, requestedScopes);
        // assert
assertFalse(success);
} |
@Override
public ListenableFuture<List<Device>> findDevicesByTenantIdCustomerIdAndIdsAsync(UUID tenantId, UUID customerId, List<UUID> deviceIds) {
return service.submit(() -> DaoUtil.convertDataList(
deviceRepository.findDevicesByTenantIdAndCustomerIdAndIdIn(tenantId, customerId, deviceIds)));
} | @Test
public void testFindDevicesByTenantIdAndCustomerIdAndIdsAsync() throws ExecutionException, InterruptedException, TimeoutException {
ListenableFuture<List<Device>> devicesFuture = deviceDao.findDevicesByTenantIdCustomerIdAndIdsAsync(tenantId1, customerId1, deviceIds);
List<Device> devices = devicesFuture.get(30, TimeUnit.SECONDS);
assertEquals(20, devices.size());
} |
public static Model readPom(File file) throws AnalysisException {
Model model = null;
final PomParser parser = new PomParser();
try {
model = parser.parse(file);
} catch (PomParseException ex) {
if (ex.getCause() instanceof SAXParseException) {
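                // The default parse applies doctype cleanup that can itself break some poms; retry parsing without it.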
try {
model = parser.parseWithoutDocTypeCleanup(file);
} catch (PomParseException ex1) {
LOGGER.warn("Unable to parse pom '{}'", file.getPath());
LOGGER.debug("", ex1);
throw new AnalysisException(ex1);
}
}
if (model == null) {
LOGGER.warn("Unable to parse pom '{}'", file.getPath());
LOGGER.debug("", ex);
throw new AnalysisException(ex);
}
} catch (Throwable ex) {
LOGGER.warn("Unexpected error during parsing of the pom '{}'", file.getPath());
LOGGER.debug("", ex);
throw new AnalysisException(ex);
}
if (model == null) {
throw new AnalysisException(String.format("Unable to parse pom '%s'", file.getPath()));
}
return model;
} | @Test
public void testReadPom_File() throws Exception {
File file = BaseTest.getResourceAsFile(this, "dwr-pom.xml");
String expResult = "Direct Web Remoting";
Model result = PomUtils.readPom(file);
assertEquals(expResult, result.getName());
expResult = "get ahead";
assertEquals(expResult, result.getOrganization());
expResult = "http://getahead.ltd.uk/dwr";
assertEquals(expResult, result.getOrganizationUrl());
file = BaseTest.getResourceAsFile(this, "jmockit-1.26.pom");
expResult = "Main ø modified to test issue #710 and #801 (&s;)";
result = PomUtils.readPom(file);
assertEquals(expResult, result.getName());
file = BaseTest.getResourceAsFile(this, "pom/mailapi-1.4.3_projectcomment.pom");
expResult = "JavaMail API jar";
result = PomUtils.readPom(file);
assertEquals(expResult, result.getName());
} |
@Override
public int run(String[] args) throws Exception {
try {
webServiceClient = WebServiceClient.getWebServiceClient().createClient();
return runCommand(args);
} finally {
if (yarnClient != null) {
yarnClient.close();
}
if (webServiceClient != null) {
webServiceClient.destroy();
}
}
@Test(timeout = 10000L)
public void testInvalidOpts() throws Exception {
YarnClient mockYarnClient = createMockYarnClient(
YarnApplicationState.FINISHED,
UserGroupInformation.getCurrentUser().getShortUserName());
LogsCLI cli = new LogsCLIForTest(mockYarnClient);
cli.setConf(conf);
int exitCode = cli.run( new String[] { "-InvalidOpts"});
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"options parsing failed: Unrecognized option: -InvalidOpts"));
} |
@Override
public OAuth2AccessTokenDO checkAccessToken(String accessToken) {
OAuth2AccessTokenDO accessTokenDO = getAccessToken(accessToken);
if (accessTokenDO == null) {
throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "访问令牌不存在");
}
if (DateUtils.isExpired(accessTokenDO.getExpiresTime())) {
throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "访问令牌已过期");
}
return accessTokenDO;
} | @Test
public void testCheckAccessToken_null() {
        // invoke and assert
assertServiceException(() -> oauth2TokenService.checkAccessToken(randomString()),
new ErrorCode(401, "访问令牌不存在"));
} |
@Override
public Optional<SensorCacheData> load() {
String url = URL + "?project=" + project.key();
if (branchConfiguration.referenceBranchName() != null) {
url = url + "&branch=" + branchConfiguration.referenceBranchName();
}
Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG);
GetRequest request = new GetRequest(url).setHeader(ACCEPT_ENCODING, "gzip");
try (WsResponse response = wsClient.call(request); InputStream is = response.contentStream()) {
Optional<String> contentEncoding = response.header(CONTENT_ENCODING);
Optional<Integer> length = response.header(CONTENT_LENGTH).map(Integer::parseInt);
boolean hasGzipEncoding = contentEncoding.isPresent() && contentEncoding.get().equals("gzip");
SensorCacheData cache = hasGzipEncoding ? decompress(is) : read(is);
if (length.isPresent()) {
profiler.stopInfo(LOG_MSG + String.format(" (%s)", humanReadableByteCountSI(length.get())));
} else {
profiler.stopInfo(LOG_MSG);
}
return Optional.of(cache);
} catch (HttpException e) {
if (e.code() == 404) {
profiler.stopInfo(LOG_MSG + " (404)");
return Optional.empty();
}
throw MessageException.of("Failed to download analysis cache: " + DefaultScannerWsClient.createErrorMessage(e));
} catch (Exception e) {
throw new IllegalStateException("Failed to download analysis cache", e);
}
} | @Test
public void loads_content_for_branch() throws IOException {
when(branchConfiguration.referenceBranchName()).thenReturn("name");
setResponse(MSG);
SensorCacheData msg = loader.load().get();
assertThat(msg.getEntries()).containsOnly(entry(MSG.getKey(), MSG.getData()));
assertRequestPath("api/analysis_cache/get?project=myproject&branch=name");
assertThat(logs.logs()).anyMatch(s -> s.startsWith("Load analysis cache | time="));
} |
public static RectL getBounds(final RectL pIn,
final long pCenterX, final long pCenterY, final double pDegrees,
final RectL pReuse) {
final RectL out = pReuse != null ? pReuse : new RectL();
if (pDegrees == 0) { // optimization
out.top = pIn.top;
out.left = pIn.left;
out.bottom = pIn.bottom;
out.right = pIn.right;
return out;
}
final double radians = pDegrees * Math.PI / 180.;
final double cos = Math.cos(radians);
final double sin = Math.sin(radians);
long inputX;
long inputY;
long outputX;
long outputY;
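        // Rotate each of the four corners around the center and grow the output rect to the min/max of the rotated coordinates.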
inputX = pIn.left; // corner 1
inputY = pIn.top;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
out.top = out.bottom = outputY;
out.left = out.right = outputX;
inputX = pIn.right; // corner 2
inputY = pIn.top;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
if (out.top > outputY) {
out.top = outputY;
}
if (out.bottom < outputY) {
out.bottom = outputY;
}
if (out.left > outputX) {
out.left = outputX;
}
if (out.right < outputX) {
out.right = outputX;
}
inputX = pIn.right; // corner 3
inputY = pIn.bottom;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
if (out.top > outputY) {
out.top = outputY;
}
if (out.bottom < outputY) {
out.bottom = outputY;
}
if (out.left > outputX) {
out.left = outputX;
}
if (out.right < outputX) {
out.right = outputX;
}
inputX = pIn.left; // corner 4
inputY = pIn.bottom;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
if (out.top > outputY) {
out.top = outputY;
}
if (out.bottom < outputY) {
out.bottom = outputY;
}
if (out.left > outputX) {
out.left = outputX;
}
if (out.right < outputX) {
out.right = outputX;
}
return out;
} | @Test
public void testGetBounds180() {
final double degrees = 180;
final RectL in = new RectL();
final RectL out = new RectL();
for (int i = 0; i < mIterations; i++) {
in.top = getRandomCoordinate();
in.left = getRandomCoordinate();
in.bottom = getRandomCoordinate();
in.right = getRandomCoordinate();
final long centerX = getRandomCoordinate();
final long centerY = getRandomCoordinate();
RectL.getBounds(in, centerX, centerY, degrees, out);
final long top = centerY - (in.top - centerY);
final long bottom = centerY - (in.bottom - centerY);
final long left = centerX - (in.left - centerX);
final long right = centerX - (in.right - centerX);
Assert.assertEquals(Math.min(top, bottom), out.top);
Assert.assertEquals(Math.min(left, right), out.left);
Assert.assertEquals(Math.max(top, bottom), out.bottom);
Assert.assertEquals(Math.max(left, right), out.right);
}
} |
@Override
public ConfigData load(ConfigDataLoaderContext context, PolarisConfigDataResource resource)
throws ConfigDataResourceNotFoundException {
try {
return load(context.getBootstrapContext(), resource);
}
catch (Exception e) {
log.warn("Error getting properties from polaris: " + resource, e);
if (!resource.isOptional()) {
throw new ConfigDataResourceNotFoundException(resource, e);
}
return null;
}
} | @Test
public void loadConfigDataInternalConfigFilesTest() {
try (MockedStatic<ConfigFileServiceFactory> mockedStatic = mockStatic(ConfigFileServiceFactory.class)) {
ConfigDataLoaderContext context = mock(ConfigDataLoaderContext.class);
PolarisConfigDataResource polarisConfigDataResource = mock(PolarisConfigDataResource.class);
ConfigurableBootstrapContext bootstrapContext = mock(ConfigurableBootstrapContext.class);
PolarisConfigProperties polarisConfigProperties = mock(PolarisConfigProperties.class);
PolarisContextProperties polarisContextProperties = mock(PolarisContextProperties.class);
ConfigFileService configFileService = mock(ConfigFileService.class);
Profiles profiles = mock(Profiles.class);
Map<String, Object> emptyMap = new HashMap<>();
ConfigKVFile emptyConfigFile = new MockedConfigKVFile(emptyMap);
when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "application.yml")).thenReturn(emptyConfigFile);
when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "application.yaml")).thenReturn(emptyConfigFile);
when(configFileService.getConfigPropertiesFile(testNamespace, testServiceName, "bootstrap.properties")).thenReturn(emptyConfigFile);
when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "bootstrap.yml")).thenReturn(emptyConfigFile);
when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "bootstrap.yaml")).thenReturn(emptyConfigFile);
Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put("k1", "v1");
applicationProperties.put("k2", "v2");
applicationProperties.put("k3", "v3");
ConfigKVFile propertiesFile = new MockedConfigKVFile(applicationProperties);
when(configFileService.getConfigPropertiesFile(testNamespace, testServiceName, "application.properties"))
.thenReturn(propertiesFile);
when(context.getBootstrapContext()).thenReturn(bootstrapContext);
when(polarisContextProperties.getNamespace()).thenReturn(testNamespace);
when(polarisContextProperties.getService()).thenReturn(testServiceName);
when(polarisConfigProperties.getGroups()).thenReturn(null);
when(polarisConfigProperties.isInternalEnabled()).thenReturn(true);
when(profiles.getActive()).thenReturn(Lists.newArrayList());
PolarisConfigDataLoader polarisConfigDataLoader = new PolarisConfigDataLoader(new DeferredLogs());
if (INTERNAL_CONFIG_FILES_LOADED.get()) {
INTERNAL_CONFIG_FILES_LOADED.compareAndSet(true, false);
}
if (CUSTOM_POLARIS_CONFIG_FILE_LOADED.get()) {
CUSTOM_POLARIS_CONFIG_FILE_LOADED.compareAndSet(true, false);
}
when(polarisConfigDataResource.getPolarisConfigProperties()).thenReturn(polarisConfigProperties);
when(polarisConfigDataResource.getPolarisContextProperties()).thenReturn(polarisContextProperties);
when(polarisConfigDataResource.getServiceName()).thenReturn(testServiceName);
when(polarisConfigDataResource.getProfiles()).thenReturn(profiles);
mockedStatic.when(() -> {
ConfigFileServiceFactory.createConfigFileService(any(SDKContext.class));
}).thenReturn(configFileService);
ConfigData configData = polarisConfigDataLoader.load(context, polarisConfigDataResource);
List<PropertySource<?>> propertySources = configData.getPropertySources();
CompositePropertySource compositePropertySource = new CompositePropertySource(polarisConfigPropertySourceName);
propertySources.forEach(compositePropertySource::addPropertySource);
assertThat(compositePropertySource.getProperty("k1")).isEqualTo("v1");
assertThat(compositePropertySource.getProperty("k2")).isEqualTo("v2");
assertThat(compositePropertySource.getProperty("k3")).isEqualTo("v3");
}
} |
ControllerResult<ElectLeadersResponseData> electLeaders(ElectLeadersRequestData request) {
ElectionType electionType = electionType(request.electionType());
List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
ElectLeadersResponseData response = new ElectLeadersResponseData();
if (request.topicPartitions() == null) {
// If topicPartitions is null, we try to elect a new leader for every partition. There
// are some obvious issues with this wire protocol. For example, what if we have too
// many partitions to fit the results in a single RPC? This behavior should probably be
// removed from the protocol. For now, however, we have to implement this for
// compatibility with the old controller.
for (Entry<String, Uuid> topicEntry : topicsByName.entrySet()) {
String topicName = topicEntry.getKey();
ReplicaElectionResult topicResults =
new ReplicaElectionResult().setTopic(topicName);
response.replicaElectionResults().add(topicResults);
TopicControlInfo topic = topics.get(topicEntry.getValue());
if (topic != null) {
for (int partitionId : topic.parts.keySet()) {
ApiError error = electLeader(topicName, partitionId, electionType, records);
// When electing leaders for all partitions, we do not return
// partitions which already have the desired leader.
if (error.error() != Errors.ELECTION_NOT_NEEDED) {
topicResults.partitionResult().add(new PartitionResult().
setPartitionId(partitionId).
setErrorCode(error.error().code()).
setErrorMessage(error.message()));
}
}
}
}
} else {
for (TopicPartitions topic : request.topicPartitions()) {
ReplicaElectionResult topicResults =
new ReplicaElectionResult().setTopic(topic.topic());
response.replicaElectionResults().add(topicResults);
for (int partitionId : topic.partitions()) {
ApiError error = electLeader(topic.topic(), partitionId, electionType, records);
topicResults.partitionResult().add(new PartitionResult().
setPartitionId(partitionId).
setErrorCode(error.error().code()).
setErrorMessage(error.message()));
}
}
}
return ControllerResult.of(records, response);
} | @Test
public void testPreferredElectionDoesNotTriggerUncleanElection() {
ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
ReplicationControlManager replication = ctx.replicationControl;
ctx.registerBrokers(1, 2, 3, 4);
ctx.unfenceBrokers(1, 2, 3, 4);
Uuid fooId = ctx.createTestTopic("foo", new int[][]{new int[]{1, 2, 3}}).topicId();
TopicIdPartition partition = new TopicIdPartition(fooId, 0);
ctx.fenceBrokers(Utils.mkSet(2, 3));
ctx.fenceBrokers(Utils.mkSet(1, 2, 3));
ctx.unfenceBrokers(Utils.mkSet(2));
assertLeaderAndIsr(replication, partition, NO_LEADER, new int[]{1});
ctx.alterTopicConfig("foo", "unclean.leader.election.enable", "true");
ElectLeadersRequestData request = buildElectLeadersRequest(
ElectionType.PREFERRED,
singletonMap("foo", singletonList(0))
);
// No election should be done even though unclean election is available
ControllerResult<ElectLeadersResponseData> result = replication.electLeaders(request);
assertEquals(Collections.emptyList(), result.records());
ElectLeadersResponseData expectedResponse = buildElectLeadersResponse(NONE, false, singletonMap(
new TopicPartition("foo", 0), new ApiError(PREFERRED_LEADER_NOT_AVAILABLE)
));
assertEquals(expectedResponse, result.response());
} |
public void startsWith(@Nullable String string) {
checkNotNull(string);
if (actual == null) {
failWithActual("expected a string that starts with", string);
} else if (!actual.startsWith(string)) {
failWithActual("expected to start with", string);
}
} | @Test
public void stringStartsWithFail() {
expectFailureWhenTestingThat("abc").startsWith("bc");
assertFailureValue("expected to start with", "bc");
} |
public static void main( String[] args )
{
// suppress the Dock icon on OS X
System.setProperty("apple.awt.UIElement", "true");
int exitCode = new CommandLine(new ExtractText()).execute(args);
System.exit(exitCode);
} | @Test
void testPDFBoxRepeatableSubcommandAddFileNameOutfile(@TempDir Path tempDir) throws Exception
{
Path path = null;
try
{
path = tempDir.resolve("outfile.txt");
Files.deleteIfExists(path);
}
catch (InvalidPathException ipe)
{
System.err.println(
"Error creating temporary test file in " + this.getClass().getSimpleName());
}
assertNotNull(path);
PDFBox.main(new String[] { "export:text", "-i", testfile1, "-encoding", "UTF-8",
"-addFileName", "-o", path.toString(), //
"export:text", "-i", testfile2, "-encoding", "UTF-8", //
"-addFileName", "-o", path.toString() });
String result = new String(Files.readAllBytes(path), "UTF-8");
assertFalse(result.contains("PDF1"));
assertFalse(result.contains("PDF2"));
assertFalse(result.contains("PDF file: " + filename1));
assertTrue(result.contains("Hello"));
assertTrue(result.contains("World."));
assertTrue(result.contains("PDF file: " + filename2));
} |
public static TimelineEvent getApplicationEvent(TimelineEntity te,
String eventId) {
if (isApplicationEntity(te)) {
for (TimelineEvent event : te.getEvents()) {
if (event.getId().equals(eventId)) {
return event;
}
}
}
return null;
} | @Test
void testGetApplicationEvent() {
TimelineEntity te = null;
TimelineEvent tEvent = ApplicationEntity.getApplicationEvent(te,
"no event");
assertEquals(null, tEvent);
te = new TimelineEntity();
te.setType(TimelineEntityType.YARN_APPLICATION.toString());
TimelineEvent event = new TimelineEvent();
event.setId("start_event");
event.setTimestamp(System.currentTimeMillis());
te.addEvent(event);
tEvent = ApplicationEntity.getApplicationEvent(te, "start_event");
assertEquals(event, tEvent);
te = new TimelineEntity();
te.setType(TimelineEntityType.YARN_CLUSTER.toString());
event = new TimelineEvent();
event.setId("start_event_cluster");
event.setTimestamp(System.currentTimeMillis());
te.addEvent(event);
tEvent = ApplicationEntity.getApplicationEvent(te, "start_event_cluster");
assertEquals(null, tEvent);
} |
protected void commonDeclarePatternWithConstraint(final CEDescrBuilder<?, ?> descrBuilder, final String patternType, final String constraintString) {
descrBuilder.pattern(patternType).constraint(constraintString);
} | @Test
void commonDeclarePatternWithConstraint() {
String patternType = "TEMPERATURE";
String constraintsString = "value < 35";
final CEDescrBuilder<CEDescrBuilder<CEDescrBuilder<RuleDescrBuilder, AndDescr>, NotDescr>, ExistsDescr> existsBuilder = lhsBuilder.not().exists();
KiePMMLDescrLhsFactory.factory(lhsBuilder).commonDeclarePatternWithConstraint(existsBuilder, patternType,
constraintsString);
assertThat(existsBuilder.getDescr()).isNotNull();
final List<BaseDescr> descrs = existsBuilder.getDescr().getDescrs();
assertThat(descrs).isNotNull();
assertThat(descrs).hasSize(1);
assertThat(descrs.get(0)).isInstanceOf(PatternDescr.class);
PatternDescr patternDescr = (PatternDescr) descrs.get(0);
assertThat(patternDescr.getObjectType()).isEqualTo(patternType);
assertThat(patternDescr.getIdentifier()).isNull();
assertThat(patternDescr.getConstraint()).isInstanceOf(AndDescr.class);
AndDescr andDescr = (AndDescr) patternDescr.getConstraint();
assertThat(andDescr.getDescrs()).hasSize(1);
assertThat(andDescr.getDescrs().get(0)).isInstanceOf(ExprConstraintDescr.class);
ExprConstraintDescr exprConstraintDescr = (ExprConstraintDescr) andDescr.getDescrs().get(0);
assertThat(exprConstraintDescr.isNegated()).isFalse();
assertThat(exprConstraintDescr.getType()).isEqualTo(ExprConstraintDescr.Type.NAMED);
assertThat(exprConstraintDescr.getExpression()).isEqualTo(constraintsString);
} |
@Config("discovery-server.enabled")
public EmbeddedDiscoveryConfig setEnabled(boolean enabled)
{
this.enabled = enabled;
return this;
} | @Test
public void testDefaults()
{
assertRecordedDefaults(recordDefaults(EmbeddedDiscoveryConfig.class)
.setEnabled(false));
} |
Future<String> findZookeeperLeader(Reconciliation reconciliation, Set<String> pods, TlsPemIdentity coTlsPemIdentity) {
if (pods.size() == 0) {
return Future.succeededFuture(UNKNOWN_LEADER);
} else if (pods.size() == 1) {
return Future.succeededFuture(pods.stream().findFirst().get());
}
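        // Two or more pods: probe them over TLS (using the cluster operator's identity) with retry and backoff to find the leader.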
try {
NetClientOptions netClientOptions = clientOptions(coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());
return zookeeperLeaderWithBackoff(reconciliation, pods, netClientOptions);
} catch (Throwable e) {
return Future.failedFuture(e);
}
} | @Test
public void test0PodsClusterReturnsUnknownLeader(VertxTestContext context) {
ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, this::backoff);
Checkpoint a = context.checkpoint();
finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, emptySet(), DUMMY_IDENTITY)
.onComplete(context.succeeding(leader -> {
context.verify(() -> assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER)));
a.flag();
}));
} |
public ZKClientConfig toConfig(Path configFile) throws IOException, QuorumPeerConfig.ConfigException {
String configString = toConfigString();
Files.createDirectories(configFile.getParent());
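        // Write to a temp file in the target directory and move it atomically so readers never observe a partially written config.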
Path tempFile = Files.createTempFile(configFile.toAbsolutePath().getParent(), "." + configFile.getFileName(), ".tmp");
Files.writeString(tempFile, configString);
Files.move(tempFile, configFile, StandardCopyOption.ATOMIC_MOVE);
return new ZKClientConfig(configFile.toString());
} | @Test
void config_when_not_using_tls_context() {
ZkClientConfigBuilder builder = new ZkClientConfigBuilder(null);
ZKClientConfig config = builder.toConfig();
assertEquals("false", config.getProperty(CLIENT_SECURE_PROPERTY));
assertEquals("org.apache.zookeeper.ClientCnxnSocketNetty", config.getProperty(CLIENT_CONNECTION_SOCKET));
assertNull(config.getProperty(SSL_CONTEXT_SUPPLIER_CLASS_PROPERTY));
assertNull(config.getProperty(SSL_CLIENTAUTH_PROPERTY));
} |
@Override
public Iterator<MessageProcessor> iterator() {
return sortedProcessors.get().iterator();
} | @Test
public void testIterator() throws Exception {
final Iterator<MessageProcessor> iterator = orderedMessageProcessors.iterator();
assertEquals("A is first", A.class, iterator.next().getClass());
assertEquals("B is last", B.class, iterator.next().getClass());
assertFalse("Iterator exhausted", iterator.hasNext());
when(clusterConfigService.get(MessageProcessorsConfig.class)).thenReturn(
MessageProcessorsConfig.create(Lists.newArrayList(B.class.getCanonicalName(),
A.class.getCanonicalName())));
orderedMessageProcessors.handleOrderingUpdate(getClusterConfigChangedEvent());
final Iterator<MessageProcessor> it2 = orderedMessageProcessors.iterator();
assertEquals("B is first", B.class, it2.next().getClass());
assertEquals("A is last", A.class, it2.next().getClass());
assertFalse("Iterator exhausted", it2.hasNext());
when(clusterConfigService.get(MessageProcessorsConfig.class)).thenReturn(
MessageProcessorsConfig.create(Lists.newArrayList(B.class.getCanonicalName(),
A.class.getCanonicalName()),
Sets.newHashSet(B.class.getCanonicalName())));
orderedMessageProcessors.handleOrderingUpdate(getClusterConfigChangedEvent());
final Iterator<MessageProcessor> it3 = orderedMessageProcessors.iterator();
assertEquals("A is only element", A.class, it3.next().getClass());
assertFalse("Iterator exhausted", it3.hasNext());
} |
@Override
public TimelineEntity getApplicationEntity(ApplicationId appId, String fields,
Map<String, String> filters)
throws IOException {
String path = PATH_JOINER.join("clusters", clusterId, "apps", appId);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedMapImpl();
params.add("fields", fields);
mergeFilters(params, filters);
ClientResponse response = doGetUri(baseUri, path, params);
TimelineEntity entity = response.getEntity(TimelineEntity.class);
return entity;
} | @Test
void testGetApplication() throws Exception {
ApplicationId applicationId =
ApplicationId.fromString("application_1234_0001");
TimelineEntity entity = client.getApplicationEntity(applicationId,
null, null);
assertEquals("mockApp1", entity.getId());
} |
@Override
public void initChannel(final Channel channel) {
channel.pipeline().addBefore(FrontendChannelInboundHandler.class.getSimpleName(), MySQLSequenceIdInboundHandler.class.getSimpleName(), new MySQLSequenceIdInboundHandler(channel));
} | @Test
void assertInitChannel() {
engine.initChannel(channel);
verify(channel.attr(MySQLConstants.SEQUENCE_ID_ATTRIBUTE_KEY)).set(any(AtomicInteger.class));
verify(channel.pipeline())
.addBefore(eq(FrontendChannelInboundHandler.class.getSimpleName()), eq(MySQLSequenceIdInboundHandler.class.getSimpleName()), isA(MySQLSequenceIdInboundHandler.class));
} |
@Override
public Local getFile() {
return LocalFactory.get(PreferencesFactory.get().getProperty("bookmark.import.fetch.location"));
} | @Test
public void testGetFile() throws Exception {
FetchBookmarkCollection c = new FetchBookmarkCollection();
assertEquals(0, c.size());
c.parse(new ProtocolFactory(new HashSet<>(Arrays.asList(new TestProtocol(Scheme.ftp), new TestProtocol(Scheme.ftps), new TestProtocol(Scheme.sftp)))), new Local("src/test/resources/com.fetchsoftworks.Fetch.Shortcuts.plist"));
assertEquals(2, c.size());
} |
@NotNull
@Override
public List<InetAddress> lookup(@NotNull String host) throws UnknownHostException {
InetAddress address = InetAddress.getByName(host);
if (configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY).orElse(SONAR_VALIDATE_WEBHOOKS_DEFAULT_VALUE)
&& (address.isLoopbackAddress() || address.isAnyLocalAddress() || isLocalAddress(address))) {
throw new IllegalArgumentException("Invalid URL: loopback and wildcard addresses are not allowed for webhooks.");
}
return Collections.singletonList(address);
} | @Test
public void lookup_fail_on_127_0_0_1() {
when(configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY))
.thenReturn(Optional.of(true));
Assertions.assertThatThrownBy(() -> underTest.lookup("127.0.0.1"))
.hasMessageContaining(INVALID_URL)
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
public List<Point> geoPos(byte[] key, byte[]... members) {
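        // GEOPOS returns the coordinates of each requested member, in request order.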
List<Object> params = new ArrayList<Object>(members.length + 1);
params.add(key);
params.addAll(Arrays.asList(members));
MultiDecoder<Map<Object, Object>> decoder = new ListMultiDecoder2(new ObjectListReplayDecoder2(), new PointDecoder());
RedisCommand<Map<Object, Object>> command = new RedisCommand<Map<Object, Object>>("GEOPOS", decoder);
return read(key, StringCodec.INSTANCE, command, params.toArray());
} | @Test
public void testGeoPos() {
connection.geoAdd("key1".getBytes(), new Point(13.361389, 38.115556), "value1".getBytes());
connection.geoAdd("key1".getBytes(), new Point(15.087269, 37.502669), "value2".getBytes());
List<Point> s = connection.geoPos("key1".getBytes(), "value1".getBytes(), "value2".getBytes());
assertThat(s).hasSize(2);
List<Point> se = connection.geoPos("key2".getBytes(), "value1".getBytes(), "value2".getBytes());
assertThat(se).isEmpty();
} |
void handleTestStepFinished(TestStepFinished event) {
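        // Only passed pickle steps contribute usage entries; hooks and non-passed results are ignored.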
if (event.getTestStep() instanceof PickleStepTestStep && event.getResult().getStatus().is(Status.PASSED)) {
PickleStepTestStep testStep = (PickleStepTestStep) event.getTestStep();
addUsageEntry(event.getResult(), testStep);
}
} | @Test
void resultWithPassedAndFailedStep() {
OutputStream out = new ByteArrayOutputStream();
UsageFormatter usageFormatter = new UsageFormatter(out);
TestStep testStep = mockTestStep();
Result passed = new Result(Status.PASSED, Duration.ofSeconds(12345L), null);
usageFormatter
.handleTestStepFinished(new TestStepFinished(Instant.EPOCH, mock(TestCase.class), testStep, passed));
Result failed = new Result(Status.FAILED, Duration.ZERO, null);
usageFormatter
.handleTestStepFinished(new TestStepFinished(Instant.EPOCH, mock(TestCase.class), testStep, failed));
Map<String, List<UsageFormatter.StepContainer>> usageMap = usageFormatter.usageMap;
assertThat(usageMap.size(), is(equalTo(1)));
List<UsageFormatter.StepContainer> durationEntries = usageMap.get("stepDef");
assertThat(durationEntries.size(), is(equalTo(1)));
assertThat(durationEntries.get(0).getName(), is(equalTo("step")));
assertThat(durationEntries.get(0).getDurations().size(), is(equalTo(1)));
assertThat(durationEntries.get(0).getDurations().get(0).getDuration(), is(closeTo(12345.0, EPSILON)));
} |
public static SortOrder buildSortOrder(Table table) {
return buildSortOrder(table.schema(), table.spec(), table.sortOrder());
} | @Test
public void testSortOrderClusteringSatisfiedPartitionLast() {
PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("category").day("ts").build();
SortOrder order =
SortOrder.builderFor(SCHEMA)
.withOrderId(1)
.asc("category")
.asc("ts") // satisfies the ordering of days(ts)
.desc("id")
.build();
SortOrder expected =
SortOrder.builderFor(SCHEMA).withOrderId(1).asc("category").asc("ts").desc("id").build();
assertThat(SortOrderUtil.buildSortOrder(SCHEMA, spec, order))
.as("Should add spec fields as prefix")
.isEqualTo(expected);
} |
@Override
public void deleteFiles(Iterable<String> paths) throws BulkDeletionFailureException {
if (s3FileIOProperties.deleteTags() != null && !s3FileIOProperties.deleteTags().isEmpty()) {
Tasks.foreach(paths)
.noRetry()
.executeWith(executorService())
.suppressFailureWhenFinished()
.onFailure(
(path, exc) ->
LOG.warn(
"Failed to add delete tags: {} to {}",
s3FileIOProperties.deleteTags(),
path,
exc))
.run(path -> tagFileToDelete(path, s3FileIOProperties.deleteTags()));
}
if (s3FileIOProperties.isDeleteEnabled()) {
SetMultimap<String, String> bucketToObjects =
Multimaps.newSetMultimap(Maps.newHashMap(), Sets::newHashSet);
List<Future<List<String>>> deletionTasks = Lists.newArrayList();
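      // Group object keys by bucket and flush an async deletion batch whenever a bucket reaches the configured batch size.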
for (String path : paths) {
S3URI location = new S3URI(path, s3FileIOProperties.bucketToAccessPointMapping());
String bucket = location.bucket();
String objectKey = location.key();
bucketToObjects.get(bucket).add(objectKey);
if (bucketToObjects.get(bucket).size() == s3FileIOProperties.deleteBatchSize()) {
Set<String> keys = Sets.newHashSet(bucketToObjects.get(bucket));
Future<List<String>> deletionTask =
executorService().submit(() -> deleteBatch(bucket, keys));
deletionTasks.add(deletionTask);
bucketToObjects.removeAll(bucket);
}
}
// Delete the remainder
for (Map.Entry<String, Collection<String>> bucketToObjectsEntry :
bucketToObjects.asMap().entrySet()) {
String bucket = bucketToObjectsEntry.getKey();
Collection<String> keys = bucketToObjectsEntry.getValue();
Future<List<String>> deletionTask =
executorService().submit(() -> deleteBatch(bucket, keys));
deletionTasks.add(deletionTask);
}
int totalFailedDeletions = 0;
for (Future<List<String>> deletionTask : deletionTasks) {
try {
List<String> failedDeletions = deletionTask.get();
failedDeletions.forEach(path -> LOG.warn("Failed to delete object at path {}", path));
totalFailedDeletions += failedDeletions.size();
} catch (ExecutionException e) {
LOG.warn("Caught unexpected exception during batch deletion: ", e.getCause());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
deletionTasks.stream().filter(task -> !task.isDone()).forEach(task -> task.cancel(true));
throw new RuntimeException("Interrupted when waiting for deletions to complete", e);
}
}
if (totalFailedDeletions > 0) {
throw new BulkDeletionFailureException(totalFailedDeletions);
}
}
} | @Test
public void testDeleteFilesS3ReturnsError() {
String location = "s3://bucket/path/to/file-to-delete.txt";
DeleteObjectsResponse deleteObjectsResponse =
DeleteObjectsResponse.builder()
.errors(ImmutableList.of(S3Error.builder().key("path/to/file.txt").build()))
.build();
doReturn(deleteObjectsResponse).when(s3mock).deleteObjects((DeleteObjectsRequest) any());
assertThatThrownBy(() -> s3FileIO.deleteFiles(Lists.newArrayList(location)))
.isInstanceOf(BulkDeletionFailureException.class)
.hasMessage("Failed to delete 1 files");
} |
@Override
public abstract int compare(@NonNull String id1, @NonNull String id2); | @Test
public void testCompareCaseInsensitive() {
IdStrategy idStrategy = IdStrategy.CASE_INSENSITIVE;
assertTrue(idStrategy.compare("user1", "user2") < 0);
assertTrue(idStrategy.compare("user2", "user1") > 0);
assertEquals(0, idStrategy.compare("user1", "user1"));
assertTrue(idStrategy.compare("USER1", "user2") < 0);
assertTrue(idStrategy.compare("USER2", "user1") > 0);
assertEquals(0, idStrategy.compare("User1", "user1"));
} |
static boolean isLeaf(int nodeOrder, int depth) {
checkTrue(depth > 0, "Invalid depth: " + depth);
int leafLevel = depth - 1;
int numberOfNodes = getNumberOfNodes(depth);
int maxNodeOrder = numberOfNodes - 1;
checkTrue(nodeOrder >= 0 && nodeOrder <= maxNodeOrder, "Invalid nodeOrder: " + nodeOrder + " in a tree with depth "
+ depth);
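        // All leaves sit on the deepest level, so any node order at or beyond the left-most leaf order is a leaf.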
int leftMostLeafOrder = MerkleTreeUtil.getLeftMostNodeOrderOnLevel(leafLevel);
return nodeOrder >= leftMostLeafOrder;
} | @Test(expected = IllegalArgumentException.class)
public void testIsLeafThrowsOnInvalidDepth() {
MerkleTreeUtil.isLeaf(0, 0);
} |
public void delete(final String id) throws BackgroundException {
if(log.isInfoEnabled()) {
log.info(String.format("Delete multipart upload for fileid %s", id));
}
try {
session.getClient().cancelLargeFileUpload(id);
}
catch(B2ApiException e) {
throw new B2ExceptionMappingService(fileid).map(e);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
} | @Test
public void testDelete() throws Exception {
final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path file = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final B2StartLargeFileResponse startResponse = session.getClient().startLargeFileUpload(
new B2VersionIdProvider(session).getVersionId(bucket),
file.getName(), null, Collections.emptyMap());
        final String fileid = startResponse.getFileId();
        new B2LargeUploadPartService(session, new B2VersionIdProvider(session)).delete(fileid);
} |
@Override
public void start() {
DefaultInputModule root = moduleHierarchy.root();
componentStore.put(root);
indexChildren(root);
} | @Test
public void testIndex() {
ProjectDefinition rootDef = mock(ProjectDefinition.class);
ProjectDefinition def = mock(ProjectDefinition.class);
when(rootDef.getParent()).thenReturn(null);
when(def.getParent()).thenReturn(rootDef);
DefaultInputModule root = mock(DefaultInputModule.class);
DefaultInputModule mod1 = mock(DefaultInputModule.class);
DefaultInputModule mod2 = mock(DefaultInputModule.class);
DefaultInputModule mod3 = mock(DefaultInputModule.class);
when(root.key()).thenReturn("root");
when(mod1.key()).thenReturn("mod1");
when(mod2.key()).thenReturn("mod2");
when(mod3.key()).thenReturn("mod3");
when(root.definition()).thenReturn(rootDef);
when(mod1.definition()).thenReturn(def);
when(mod2.definition()).thenReturn(def);
when(mod3.definition()).thenReturn(def);
createIndexer();
when(moduleHierarchy.root()).thenReturn(root);
when(moduleHierarchy.children(root)).thenReturn(Arrays.asList(mod1, mod2, mod3));
indexer.start();
DefaultInputModule rootModule = moduleHierarchy.root();
assertThat(rootModule).isNotNull();
assertThat(moduleHierarchy.children(rootModule)).hasSize(3);
} |
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
if(status.isExists()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", target, file));
}
new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(target), callback, new Delete.DisabledCallback());
}
if(file.isDirectory()) {
return target.withAttributes(new BoxAttributesFinderFeature(session, fileid).toAttributes(
new FoldersApi(new BoxApiClient(session.getClient())).postFoldersIdCopy(
fileid.getFileId(file),
new FolderIdCopyBody().name(target.getName()).parent(new FoldersfolderIdcopyParent().id(fileid.getFileId(target.getParent()))),
BoxAttributesFinderFeature.DEFAULT_FIELDS)
));
}
return target.withAttributes(new BoxAttributesFinderFeature(session, fileid).toAttributes(
new FilesApi(new BoxApiClient(session.getClient())).postFilesIdCopy(
fileid.getFileId(file),
new FileIdCopyBody()
.name(target.getName())
.parent(new FilesfileIdcopyParent().id(fileid.getFileId(target.getParent()))),
null, BoxAttributesFinderFeature.DEFAULT_FIELDS)
));
}
catch(ApiException e) {
throw new BoxExceptionMappingService(fileid).map("Cannot copy {0}", e, file);
}
} | @Test
public void testCopyFile() throws Exception {
final BoxFileidProvider fileid = new BoxFileidProvider(session);
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new BoxTouchFeature(session, fileid).touch(test, new TransferStatus());
final Path copy = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new BoxCopyFeature(session, fileid).copy(test, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new BoxFindFeature(session, fileid).find(test.withAttributes(PathAttributes.EMPTY)));
assertTrue(new BoxFindFeature(session, fileid).find(copy.withAttributes(PathAttributes.EMPTY)));
new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static void checkNullOrNonNullNonEmptyEntries(
@Nullable Collection<String> values, String propertyName) {
if (values == null) {
// pass
return;
}
for (String value : values) {
Preconditions.checkNotNull(
value, "Property '" + propertyName + "' cannot contain null entries");
Preconditions.checkArgument(
!value.trim().isEmpty(), "Property '" + propertyName + "' cannot contain empty strings");
}
} | @Test
public void testCheckNullOrNonNullNonEmptyEntries_mapNullValueFail() {
try {
Validator.checkNullOrNonNullNonEmptyEntries(Collections.singletonMap("key1", null), "test");
Assert.fail();
} catch (NullPointerException npe) {
Assert.assertEquals("Property 'test' cannot contain null values", npe.getMessage());
}
} |
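// The test above passes a Map, while the focal method shown takes a Collection<String>, so
// it exercises an overload that is not included with this pair. A minimal sketch of that
// overload, assuming it mirrors the Collection variant but validates map values (the
// "cannot contain null values" wording is taken from the message the test asserts):
public static void checkNullOrNonNullNonEmptyEntries(
    @Nullable Map<String, String> values, String propertyName) {
  if (values == null) {
    return;
  }
  for (Map.Entry<String, String> entry : values.entrySet()) {
    Preconditions.checkNotNull(
        entry.getValue(), "Property '" + propertyName + "' cannot contain null values");
    Preconditions.checkArgument(
        !entry.getValue().trim().isEmpty(),
        "Property '" + propertyName + "' cannot contain empty strings");
  }
}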
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
} | @Test
public void testUnpartitionedBucketLong() throws Exception {
createUnpartitionedTable(spark, tableName);
SparkScanBuilder builder = scanBuilder();
BucketFunction.BucketLong function = new BucketFunction.BucketLong(DataTypes.LongType);
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(5), fieldRef("id")));
Predicate predicate = new Predicate(">=", expressions(udf, intLit(2)));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT GTEQ
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
} |
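// scanBuilder() and pushFilters() are helpers not shown with this pair. A minimal sketch of
// pushFilters, assuming SparkScanBuilder implements Spark's SupportsPushDownV2Filters
// interface (an assumption based on the Predicate types used above):
private void pushFilters(ScanBuilder builder, Predicate... predicates) {
    assertThat(builder).isInstanceOf(SupportsPushDownV2Filters.class);
    ((SupportsPushDownV2Filters) builder).pushPredicates(predicates);
}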
@Override
public String getMime(final String filename) {
if(StringUtils.startsWith(filename, "._")) {
return DEFAULT_CONTENT_TYPE;
}
// Reads from core.mime.types in classpath
return types.getMimetype(StringUtils.lowerCase(filename));
} | @Test
public void testGetMime() {
MappingMimeTypeService s = new MappingMimeTypeService();
assertEquals("text/plain", s.getMime("f.txt"));
assertEquals("text/plain", s.getMime("f.TXT"));
assertEquals("video/x-f4v", s.getMime("f.f4v"));
assertEquals("application/javascript", s.getMime("f.js"));
assertEquals("video/mp2t", s.getMime("f.ts"));
assertEquals("application/x-mpegurl", s.getMime("f.m3u8"));
assertEquals("application/octet-stream", s.getMime("._f.txt"));
} |
public Map<String, String> getTypes(final Set<String> streamIds,
final Set<String> fields) {
final Map<String, Set<String>> allFieldTypes = this.get(streamIds);
final Map<String, String> result = new HashMap<>(fields.size());
fields.forEach(field -> {
final Set<String> fieldTypes = allFieldTypes.get(field);
typeFromFieldType(fieldTypes).ifPresent(s -> result.put(field, s));
});
return result;
} | @Test
void getTypesReturnsEmptyMapIfStreamsAreEmpty() {
final Pair<IndexFieldTypesService, StreamService> services = mockServices();
final FieldTypesLookup lookup = new FieldTypesLookup(services.getLeft(), services.getRight());
assertThat(lookup.getTypes(Set.of(), Set.of("somefield"))).isEmpty();
} |
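// mockServices is a helper not shown with this pair. A minimal sketch, assuming plain
// Mockito mocks returned as a Commons Lang Pair (matching the getLeft()/getRight() calls):
private static Pair<IndexFieldTypesService, StreamService> mockServices() {
    return Pair.of(mock(IndexFieldTypesService.class), mock(StreamService.class));
}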
public static IpPrefix valueOf(int address, int prefixLength) {
return new IpPrefix(IpAddress.valueOf(address), prefixLength);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfStringNegativePrefixLengthIPv6() {
IpPrefix ipPrefix;
ipPrefix =
IpPrefix.valueOf("1111:2222:3333:4444:5555:6666:7777:8888/-1");
} |
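// The test calls a String-parsing valueOf overload rather than the int variant shown. A
// minimal sketch of that overload, assuming "address/prefixLength" syntax; the negative
// prefix length is rejected by the IpPrefix constructor (the exact validation site is an
// assumption):
public static IpPrefix valueOf(String address) {
    final String[] parts = address.split("/");
    if (parts.length != 2) {
        throw new IllegalArgumentException("Malformed IP prefix string: " + address);
    }
    IpAddress ipAddress = IpAddress.valueOf(parts[0]);
    int prefixLength = Integer.parseInt(parts[1]);
    return new IpPrefix(ipAddress, prefixLength);
}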
@Override
public Set<City> findAllByCanton(Canton canton) {
return cityRepository.findAllByCanton(canton).stream()
.map(city -> City.builder()
.of(city)
.setCanton(canton)
.build())
.collect(Collectors.toSet());
} | @Test
void findAllByCanton() {
City city = createCity();
Canton canton = city.getCanton();
Mockito.when(cityRepository.findAllByCanton(canton))
.thenReturn(Collections.singleton(city));
Set<City> expected = Set.of(city);
Set<City> actual = cityService.findAllByCanton(canton);
ReflectionAssert.assertReflectionEquals(expected, actual);
} |
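// createCity is a helper not shown with this pair. A minimal sketch, assuming builder-style
// domain objects as used in the focal method (setter names and values are placeholders):
private City createCity() {
    Canton canton = Canton.builder().setName("Zürich").setAbbreviation("ZH").build();
    return City.builder().setName("Winterthur").setCanton(canton).build();
}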
public void retrieveDocuments() throws DocumentRetrieverException {
boolean first = true;
String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster);
MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route);
documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams);
session = documentAccess.createSyncSession(new SyncParameters.Builder().build());
int trace = params.traceLevel;
if (trace > 0) {
session.setTraceLevel(trace);
}
Iterator<String> iter = params.documentIds;
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println('[');
}
while (iter.hasNext()) {
if (params.jsonOutput && !params.printIdsOnly) {
if (!first) {
System.out.println(',');
} else {
first = false;
}
}
String docid = iter.next();
Message msg = createDocumentRequest(docid);
Reply reply = session.syncSend(msg);
printReply(reply);
}
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println(']');
}
} | @Test
void testHandlingErrorFromMessageBus() throws DocumentRetrieverException {
ClientParameters params = createParameters()
.setDocumentIds(asIterator(DOC_ID_1))
.build();
Reply r = new GetDocumentReply(null);
r.addError(new Error(0, "Error message"));
when(mockedSession.syncSend(any())).thenReturn(r);
DocumentRetriever documentRetriever = createDocumentRetriever(params);
documentRetriever.retrieveDocuments();
assertTrue(errContent.toString().contains("Request failed"));
} |
public boolean isIp4() {
return (version() == Ip4Address.VERSION);
} | @Test
public void testIsIp4() {
IpAddress ipAddress;
// IPv4
ipAddress = IpAddress.valueOf("0.0.0.0");
assertTrue(ipAddress.isIp4());
// IPv6
ipAddress = IpAddress.valueOf("::");
assertFalse(ipAddress.isIp4());
} |
Queue<String> prepareRollingOrder(List<String> podNamesToConsider, List<Pod> pods) {
Deque<String> rollingOrder = new ArrayDeque<>();
for (String podName : podNamesToConsider) {
Pod matchingPod = pods.stream().filter(pod -> podName.equals(pod.getMetadata().getName())).findFirst().orElse(null);
if (matchingPod == null || !Readiness.isPodReady(matchingPod)) {
// Non-existing or unready pods are handled first
// This helps to avoid rolling all pods into some situation where they would be all failing
rollingOrder.addFirst(podName);
} else {
// Ready pods are rolled only at the end
rollingOrder.addLast(podName);
}
}
return rollingOrder;
} | @Test
public void testRollingWithSomePodsOnly() {
List<Pod> pods = List.of(
renamePod(READY_POD, "my-connect-connect-0"),
renamePod(READY_POD, "my-connect-connect-1"),
renamePod(READY_POD, "my-connect-connect-2")
);
KafkaConnectRoller roller = new KafkaConnectRoller(RECONCILIATION, CLUSTER, 1_000L, null);
Queue<String> rollingOrder = roller.prepareRollingOrder(List.of("my-connect-connect-1"), pods);
assertThat(rollingOrder.size(), is(1));
assertThat(rollingOrder.poll(), is("my-connect-connect-1"));
} |
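// READY_POD and renamePod are fixtures not shown with this pair. A minimal sketch of
// renamePod, assuming it clones a template pod under a new name using fabric8's PodBuilder:
private Pod renamePod(Pod template, String name) {
    return new PodBuilder(template)
            .editMetadata()
                .withName(name)
            .endMetadata()
            .build();
}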
public static Db use() {
return use(DSFactory.get());
} | @Test
public void pageTest2() throws SQLException {
String sql = "select * from user order by name";
// The test database holds 4 rows in total: page 0 returns 3 rows, page 1 returns 1 row
List<Entity> page0 = Db.use().page(
sql, Page.of(0, 3));
assertEquals(3, page0.size());
List<Entity> page1 = Db.use().page(
sql, Page.of(1, 3));
assertEquals(1, page1.size());
} |
@Override
public void setChannelStateWriter(ChannelStateWriter channelStateWriter) {
checkState(this.channelStateWriter == null, "Already initialized");
this.channelStateWriter = checkNotNull(channelStateWriter);
} | @TestTemplate
void testTimeoutAlignedToUnalignedBarrier() throws Exception {
PipelinedSubpartition subpartition = createSubpartition();
subpartition.setChannelStateWriter(ChannelStateWriter.NO_OP);
assertSubpartitionChannelStateFuturesAndQueuedBuffers(subpartition, null, true, 0, false);
// test without data buffer
testTimeoutWithNDataBuffers(0, subpartition, 7L);
// test with data buffer
testTimeoutWithNDataBuffers(1, subpartition, 8L);
} |
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof DeviceFragmentId)) {
return false;
}
DeviceFragmentId that = (DeviceFragmentId) obj;
return Objects.equals(this.deviceId, that.deviceId) &&
Objects.equals(this.providerId, that.providerId);
} | @Test
public final void testEquals() {
new EqualsTester()
.addEqualityGroup(new DeviceFragmentId(DID1, PID),
new DeviceFragmentId(DID1, PID))
.addEqualityGroup(new DeviceFragmentId(DID2, PID),
new DeviceFragmentId(DID2, PID))
.addEqualityGroup(new DeviceFragmentId(DID1, PIDA),
new DeviceFragmentId(DID1, PIDA))
.addEqualityGroup(new DeviceFragmentId(DID2, PIDA),
new DeviceFragmentId(DID2, PIDA))
.testEquals();
} |
public static GenericSchemaImpl of(SchemaInfo schemaInfo) {
return of(schemaInfo, true);
} | @Test
public void testGenericJsonSchema() {
Schema<Foo> encodeSchema = Schema.JSON(Foo.class);
GenericSchema decodeSchema = GenericSchemaImpl.of(encodeSchema.getSchemaInfo());
testEncodeAndDecodeGenericRecord(encodeSchema, decodeSchema);
} |
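// testEncodeAndDecodeGenericRecord is a helper not shown with this pair. A minimal sketch,
// assuming Foo has a String field named field1 (the field name is a placeholder, not taken
// from the source):
private void testEncodeAndDecodeGenericRecord(Schema<Foo> encodeSchema, GenericSchema decodeSchema) {
    Foo foo = new Foo();
    foo.setField1("foo");
    byte[] data = encodeSchema.encode(foo);
    GenericRecord record = decodeSchema.decode(data);
    assertEquals("foo", record.getField("field1"));
}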
@Override
public void printItem(Map<String, String> item) {
int cols = item.size();
if (rowPrinter.showHeader() && header) {
for (int row = 0; row < 2; row++) {
for (int col = 0; col < cols; col++) {
if (col > 0) {
shell.write(row == 0 ? "|" : "+");
}
if (row == 0) {
String format = "%-" + rowPrinter.columnWidth(col) + "s";
shell.write(String.format(format, rowPrinter.columnHeader(col)));
} else {
shell.write("-".repeat(rowPrinter.columnWidth(col)));
}
}
shell.writeln("");
}
header = false;
}
int[] colsWrap = new int[cols];
int remaining = cols;
do {
int i = 0;
for (Map.Entry<String, String> col : item.entrySet()) {
if (i > 0) {
shell.write("|");
}
String v = rowPrinter.formatColumn(i, col.getValue());
int width = rowPrinter.columnWidth(i);
String format = "%-" + width + "s";
if (i < 4) {
int offset = colsWrap[i];
if (offset < 0) {
// We've already printed the whole value
v = "";
} else {
int lf = v.indexOf("\n", offset) - offset;
if (lf < 0 || lf > width) {
// No LFs inside the range
if (v.length() - offset <= width) {
// The rest of the value fits
v = v.substring(offset);
colsWrap[i] = -1;
remaining--;
} else {
// Just print characters that fit skipping any that we've already printed
v = v.substring(offset, offset + width);
colsWrap[i] += width;
}
} else {
// LF inside the range, just print up to it
v = v.substring(offset, offset + lf);
colsWrap[i] += lf + 1;
}
}
shell.write(String.format(format, v));
} else {
if (colsWrap[i] == 0) {
shell.write(String.format(format, v));
colsWrap[i] = -1;
remaining--;
} else {
shell.write(" ".repeat(width));
}
}
i++;
}
shell.writeln("");
} while (remaining > 0);
} | @Test
public void testTableWrapping() throws IOException {
CacheEntryRowPrinter rowPrinter = new CacheEntryRowPrinter(WIDTH, COLUMNS);
AeshTestShell shell = new AeshTestShell();
TablePrettyPrinter t = new TablePrettyPrinter(shell, rowPrinter);
try (InputStream is = TablePrettyPrinter.class.getResourceAsStream("/printers/entries.json")) {
Iterator<Map<String, String>> it = new JsonReaderIterable(is).iterator();
t.printItem(it.next());
checkRow(rowPrinter, shell.getBuffer(), 17);
shell.clear();
t.printItem(it.next());
checkRow(rowPrinter, shell.getBuffer(), 15);
}
} |