focal_method | test_case |
---|---|
public NormalDistStats scaleBy(double v) {
double newMean = mean * v;
double shiftAmount = newMean - mean;
return new NormalDistStats(Math.max(0, mean + shiftAmount), stddev,
Math.max(0, min + shiftAmount), Math.max(0, max + shiftAmount));
} | @Test
public void scaleBy() {
NormalDistStats orig = new NormalDistStats(1.0, 0.5, 0.0, 2.0);
assertNDSEquals(orig, orig.scaleBy(1.0));
NormalDistStats expectedDouble = new NormalDistStats(2.0, 0.5, 1.0, 3.0);
assertNDSEquals(expectedDouble, orig.scaleBy(2.0));
NormalDistStats expectedHalf = new NormalDistStats(0.5, 0.5, 0.0, 1.5);
assertNDSEquals(expectedHalf, orig.scaleBy(0.5));
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
if(containerService.isContainer(folder)) {
final S3BucketCreateService service = new S3BucketCreateService(session);
service.create(folder, StringUtils.isBlank(status.getRegion()) ?
new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getDefault().getIdentifier() : status.getRegion());
return folder;
}
else {
final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
type.add(Path.Type.placeholder);
return new S3TouchFeature(session, acl).withWriter(writer).touch(folder
.withType(type), status
// Add placeholder object
.withMime(MIMETYPE)
.withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status)));
}
} | @Test
public void testCreatePlaceholderVersioningDeleteWithMarker() throws Exception {
final Path bucket = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final Path directory = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(test.getType().contains(Path.Type.placeholder));
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new S3VersionedObjectListService(session, acl).list(directory, new DisabledListProgressListener()).contains(test));
// Add delete marker
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(new Path(test).withAttributes(PathAttributes.EMPTY)), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new DefaultFindFeature(session).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
assertFalse(new S3FindFeature(session, acl).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
assertTrue(new DefaultFindFeature(session).find(test));
assertTrue(new S3FindFeature(session, acl).find(test));
} |
public void setMode(short mode) {
mMode = mode;
} | @Test
public void setMode() {
AccessControlList acl = new AccessControlList();
short mode = new Mode(Mode.Bits.EXECUTE, Mode.Bits.WRITE, Mode.Bits.READ).toShort();
acl.setMode(mode);
assertEquals(mode, acl.getMode());
} |
public static byte[] toByteArray(long value, int length) {
final byte[] buffer = ByteBuffer.allocate(8).putLong(value).array();
for (int i = 0; i < 8 - length; i++) {
if (buffer[i] != 0) {
throw new IllegalArgumentException(
"Value is does not fit into byte array " + (8 - i) + " > " + length);
}
}
return adjustLength(buffer, length);
} | @Test
public void toByteArrayBigIntegerShouldRemove() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("Value is does not fit into byte array 9 > 8");
assertArrayEquals(new byte[] { 2 }, ByteArrayUtils.toByteArray(BigInteger.valueOf(0x102L), 1));
} |
public void ipCheck(EidSession session, String clientIp) {
if (session.getClientIpAddress() != null && !session.getClientIpAddress().isEmpty()) {
String[] clientIps = clientIp.split(", ");
byte[] data = clientIps[0].concat(sourceIpSalt).getBytes(StandardCharsets.UTF_8);
String anonimizedIp = Base64.toBase64String(DigestUtils.digest("SHA256").digest(data));
if (!anonimizedIp.equals(session.getClientIpAddress())) {
String logMessage = String.format(
"Security exception: Browser and Desktop client IP doesn't match: %s expected: %s",
anonimizedIp, session.getClientIpAddress());
if (sourceIpCheck) {
throw new ClientException(logMessage);
} else {
logger.warn(logMessage);
}
}
}
} | @Test
public void testIpCheckCorrectIp() {
setIpCheck(true);
EidSession session = new EidSession();
session.setClientIpAddress("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
assertDoesNotThrow(() -> target.ipCheck(session, "SSSSSSSSSSSSSS"));
} |
@Override
public Entry next(Entry reuse) throws IOException {
// Ignore reuse, because each HeadStream has its own reuse BinaryRowData.
return next();
} | @Test
public void testMergeOfTenStreams() throws Exception {
List<MutableObjectIterator<BinaryRowData>> iterators = new ArrayList<>();
iterators.add(
newIterator(new int[] {1, 2, 17, 23, 23}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {2, 6, 7, 8, 9}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {4, 10, 11, 11, 12}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {3, 6, 7, 10, 12}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {7, 10, 15, 19, 44}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {6, 6, 11, 17, 18}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {1, 2, 4, 5, 10}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {5, 10, 19, 23, 29}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {9, 9, 9, 9, 9}, new String[] {"A", "B", "C", "D", "E"}));
iterators.add(
newIterator(new int[] {8, 8, 14, 14, 15}, new String[] {"A", "B", "C", "D", "E"}));
TypeComparator<Integer> comparator = new IntComparator(true);
MutableObjectIterator<BinaryRowData> iterator =
new BinaryMergeIterator<>(
iterators, reused(10), (o1, o2) -> this.comparator.compare(o1, o2));
BinaryRowData row = serializer.createInstance();
int pre = 0;
while ((row = iterator.next(row)) != null) {
assertThat(comparator.compare(row.getInt(0), pre)).isGreaterThanOrEqualTo(0);
pre = row.getInt(0);
}
} |
public void run(OutputReceiver<PartitionRecord> receiver) throws InvalidProtocolBufferException {
// Erase any existing missing partitions.
metadataTableDao.writeDetectNewPartitionMissingPartitions(new HashMap<>());
List<PartitionRecord> partitions = metadataTableDao.readAllStreamPartitions();
for (PartitionRecord partitionRecord : partitions) {
if (partitionRecord.getUuid().isEmpty()) {
partitionRecord.setUuid(UniqueIdGenerator.getNextId());
}
if (endTime != null) {
partitionRecord.setEndTime(endTime);
}
LOG.info("DNP: Outputting existing partition: {}", partitionRecord);
metrics.incListPartitionsCount();
receiver.outputWithTimestamp(partitionRecord, Instant.EPOCH);
}
List<NewPartition> newPartitions = metadataTableDao.readNewPartitionsIncludingDeleted();
for (NewPartition newPartition : newPartitions) {
processNewPartitionsAction.processNewPartition(newPartition, receiver);
}
} | @Test
public void testOutputExistingStreamPartitions() throws InvalidProtocolBufferException {
ByteStringRange partition1 = ByteStringRange.create("A", "B");
Instant watermark1 = Instant.now().minus(Duration.standardSeconds(10));
PartitionRecord partitionWithStartTime =
new PartitionRecord(partition1, watermark1, "1", watermark1, Collections.emptyList(), null);
metadataTableDao.lockAndRecordPartition(partitionWithStartTime);
ByteStringRange partition2 = ByteStringRange.create("B", "D");
ChangeStreamContinuationToken partition2Token1 =
ChangeStreamContinuationToken.create(ByteStringRange.create("B", "C"), "tokenBC");
ChangeStreamContinuationToken partition2Token2 =
ChangeStreamContinuationToken.create(ByteStringRange.create("C", "D"), "tokenCD");
Instant watermark2 = Instant.now().plus(Duration.standardMinutes(1));
PartitionRecord partitionWithInitialTokens =
new PartitionRecord(
partition2,
Arrays.asList(partition2Token1, partition2Token2),
"2",
watermark2,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(partitionWithInitialTokens);
ByteStringRange partition3 = ByteStringRange.create("D", "H");
Instant watermark3 = Instant.now();
PartitionRecord partitionWithContinuationToken =
new PartitionRecord(partition3, watermark3, "3", watermark3, Collections.emptyList(), null);
metadataTableDao.lockAndRecordPartition(partitionWithContinuationToken);
ChangeStreamContinuationToken partition3token =
ChangeStreamContinuationToken.create(ByteStringRange.create("D", "H"), "tokenDH");
metadataTableDao.updateWatermark(partition3, watermark3, partition3token);
ByteStringRange partition4 = ByteStringRange.create("H", "I");
ChangeStreamContinuationToken partition4token =
ChangeStreamContinuationToken.create(ByteStringRange.create("H", "I"), "tokenHI");
Instant watermark4 = Instant.now();
PartitionRecord unlockedPartition =
new PartitionRecord(
partition4,
Collections.singletonList(partition4token),
"4",
watermark4,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(unlockedPartition);
metadataTableDao.releaseStreamPartitionLockForDeletion(partition4, unlockedPartition.getUuid());
action.run(receiver);
verify(receiver, times(4))
.outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH));
List<PartitionRecord> actualPartitions = partitionRecordArgumentCaptor.getAllValues();
PartitionRecord actualPartition1 = actualPartitions.get(0);
assertEquals(partition1, actualPartition1.getPartition());
assertEquals(partitionWithStartTime.getUuid(), actualPartition1.getUuid());
assertNull(actualPartition1.getChangeStreamContinuationTokens());
assertEquals(partitionWithStartTime.getStartTime(), actualPartition1.getStartTime());
assertEquals(endTime, actualPartition1.getEndTime());
assertEquals(
partitionWithStartTime.getParentLowWatermark(), actualPartition1.getParentLowWatermark());
PartitionRecord actualPartition2 = actualPartitions.get(1);
assertEquals(partition2, actualPartition2.getPartition());
assertEquals(partitionWithInitialTokens.getUuid(), actualPartition2.getUuid());
assertNull(actualPartition2.getStartTime());
assertEquals(
partitionWithInitialTokens.getChangeStreamContinuationTokens(),
actualPartition2.getChangeStreamContinuationTokens());
assertEquals(endTime, actualPartition2.getEndTime());
assertEquals(
partitionWithInitialTokens.getParentLowWatermark(),
actualPartition2.getParentLowWatermark());
PartitionRecord actualPartition3 = actualPartitions.get(2);
assertEquals(partition3, actualPartition3.getPartition());
assertEquals(partitionWithContinuationToken.getUuid(), actualPartition3.getUuid());
assertNull(actualPartition3.getStartTime());
assertNotNull(actualPartition3.getChangeStreamContinuationTokens());
assertEquals(1, actualPartition3.getChangeStreamContinuationTokens().size());
assertEquals(partition3token, actualPartition3.getChangeStreamContinuationTokens().get(0));
assertEquals(endTime, actualPartition3.getEndTime());
assertEquals(
partitionWithContinuationToken.getParentLowWatermark(),
actualPartition3.getParentLowWatermark());
PartitionRecord actualPartition4 = actualPartitions.get(3);
assertEquals(partition4, actualPartition4.getPartition());
// partition4 was unlocked so it gets a new uuid.
assertNotEquals("4", actualPartition4.getUuid());
assertNull(actualPartition4.getStartTime());
assertNotNull(actualPartition4.getChangeStreamContinuationTokens());
assertEquals(1, actualPartition4.getChangeStreamContinuationTokens().size());
assertEquals(partition4token, actualPartition4.getChangeStreamContinuationTokens().get(0));
assertEquals(endTime, actualPartition4.getEndTime());
assertEquals(
unlockedPartition.getParentLowWatermark(), actualPartition4.getParentLowWatermark());
} |
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
List<ResourceRequest> oppResourceReqs,
ApplicationAttemptId applicationAttemptId,
OpportunisticContainerContext opportContext, long rmIdentifier,
String appSubmitter) throws YarnException {
// Update black list.
updateBlacklist(blackList, opportContext);
// Add OPPORTUNISTIC requests to the outstanding ones.
opportContext.addToOutstandingReqs(oppResourceReqs);
Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
Set<String> allocatedNodes = new HashSet<>();
List<Container> allocatedContainers = new ArrayList<>();
// Satisfy the outstanding OPPORTUNISTIC requests.
boolean continueLoop = true;
while (continueLoop) {
continueLoop = false;
List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
for (SchedulerRequestKey schedulerKey :
opportContext.getOutstandingOpReqs().descendingKeySet()) {
// Allocated containers :
// Key = Requested Capability,
// Value = List of Containers of given cap (the actual container size
// might be different than what is requested, which is why
// we need the requested capability (key) to match against
// the outstanding reqs)
int remAllocs = -1;
int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
if (maxAllocationsPerAMHeartbeat > 0) {
remAllocs =
maxAllocationsPerAMHeartbeat - allocatedContainers.size()
- getTotalAllocations(allocations);
if (remAllocs <= 0) {
LOG.info("Not allocating more containers as we have reached max "
+ "allocations per AM heartbeat {}",
maxAllocationsPerAMHeartbeat);
break;
}
}
Map<Resource, List<Allocation>> allocation = allocate(
rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
if (allocation.size() > 0) {
allocations.add(allocation);
continueLoop = true;
}
}
matchAllocation(allocations, allocatedContainers, opportContext);
}
return allocatedContainers;
} | @Test
public void testMaxAllocationsPerAMHeartbeatDifferentSchedKey()
throws Exception {
ResourceBlacklistRequest blacklistRequest =
ResourceBlacklistRequest.newInstance(
new ArrayList<>(), new ArrayList<>());
allocator.setMaxAllocationsPerAMHeartbeat(2);
final ExecutionTypeRequest oppRequest = ExecutionTypeRequest.newInstance(
ExecutionType.OPPORTUNISTIC, true);
List<ResourceRequest> reqs =
Arrays.asList(
ResourceRequest.newInstance(Priority.newInstance(1), "*",
CAPABILITY_1GB, 1, true, null, OPPORTUNISTIC_REQ),
ResourceRequest.newInstance(Priority.newInstance(2), "h6",
CAPABILITY_1GB, 2, true, null, OPPORTUNISTIC_REQ),
ResourceRequest.newInstance(Priority.newInstance(3), "/r3",
CAPABILITY_1GB, 2, true, null, OPPORTUNISTIC_REQ));
ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0L, 1), 1);
oppCntxt.updateNodeList(
Arrays.asList(
RemoteNode.newInstance(
NodeId.newInstance("h3", 1234), "h3:1234", "/r2"),
RemoteNode.newInstance(
NodeId.newInstance("h2", 1234), "h2:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h5", 1234), "h5:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h4", 1234), "h4:1234", "/r2")));
List<Container> containers = allocator.allocateContainers(
blacklistRequest, reqs, appAttId, oppCntxt, 1L, "user1");
LOG.info("Containers: {}", containers);
// Although capacity is present, only 2 containers should be allocated
// as max allocation per AM heartbeat is set to 2.
Assert.assertEquals(2, containers.size());
containers = allocator.allocateContainers(
blacklistRequest, new ArrayList<>(), appAttId, oppCntxt, 1L, "user1");
LOG.info("Containers: {}", containers);
// 2 more containers should be allocated from pending allocation requests.
Assert.assertEquals(2, containers.size());
containers = allocator.allocateContainers(
blacklistRequest, new ArrayList<>(), appAttId, oppCntxt, 1L, "user1");
LOG.info("Containers: {}", containers);
// Remaining 1 container should be allocated.
Assert.assertEquals(1, containers.size());
} |
public void removeProblemsOfType(String type) {
removeIf(problem -> type.equals(problem.type));
} | @Test
void testRemoveProblemsOfType() {
problems.addProblem(new CpuAllocationIrregularityProblem(new ArrayList<>()));
assertThat(problems).hasSize(1);
problems.removeProblemsOfType(CpuAllocationIrregularityProblem.PROBLEM_TYPE);
assertThat(problems).isEmpty();
} |
@Override
public ObjectNode encode(MaintenanceDomain md, CodecContext context) {
checkNotNull(md, "Maintenance Domain cannot be null");
ObjectNode result = context.mapper().createObjectNode()
.put(MD_NAME, md.mdId().toString())
.put(MD_NAME_TYPE, md.mdId().nameType().name())
.put(MD_LEVEL, md.mdLevel().name());
if (md.mdNumericId() > 0) {
result = result.put(MD_NUMERIC_ID, md.mdNumericId());
}
result.set("maList",
new MaintenanceAssociationCodec()
.encode(md.maintenanceAssociationList(), context));
return result;
} | @Test
public void testEncodeMd2() throws CfmConfigException {
MaintenanceDomain md2 = DefaultMaintenanceDomain.builder(MDID2_DOMAIN)
.mdLevel(MaintenanceDomain.MdLevel.LEVEL2).build();
ObjectNode node = mapper.createObjectNode();
node.set("md", context.codec(MaintenanceDomain.class).encode(md2, context));
assertEquals("{\"md\":{" +
"\"mdName\":\"test.opennetworking.org\"," +
"\"mdNameType\":\"DOMAINNAME\"," +
"\"mdLevel\":\"LEVEL2\"," +
"\"maList\":[]}}", node.toString());
} |
protected CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageFromRemoteAsync(String topic, long offset, int queueId, String brokerName) {
try {
String brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false);
if (null == brokerAddr) {
this.brokerController.getTopicRouteInfoManager().updateTopicRouteInfoFromNameServer(topic, true, false);
brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false);
if (null == brokerAddr) {
LOG.warn("can't find broker address for topic {}, {}", topic, brokerName);
return CompletableFuture.completedFuture(Triple.of(null, "brokerAddress not found", true)); // maybe offline temporarily, so need retry
}
}
return this.brokerController.getBrokerOuterAPI().pullMessageFromSpecificBrokerAsync(brokerName,
brokerAddr, this.innerConsumerGroupName, topic, queueId, offset, 1, DEFAULT_PULL_TIMEOUT_MILLIS)
.thenApply(pullResult -> {
if (pullResult.getLeft() != null
&& PullStatus.FOUND.equals(pullResult.getLeft().getPullStatus())
&& CollectionUtils.isNotEmpty(pullResult.getLeft().getMsgFoundList())) {
return Triple.of(pullResult.getLeft().getMsgFoundList().get(0), "", false);
}
return Triple.of(null, pullResult.getMiddle(), pullResult.getRight());
});
} catch (Exception e) {
LOG.error("Get message from remote failed. {}, {}, {}, {}", topic, offset, queueId, brokerName, e);
}
return CompletableFuture.completedFuture(Triple.of(null, "Get message from remote failed", true)); // need retry
} | @Test
public void getMessageFromRemoteAsyncTest_message_notFound() throws Exception {
PullResult pullResult = new PullResult(PullStatus.NO_MATCHED_MSG, 1, 1, 1, null);
when(brokerOuterAPI.pullMessageFromSpecificBrokerAsync(anyString(), anyString(), anyString(), anyString(), anyInt(), anyLong(), anyInt(), anyLong()))
.thenReturn(CompletableFuture.completedFuture(Triple.of(pullResult, "no msg", false)));
Triple<MessageExt, String, Boolean> rst = escapeBridge.getMessageFromRemoteAsync(TEST_TOPIC, 1, DEFAULT_QUEUE_ID, BROKER_NAME).join();
Assert.assertNull(rst.getLeft());
Assert.assertEquals("no msg", rst.getMiddle());
Assert.assertFalse(rst.getRight()); // no retry
when(brokerOuterAPI.pullMessageFromSpecificBrokerAsync(anyString(), anyString(), anyString(), anyString(), anyInt(), anyLong(), anyInt(), anyLong()))
.thenReturn(CompletableFuture.completedFuture(Triple.of(null, "other resp code", true)));
rst = escapeBridge.getMessageFromRemoteAsync(TEST_TOPIC, 1, DEFAULT_QUEUE_ID, BROKER_NAME).join();
Assert.assertNull(rst.getLeft());
Assert.assertEquals("other resp code", rst.getMiddle());
Assert.assertTrue(rst.getRight()); // need retry
} |
public static byte[] toBytes(String input) {
if (input == null) {
return EMPTY;
}
return input.getBytes(StandardCharsets.UTF_8);
} | @Test
void stringToByte() {
byte[] bytes = ByteUtils.toBytes("google");
assertNotNull(bytes);
} |
public Group getGroup(JID jid) throws GroupNotFoundException {
JID groupJID = GroupJID.fromJID(jid);
return (groupJID instanceof GroupJID) ? getGroup(((GroupJID)groupJID).getGroupName()) : null;
} | @Test
public void willUseACacheHit() throws Exception {
groupCache.put(GROUP_NAME, CacheableOptional.of(cachedGroup));
final Group returnedGroup = groupManager.getGroup(GROUP_NAME, false);
assertThat(returnedGroup, is(cachedGroup));
verifyNoMoreInteractions(groupProvider);
} |
@Override
public GrokPattern save(GrokPattern pattern) throws ValidationException {
try {
if (!validate(pattern)) {
throw new ValidationException("Pattern " + pattern.name() + " invalid.");
}
} catch (GrokException | PatternSyntaxException e) {
throw new ValidationException("Invalid pattern " + pattern + "\n" + e.getMessage());
}
GrokPattern toSave;
if (pattern.id() == null) {
toSave = pattern.toBuilder().id(createId()).build();
} else {
toSave = pattern;
}
store.put(toSave.id(), toSave);
clusterBus.post(GrokPatternsUpdatedEvent.create(ImmutableSet.of(toSave.name())));
return toSave;
} | @Test
public void save() throws Exception {
// new pattern
final GrokPattern pattern = service.save(GrokPattern.create("NEW", ".*"));
assertThat(pattern).isNotNull();
assertThat(pattern.id()).isNotEmpty();
// check that updating works
final GrokPattern updated = service.save(pattern.toBuilder().name("OTHERNAME").build());
final GrokPattern loaded = service.load(pattern.id());
assertThat(loaded).isEqualTo(updated);
//check that using stored patterns works
final GrokPattern newPattern = service.save(GrokPattern.create("NEWONE", "%{OTHERNAME}"));
final GrokPattern newLoaded = service.load(newPattern.id());
assertThat(newLoaded).isEqualTo(newPattern);
// save should validate
try {
service.save(GrokPattern.create("INVALID", "*"));
fail("Should throw ValidationException");
} catch (ValidationException ignored) {
}
} |
public static String format(double amount, boolean isUseTraditional) {
return format(amount, isUseTraditional, false);
} | @Test
public void formatTraditionalTest() {
String f1 = NumberChineseFormatter.format(10889.72356, true);
assertEquals("壹万零捌佰捌拾玖点柒贰", f1);
f1 = NumberChineseFormatter.format(12653, true);
assertEquals("壹万贰仟陆佰伍拾叁", f1);
f1 = NumberChineseFormatter.format(215.6387, true);
assertEquals("贰佰壹拾伍点陆肆", f1);
f1 = NumberChineseFormatter.format(1024, true);
assertEquals("壹仟零贰拾肆", f1);
f1 = NumberChineseFormatter.format(100350089, true);
assertEquals("壹亿零叁拾伍万零捌拾玖", f1);
f1 = NumberChineseFormatter.format(1200, true);
assertEquals("壹仟贰佰", f1);
f1 = NumberChineseFormatter.format(12, true);
assertEquals("壹拾贰", f1);
f1 = NumberChineseFormatter.format(0.05, true);
assertEquals("零点零伍", f1);
} |
public void write(final ConsumerRecord<byte[], byte[]> record) throws IOException {
if (!writable) {
throw new IOException("Write permission denied.");
}
final File dirty = dirty(file);
final File tmp = tmp(file);
// first write to the dirty copy
appendRecordToFile(record, dirty, filesystem);
// atomically rename the dirty copy to the "live" copy while copying the live copy to
// the "dirty" copy via a temporary hard link
Files.createLink(tmp.toPath(), file.toPath());
Files.move(
dirty.toPath(),
file.toPath(),
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.ATOMIC_MOVE
);
Files.move(tmp.toPath(), dirty.toPath());
// keep the dirty copy in sync with the live copy, which now has the write
appendRecordToFile(record, dirty, filesystem);
} | @Test
public void shouldWriteRecord() throws IOException {
// Given
final ConsumerRecord<byte[], byte[]> record = newStreamRecord("stream1");
// When
replayFile.write(record);
// Then
final List<String> commands = Files.readAllLines(internalReplayFile.toPath());
assertThat(commands.size(), is(1));
assertThat(commands.get(0), is(
"\"stream/stream1/create\"" + KEY_VALUE_SEPARATOR
+ "{\"statement\":\"CREATE STREAM stream1 (id INT) WITH (kafka_topic='stream1')\""
+ ",\"streamsProperties\":{},\"originalProperties\":{},\"plan\":null}"
));
} |
@Override
public Object getServiceDetail(String namespaceId, String groupName, String serviceName) throws NacosException {
Service service = Service.newService(namespaceId, groupName, serviceName);
if (!ServiceManager.getInstance().containSingleton(service)) {
throw new NacosException(NacosException.NOT_FOUND,
String.format("service %s@@%s is not found!", groupName, serviceName));
}
Optional<ServiceMetadata> metadata = metadataManager.getServiceMetadata(service);
ServiceMetadata detailedService = metadata.orElseGet(ServiceMetadata::new);
ObjectNode serviceObject = JacksonUtils.createEmptyJsonNode();
serviceObject.put(FieldsConstants.NAME, serviceName);
serviceObject.put(FieldsConstants.GROUP_NAME, groupName);
serviceObject.put(FieldsConstants.PROTECT_THRESHOLD, detailedService.getProtectThreshold());
serviceObject.replace(FieldsConstants.SELECTOR, JacksonUtils.transferToJsonNode(detailedService.getSelector()));
serviceObject.replace(FieldsConstants.METADATA,
JacksonUtils.transferToJsonNode(detailedService.getExtendData()));
ObjectNode detailView = JacksonUtils.createEmptyJsonNode();
detailView.replace(FieldsConstants.SERVICE, serviceObject);
List<com.alibaba.nacos.api.naming.pojo.Cluster> clusters = new ArrayList<>();
for (String each : serviceStorage.getClusters(service)) {
ClusterMetadata clusterMetadata =
detailedService.getClusters().containsKey(each) ? detailedService.getClusters().get(each)
: new ClusterMetadata();
com.alibaba.nacos.api.naming.pojo.Cluster clusterView = new Cluster();
clusterView.setName(each);
clusterView.setHealthChecker(clusterMetadata.getHealthChecker());
clusterView.setMetadata(clusterMetadata.getExtendData());
clusterView.setUseIPPort4Check(clusterMetadata.isUseInstancePortForCheck());
clusterView.setDefaultPort(DEFAULT_PORT);
clusterView.setDefaultCheckPort(clusterMetadata.getHealthyCheckPort());
clusterView.setServiceName(service.getGroupedServiceName());
clusters.add(clusterView);
}
detailView.replace(FieldsConstants.CLUSTERS, JacksonUtils.transferToJsonNode(clusters));
return detailView;
} | @Test
void testGetServiceDetailNonExist() throws NacosException {
assertThrows(NacosException.class, () -> {
catalogServiceV2Impl.getServiceDetail("A", "BB", "CC");
});
} |
@Override
public Long createFileConfig(FileConfigSaveReqVO createReqVO) {
FileConfigDO fileConfig = FileConfigConvert.INSTANCE.convert(createReqVO)
.setConfig(parseClientConfig(createReqVO.getStorage(), createReqVO.getConfig()))
.setMaster(false); // not master by default
fileConfigMapper.insert(fileConfig);
return fileConfig.getId();
} | @Test
public void testCreateFileConfig_success() {
// Prepare parameters
Map<String, Object> config = MapUtil.<String, Object>builder().put("basePath", "/yunai")
.put("domain", "https://www.iocoder.cn").build();
FileConfigSaveReqVO reqVO = randomPojo(FileConfigSaveReqVO.class,
o -> o.setStorage(FileStorageEnum.LOCAL.getStorage()).setConfig(config))
.setId(null); // avoid the id being pre-assigned
// Invoke
Long fileConfigId = fileConfigService.createFileConfig(reqVO);
// Assert
assertNotNull(fileConfigId);
// Verify that the persisted record's attributes are correct
FileConfigDO fileConfig = fileConfigMapper.selectById(fileConfigId);
assertPojoEquals(reqVO, fileConfig, "id", "config");
assertFalse(fileConfig.getMaster());
assertEquals("/yunai", ((LocalFileClientConfig) fileConfig.getConfig()).getBasePath());
assertEquals("https://www.iocoder.cn", ((LocalFileClientConfig) fileConfig.getConfig()).getDomain());
// Verify the cache
assertNull(fileConfigService.getClientCache().getIfPresent(fileConfigId));
} |
public Set<String> filesInDirectory(File dir) throws IOException {
Set<String> fileList = new HashSet<>();
Path dirPath = Paths.get(dir.getPath());
if (!Files.exists(dirPath)) {
return Collections.emptySet();
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(dirPath)) {
for (Path path : stream) {
if (!Files.isDirectory(path)) {
fileList.add(path.toString());
}
}
}
return fileList;
} | @Test
public void testFilesInDirectory() throws IOException {
String content = "x y z";
Path path = Paths.get(temporaryDirectory.getPath(), FOOBAR);
File letters = path.toFile();
Files.write(letters.toPath(), Collections.singletonList(content));
path = Paths.get(temporaryDirectory.getPath(), FOOBAR_HTML);
File letters2 = path.toFile();
Files.write(letters2.toPath(), Collections.singletonList(content));
Set<String> result = fileUtil.filesInDirectory(temporaryDirectory);
assertAll(
() -> assertEquals(2, result.size()),
() -> assertTrue(result.contains(letters.getPath())),
() -> assertTrue(result.contains(letters2.getPath())));
} |
public static Optional<Expression> convert(
org.apache.flink.table.expressions.Expression flinkExpression) {
if (!(flinkExpression instanceof CallExpression)) {
return Optional.empty();
}
CallExpression call = (CallExpression) flinkExpression;
Operation op = FILTERS.get(call.getFunctionDefinition());
if (op != null) {
switch (op) {
case IS_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::isNull);
case NOT_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::notNull);
case LT:
return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
case LT_EQ:
return convertFieldAndLiteral(
Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
case GT:
return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
case GT_EQ:
return convertFieldAndLiteral(
Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
case EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.isNaN(ref);
} else {
return Expressions.equal(ref, lit);
}
},
call);
case NOT_EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.notNaN(ref);
} else {
return Expressions.notEqual(ref, lit);
}
},
call);
case NOT:
return onlyChildAs(call, CallExpression.class)
.flatMap(FlinkFilters::convert)
.map(Expressions::not);
case AND:
return convertLogicExpression(Expressions::and, call);
case OR:
return convertLogicExpression(Expressions::or, call);
case STARTS_WITH:
return convertLike(call);
}
}
return Optional.empty();
} | @Test
public void testEqualsNaN() {
UnboundPredicate<Float> expected = org.apache.iceberg.expressions.Expressions.isNaN("field3");
Optional<org.apache.iceberg.expressions.Expression> actual =
FlinkFilters.convert(resolve(Expressions.$("field3").isEqual(Expressions.lit(Float.NaN))));
assertThat(actual).isPresent();
assertPredicatesMatch(expected, actual.get());
Optional<org.apache.iceberg.expressions.Expression> actual1 =
FlinkFilters.convert(resolve(Expressions.lit(Float.NaN).isEqual(Expressions.$("field3"))));
assertThat(actual1).isPresent();
assertPredicatesMatch(expected, actual1.get());
} |
public Span nextSpan(Message message) {
TraceContextOrSamplingFlags extracted =
extractAndClearTraceIdProperties(processorExtractor, message, message);
Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
// When an upstream context was not present, lookup keys are unlikely added
if (extracted.context() == null && !result.isNoop()) {
// simplify code by re-using an existing MessagingRequest impl
tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
}
return result;
} | @Test void nextSpan_should_tag_queue_when_no_incoming_context() throws JMSException {
message.setJMSDestination(createDestination("foo", TYPE.QUEUE));
jmsTracing.nextSpan(message).start().finish();
assertThat(testSpanHandler.takeLocalSpan().tags())
.containsOnly(entry("jms.queue", "foo"));
} |
@POST
@Path("/generate_regex")
@Timed
@ApiOperation(value = "Generates a regex that can be used as a value for a whitelist entry.")
@NoAuditEvent("Utility function only.")
@Consumes(MediaType.APPLICATION_JSON)
public WhitelistRegexGenerationResponse generateRegex(@ApiParam(name = "JSON body", required = true)
@Valid @NotNull final WhitelistRegexGenerationRequest generationRequest) {
final String regex;
if (generationRequest.placeholder() == null) {
regex = regexHelper.createRegexForUrl(generationRequest.urlTemplate());
} else {
regex = regexHelper.createRegexForUrlTemplate(generationRequest.urlTemplate(),
generationRequest.placeholder());
}
return WhitelistRegexGenerationResponse.create(regex);
} | @Test
public void generateRegexForTemplate() {
final WhitelistRegexGenerationRequest request =
WhitelistRegexGenerationRequest.create("https://example.com/api/lookup?key=${key}", "${key}");
final WhitelistRegexGenerationResponse response = urlWhitelistResource.generateRegex(request);
assertThat(response.regex()).isNotBlank();
} |
@PostMapping(value = "/localCache")
@Secured(resource = Constants.OPS_CONTROLLER_PATH, action = ActionTypes.WRITE, signType = SignType.CONSOLE)
public String updateLocalCacheFromStore() {
LOGGER.info("start to dump all data from store.");
dumpService.dumpAll();
LOGGER.info("finish to dump all data from store.");
return HttpServletResponse.SC_OK + "";
} | @Test
void testUpdateLocalCacheFromStore() throws Exception {
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.post(Constants.OPS_CONTROLLER_PATH + "/localCache");
int actualValue = mockMvc.perform(builder).andReturn().getResponse().getStatus();
assertEquals(200, actualValue);
} |
@Override
public JCExpression inline(Inliner inliner) throws CouldNotResolveImportException {
JCExpression expression = getExpression().inline(inliner);
if (expression.toString().equals(CONVERT_TO_IDENT)) {
return inliner.maker().Ident(getIdentifier().inline(inliner));
}
// TODO(lowasser): consider inlining this.foo() as foo()
return inliner.maker().Select(getExpression().inline(inliner), getIdentifier().inline(inliner));
} | @Test
public void inline() {
ULiteral fooLit = ULiteral.stringLit("foo");
UType type = mock(UType.class);
UMemberSelect memberSelect = UMemberSelect.create(fooLit, "length", type);
assertInlines("\"foo\".length", memberSelect);
} |
@Override
public ImagesAndRegistryClient call()
throws IOException, RegistryException, LayerPropertyNotFoundException,
LayerCountMismatchException, BadContainerConfigurationFormatException,
CacheCorruptedException, CredentialRetrievalException {
EventHandlers eventHandlers = buildContext.getEventHandlers();
try (ProgressEventDispatcher progressDispatcher =
progressDispatcherFactory.create("pulling base image manifest", 4);
TimerEventDispatcher ignored1 = new TimerEventDispatcher(eventHandlers, DESCRIPTION)) {
// Skip this step if this is a scratch image
ImageReference imageReference = buildContext.getBaseImageConfiguration().getImage();
if (imageReference.isScratch()) {
Set<Platform> platforms = buildContext.getContainerConfiguration().getPlatforms();
Verify.verify(!platforms.isEmpty());
eventHandlers.dispatch(LogEvent.progress("Getting scratch base image..."));
ImmutableList.Builder<Image> images = ImmutableList.builder();
for (Platform platform : platforms) {
Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
imageBuilder.setArchitecture(platform.getArchitecture()).setOs(platform.getOs());
images.add(imageBuilder.build());
}
return new ImagesAndRegistryClient(images.build(), null);
}
eventHandlers.dispatch(
LogEvent.progress("Getting manifest for base image " + imageReference + "..."));
if (buildContext.isOffline()) {
List<Image> images = getCachedBaseImages();
if (!images.isEmpty()) {
return new ImagesAndRegistryClient(images, null);
}
throw new IOException(
"Cannot run Jib in offline mode; " + imageReference + " not found in local Jib cache");
} else if (imageReference.getDigest().isPresent()) {
List<Image> images = getCachedBaseImages();
if (!images.isEmpty()) {
RegistryClient noAuthRegistryClient =
buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
// TODO: passing noAuthRegistryClient may be problematic. It may return 401 unauthorized
// if layers have to be downloaded.
// https://github.com/GoogleContainerTools/jib/issues/2220
return new ImagesAndRegistryClient(images, noAuthRegistryClient);
}
}
Optional<ImagesAndRegistryClient> mirrorPull =
tryMirrors(buildContext, progressDispatcher.newChildProducer());
if (mirrorPull.isPresent()) {
return mirrorPull.get();
}
try {
// First, try with no credentials. This works with public GCR images (but not Docker Hub).
// TODO: investigate if we should just pass credentials up front. However, this involves
// some risk. https://github.com/GoogleContainerTools/jib/pull/2200#discussion_r359069026
// contains some related discussions.
RegistryClient noAuthRegistryClient =
buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
return new ImagesAndRegistryClient(
pullBaseImages(noAuthRegistryClient, progressDispatcher.newChildProducer()),
noAuthRegistryClient);
} catch (RegistryUnauthorizedException ex) {
eventHandlers.dispatch(
LogEvent.lifecycle(
"The base image requires auth. Trying again for " + imageReference + "..."));
Credential credential =
RegistryCredentialRetriever.getBaseImageCredential(buildContext).orElse(null);
RegistryClient registryClient =
buildContext
.newBaseImageRegistryClientFactory()
.setCredential(credential)
.newRegistryClient();
String wwwAuthenticate = ex.getHttpResponseException().getHeaders().getAuthenticate();
if (wwwAuthenticate != null) {
eventHandlers.dispatch(
LogEvent.debug("WWW-Authenticate for " + imageReference + ": " + wwwAuthenticate));
registryClient.authPullByWwwAuthenticate(wwwAuthenticate);
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
} else {
// Not getting WWW-Authenticate is unexpected in practice, and we may just blame the
// server and fail. However, to keep some old behavior, try a few things as a last resort.
// TODO: consider removing this fallback branch.
if (credential != null && !credential.isOAuth2RefreshToken()) {
eventHandlers.dispatch(
LogEvent.debug("Trying basic auth as fallback for " + imageReference + "..."));
registryClient.configureBasicAuth();
try {
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
} catch (RegistryUnauthorizedException ignored) {
// Fall back to try bearer auth.
}
}
eventHandlers.dispatch(
LogEvent.debug("Trying bearer auth as fallback for " + imageReference + "..."));
registryClient.doPullBearerAuth();
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
}
}
}
} | @Test
public void testCall_ManifestList()
throws InvalidImageReferenceException, IOException, RegistryException,
LayerPropertyNotFoundException, LayerCountMismatchException,
BadContainerConfigurationFormatException, CacheCorruptedException,
CredentialRetrievalException {
Mockito.when(buildContext.getBaseImageConfiguration())
.thenReturn(ImageConfiguration.builder(ImageReference.parse("multiarch")).build());
Mockito.when(buildContext.getRegistryMirrors())
.thenReturn(ImmutableListMultimap.of("registry", "gcr.io"));
Mockito.when(containerConfig.getPlatforms())
.thenReturn(ImmutableSet.of(new Platform("amd64", "linux")));
RegistryClient.Factory dockerHubRegistryClientFactory =
setUpWorkingRegistryClientFactoryWithV22ManifestList();
Mockito.when(buildContext.newBaseImageRegistryClientFactory())
.thenReturn(dockerHubRegistryClientFactory);
ImagesAndRegistryClient result = pullBaseImageStep.call();
Assert.assertEquals(V22ManifestTemplate.class, result.images.get(0).getImageFormat());
Assert.assertEquals("linux", result.images.get(0).getOs());
Assert.assertEquals("amd64", result.images.get(0).getArchitecture());
} |
@Override
public boolean addAggrConfigInfo(final String dataId, final String group, String tenant, final String datumId,
String appName, final String content) {
String appNameTmp = StringUtils.isBlank(appName) ? StringUtils.EMPTY : appName;
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
String contentTmp = StringUtils.isBlank(content) ? StringUtils.EMPTY : content;
final Timestamp now = new Timestamp(System.currentTimeMillis());
ConfigInfoAggrMapper configInfoAggrMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_AGGR);
final String select = configInfoAggrMapper.select(Collections.singletonList("content"),
Arrays.asList("data_id", "group_id", "tenant_id", "datum_id"));
final String insert = configInfoAggrMapper.insert(
Arrays.asList("data_id", "group_id", "tenant_id", "datum_id", "app_name", "content", "gmt_modified"));
final String update = configInfoAggrMapper.update(Arrays.asList("content", "gmt_modified"),
Arrays.asList("data_id", "group_id", "tenant_id", "datum_id"));
String dbContent = databaseOperate.queryOne(select, new Object[] {dataId, group, tenantTmp, datumId},
String.class);
if (Objects.isNull(dbContent)) {
final Object[] args = new Object[] {dataId, group, tenantTmp, datumId, appNameTmp, contentTmp, now};
EmbeddedStorageContextHolder.addSqlContext(insert, args);
} else if (!dbContent.equals(content)) {
final Object[] args = new Object[] {contentTmp, now, dataId, group, tenantTmp, datumId};
EmbeddedStorageContextHolder.addSqlContext(update, args);
}
try {
boolean result = databaseOperate.update(EmbeddedStorageContextHolder.getCurrentSqlContext());
if (!result) {
throw new NacosConfigException("[Merge] Configuration release failed");
}
return true;
} finally {
EmbeddedStorageContextHolder.cleanAllContext();
}
} | @Test
void testAddAggrConfigInfoOfEqualContent() {
String dataId = "dataId111";
String group = "group";
String tenant = "tenant";
String datumId = "datumId";
String appName = "appname1234";
String content = "content1234";
//mock the query by datumId to return content equal to the current content param.
String existContent = "content1234";
Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant, datumId}), eq(String.class)))
.thenReturn(existContent);
//mock insert success
Mockito.when(databaseOperate.update(any(List.class))).thenReturn(true);
boolean result = embededConfigInfoAggrPersistService.addAggrConfigInfo(dataId, group, tenant, datumId, appName, content);
assertTrue(result);
} |
TrashPolicy getTrashPolicy() {
return trashPolicy;
} | @Test
public void testPluggableTrash() throws IOException {
Configuration conf = new Configuration();
// Test plugged TrashPolicy
conf.setClass("fs.trash.classname", TestTrashPolicy.class, TrashPolicy.class);
Trash trash = new Trash(conf);
assertTrue(trash.getTrashPolicy().getClass().equals(TestTrashPolicy.class));
} |
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
CouchbaseEndpoint endpoint = new CouchbaseEndpoint(uri, remaining, this);
setProperties(endpoint, parameters);
return endpoint;
} | @Test
public void testCouchbaseURI() throws Exception {
Map<String, Object> params = new HashMap<>();
params.put("bucket", "bucket");
String uri = "couchbase:http://localhost";
String remaining = "http://localhost";
CouchbaseEndpoint endpoint = new CouchbaseComponent(context).createEndpoint(uri, remaining, params);
assertEquals(new URI("http://localhost:8091/pools"), endpoint.makeBootstrapURI()[0]);
} |
@Override
public CompletableFuture<StreamedQueryResult> continueFromLastContinuationToken() {
if (!this.hasContinuationToken()) {
throw new KsqlClientException(
"Can only continue queries that have saved a continuation token.");
}
return this.client.streamQuery(sql, properties);
} | @Test
public void shouldThrowOnContinueIfNoContinuationToken() {
// When
final Exception e = assertThrows(
KsqlClientException.class,
() -> queryResult.continueFromLastContinuationToken().get()
);
// Then
assertThat(e.getMessage(), containsString("Can only continue queries that have saved a continuation token."));
} |
@Override
public void run() {
try {
load();
if (!checkCompleted()) {
GlobalExecutor.submitLoadDataTask(this, distroConfig.getLoadDataRetryDelayMillis());
} else {
loadCallback.onSuccess();
Loggers.DISTRO.info("[DISTRO-INIT] load snapshot data success");
}
} catch (Exception e) {
loadCallback.onFailed(e);
Loggers.DISTRO.error("[DISTRO-INIT] load snapshot data failed. ", e);
}
} | @Test
void testRun() {
distroLoadDataTask.run();
Map<String, Boolean> loadCompletedMap = (Map<String, Boolean>) ReflectionTestUtils.getField(distroLoadDataTask, "loadCompletedMap");
assertNotNull(loadCompletedMap);
assertTrue(loadCompletedMap.containsKey(type));
verify(distroTransportAgent).getDatumSnapshot(any(String.class));
} |
@Override
@CacheEvict(cacheNames = RedisKeyConstants.OAUTH_CLIENT,
allEntries = true) // allEntries evicts everything, because id is not the direct cache key and cannot be evicted selectively
public void deleteOAuth2Client(Long id) {
// Verify existence
validateOAuth2ClientExists(id);
// Delete
oauth2ClientMapper.deleteById(id);
} | @Test
public void testDeleteOAuth2Client_success() {
// Mock data
OAuth2ClientDO dbOAuth2Client = randomPojo(OAuth2ClientDO.class);
oauth2ClientMapper.insert(dbOAuth2Client);// @Sql: insert an existing record first
// Prepare parameters
Long id = dbOAuth2Client.getId();
// Invoke
oauth2ClientService.deleteOAuth2Client(id);
// Verify that the record no longer exists
assertNull(oauth2ClientMapper.selectById(id));
} |
public void init() throws ServerException {
if (status != Status.UNDEF) {
throw new IllegalStateException("Server already initialized");
}
status = Status.BOOTING;
verifyDir(homeDir);
verifyDir(tempDir);
Properties serverInfo = new Properties();
try {
InputStream is = getResource(name + ".properties");
serverInfo.load(is);
is.close();
} catch (IOException ex) {
throw new RuntimeException("Could not load server information file: " + name + ".properties");
}
initLog();
log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
log.info("Server [{}] starting", name);
log.info(" Built information:");
log.info(" Version : {}", serverInfo.getProperty(name + ".version", "undef"));
log.info(" Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef"));
log.info(" Source Revision : {}", serverInfo.getProperty(name + ".source.revision", "undef"));
log.info(" Built by : {}", serverInfo.getProperty(name + ".build.username", "undef"));
log.info(" Built timestamp : {}", serverInfo.getProperty(name + ".build.timestamp", "undef"));
log.info(" Runtime information:");
log.info(" Home dir: {}", homeDir);
log.info(" Config dir: {}", (config == null) ? configDir : "-");
log.info(" Log dir: {}", logDir);
log.info(" Temp dir: {}", tempDir);
initConfig();
log.debug("Loading services");
List<Service> list = loadServices();
try {
log.debug("Initializing services");
initServices(list);
log.info("Services initialized");
} catch (ServerException ex) {
log.error("Services initialization failure, destroying initialized services");
destroyServices();
throw ex;
}
Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
setStatus(status);
log.info("Server [{}] started!, status [{}]", name, status);
} | @Test
@TestException(exception = ServerException.class, msgRegExp = "S05.*")
@TestDir
public void siteFileNotAFile() throws Exception {
String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
File siteFile = new File(homeDir, "server-site.xml");
assertTrue(siteFile.mkdir());
Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
server.init();
} |
@Override
public final boolean add(E newElement) {
if (newElement == null) {
return false;
}
if (newElement.prev() != INVALID_INDEX || newElement.next() != INVALID_INDEX) {
return false;
}
if ((size + 1) >= elements.length / 2) {
changeCapacity(calculateCapacity(elements.length));
}
int slot = addInternal(newElement, elements);
if (slot >= 0) {
addToListTail(head, elements, slot);
size++;
return true;
}
return false;
} | @Test
public void testNullForbidden() {
ImplicitLinkedHashMultiCollection<TestElement> multiColl = new ImplicitLinkedHashMultiCollection<>();
assertFalse(multiColl.add(null));
} |
@Transactional(readOnly = true)
public AuthFindDto.FindUsernameRes findUsername(String phone) {
User user = readGeneralSignUpUser(phone);
return AuthFindDto.FindUsernameRes.of(user);
} | @DisplayName("Finds the user by phone number and returns the User.")
@Test
void findUsernameIfUserFound() {
// given
String phone = "010-1234-5678";
String username = "jayang";
User user = UserFixture.GENERAL_USER.toUser();
given(userService.readUserByPhone(phone)).willReturn(Optional.of(user));
// when
AuthFindDto.FindUsernameRes result = authFindService.findUsername(phone);
// then
assertEquals(result, new AuthFindDto.FindUsernameRes(username));
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
locks.computeIfAbsent(msg.getOriginator(), SemaphoreWithTbMsgQueue::new)
.addToQueueAndTryProcess(msg, ctx, this::processMsgAsync);
} | @Test
public void test_sqrt_5_default_value_failure() {
var node = initNode(TbRuleNodeMathFunctionType.SQRT,
new TbMathResult(TbMathArgumentType.TIME_SERIES, "result", 3, true, false, DataConstants.SERVER_SCOPE),
new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "TestKey")
);
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY, JacksonUtil.newObjectNode().put("a", 10).toString());
node.onMsg(ctx, msg);
ArgumentCaptor<Throwable> tCaptor = ArgumentCaptor.forClass(Throwable.class);
Mockito.verify(ctx, timeout(TIMEOUT)).tellFailure(eq(msg), tCaptor.capture());
assertNotNull(tCaptor.getValue().getMessage());
} |
public static String preprocess(String literal) {
if (literal == null) {
return null;
}
StringBuilder sb = new StringBuilder(literal.length() - 2);
for (int i = 1; i < literal.length() - 1; i++) {
char ch = literal.charAt(i);
if (ch == '\\') {
if (i >= literal.length() - 2) {
throw new IllegalArgumentException("Invalid escaped literal string: " + literal);
}
char next = literal.charAt(++i);
switch (next) {
case 'b':
ch = '\b';
break;
case 'n':
ch = '\n';
break;
case 't':
ch = '\t';
break;
case 'f':
ch = '\f';
break;
case 'r':
ch = '\r';
break;
case '\\':
ch = '\\';
break;
case '\"':
ch = '\"';
break;
case '\'':
ch = '\'';
break;
default:
throw new IllegalArgumentException("Invalid escaped literal string: " + literal);
}
}
sb.append(ch);
}
return sb.toString();
} | @Test(expected = IllegalArgumentException.class)
public void preprocessWrongStr() {
SelTypeUtil.preprocess("\"\\\"");
} |
public static String obfuscateCredentials(String originalUrl) {
HttpUrl parsedUrl = HttpUrl.parse(originalUrl);
if (parsedUrl != null) {
return obfuscateCredentials(originalUrl, parsedUrl);
}
return originalUrl;
} | @Test
@UseDataProvider("obfuscateCredentialsUseCases")
public void verify_obfuscateCredentials(String originalUrl, String expectedUrl) {
assertThat(obfuscateCredentials(originalUrl, HttpUrl.parse(originalUrl)))
.isEqualTo(obfuscateCredentials(originalUrl))
.isEqualTo(expectedUrl);
} |
@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
writer.keyword("CREATE");
if (getReplace()) {
writer.keyword("OR REPLACE");
}
writer.keyword("EXTERNAL MAPPING");
if (ifNotExists) {
writer.keyword("IF NOT EXISTS");
}
name.unparse(writer, leftPrec, rightPrec);
if (externalName != null) {
writer.keyword("EXTERNAL NAME");
externalName.unparse(writer, leftPrec, rightPrec);
}
if (!columns.isEmpty()) {
SqlWriter.Frame frame = writer.startList("(", ")");
for (SqlNode column : columns) {
printIndent(writer);
column.unparse(writer, 0, 0);
}
writer.newlineAndIndent();
writer.endList(frame);
}
if (dataConnection != null) {
writer.newlineAndIndent();
writer.keyword("DATA CONNECTION");
dataConnection.unparse(writer, leftPrec, rightPrec);
} else {
assert connectorType != null;
writer.newlineAndIndent();
writer.keyword("TYPE");
connectorType.unparse(writer, leftPrec, rightPrec);
}
if (objectType != null) {
writer.newlineAndIndent();
writer.keyword("OBJECT TYPE");
objectType.unparse(writer, leftPrec, rightPrec);
}
unparseOptions(writer, options);
} | @Test
public void test_unparse_quoting() {
Mapping mapping = new Mapping(
"na\"me",
"external\"name",
null,
"Type",
null,
singletonList(new MappingField("fi\"eld", QueryDataType.VARCHAR, "__key\"field")),
ImmutableMap.of("ke'y", "val'ue")
);
String sql = SqlCreateMapping.unparse(mapping);
assertThat(sql).isEqualTo("CREATE OR REPLACE EXTERNAL MAPPING \"hazelcast\".\"public\".\"na\"\"me\" " +
"EXTERNAL NAME \"external\"\"name\" (" + LE +
" \"fi\"\"eld\" VARCHAR EXTERNAL NAME \"__key\"\"field\"" + LE +
")" + LE +
"TYPE \"Type\"" + LE +
"OPTIONS (" + LE +
" 'ke''y'='val''ue'" + LE +
")"
);
} |
public static String decodeObjectIdentifier(byte[] data) {
return decodeObjectIdentifier(data, 0, data.length);
} | @Test
public void decodeObjectIdentifierWithZeros() {
assertEquals("0.1.0.2.0.3", Asn1Utils.decodeObjectIdentifier(new byte[] { 1, 0, 2, 0, 3 }));
} |
@VisibleForTesting
static int getIdForInsertionRequest(EditorInfo info) {
return info == null
? 0
: Arrays.hashCode(new int[] {info.fieldId, info.packageName.hashCode()});
} | @Test
public void testQueueImageInsertionTillTargetTextBoxEntered() {
Assert.assertEquals(0, ShadowToast.shownToastCount());
simulateFinishInputFlow();
EditorInfo info = createEditorInfoTextWithSuggestionsForSetUp();
EditorInfoCompat.setContentMimeTypes(info, new String[] {"image/gif"});
simulateOnStartInputFlow(false, info);
mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.IMAGE_MEDIA_POPUP);
ArgumentCaptor<InsertionRequestCallback> argumentCaptor =
ArgumentCaptor.forClass(InsertionRequestCallback.class);
Mockito.verify(mRemoteInsertion)
.startMediaRequest(Mockito.any(), Mockito.anyInt(), argumentCaptor.capture());
simulateFinishInputFlow();
argumentCaptor
.getValue()
.onMediaRequestDone(
AnySoftKeyboardMediaInsertion.getIdForInsertionRequest(info),
new InputContentInfoCompat(
Uri.EMPTY,
new ClipDescription("", EditorInfoCompat.getContentMimeTypes(info)),
null));
Assert.assertNull(mAnySoftKeyboardUnderTest.getCommitedInputContentInfo());
Assert.assertEquals(1, ShadowToast.shownToastCount());
Assert.assertNotNull(ShadowToast.getLatestToast());
Assert.assertEquals(Toast.LENGTH_LONG, ShadowToast.getLatestToast().getDuration());
Assert.assertEquals("Click text-box to insert image", ShadowToast.getTextOfLatestToast());
// entering the actual text
simulateOnStartInputFlow(false, info);
Assert.assertNotNull(mAnySoftKeyboardUnderTest.getCommitedInputContentInfo());
Assert.assertEquals(1, ShadowToast.shownToastCount());
} |
@Override
public DeserializationHandlerResponse handle(
final ProcessorContext context,
final ConsumerRecord<byte[], byte[]> record,
final Exception exception
) {
log.debug(
String.format("Exception caught during Deserialization, "
+ "taskId: %s, topic: %s, partition: %d, offset: %d",
context.taskId(), record.topic(), record.partition(), record.offset()),
exception
);
streamsErrorCollector.recordError(record.topic());
if (isCausedByAuthorizationError(exception)) {
log.info(
String.format(
"Authorization error when attempting to access the schema during deserialization. "
+ "taskId: %s, topic: %s, partition: %d, offset: %d",
context.taskId(), record.topic(), record.partition(), record.offset()));
return DeserializationHandlerResponse.FAIL;
}
return DeserializationHandlerResponse.CONTINUE;
} | @Test
public void shouldCallErrorCollector() {
when(record.topic()).thenReturn("test");
exceptionHandler.handle(context, record, mock(Exception.class));
verify(streamsErrorCollector).recordError("test");
} |
public static boolean isProperClass(Class<?> clazz) {
int mods = clazz.getModifiers();
return !(Modifier.isAbstract(mods)
|| Modifier.isInterface(mods)
|| Modifier.isNative(mods));
} | @Test
void testClassIsNotProper() {
assertThat(InstantiationUtil.isProperClass(Value.class)).isFalse();
} |
@Override
public void handleTenantInfo(TenantInfoHandler handler) {
// If disabled, skip the logic
if (isTenantDisable()) {
return;
}
// Obtain the tenant
TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
// Execute the handler
handler.handle(tenant);
} | @Test
public void testHandleTenantInfo_success() {
// Prepare parameters
TenantInfoHandler handler = mock(TenantInfoHandler.class);
// mock: not disabled
when(tenantProperties.getEnable()).thenReturn(true);
// mock the tenant
TenantDO dbTenant = randomPojo(TenantDO.class);
tenantMapper.insert(dbTenant);// @Sql: insert an existing record first
TenantContextHolder.setTenantId(dbTenant.getId());
// Invoke
tenantService.handleTenantInfo(handler);
// Assert
verify(handler).handle(argThat(argument -> {
assertPojoEquals(dbTenant, argument);
return true;
}));
} |
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("uid") String uId,
@QueryParam("confstoretrieve") String confsToRetrieve,
@QueryParam("metricstoretrieve") String metricsToRetrieve,
@QueryParam("fields") String fields,
@QueryParam("metricslimit") String metricsLimit,
@QueryParam("metricstimestart") String metricsTimeStart,
@QueryParam("metricstimeend") String metricsTimeEnd) {
String url = req.getRequestURI() +
(req.getQueryString() == null ? "" :
QUERY_STRING_SEP + req.getQueryString());
UserGroupInformation callerUGI =
TimelineReaderWebServicesUtils.getUser(req);
LOG.info("Received URL {} from user {}",
url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
long startTime = Time.monotonicNow();
boolean succeeded = false;
init(res);
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
TimelineEntity entity = null;
try {
TimelineReaderContext context =
TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
entity = timelineReaderManager.getEntity(context,
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
checkAccessForGenericEntity(entity, callerUGI);
succeeded = true;
} catch (Exception e) {
handleException(e, url, startTime, "Either metricslimit or metricstime"
+ " start/end");
} finally {
long latency = Time.monotonicNow() - startTime;
METRICS.addGetEntitiesLatency(latency, succeeded);
LOG.info("Processed URL {} (Took {} ms.)", url, latency);
}
if (entity == null) {
LOG.info("Processed URL {} but entity not found" + " (Took {} ms.)",
url, (Time.monotonicNow() - startTime));
      throw new NotFoundException("Timeline entity with uid: " + uId +
          " is not found");
}
return entity;
} | @Test
void testGetEntitiesWithLimit() throws Exception {
Client client = createClient();
try {
URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
"timeline/clusters/cluster1/apps/app1/entities/app?limit=2");
ClientResponse resp = getResponse(client, uri);
Set<TimelineEntity> entities =
          resp.getEntity(new GenericType<Set<TimelineEntity>>() {});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(2, entities.size());
// Entities returned are based on most recent created time.
assertTrue(entities.contains(newEntity("app", "id_1")) &&
entities.contains(newEntity("app", "id_4")),
"Entities with id_1 and id_4 should have been present " +
"in response based on entity created time.");
uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
"clusters/cluster1/apps/app1/entities/app?limit=3");
resp = getResponse(client, uri);
      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
// Even though 2 entities out of 4 have same created time, one entity
// is left out due to limit
assertEquals(3, entities.size());
} finally {
client.destroy();
}
} |
public SortedSet<String> getNames() {
return Collections.unmodifiableSortedSet(new TreeSet<>(healthChecks.keySet()));
} | @Test
public void hasASetOfHealthCheckNames() {
assertThat(registry.getNames()).containsOnly("hc1", "hc2", "ahc");
} |
public byte[] toByteArray() {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
try {
write(stream);
} catch (IOException e) {
// Should not happen as ByteArrayOutputStream does not throw IOException on write
throw new RuntimeException(e);
}
return stream.toByteArray();
} | @Test
public void testToByteArray_OP_PUSHDATA1() {
// OP_PUSHDATA1
byte[] bytes = new byte[0xFF];
RANDOM.nextBytes(bytes);
byte[] expected = ByteUtils.concat(new byte[] { OP_PUSHDATA1, (byte) 0xFF }, bytes);
byte[] actual = new ScriptChunk(OP_PUSHDATA1, bytes).toByteArray();
assertArrayEquals(expected, actual);
} |
synchronized Object[] getOneRow( RowMetaInterface rowMeta, Object[] row ) throws KettleException {
Object[] rowData = RowDataUtil.resizeArray( row, data.outputRowMeta.size() );
int index = 0;
Set<Integer> numFieldsAlreadyBeenTransformed = new HashSet<Integer>();
for ( int i = 0; i < data.numFields; i++ ) {
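      // Descriptive comment (added): if this input field was already replaced in place by an earlier
      // iteration, read it through the output row meta so chained replacements operate on the updated value.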
RowMetaInterface currentRowMeta =
( numFieldsAlreadyBeenTransformed.contains( data.inStreamNrs[i] ) ) ? data.outputRowMeta : getInputRowMeta();
String value =
replaceString( currentRowMeta.getString( rowData, data.inStreamNrs[i] ), data.patterns[i],
getResolvedReplaceByString( i, row ) );
if ( Utils.isEmpty( data.outStreamNrs[i] ) ) {
// update field value
rowData[data.inStreamNrs[i]] = value;
numFieldsAlreadyBeenTransformed.add( data.inStreamNrs[i] );
} else {
// add new field value
rowData[data.inputFieldsNr + index++] = value;
}
}
return rowData;
} | @Test
public void testGetOneRow() throws Exception {
ReplaceStringData data = new ReplaceStringData();
ReplaceString replaceString =
new ReplaceString( stepMockHelper.stepMeta, data, 0, stepMockHelper.transMeta, stepMockHelper.trans );
RowMetaInterface inputRowMeta = new RowMeta();
inputRowMeta.addValueMeta( 0, new ValueMetaString( "SomeDataMeta" ) );
inputRowMeta.addValueMeta( 1, new ValueMetaString( "AnotherDataMeta" ) );
replaceString.init( stepMockHelper.processRowsStepMetaInterface, data );
replaceString.setInputRowMeta( inputRowMeta );
data.outputRowMeta = inputRowMeta;
data.outputRowMeta.addValueMeta( new ValueMetaString( "AnotherDataMeta" ) );
data.inputFieldsNr = 2;
data.numFields = 2;
data.inStreamNrs = new int[] { 1, 1 };
data.patterns = new Pattern[] { Pattern.compile( "a" ), Pattern.compile( "t" ) };
data.replaceFieldIndex = new int[] { -1, -1 };
data.outStreamNrs = new String[] { StringUtils.EMPTY, "1" };
data.replaceByString = new String[] { "1", "2" };
data.setEmptyString = new boolean[] { false, false };
Object[] output = replaceString.getOneRow( inputRowMeta, row );
assertArrayEquals( "Output varies", expectedRow, output );
} |
@VisibleForTesting
public Path getAllocationFile(Configuration conf)
throws UnsupportedFileSystemException {
String allocFilePath = conf.get(FairSchedulerConfiguration.ALLOCATION_FILE,
FairSchedulerConfiguration.DEFAULT_ALLOCATION_FILE);
Path allocPath = new Path(allocFilePath);
String allocPathScheme = allocPath.toUri().getScheme();
if(allocPathScheme != null && !allocPathScheme.matches(SUPPORTED_FS_REGEX)){
throw new UnsupportedFileSystemException("Allocation file "
+ allocFilePath + " uses an unsupported filesystem");
} else if (!allocPath.isAbsolute()) {
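      // Descriptive comment (added): relative paths are resolved against the classpath and must live on the local filesystem.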
URL url = Thread.currentThread().getContextClassLoader()
.getResource(allocFilePath);
if (url == null) {
LOG.warn(allocFilePath + " not found on the classpath.");
allocPath = null;
} else if (!url.getProtocol().equalsIgnoreCase("file")) {
throw new RuntimeException("Allocation file " + url
+ " found on the classpath is not on the local filesystem.");
} else {
allocPath = new Path(url.getProtocol(), null, url.getPath());
}
} else if (allocPath.isAbsoluteAndSchemeAuthorityNull()){
allocPath = new Path("file", null, allocFilePath);
}
return allocPath;
} | @Test (expected = UnsupportedFileSystemException.class)
public void testDenyGetAllocationFileFromUnsupportedFileSystem()
throws UnsupportedFileSystemException {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, "badfs:///badfile");
AllocationFileLoaderService allocLoader =
new AllocationFileLoaderService(scheduler);
allocLoader.getAllocationFile(conf);
} |
public static JsonNode toJsonNode(Object datum) {
if (datum == null) {
return null;
}
try {
TokenBuffer generator = new TokenBuffer(new ObjectMapper(), false);
toJson(datum, generator);
return new ObjectMapper().readTree(generator.asParser());
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
} | @Test
void testToJsonNode() {
assertNull(toJsonNode(null));
assertEquals(NullNode.getInstance(), toJsonNode(JsonProperties.NULL_VALUE));
assertEquals(BooleanNode.TRUE, toJsonNode(true));
assertEquals(IntNode.valueOf(1), toJsonNode(1));
assertEquals(LongNode.valueOf(2), toJsonNode(2L));
assertEquals(FloatNode.valueOf(1.0f), toJsonNode(1.0f));
assertEquals(FloatNode.valueOf(33.33000183105469f), toJsonNode(33.33000183105469f));
assertEquals(DoubleNode.valueOf(2.0), toJsonNode(2.0d));
assertEquals(BinaryNode.valueOf(new byte[] { 1, 2 }), toJsonNode(new byte[] { 1, 2 }));
assertEquals(TextNode.valueOf("a"), toJsonNode("a"));
assertEquals(TextNode.valueOf("UP"), toJsonNode(Direction.UP));
assertEquals(BigIntegerNode.valueOf(BigInteger.ONE), toJsonNode(BigInteger.ONE));
assertEquals(DecimalNode.valueOf(BigDecimal.ONE), toJsonNode(BigDecimal.ONE));
ArrayNode an = JsonNodeFactory.instance.arrayNode();
an.add(1);
assertEquals(an, toJsonNode(Collections.singletonList(1)));
ObjectNode on = JsonNodeFactory.instance.objectNode();
on.put("a", 1);
assertEquals(on, toJsonNode(Collections.singletonMap("a", 1)));
} |
@Private
@InterfaceStability.Unstable
public static NodeId toNodeIdWithDefaultPort(String nodeIdStr) {
if (nodeIdStr.indexOf(":") < 0) {
return NodeId.fromString(nodeIdStr + ":0");
}
return NodeId.fromString(nodeIdStr);
} | @Test
void testNodeIdWithDefaultPort() throws URISyntaxException {
NodeId nid;
nid = ConverterUtils.toNodeIdWithDefaultPort("node:10");
assertThat(nid.getPort()).isEqualTo(10);
assertThat(nid.getHost()).isEqualTo("node");
nid = ConverterUtils.toNodeIdWithDefaultPort("node");
assertThat(nid.getPort()).isEqualTo(0);
assertThat(nid.getHost()).isEqualTo("node");
} |
Optional<String> getFromDiscretizeBins(Number toEvaluate) {
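        // Descriptive comment (added): return the value of the first discretize bin whose interval contains the evaluated number, if any.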
return discretizeBins
.stream()
.map(kiePMMLNameValue -> kiePMMLNameValue.evaluate(toEvaluate))
.filter(Optional::isPresent)
.findFirst()
.map(Optional::get);
} | @Test
void getFromDiscretizeBins() {
KiePMMLDiscretize kiePMMLDiscretize = getKiePMMLDiscretize(null, null);
Optional<String> retrieved = kiePMMLDiscretize.getFromDiscretizeBins(10);
assertThat(retrieved).isPresent();
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(20);
assertThat(retrieved).isNotPresent();
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(21);
assertThat(retrieved).isNotPresent();
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(29);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin2.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(30);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin2.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(31);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin3.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(32);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin3.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(40);
assertThat(retrieved).isNotPresent();
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(41);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin4.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(42);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin4.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(49);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin4.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(50);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin4.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(51);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin5.getBinValue());
retrieved = kiePMMLDiscretize.getFromDiscretizeBins(52);
assertThat(retrieved).isPresent();
assertThat(retrieved.get()).isEqualTo(kiePMMLDiscretizeBin5.getBinValue());
} |
public static void addCurSizeAllMemTables(final StreamsMetricsImpl streamsMetrics,
final RocksDBMetricContext metricContext,
final Gauge<BigInteger> valueProvider) {
addMutableMetric(
streamsMetrics,
metricContext,
valueProvider,
CURRENT_SIZE_OF_ALL_MEMTABLES,
CURRENT_SIZE_OF_ALL_MEMTABLES_DESCRIPTION
);
} | @Test
public void shouldAddCurSizeAllMemTablesMetric() {
final String name = "cur-size-all-mem-tables";
final String description = "Approximate size of active and unflushed immutable memtables in bytes";
runAndVerifyMutableMetric(
name,
description,
() -> RocksDBMetrics.addCurSizeAllMemTables(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
);
} |
public static String getSystemProperty(String key) {
String value = System.getenv(key);
if (StringUtils.isEmpty(value)) {
value = System.getProperty(key);
}
return value;
} | @Test
void testGetSystemProperty() throws Exception {
try {
System.setProperty("dubbo", "system-only");
assertThat(ConfigUtils.getSystemProperty("dubbo"), equalTo("system-only"));
} finally {
System.clearProperty("dubbo");
}
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final Activity activity = (Activity) o;
return Objects.equals(caller, activity.caller) && Objects.equals(message, activity.message);
} | @Test
public void testEquals() throws Exception {
EqualsVerifier.forClass(Activity.class)
.suppress(Warning.NONFINAL_FIELDS)
.verify();
} |
@Bean
public ShenyuContextDecorator springCloudShenyuContextDecorator() {
return new SpringCloudShenyuContextDecorator();
} | @Test
public void testSpringCloudShenyuContextDecorator() {
applicationContextRunner.run(context -> {
ShenyuContextDecorator decorator = context.getBean("springCloudShenyuContextDecorator", ShenyuContextDecorator.class);
assertNotNull(decorator);
}
);
} |
@Override
public String createToken(Authentication authentication) throws AccessException {
return getExecuteTokenManager().createToken(authentication);
} | @Test
void testCreateToken2() throws AccessException {
assertEquals("token", tokenManagerDelegate.createToken("nacos"));
} |
@Override
@PublicAPI(usage = ACCESS)
public Set<JavaClass> getAllInvolvedRawTypes() {
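        // Descriptive comment (added): combine the return type, parameter types and type parameters,
        // expand each into the raw types it involves (including nested generic arguments) and collect them into one set.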
return Stream.of(
Stream.of(this.returnType.get()),
this.parameters.getParameterTypes().stream(),
this.typeParameters.stream()
).flatMap(s -> s).map(JavaType::getAllInvolvedRawTypes).flatMap(Set::stream).collect(toSet());
} | @Test
public void offers_all_involved_raw_types() {
class SampleClass<T extends Collection<? super File> & Serializable> {
@SuppressWarnings("unused")
T method(List<Map<? extends Number, Set<? super String[][]>>> input) {
return null;
}
}
JavaMethod method = new ClassFileImporter().importClass(SampleClass.class).getMethod("method", List.class);
assertThatTypes(method.getAllInvolvedRawTypes())
.matchInAnyOrder(Collection.class, File.class, Serializable.class, List.class, Map.class, Number.class, Set.class, String.class);
} |
@Override
public Boolean authenticate(final Host bookmark, final LoginCallback prompt, final CancelCallback cancel)
throws BackgroundException {
if(log.isDebugEnabled()) {
log.debug(String.format("Login using none authentication with credentials %s", bookmark.getCredentials()));
}
try {
client.auth(bookmark.getCredentials().getUsername(), new AuthNone());
return client.isAuthenticated();
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map(e);
}
} | @Test(expected = LoginFailureException.class)
@Ignore
public void testAuthenticate() throws Exception {
assertFalse(new SFTPNoneAuthentication(session.getClient()).authenticate(session.getHost(), new DisabledLoginCallback(), new DisabledCancelCallback()));
} |
public RowMetaInterface getStepFields( String stepname ) throws KettleStepException {
StepMeta stepMeta = findStep( stepname );
if ( stepMeta != null ) {
return getStepFields( stepMeta );
} else {
return null;
}
} | @Test
public void infoStepFieldsAreNotIncludedInGetStepFields() throws KettleStepException {
// validates that the fields from info steps are not included in the resulting step fields for a stepMeta.
// This is important with steps like StreamLookup and Append, where the previous steps may or may not
// have their fields included in the current step.
TransMeta transMeta = new TransMeta( new Variables() );
StepMeta toBeAppended1 = testStep( "toBeAppended1",
emptyList(), // no info steps
asList( "field1", "field2" ) // names of fields from this step
);
StepMeta toBeAppended2 = testStep( "toBeAppended2", emptyList(), asList( "field1", "field2" ) );
StepMeta append = testStep( "append",
asList( "toBeAppended1", "toBeAppended2" ), // info step names
singletonList( "outputField" ) // output field of this step
);
StepMeta after = new StepMeta( "after", new DummyTransMeta() );
wireUpTestTransMeta( transMeta, toBeAppended1, toBeAppended2, append, after );
RowMetaInterface results = transMeta.getStepFields( append, after, mock( ProgressMonitorListener.class ) );
assertThat( 1, equalTo( results.size() ) );
assertThat( "outputField", equalTo( results.getFieldNames()[ 0 ] ) );
} |
@Override
public UrlPattern doGetPattern() {
return UrlPattern.builder()
.includes(includeUrls)
.excludes(excludeUrls)
.build();
} | @Test
public void does_not_match_web_services_using_servlet_filter() {
initWebServiceEngine(newWsUrl("api/authentication", "login").setHandler(ServletFilterHandler.INSTANCE));
assertThat(underTest.doGetPattern().matches("/api/authentication/login")).isFalse();
} |
public List<GrantDTO> getForTargetExcludingGrantee(GRN target, GRN grantee) {
return db.find(DBQuery.and(
DBQuery.is(GrantDTO.FIELD_TARGET, target.toString()),
DBQuery.notEquals(GrantDTO.FIELD_GRANTEE, grantee.toString())
)).toArray();
} | @Test
@MongoDBFixtures("grants.json")
public void getForTargetExcludingGrantee() {
final GRN stream = grnRegistry.parse("grn::::stream:54e3deadbeefdeadbeef0001");
final GRN grantee = grnRegistry.parse("grn::::user:john");
assertThat(dbService.getForTargetExcludingGrantee(stream, grantee)).hasSize(2);
} |
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
} | @Test
public void givenTable_whenSetColumns_thenGenericRecordHasSetColumns() {
ObjectSpec spec = new ObjectSpec(mapName,
col("id", INT),
col("name", STRING),
col("age", INT),
col("address", STRING));
objectProvider.createObject(spec);
objectProvider.insertItems(spec, 1);
    Properties properties = new Properties();
properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
properties.setProperty(COLUMNS_PROPERTY, "id,name,age");
mapLoader = createMapLoader(properties, hz);
GenericRecord genericRecord = mapLoader.load(0);
assertThat(genericRecord.getInt32("id")).isZero();
assertThat(genericRecord.getString("name")).isEqualTo("name-0");
assertThat(genericRecord.getInt32("age")).isEqualTo(2);
assertThat(genericRecord.getFieldKind("address")).isEqualTo(NOT_AVAILABLE);
} |
@Override
public void symlink(final Path file, String target) throws BackgroundException {
try {
session.sftp().symlink(target, file.getAbsolute());
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map("Cannot create {0}", e, file);
}
} | @Test
public void testSymlink() throws Exception {
final SFTPHomeDirectoryService workdir = new SFTPHomeDirectoryService(session);
final Path target = new Path(workdir.find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
new SFTPTouchFeature(session).touch(target, new TransferStatus());
final Path link = new Path(workdir.find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink));
new SFTPSymlinkFeature(session).symlink(link, target.getName());
assertTrue(new SFTPFindFeature(session).find(link));
assertEquals(EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink),
new SFTPListService(session).list(workdir.find(), new DisabledListProgressListener()).get(link).getType());
new SFTPDeleteFeature(session).delete(Collections.singletonList(link), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new SFTPFindFeature(session).find(link));
assertTrue(new SFTPFindFeature(session).find(target));
new SFTPDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), firstChunkMsgId.hashCode());
} | @Test
public void hashCodeTest() {
ChunkMessageIdImpl chunkMsgId1 = new ChunkMessageIdImpl(
new MessageIdImpl(0, 0, 0),
new MessageIdImpl(1, 1, 1)
);
ChunkMessageIdImpl chunkMsgId2 = new ChunkMessageIdImpl(
new MessageIdImpl(2, 2, 2),
new MessageIdImpl(3, 3, 3)
);
assertEquals(chunkMsgId1.hashCode(), chunkMsgId1.hashCode());
assertNotEquals(chunkMsgId1.hashCode(), chunkMsgId2.hashCode());
} |
@Override
public SQLRecognizer getSelectForUpdateRecognizer(String sql, SQLStatement ast) {
if (((SQLSelectStatement) ast).getSelect().getFirstQueryBlock().isForUpdate()) {
return new PolarDBXSelectForUpdateRecognizer(sql, ast);
}
return null;
} | @Test
public void getSelectForUpdateTest() {
// common select without lock
String sql = "SELECT name FROM t1 WHERE id = 1";
SQLStatement sqlStatement = getSQLStatement(sql);
Assertions.assertNull(new PolarDBXOperateRecognizerHolder().getSelectForUpdateRecognizer(sql, sqlStatement));
// select for update
sql += " FOR UPDATE";
sqlStatement = getSQLStatement(sql);
Assertions.assertNotNull(new PolarDBXOperateRecognizerHolder().getSelectForUpdateRecognizer(sql, sqlStatement));
} |
@ExceptionHandler(DuplicateKeyException.class)
protected ShenyuAdminResult handleDuplicateKeyException(final DuplicateKeyException exception) {
LOG.error("duplicate key exception ", exception);
return ShenyuAdminResult.error(ShenyuResultMessage.UNIQUE_INDEX_CONFLICT_ERROR);
} | @Test
public void testServerExceptionHandlerByDuplicateKeyException() {
DuplicateKeyException duplicateKeyException = new DuplicateKeyException("Test duplicateKeyException message!");
ShenyuAdminResult result = exceptionHandlersUnderTest.handleDuplicateKeyException(duplicateKeyException);
Assertions.assertEquals(result.getCode().intValue(), CommonErrorCode.ERROR);
Assertions.assertEquals(result.getMessage(), ShenyuResultMessage.UNIQUE_INDEX_CONFLICT_ERROR);
} |
@Override
public void run()
throws Exception {
//init all file systems
List<PinotFSSpec> pinotFSSpecs = _spec.getPinotFSSpecs();
for (PinotFSSpec pinotFSSpec : pinotFSSpecs) {
PinotFSFactory.register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(), new PinotConfiguration(pinotFSSpec));
}
//Get list of files to process
URI inputDirURI = new URI(_spec.getInputDirURI());
if (inputDirURI.getScheme() == null) {
inputDirURI = new File(_spec.getInputDirURI()).toURI();
}
PinotFS inputDirFS = PinotFSFactory.create(inputDirURI.getScheme());
List<String> filteredFiles = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(inputDirFS, inputDirURI,
_spec.getIncludeFileNamePattern(), _spec.getExcludeFileNamePattern(), _spec.isSearchRecursively());
LOGGER.info("Found {} files to create Pinot segments!", filteredFiles.size());
//Get outputFS for writing output pinot segments
URI outputDirURI = new URI(_spec.getOutputDirURI());
if (outputDirURI.getScheme() == null) {
outputDirURI = new File(_spec.getOutputDirURI()).toURI();
}
PinotFS outputDirFS = PinotFSFactory.create(outputDirURI.getScheme());
outputDirFS.mkdir(outputDirURI);
//Get staging directory for temporary output pinot segments
String stagingDir = _spec.getExecutionFrameworkSpec().getExtraConfigs().get(STAGING_DIR);
URI stagingDirURI = null;
if (stagingDir != null) {
stagingDirURI = URI.create(stagingDir);
if (stagingDirURI.getScheme() == null) {
stagingDirURI = new File(stagingDir).toURI();
}
if (!outputDirURI.getScheme().equals(stagingDirURI.getScheme())) {
throw new RuntimeException(String
.format("The scheme of staging directory URI [%s] and output directory URI [%s] has to be same.",
stagingDirURI, outputDirURI));
}
outputDirFS.mkdir(stagingDirURI);
}
try {
JavaSparkContext sparkContext = JavaSparkContext.fromSparkContext(SparkContext.getOrCreate());
// Pinot plugins are necessary to launch Pinot ingestion job from every mapper.
// In order to ensure pinot plugins would be loaded to each worker, this method
// tars entire plugins directory and set this file into Distributed cache.
// Then each executor job will untar the plugin tarball, and set system properties accordingly.
packPluginsToDistributedCache(sparkContext);
// Add dependency jars
if (_spec.getExecutionFrameworkSpec().getExtraConfigs().containsKey(DEPS_JAR_DIR)) {
addDepsJarToDistributedCache(sparkContext,
_spec.getExecutionFrameworkSpec().getExtraConfigs().get(DEPS_JAR_DIR));
}
List<String> pathAndIdxList = new ArrayList<>();
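      // Descriptive comment (added): each entry encodes "<inputFilePath> <sequenceId>"; the sequence id is
      // per input directory or global across all files, depending on the segment name generator spec.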
if (!SegmentGenerationJobUtils.useGlobalDirectorySequenceId(_spec.getSegmentNameGeneratorSpec())) {
Map<String, List<String>> localDirIndex = new HashMap<>();
for (String filteredFile : filteredFiles) {
Path filteredParentPath = Paths.get(filteredFile).getParent();
if (!localDirIndex.containsKey(filteredParentPath.toString())) {
localDirIndex.put(filteredParentPath.toString(), new ArrayList<>());
}
localDirIndex.get(filteredParentPath.toString()).add(filteredFile);
}
for (String parentPath : localDirIndex.keySet()) {
List<String> siblingFiles = localDirIndex.get(parentPath);
Collections.sort(siblingFiles);
for (int i = 0; i < siblingFiles.size(); i++) {
pathAndIdxList.add(String.format("%s %d", siblingFiles.get(i), i));
}
}
} else {
for (int i = 0; i < filteredFiles.size(); i++) {
pathAndIdxList.add(String.format("%s %d", filteredFiles.get(i), i));
}
}
int numDataFiles = pathAndIdxList.size();
int jobParallelism = _spec.getSegmentCreationJobParallelism();
if (jobParallelism <= 0 || jobParallelism > numDataFiles) {
jobParallelism = numDataFiles;
}
JavaRDD<String> pathRDD = sparkContext.parallelize(pathAndIdxList, jobParallelism);
final String pluginsInclude =
(sparkContext.getConf().contains(PLUGINS_INCLUDE_PROPERTY_NAME)) ? sparkContext.getConf()
.get(PLUGINS_INCLUDE_PROPERTY_NAME) : null;
final URI finalInputDirURI = inputDirURI;
final URI finalOutputDirURI = (stagingDirURI == null) ? outputDirURI : stagingDirURI;
// Prevent using lambda expression in Spark to avoid potential serialization exceptions, use inner function
// instead.
pathRDD.foreach(new VoidFunction<String>() {
@Override
public void call(String pathAndIdx)
throws Exception {
PluginManager.get().init();
for (PinotFSSpec pinotFSSpec : _spec.getPinotFSSpecs()) {
PinotFSFactory
.register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(), new PinotConfiguration(pinotFSSpec));
}
PinotFS finalOutputDirFS = PinotFSFactory.create(finalOutputDirURI.getScheme());
String[] splits = pathAndIdx.split(" ");
String path = splits[0];
int idx = Integer.valueOf(splits[1]);
// Load Pinot Plugins copied from Distributed cache.
File localPluginsTarFile = new File(PINOT_PLUGINS_TAR_GZ);
if (localPluginsTarFile.exists()) {
File pluginsDirFile = new File(PINOT_PLUGINS_DIR + "-" + idx);
try {
TarCompressionUtils.untar(localPluginsTarFile, pluginsDirFile);
} catch (Exception e) {
LOGGER.error("Failed to untar local Pinot plugins tarball file [{}]", localPluginsTarFile, e);
throw new RuntimeException(e);
}
LOGGER.info("Trying to set System Property: [{}={}]", PLUGINS_DIR_PROPERTY_NAME,
pluginsDirFile.getAbsolutePath());
System.setProperty(PLUGINS_DIR_PROPERTY_NAME, pluginsDirFile.getAbsolutePath());
if (pluginsInclude != null) {
LOGGER.info("Trying to set System Property: [{}={}]", PLUGINS_INCLUDE_PROPERTY_NAME, pluginsInclude);
System.setProperty(PLUGINS_INCLUDE_PROPERTY_NAME, pluginsInclude);
}
LOGGER.info("Pinot plugins System Properties are set at [{}], plugins includes [{}]",
System.getProperty(PLUGINS_DIR_PROPERTY_NAME), System.getProperty(PLUGINS_INCLUDE_PROPERTY_NAME));
} else {
LOGGER.warn("Cannot find local Pinot plugins tar file at [{}]", localPluginsTarFile.getAbsolutePath());
}
URI inputFileURI = URI.create(path);
if (inputFileURI.getScheme() == null) {
inputFileURI =
new URI(finalInputDirURI.getScheme(), inputFileURI.getSchemeSpecificPart(), inputFileURI.getFragment());
}
//create localTempDir for input and output
File localTempDir = new File(FileUtils.getTempDirectory(), "pinot-" + UUID.randomUUID());
File localInputTempDir = new File(localTempDir, "input");
FileUtils.forceMkdir(localInputTempDir);
File localOutputTempDir = new File(localTempDir, "output");
FileUtils.forceMkdir(localOutputTempDir);
//copy input path to local
File localInputDataFile = new File(localInputTempDir, getFileName(inputFileURI));
LOGGER.info("Trying to copy input file from {} to {}", inputFileURI, localInputDataFile);
PinotFSFactory.create(inputFileURI.getScheme()).copyToLocalFile(inputFileURI, localInputDataFile);
//create task spec
SegmentGenerationTaskSpec taskSpec = new SegmentGenerationTaskSpec();
taskSpec.setInputFilePath(localInputDataFile.getAbsolutePath());
taskSpec.setOutputDirectoryPath(localOutputTempDir.getAbsolutePath());
taskSpec.setRecordReaderSpec(_spec.getRecordReaderSpec());
taskSpec
.setSchema(SegmentGenerationUtils.getSchema(_spec.getTableSpec().getSchemaURI(), _spec.getAuthToken()));
taskSpec.setTableConfig(
SegmentGenerationUtils.getTableConfig(_spec.getTableSpec().getTableConfigURI(), _spec.getAuthToken()));
taskSpec.setSequenceId(idx);
taskSpec.setSegmentNameGeneratorSpec(_spec.getSegmentNameGeneratorSpec());
taskSpec.setFailOnEmptySegment(_spec.isFailOnEmptySegment());
taskSpec.setCustomProperty(BatchConfigProperties.INPUT_DATA_FILE_URI_KEY, inputFileURI.toString());
SegmentGenerationTaskRunner taskRunner = new SegmentGenerationTaskRunner(taskSpec);
String segmentName = taskRunner.run();
// Tar segment directory to compress file
File localSegmentDir = new File(localOutputTempDir, segmentName);
String segmentTarFileName = URIUtils.encode(segmentName + Constants.TAR_GZ_FILE_EXT);
File localSegmentTarFile = new File(localOutputTempDir, segmentTarFileName);
LOGGER.info("Tarring segment from: {} to: {}", localSegmentDir, localSegmentTarFile);
TarCompressionUtils.createCompressedTarFile(localSegmentDir, localSegmentTarFile);
long uncompressedSegmentSize = FileUtils.sizeOf(localSegmentDir);
long compressedSegmentSize = FileUtils.sizeOf(localSegmentTarFile);
LOGGER.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName,
DataSizeUtils.fromBytes(uncompressedSegmentSize), DataSizeUtils.fromBytes(compressedSegmentSize));
// Move segment to output PinotFS
URI relativeOutputPath =
SegmentGenerationUtils.getRelativeOutputPath(finalInputDirURI, inputFileURI, finalOutputDirURI);
URI outputSegmentTarURI = relativeOutputPath.resolve(segmentTarFileName);
SegmentGenerationJobUtils.moveLocalTarFileToRemote(localSegmentTarFile, outputSegmentTarURI,
_spec.isOverwriteOutput());
// Create and upload segment metadata tar file
String metadataTarFileName = URIUtils.encode(segmentName + Constants.METADATA_TAR_GZ_FILE_EXT);
URI outputMetadataTarURI = relativeOutputPath.resolve(metadataTarFileName);
if (finalOutputDirFS.exists(outputMetadataTarURI) && (_spec.isOverwriteOutput()
|| !_spec.isCreateMetadataTarGz())) {
LOGGER.info("Deleting existing metadata tar gz file: {}", outputMetadataTarURI);
finalOutputDirFS.delete(outputMetadataTarURI, true);
}
if (taskSpec.isCreateMetadataTarGz()) {
File localMetadataTarFile = new File(localOutputTempDir, metadataTarFileName);
SegmentGenerationJobUtils.createSegmentMetadataTarGz(localSegmentDir, localMetadataTarFile);
SegmentGenerationJobUtils.moveLocalTarFileToRemote(localMetadataTarFile, outputMetadataTarURI,
_spec.isOverwriteOutput());
}
FileUtils.deleteQuietly(localSegmentDir);
FileUtils.deleteQuietly(localInputDataFile);
}
});
if (stagingDirURI != null) {
LOGGER.info("Trying to copy segment tars from staging directory: [{}] to output directory [{}]", stagingDirURI,
outputDirURI);
outputDirFS.copyDir(stagingDirURI, outputDirURI);
}
} finally {
if (stagingDirURI != null) {
LOGGER.info("Trying to clean up staging directory: [{}]", stagingDirURI);
outputDirFS.delete(stagingDirURI, true);
}
}
} | @Test
public void testInputFilesWithSameNameInDifferentDirectories()
throws Exception {
File testDir = Files.createTempDirectory("testSegmentGeneration-").toFile();
testDir.delete();
testDir.mkdirs();
File inputDir = new File(testDir, "input");
File inputSubDir1 = new File(inputDir, "2009");
File inputSubDir2 = new File(inputDir, "2010");
inputSubDir1.mkdirs();
inputSubDir2.mkdirs();
File inputFile1 = new File(inputSubDir1, "input.csv");
FileUtils.writeLines(inputFile1, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));
File inputFile2 = new File(inputSubDir2, "input.csv");
FileUtils.writeLines(inputFile2, Lists.newArrayList("col1,col2", "value3,3", "value4,4"));
File outputDir = new File(testDir, "output");
// Set up schema file.
final String schemaName = "mySchema";
File schemaFile = new File(testDir, "schema");
Schema schema = new SchemaBuilder()
.setSchemaName(schemaName)
.addSingleValueDimension("col1", DataType.STRING)
.addMetric("col2", DataType.INT)
.build();
FileUtils.write(schemaFile, schema.toPrettyJsonString(), StandardCharsets.UTF_8);
// Set up table config file.
File tableConfigFile = new File(testDir, "tableConfig");
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE)
.setTableName("myTable")
.setNumReplicas(1)
.build();
FileUtils.write(tableConfigFile, tableConfig.toJsonString(), StandardCharsets.UTF_8);
SegmentGenerationJobSpec jobSpec = new SegmentGenerationJobSpec();
jobSpec.setJobType("SegmentCreation");
jobSpec.setInputDirURI(inputDir.toURI().toString());
jobSpec.setSearchRecursively(true);
jobSpec.setOutputDirURI(outputDir.toURI().toString());
jobSpec.setOverwriteOutput(true);
RecordReaderSpec recordReaderSpec = new RecordReaderSpec();
recordReaderSpec.setDataFormat("csv");
recordReaderSpec.setClassName(CSVRecordReader.class.getName());
recordReaderSpec.setConfigClassName(CSVRecordReaderConfig.class.getName());
jobSpec.setRecordReaderSpec(recordReaderSpec);
TableSpec tableSpec = new TableSpec();
tableSpec.setTableName("myTable");
tableSpec.setSchemaURI(schemaFile.toURI().toString());
tableSpec.setTableConfigURI(tableConfigFile.toURI().toString());
jobSpec.setTableSpec(tableSpec);
ExecutionFrameworkSpec efSpec = new ExecutionFrameworkSpec();
efSpec.setName("standalone");
efSpec.setSegmentGenerationJobRunnerClassName(SparkSegmentGenerationJobRunner.class.getName());
jobSpec.setExecutionFrameworkSpec(efSpec);
PinotFSSpec pfsSpec = new PinotFSSpec();
pfsSpec.setScheme("file");
pfsSpec.setClassName(LocalPinotFS.class.getName());
jobSpec.setPinotFSSpecs(Collections.singletonList(pfsSpec));
SparkSegmentGenerationJobRunner jobRunner = new SparkSegmentGenerationJobRunner(jobSpec);
jobRunner.run();
// Check that both segment files are created
File newSegmentFile2009 = new File(outputDir, "2009/myTable_OFFLINE_0.tar.gz");
Assert.assertTrue(newSegmentFile2009.exists());
Assert.assertTrue(newSegmentFile2009.isFile());
Assert.assertTrue(newSegmentFile2009.length() > 0);
File newSegmentFile2010 = new File(outputDir, "2010/myTable_OFFLINE_0.tar.gz");
Assert.assertTrue(newSegmentFile2010.exists());
Assert.assertTrue(newSegmentFile2010.isFile());
Assert.assertTrue(newSegmentFile2010.length() > 0);
} |
@Udf
public Long round(@UdfParameter final long val) {
return val;
} | @Test
public void shouldRoundSimpleDoubleNegative() {
assertThat(udf.round(-1.23d), is(-1L));
assertThat(udf.round(-1.0d), is(-1L));
assertThat(udf.round(-1.5d), is(-1L));
assertThat(udf.round(-1.75d), is(-2L));
assertThat(udf.round(-1.53e6d), is(-1530000L));
assertThat(udf.round(-10.01d), is(-10L));
assertThat(udf.round(-12345.5d), is(-12345L));
assertThat(udf.round(-9.99d), is(-10L));
assertThat(udf.round(-110.1), is(-110L));
assertThat(udf.round(-1530000.01d), is(-1530000L));
assertThat(udf.round(-9999999.99d), is(-10000000L));
} |
@Override
public void monitor(RedisServer master) {
connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
master.getPort().intValue(), master.getQuorum().intValue());
} | @Test
public void testMonitor() {
Collection<RedisServer> masters = connection.masters();
RedisServer master = masters.iterator().next();
master.setName(master.getName() + ":");
connection.monitor(master);
} |
public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description) throws IOException {
if (position < 0) {
throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position);
}
int expectedReadBytes = destinationBuffer.remaining();
readFully(channel, destinationBuffer, position);
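        // Descriptive comment (added): any bytes still remaining in the buffer mean the channel hit end-of-file before the full read completed.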
if (destinationBuffer.hasRemaining()) {
throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " +
"but reached end of file after reading %d bytes. Started read from position %d.",
description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position));
}
} | @Test
public void testReadFullyOrFailWithRealFile() throws IOException {
try (FileChannel channel = FileChannel.open(TestUtils.tempFile().toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE)) {
// prepare channel
String msg = "hello, world";
channel.write(ByteBuffer.wrap(msg.getBytes()), 0);
channel.force(true);
assertEquals(channel.size(), msg.length(), "Message should be written to the file channel");
ByteBuffer perfectBuffer = ByteBuffer.allocate(msg.length());
ByteBuffer smallBuffer = ByteBuffer.allocate(5);
ByteBuffer largeBuffer = ByteBuffer.allocate(msg.length() + 1);
// Scenario 1: test reading into a perfectly-sized buffer
Utils.readFullyOrFail(channel, perfectBuffer, 0, "perfect");
assertFalse(perfectBuffer.hasRemaining(), "Buffer should be filled up");
assertEquals(msg, new String(perfectBuffer.array()), "Buffer should be populated correctly");
// Scenario 2: test reading into a smaller buffer
Utils.readFullyOrFail(channel, smallBuffer, 0, "small");
assertFalse(smallBuffer.hasRemaining(), "Buffer should be filled");
assertEquals("hello", new String(smallBuffer.array()), "Buffer should be populated correctly");
// Scenario 3: test reading starting from a non-zero position
smallBuffer.clear();
Utils.readFullyOrFail(channel, smallBuffer, 7, "small");
assertFalse(smallBuffer.hasRemaining(), "Buffer should be filled");
assertEquals("world", new String(smallBuffer.array()), "Buffer should be populated correctly");
// Scenario 4: test end of stream is reached before buffer is filled up
try {
Utils.readFullyOrFail(channel, largeBuffer, 0, "large");
fail("Expected EOFException to be raised");
} catch (EOFException e) {
// expected
}
}
} |
public BootstrapMetadata read() throws Exception {
Path path = Paths.get(directoryPath);
if (!Files.isDirectory(path)) {
if (Files.exists(path)) {
throw new RuntimeException("Path " + directoryPath + " exists, but is not " +
"a directory.");
} else {
throw new RuntimeException("No such directory as " + directoryPath);
}
}
Path binaryBootstrapPath = Paths.get(directoryPath, BINARY_BOOTSTRAP_FILENAME);
if (!Files.exists(binaryBootstrapPath)) {
return readFromConfiguration();
} else {
return readFromBinaryFile(binaryBootstrapPath.toString());
}
} | @Test
public void testReadFromConfigurationWithAncientVersion() throws Exception {
try (BootstrapTestDirectory testDirectory = new BootstrapTestDirectory().createDirectory()) {
assertEquals(BootstrapMetadata.fromVersion(MetadataVersion.MINIMUM_BOOTSTRAP_VERSION,
"the minimum version bootstrap with metadata.version 3.3-IV0"),
new BootstrapDirectory(testDirectory.path(), Optional.of("2.7")).read());
}
} |
@Override
public String setOnu(String target) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
String reply = null;
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return null;
}
String[] data = target.split(COLON);
if (data.length != THREE) {
log.error("Invalid number of arguments");
return null;
}
String[] onuId = checkIdString(data[FIRST_PART]);
if ((onuId == null) || (onuId.length != TWO)) {
log.error("Invalid ONU identifier {}", target);
return null;
}
if (!checkSetParam(data[SECOND_PART],
data[THIRD_PART])) {
log.error("Failed to check input {}", target);
return null;
}
try {
StringBuilder request = new StringBuilder();
request.append(ANGLE_LEFT + ONU_SET_CONFIG + SPACE);
request.append(VOLT_NE_NAMESPACE + ANGLE_RIGHT + NEW_LINE);
request.append(buildStartTag(PONLINK_ID, false))
.append(onuId[FIRST_PART])
.append(buildEndTag(PONLINK_ID))
.append(buildStartTag(ONU_ID, false))
.append(onuId[SECOND_PART])
.append(buildEndTag(ONU_ID))
.append(buildStartTag(CONFIG_INFO))
.append(buildStartTag(data[SECOND_PART], false))
.append(data[THIRD_PART])
.append(buildEndTag(data[SECOND_PART]))
.append(buildEndTag(CONFIG_INFO))
.append(buildEndTag(ONU_SET_CONFIG));
reply = controller
.getDevicesMap()
.get(ncDeviceId)
.getSession()
.doWrappedRpc(request.toString());
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return reply;
} | @Test
public void testValidSetOnu() throws Exception {
String target;
String reply;
for (int i = ZERO; i < VALID_SET_TCS.length; i++) {
target = VALID_SET_TCS[i];
currentKey = i;
reply = voltConfig.setOnu(target);
assertNotNull("Incorrect response for VALID_SET_TCS", reply);
}
} |
public static Messages getInstance() {
return instance;
} | @Test
public void testEncoding() {
assertEquals( "Wrong message returned", "", Messages.getInstance().getEncodedString( null ) ); //$NON-NLS-1$ //$NON-NLS-2$
assertEquals( "Wrong message returned", "test:  ™", Messages.getInstance().getXslString( "test.encode1" ) ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
} |
@GetMapping("/by-namespace")
public PageDTO<InstanceDTO> getInstancesByNamespace(
@RequestParam("appId") String appId, @RequestParam("clusterName") String clusterName,
@RequestParam("namespaceName") String namespaceName,
@RequestParam(value = "instanceAppId", required = false) String instanceAppId,
Pageable pageable) {
Page<Instance> instances;
if (Strings.isNullOrEmpty(instanceAppId)) {
instances = instanceService.findInstancesByNamespace(appId, clusterName,
namespaceName, pageable);
} else {
instances = instanceService.findInstancesByNamespaceAndInstanceAppId(instanceAppId, appId,
clusterName, namespaceName, pageable);
}
List<InstanceDTO> instanceDTOs = BeanUtils.batchTransform(InstanceDTO.class, instances.getContent());
return new PageDTO<>(instanceDTOs, pageable, instances.getTotalElements());
} | @Test
public void testGetInstancesByNamespaceAndInstanceAppId() throws Exception {
String someInstanceAppId = "someInstanceAppId";
String someAppId = "someAppId";
String someClusterName = "someClusterName";
String someNamespaceName = "someNamespaceName";
String someIp = "someIp";
long someInstanceId = 1;
long anotherInstanceId = 2;
Instance someInstance = assembleInstance(someInstanceId, someAppId, someClusterName,
someNamespaceName, someIp);
Instance anotherInstance = assembleInstance(anotherInstanceId, someAppId, someClusterName,
someNamespaceName, someIp);
Page<Instance> instances = new PageImpl<>(Lists.newArrayList(someInstance, anotherInstance),
pageable, 2);
when(instanceService.findInstancesByNamespaceAndInstanceAppId(someInstanceAppId, someAppId,
someClusterName, someNamespaceName, pageable)).thenReturn(instances);
PageDTO<InstanceDTO> result = instanceConfigController.getInstancesByNamespace(someAppId,
someClusterName, someNamespaceName, someInstanceAppId, pageable);
assertEquals(2, result.getContent().size());
InstanceDTO someInstanceDto = null;
InstanceDTO anotherInstanceDto = null;
for (InstanceDTO instanceDTO : result.getContent()) {
if (instanceDTO.getId() == someInstanceId) {
someInstanceDto = instanceDTO;
} else if (instanceDTO.getId() == anotherInstanceId) {
anotherInstanceDto = instanceDTO;
}
}
verifyInstance(someInstance, someInstanceDto);
verifyInstance(anotherInstance, anotherInstanceDto);
} |
@Override
public <R extends MessageResponse<?>> void chatStream(Prompt<R> prompt, StreamResponseListener<R> listener, ChatOptions options) {
LlmClient llmClient = new SseClient();
Map<String, String> headers = new HashMap<>();
headers.put("Content-Type", "application/json");
headers.put("Authorization", "Bearer " + config.getApiKey());
String payload = OllamaLlmUtil.promptToPayload(prompt, config, true);
String endpoint = config.getEndpoint();
LlmClientListener clientListener = new BaseLlmClientListener(this, llmClient, listener, prompt, aiMessageParser, null);
dnjsonClient.start(endpoint + "/api/chat", headers, payload, clientListener, config);
} | @Test
public void testChatStream() throws InterruptedException {
OllamaLlmConfig config = new OllamaLlmConfig();
config.setEndpoint("http://localhost:11434");
config.setModel("llama3");
config.setDebug(true);
Llm llm = new OllamaLlm(config);
        llm.chatStream("who are you", (context, response) -> System.out.println(response.getMessage().getContent()));
Thread.sleep(20000);
} |
@DELETE
@Path("{networkId}/devices/{deviceId}")
public Response removeVirtualDevice(@PathParam("networkId") long networkId,
@PathParam("deviceId") String deviceId) {
NetworkId nid = NetworkId.networkId(networkId);
DeviceId did = DeviceId.deviceId(deviceId);
vnetAdminService.removeVirtualDevice(nid, did);
return Response.noContent().build();
} | @Test
public void testDeleteVirtualDevice() {
NetworkId networkId = networkId3;
DeviceId deviceId = devId2;
mockVnetAdminService.removeVirtualDevice(networkId, deviceId);
expectLastCall();
replay(mockVnetAdminService);
WebTarget wt = target()
.property(ClientProperties.SUPPRESS_HTTP_COMPLIANCE_VALIDATION, true);
String reqLocation = "vnets/" + networkId.toString() + "/devices/" + deviceId.toString();
Response response = wt.path(reqLocation)
.request(MediaType.APPLICATION_JSON_TYPE)
.delete();
assertThat(response.getStatus(), is(HttpURLConnection.HTTP_NO_CONTENT));
verify(mockVnetAdminService);
} |
public static <K, V> Read<K, V> read() {
return new AutoValue_CdapIO_Read.Builder<K, V>().build();
} | @Test
public void testReadExpandingFailsMissingCdapPluginClass() {
PBegin testPBegin = PBegin.in(TestPipeline.create());
CdapIO.Read<String, String> read = CdapIO.read();
assertThrows(IllegalStateException.class, () -> read.expand(testPBegin));
} |
public static boolean checkUserInfo( IUser user ) {
return !StringUtils.isBlank( user.getLogin() ) && !StringUtils.isBlank( user.getName() );
} | @Test
public void checkUserInfo_LoginIsNull() {
assertFalse( RepositoryCommonValidations.checkUserInfo( user( null, "name" ) ) );
} |
@Nullable static String method(Invocation invocation) {
String methodName = invocation.getMethodName();
if ("$invoke".equals(methodName) || "$invokeAsync".equals(methodName)) {
Object[] arguments = invocation.getArguments();
if (arguments != null && arguments.length > 0 && arguments[0] instanceof String) {
methodName = (String) arguments[0];
} else {
methodName = null;
}
}
return methodName != null && !methodName.isEmpty() ? methodName : null;
} | @Test void method_invoke() {
when(invocation.getMethodName()).thenReturn("$invoke");
when(invocation.getArguments()).thenReturn(new Object[] {"sayHello"});
assertThat(DubboParser.method(invocation))
.isEqualTo("sayHello");
} |
public static String getGroupFromServiceName(String group) {
return group;
} | @Test
public void testGetGroupFromServiceName() {
String tempGroup = ConsulUtils.getGroupFromServiceName(testServiceName);
assertEquals(testGroup, tempGroup);
} |
public synchronized @Nullable WorkItemServiceState reportUpdate(
@Nullable DynamicSplitResult dynamicSplitResult, Duration requestedLeaseDuration)
throws Exception {
checkState(worker != null, "setWorker should be called before reportUpdate");
checkState(!finalStateSent, "cannot reportUpdates after sending a final state");
checkArgument(requestedLeaseDuration != null, "requestLeaseDuration must be non-null");
if (wasAskedToAbort) {
LOG.info("Service already asked to abort work item, not reporting ignored progress.");
return null;
}
WorkItemStatus status = createStatusUpdate(false);
status.setRequestedLeaseDuration(TimeUtil.toCloudDuration(requestedLeaseDuration));
populateProgress(status);
populateSplitResult(status, dynamicSplitResult);
return execute(status);
} | @Test
public void reportUpdateBeforeSetWorker() throws Exception {
thrown.expect(IllegalStateException.class);
thrown.expectMessage("setWorker");
thrown.expectMessage("reportUpdate");
statusClient.reportUpdate(null, null);
} |
public static KernelPlugin createFromObject(Object target, String pluginName) {
Class<?> clazz = target.getClass();
return KernelPluginFactory.createFromObject(clazz, target, pluginName);
} | @Test
public void createFromObjectTest() {
KernelPlugin plugin = KernelPluginFactory.createFromObject(
new TestPlugin(),
"test");
Assertions.assertNotNull(plugin);
Assertions.assertEquals(plugin.getName(), "test");
Assertions.assertEquals(plugin.getFunctions().size(), 2);
KernelFunction<?> testFunction = plugin.getFunctions()
.get("testFunction");
Assertions.assertNotNull(testFunction);
Assertions.assertNotNull(testFunction.getMetadata().getOutputVariableType());
Assertions.assertEquals(testFunction.getMetadata().getOutputVariableType().getType(),
String.class);
Assertions.assertEquals(testFunction.getMetadata().getParameters().size(), 1);
Assertions.assertEquals(testFunction.getMetadata().getParameters().get(0).getType(),
String.class.getName());
} |
@Override
public Publisher<Exchange> from(String uri) {
final String name = publishedUriToStream.computeIfAbsent(uri, camelUri -> {
try {
String uuid = context.getUuidGenerator().generateUuid();
RouteBuilder.addRoutes(context, rb -> rb.from(camelUri).to("reactive-streams:" + uuid));
return uuid;
} catch (Exception e) {
throw new IllegalStateException("Unable to create source reactive stream from direct URI: " + uri, e);
}
});
return fromStream(name);
} | @Test
public void testFrom() throws Exception {
context.start();
Publisher<Exchange> timer = crs.from("timer:reactive?period=250&repeatCount=3&includeMetadata=true");
AtomicInteger value = new AtomicInteger();
CountDownLatch latch = new CountDownLatch(3);
Flowable.fromPublisher(timer)
.map(exchange -> ExchangeHelper.getHeaderOrProperty(exchange, Exchange.TIMER_COUNTER, Integer.class))
.doOnNext(res -> assertEquals(value.incrementAndGet(), res.intValue()))
.doOnNext(res -> latch.countDown())
.subscribe();
assertTrue(latch.await(2, TimeUnit.SECONDS));
} |
@SuppressWarnings("unchecked")
public static <T> AgentServiceLoader<T> getServiceLoader(final Class<T> service) {
return (AgentServiceLoader<T>) LOADERS.computeIfAbsent(service, AgentServiceLoader::new);
} | @Test
void assertGetServiceLoaderWithNoInterface() {
assertThrows(IllegalArgumentException.class, () -> AgentServiceLoader.getServiceLoader(Object.class));
} |
@Override
public Set<CeWorker> getWorkers() {
return ceWorkers;
} | @Test
public void getWorkers_returns_empty_if_create_has_not_been_called_before() {
assertThat(underTest.getWorkers()).isEmpty();
} |
@Override
public <T> Mono<T> run(Mono<T> toRun, Function<Throwable, Mono<T>> fallback) {
Mono<T> toReturn = toRun.transform(new SentinelReactorTransformer<>(
new EntryConfig(resourceName, entryType)));
if (fallback != null) {
toReturn = toReturn.onErrorResume(fallback);
}
return toReturn;
} | @Test
public void runMonoWithFallback() {
ReactiveCircuitBreaker cb = new ReactiveSentinelCircuitBreakerFactory()
.create("foo");
assertThat(Mono.error(new RuntimeException("boom"))
.transform(it -> cb.run(it, t -> Mono.just("fallback"))).block())
.isEqualTo("fallback");
} |
public void validatePositionsIfNeeded() {
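        // Descriptive comment (added): collect the partitions whose fetch positions still need leader-epoch
        // validation and trigger the asynchronous OffsetsForLeaderEpoch requests for them.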
Map<TopicPartition, SubscriptionState.FetchPosition> partitionsToValidate =
offsetFetcherUtils.getPartitionsToValidate();
validatePositionsAsync(partitionsToValidate);
} | @Test
public void testOffsetValidationFencing() {
buildFetcher();
assignFromUser(singleton(tp0));
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put(tp0.topic(), 4);
final int epochOne = 1;
final int epochTwo = 2;
final int epochThree = 3;
// Start with metadata, epoch=1
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWithIds("dummy", 1,
Collections.emptyMap(), partitionCounts, tp -> epochOne, topicIds), false, 0L);
// Offset validation requires OffsetForLeaderEpoch request v3 or higher
Node node = metadata.fetch().nodes().get(0);
apiVersions.update(node.idString(), NodeApiVersions.create());
// Seek with a position and leader+epoch
Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(metadata.currentLeader(tp0).leader, Optional.of(epochOne));
subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.of(epochOne), leaderAndEpoch));
// Update metadata to epoch=2, enter validation
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWithIds("dummy", 1,
Collections.emptyMap(), partitionCounts, tp -> epochTwo, topicIds), false, 0L);
offsetFetcher.validatePositionsIfNeeded();
assertTrue(subscriptions.awaitingValidation(tp0));
// Update the position to epoch=3, as we would from a fetch
subscriptions.completeValidation(tp0);
SubscriptionState.FetchPosition nextPosition = new SubscriptionState.FetchPosition(
10,
Optional.of(epochTwo),
new Metadata.LeaderAndEpoch(leaderAndEpoch.leader, Optional.of(epochTwo)));
subscriptions.position(tp0, nextPosition);
subscriptions.maybeValidatePositionForCurrentLeader(apiVersions, tp0, new Metadata.LeaderAndEpoch(leaderAndEpoch.leader, Optional.of(epochThree)));
// Prepare offset list response from async validation with epoch=2
client.prepareResponse(prepareOffsetsForLeaderEpochResponse(tp0, epochTwo, 10L));
consumerClient.pollNoWakeup();
assertTrue(subscriptions.awaitingValidation(tp0), "Expected validation to fail since leader epoch changed");
// Next round of validation, should succeed in validating the position
offsetFetcher.validatePositionsIfNeeded();
client.prepareResponse(prepareOffsetsForLeaderEpochResponse(tp0, epochThree, 10L));
consumerClient.pollNoWakeup();
assertFalse(subscriptions.awaitingValidation(tp0), "Expected validation to succeed with latest epoch");
} |
public static ClusterOperatorConfig buildFromMap(Map<String, String> map) {
warningsForRemovedEndVars(map);
KafkaVersion.Lookup lookup = parseKafkaVersions(map.get(STRIMZI_KAFKA_IMAGES), map.get(STRIMZI_KAFKA_CONNECT_IMAGES), map.get(STRIMZI_KAFKA_MIRROR_MAKER_IMAGES), map.get(STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES));
return buildFromMap(map, lookup);
} | @Test
public void testImagePullSecretsThrowsWithUpperCaseCharacter() {
Map<String, String> envVars = new HashMap<>(ClusterOperatorConfigTest.ENV_VARS);
envVars.put(ClusterOperatorConfig.IMAGE_PULL_SECRETS.key(), "Secret");
assertThrows(InvalidConfigurationException.class, () ->
ClusterOperatorConfig.buildFromMap(envVars, KafkaVersionTestUtils.getKafkaVersionLookup())
);
} |
@VisibleForTesting
static <T> List<T> topologicalSort(Graph<T> graph)
{
MutableGraph<T> graphCopy = Graphs.copyOf(graph);
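        // Descriptive comment (added): Kahn's algorithm - repeatedly emit nodes with in-degree 0 and remove
        // their outgoing edges; any edges left at the end indicate a cycle.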
List<T> l = new ArrayList<>();
Set<T> s = graphCopy.nodes().stream()
.filter(node -> graphCopy.inDegree(node) == 0)
.collect(Collectors.toSet());
while (!s.isEmpty())
{
Iterator<T> it = s.iterator();
T n = it.next();
it.remove();
l.add(n);
for (T m : new HashSet<>(graphCopy.successors(n)))
{
graphCopy.removeEdge(n, m);
if (graphCopy.inDegree(m) == 0)
{
s.add(m);
}
}
}
if (!graphCopy.edges().isEmpty())
{
throw new RuntimeException("Graph has at least one cycle");
}
return l;
} | @Test
public void testTopologicalSort()
{
MutableGraph<Integer> graph = GraphBuilder
.directed()
.build();
graph.addNode(1);
graph.addNode(2);
graph.addNode(3);
graph.putEdge(1, 2);
graph.putEdge(1, 3);
List<Integer> sorted = PluginManager.topologicalSort(graph);
assertTrue(sorted.indexOf(1) < sorted.indexOf(2));
assertTrue(sorted.indexOf(1) < sorted.indexOf(3));
} |
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponentsRecursive();
} | @Test
public void tree() {
// 0 - 1 - 2 - 4 - 5
//     |    \- 6 - 7
//     3        \- 8
g.edge(0, 1).setDistance(1).set(speedEnc, 10, 10);
g.edge(1, 2).setDistance(1).set(speedEnc, 10, 10);
g.edge(1, 3).setDistance(1).set(speedEnc, 10, 10);
g.edge(2, 4).setDistance(1).set(speedEnc, 10, 10);
g.edge(2, 6).setDistance(1).set(speedEnc, 10, 10);
g.edge(4, 5).setDistance(1).set(speedEnc, 10, 10);
g.edge(6, 7).setDistance(1).set(speedEnc, 10, 10);
g.edge(6, 8).setDistance(1).set(speedEnc, 10, 10);
ConnectedComponents result = EdgeBasedTarjanSCC.findComponentsRecursive(g, fwdAccessFilter, false);
assertEquals(16, result.getEdgeKeys());
assertEquals(1, result.getTotalComponents());
assertEquals(1, result.getComponents().size());
assertTrue(result.getSingleEdgeComponents().isEmpty());
assertEquals(result.getComponents().get(0), result.getBiggestComponent());
assertEquals(IntArrayList.from(1, 3, 7, 11, 10, 6, 9, 13, 12, 15, 14, 8, 2, 5, 4, 0), result.getComponents().get(0));
} |
public static String stripTillLastOccurrenceOf(String input, String pattern) {
if (!StringUtils.isBlank(input) && !StringUtils.isBlank(pattern)) {
int index = input.lastIndexOf(pattern);
if (index > 0) {
input = input.substring(index + pattern.length());
}
}
return input;
} | @Test
public void shouldStripTillLastOccurrenceOfGivenString() {
assertThat(stripTillLastOccurrenceOf("HelloWorld@@\\nfoobar\\nquux@@keep_this", "@@"), is("keep_this"));
assertThat(stripTillLastOccurrenceOf("HelloWorld", "@@"), is("HelloWorld"));
assertThat(stripTillLastOccurrenceOf(null, "@@"), is(nullValue()));
assertThat(stripTillLastOccurrenceOf("HelloWorld", null), is("HelloWorld"));
assertThat(stripTillLastOccurrenceOf(null, null), is(nullValue()));
assertThat(stripTillLastOccurrenceOf("", "@@"), is(""));
} |
@Override public void commandSucceeded(CommandSucceededEvent event) {
Span span = threadLocalSpan.remove();
if (span == null) return;
span.finish();
} | @Test void commandSucceeded_withoutCommandStarted() {
listener.commandSucceeded(createCommandSucceededEvent());
verify(threadLocalSpan).remove();
verifyNoMoreInteractions(threadLocalSpan);
} |
public static TableRecords empty(TableMeta tableMeta) {
return new EmptyTableRecords(tableMeta);
} | @Test
public void testEmpty() {
TableRecords.EmptyTableRecords emptyTableRecords = new TableRecords.EmptyTableRecords();
Assertions.assertEquals(0, emptyTableRecords.size());
TableRecords empty = TableRecords.empty(new TableMeta());
Assertions.assertEquals(0, empty.size());
Assertions.assertEquals(0, empty.getRows().size());
Assertions.assertEquals(0, empty.pkRows().size());
Assertions.assertThrows(UnsupportedOperationException.class, () -> empty.add(new Row()));
Assertions.assertThrows(UnsupportedOperationException.class, empty::getTableMeta);
} |
void writeLogs(OutputStream out, Instant from, Instant to, long maxLines, Optional<String> hostname) {
double fromSeconds = from.getEpochSecond() + from.getNano() / 1e9;
double toSeconds = to.getEpochSecond() + to.getNano() / 1e9;
long linesWritten = 0;
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
for (List<Path> logs : getMatchingFiles(from, to)) {
List<LogLineIterator> logLineIterators = new ArrayList<>();
try {
// Logs in each sub-list contain entries covering the same time interval, so do a merge sort while reading
for (Path log : logs)
logLineIterators.add(new LogLineIterator(log, fromSeconds, toSeconds, hostname));
Iterator<LineWithTimestamp> lines = Iterators.mergeSorted(logLineIterators,
Comparator.comparingDouble(LineWithTimestamp::timestamp));
PriorityQueue<LineWithTimestamp> heap = new PriorityQueue<>(Comparator.comparingDouble(LineWithTimestamp::timestamp));
while (lines.hasNext()) {
heap.offer(lines.next());
if (heap.size() > 1000) {
if (linesWritten++ >= maxLines) return;
writer.write(heap.poll().line);
writer.newLine();
}
}
while ( ! heap.isEmpty()) {
if (linesWritten++ >= maxLines) return;
writer.write(heap.poll().line);
writer.newLine();
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
for (LogLineIterator ll : logLineIterators) {
try { ll.close(); } catch (IOException ignored) { }
}
Exceptions.uncheck(writer::flush);
}
}
} | @Test
void testThatLogsNotMatchingRegexAreExcluded() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
LogReader logReader = new LogReader(logDirectory, Pattern.compile(".*-1.*"));
logReader.writeLogs(baos, Instant.EPOCH, Instant.EPOCH.plus(Duration.ofDays(2)), 100, Optional.empty());
assertEquals(log101 + logv11, baos.toString(UTF_8));
} |
@WithSpan
@Override
public SearchType.Result doExtractResult(SearchJob job, Query query, MessageList searchType, org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse result, Aggregations aggregations, ESGeneratedQueryContext queryContext) {
final List<ResultMessageSummary> messages = StreamSupport.stream(result.getHits().spliterator(), false)
.map(this::resultMessageFromSearchHit)
.map((resultMessage) -> ResultMessageSummary.create(resultMessage.highlightRanges, resultMessage.getMessage().getFields(), resultMessage.getIndex()))
.collect(Collectors.toList());
final String queryString = query.query().queryString();
final DateTime from = query.effectiveTimeRange(searchType).getFrom();
final DateTime to = query.effectiveTimeRange(searchType).getTo();
final SearchResponse searchResponse = SearchResponse.create(
queryString,
queryString,
Collections.emptySet(),
messages,
Collections.emptySet(),
0,
result.getHits().getTotalHits().value,
from,
to
);
final SearchResponse decoratedSearchResponse = decoratorProcessor.decorateSearchResponse(searchResponse, searchType.decorators());
final MessageList.Result.Builder resultBuilder = MessageList.Result.result(searchType.id())
.messages(decoratedSearchResponse.messages())
.effectiveTimerange(AbsoluteRange.create(from, to))
.totalResults(decoratedSearchResponse.totalResults());
return searchType.name().map(resultBuilder::name).orElse(resultBuilder).build();
} | @Test
public void includesCustomNameInResultIfPresent() {
final ESMessageList esMessageList = new ESMessageList(new LegacyDecoratorProcessor.Fake(),
new TestResultMessageFactory(), false);
final MessageList messageList = someMessageList().toBuilder().name("customResult").build();
final org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse result =
mock(org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse.class);
when(result.getHits()).thenReturn(SearchHits.empty());
final SearchType.Result searchTypeResult = esMessageList.doExtractResult(null, someQuery(), messageList, result, null, null);
assertThat(searchTypeResult.name()).contains("customResult");
} |
@Override
public YamlShardingCacheConfiguration swapToYamlConfiguration(final ShardingCacheConfiguration data) {
YamlShardingCacheConfiguration result = new YamlShardingCacheConfiguration();
result.setAllowedMaxSqlLength(data.getAllowedMaxSqlLength());
result.setRouteCache(cacheOptionsConfigurationSwapper.swapToYamlConfiguration(data.getRouteCache()));
return result;
} | @Test
void assertSwapToYamlConfiguration() {
YamlShardingCacheConfiguration actual = new YamlShardingCacheConfigurationSwapper()
.swapToYamlConfiguration(new ShardingCacheConfiguration(100, new ShardingCacheOptionsConfiguration(true, 128, 1024)));
assertThat(actual.getAllowedMaxSqlLength(), is(100));
YamlShardingCacheOptionsConfiguration actualRouteCache = actual.getRouteCache();
assertTrue(actualRouteCache.isSoftValues());
assertThat(actualRouteCache.getInitialCapacity(), is(128));
assertThat(actualRouteCache.getMaximumSize(), is(1024));
} |
public RequestSpecification build() {
return spec;
} | @Test public void
request_spec_picks_up_filters_from_static_config() {
RestAssured.filters(new RequestLoggingFilter());
try {
RequestSpecBuilder builder = new RequestSpecBuilder();
RequestSpecificationImpl spec = (RequestSpecificationImpl) builder.build();
Assert.assertThat(spec.getDefinedFilters(), hasSize(1));
} finally {
RestAssured.reset();
}
} |
@Override
public KCell[] getRow( int rownr ) {
// xlsx raw row numbers are 1-based index, KSheet is 0-based
// Don't check the upper limit as not all rows may have been read!
// If it's found that the row does not exist, the exception will be thrown at the end of this method.
if ( rownr < 0 ) {
// KSheet requires out of bounds here
throw new ArrayIndexOutOfBoundsException( rownr );
}
if ( rownr + 1 < firstRow ) {
// before first non-empty row
return new KCell[0];
}
if ( rownr > 0 && currentRow == rownr + 1 ) {
if ( currentRowCells != null ) {
return currentRowCells;
}
// Handles the case where the sheet contains empty row(s) before the header,
// but the caller still wants to read starting from row 0
return new KCell[0];
}
try {
if ( currentRow >= rownr + 1 ) {
// allow random access per api despite performance hit
resetSheetReader();
}
while ( sheetReader.hasNext() ) {
int event = sheetReader.next();
if ( event == XMLStreamConstants.START_ELEMENT && sheetReader.getLocalName().equals( TAG_ROW ) ) {
String rowIndicator = sheetReader.getAttributeValue( null, "r" );
currentRow = Integer.parseInt( rowIndicator );
if ( currentRow < rownr + 1 ) {
continue;
}
currentRowCells = parseRow();
return currentRowCells;
}
if ( event == XMLStreamConstants.END_ELEMENT && sheetReader.getLocalName().equals( TAG_SHEET_DATA ) ) {
// End of sheetData reached: no more rows, no need to continue reading
break;
}
}
} catch ( Exception e ) {
throw new RuntimeException( e );
}
// We've read all document rows, let's update the final count.
numRows = currentRow;
// And, as this was an invalid row to ask for, throw the proper exception!
throw new ArrayIndexOutOfBoundsException( rownr );
} | @Test
public void testInlineString() throws Exception {
final String sheetId = "1";
final String sheetName = "Sheet 1";
XSSFReader reader = mockXSSFReader( sheetId, SHEET_INLINE_STRINGS,
mock( SharedStringsTable.class ),
mock( StylesTable.class ) );
StaxPoiSheet spSheet = new StaxPoiSheet( reader, sheetName, sheetId );
KCell[] rowCells = spSheet.getRow( 0 );
assertEquals( "Test1", rowCells[ 0 ].getValue() );
assertEquals( KCellType.STRING_FORMULA, rowCells[ 0 ].getType() );
assertEquals( "Test2", rowCells[ 1 ].getValue() );
assertEquals( KCellType.STRING_FORMULA, rowCells[ 1 ].getType() );
rowCells = spSheet.getRow( 1 );
assertEquals( "value 1 1", rowCells[ 0 ].getValue() );
assertEquals( KCellType.STRING_FORMULA, rowCells[ 0 ].getType() );
assertEquals( "value 2 1", rowCells[ 1 ].getValue() );
assertEquals( KCellType.STRING_FORMULA, rowCells[ 1 ].getType() );
rowCells = spSheet.getRow( 2 );
assertEquals( "value 1 2", rowCells[ 0 ].getValue() );
assertEquals( KCellType.STRING_FORMULA, rowCells[ 0 ].getType() );
assertEquals( "value 2 2", rowCells[ 1 ].getValue() );
assertEquals( KCellType.STRING_FORMULA, rowCells[ 1 ].getType() );
} |
protected boolean shouldAnalyze() {
if (analyzer instanceof FileTypeAnalyzer) {
final FileTypeAnalyzer fileTypeAnalyzer = (FileTypeAnalyzer) analyzer;
return fileTypeAnalyzer.accept(dependency.getActualFile());
}
return true;
} | @Test
public void shouldAnalyzeReturnsTrueIfTheFileTypeAnalyzersAcceptsTheDependency() {
final File dependencyFile = new File("");
new Expectations() {{
dependency.getActualFile();
result = dependencyFile;
fileTypeAnalyzer.accept(dependencyFile);
result = true;
}};
AnalysisTask analysisTask = new AnalysisTask(fileTypeAnalyzer, dependency, null, null);
boolean shouldAnalyze = analysisTask.shouldAnalyze();
assertTrue(shouldAnalyze);
} |
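// --- Added illustration (not from the source dataset) ---
// Hedged sketch: shouldAnalyze() returns true unconditionally when the analyzer is not a
// FileTypeAnalyzer, a branch the test above does not exercise. This companion test is
// hypothetical and assumes the surrounding test class also has a plain Analyzer mock
// (called "analyzer" here) injected the same way as fileTypeAnalyzer and dependency.
@Test
public void shouldAnalyzeReturnsTrueWhenAnalyzerIsNotAFileTypeAnalyzer() {
    AnalysisTask analysisTask = new AnalysisTask(analyzer, dependency, null, null);
    assertTrue(analysisTask.shouldAnalyze());
}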
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testStaleOutOfRangeError() {
// verify that an out of range error which arrives after a seek
// does not cause us to reset our position or throw an exception
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.OFFSET_OUT_OF_RANGE, 100L, 0));
subscriptions.seek(tp0, 1);
networkClientDelegate.poll(time.timer(0));
assertEmptyFetch("Should not return records or advance position on fetch error");
assertFalse(subscriptions.isOffsetResetNeeded(tp0));
assertEquals(1, subscriptions.position(tp0).offset);
} |