focal_method | test_case |
---|---|
public PipelineOptions get() {
return options;
} | @Test
public void testIndependence() throws Exception {
SerializablePipelineOptions first =
new SerializablePipelineOptions(
PipelineOptionsFactory.fromArgs("--foo=first").as(MyOptions.class));
SerializablePipelineOptions firstCopy = SerializableUtils.clone(first);
SerializablePipelineOptions second =
new SerializablePipelineOptions(
PipelineOptionsFactory.fromArgs("--foo=second").as(MyOptions.class));
SerializablePipelineOptions secondCopy = SerializableUtils.clone(second);
assertEquals("first", first.get().as(MyOptions.class).getFoo());
assertEquals("first", firstCopy.get().as(MyOptions.class).getFoo());
assertEquals("second", second.get().as(MyOptions.class).getFoo());
assertEquals("second", secondCopy.get().as(MyOptions.class).getFoo());
first.get().as(MyOptions.class).setFoo("new first");
firstCopy.get().as(MyOptions.class).setFoo("new firstCopy");
second.get().as(MyOptions.class).setFoo("new second");
secondCopy.get().as(MyOptions.class).setFoo("new secondCopy");
assertEquals("new first", first.get().as(MyOptions.class).getFoo());
assertEquals("new firstCopy", firstCopy.get().as(MyOptions.class).getFoo());
assertEquals("new second", second.get().as(MyOptions.class).getFoo());
assertEquals("new secondCopy", secondCopy.get().as(MyOptions.class).getFoo());
} |
@Override
public Num calculate(BarSeries series, Position position) {
Num averageProfit = averageProfitCriterion.calculate(series, position);
if (averageProfit.isZero()) {
// only losing positions means a ratio of 0
return series.zero();
}
Num averageLoss = averageLossCriterion.calculate(series, position);
if (averageLoss.isZero()) {
// only winning positions means a ratio of 1
return series.one();
}
return averageProfit.dividedBy(averageLoss).abs();
} | @Test
public void calculateProfitWithShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 85, 80, 70, 100, 95);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
Trade.sellAt(2, series), Trade.buyAt(5, series));
AnalysisCriterion avgProfit = getCriterion();
assertNumEquals(1, avgProfit.calculate(series, tradingRecord));
} |
public static boolean isNative(RunnerApi.PTransform pTransform) {
// TODO(https://github.com/apache/beam/issues/20192) Use default (context) classloader.
Iterator<IsNativeTransform> matchers =
ServiceLoader.load(IsNativeTransform.class, NativeTransforms.class.getClassLoader())
.iterator();
while (matchers.hasNext()) {
if (matchers.next().test(pTransform)) {
return true;
}
}
return false;
} | @Test
public void testNoMatch() {
Assert.assertFalse(NativeTransforms.isNative(RunnerApi.PTransform.getDefaultInstance()));
} |
@SuppressWarnings( "LockAcquiredButNotSafelyReleased" )
public AutoCloseableLock lock() throws IllegalStateException {
checkNotReleased();
lock.lock();
return autoCloseable;
} | @Test(timeout = 1000)
public void willUseDifferentLocksForDifferentClassesWithTheSamePart() {
final AtomicBoolean lockAcquired = new AtomicBoolean(false);
final AtomicInteger callCount = new AtomicInteger(0);
thread1 = new Thread(() -> {
try (final AutoCloseableReentrantLock.AutoCloseableLock ignored = new AutoCloseableReentrantLock(AutoCloseableReentrantLockTest.class, "user1").lock()) {
lockAcquired.set(true);
callCount.incrementAndGet();
Thread.sleep(1000);
} catch (final InterruptedException e) {
LOGGER.info("Interrupted whilst sleeping", e);
}
});
thread1.start();
// Create a lock for the same user but for a different class - should be acquired
thread2 = new Thread(() -> {
try (final AutoCloseableReentrantLock.AutoCloseableLock ignored = new AutoCloseableReentrantLock(XMPPServer.class, "user1").lock()) {
lockAcquired.set(true);
callCount.incrementAndGet();
Thread.sleep(1000);
} catch (final InterruptedException e) {
LOGGER.info("Interrupted whilst sleeping", e);
}
});
thread2.start();
// Wait until both threads have acquired their (distinct) locks
await().until(() -> callCount.get() == 2);
assertThat(callCount.get(), is(2));
} |
@Override
public int size() {
return values.length;
} | @Test
public void hasASize() throws Exception {
assertThat(snapshot.size())
.isEqualTo(5);
} |
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
if (stateManager.taskType() != TaskType.ACTIVE) {
throw new IllegalStateException("Tried to transition processor context to active but the state manager's " +
"type was " + stateManager.taskType());
}
this.streamTask = streamTask;
this.collector = recordCollector;
this.cache = newCache;
addAllFlushListenersToNewCache();
} | @Test
public void localSessionStoreShouldNotAllowInitOrClose() {
foreachSetUp();
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
when(stateManager.getGlobalStore(anyString())).thenReturn(null);
final SessionStore<String, Long> sessionStore = mock(SessionStore.class);
when(stateManager.getStore("LocalSessionStore")).thenAnswer(answer -> sessionStoreMock(sessionStore));
mockStateStoreFlush(sessionStore);
doAnswer(answer -> {
putExecuted = true;
return null;
}).when(sessionStore).put(any(), any());
doAnswer(answer -> {
removeExecuted = true;
return null;
}).when(sessionStore).remove(any());
context = buildProcessorContextImpl(streamsConfig, stateManager);
final StreamTask task = mock(StreamTask.class);
context.transitionToActive(task, null, null);
mockProcessorNodeWithLocalKeyValueStore();
doTest("LocalSessionStore", (Consumer<SessionStore<String, Long>>) store -> {
verifyStoreCannotBeInitializedOrClosed(store);
store.flush();
assertTrue(flushExecuted);
store.remove(null);
assertTrue(removeExecuted);
store.put(null, null);
assertTrue(putExecuted);
assertEquals(iters.get(3), store.findSessions(KEY, 1L, 2L));
assertEquals(iters.get(4), store.findSessions(KEY, KEY, 1L, 2L));
assertEquals(iters.get(5), store.fetch(KEY));
assertEquals(iters.get(6), store.fetch(KEY, KEY));
});
} |
@Override
public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
CheckForDecommissioningNodesRequest request) throws YarnException, IOException {
// Parameter check
if (request == null) {
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing checkForDecommissioningNodes request.", null);
}
String subClusterId = request.getSubClusterId();
if (StringUtils.isBlank(subClusterId)) {
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing checkForDecommissioningNodes SubClusterId.",
null);
}
try {
long startTime = clock.getTime();
RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
new Class[]{CheckForDecommissioningNodesRequest.class}, new Object[]{request});
Collection<CheckForDecommissioningNodesResponse> responses =
remoteMethod.invokeConcurrent(this, CheckForDecommissioningNodesResponse.class,
subClusterId);
if (CollectionUtils.isNotEmpty(responses)) {
// A single subCluster was selected, so exactly one response is expected.
List<CheckForDecommissioningNodesResponse> collects =
responses.stream().collect(Collectors.toList());
if (collects.size() == 1) {
CheckForDecommissioningNodesResponse response = collects.get(0);
long stopTime = clock.getTime();
routerMetrics.succeededCheckForDecommissioningNodesRetrieved((stopTime - startTime));
Set<NodeId> nodes = response.getDecommissioningNodes();
return CheckForDecommissioningNodesResponse.newInstance(nodes);
}
}
} catch (YarnException e) {
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to checkForDecommissioningNodes due to exception " + e.getMessage());
}
routerMetrics.incrCheckForDecommissioningNodesFailedRetrieved();
throw new YarnException("Unable to checkForDecommissioningNodes.");
} | @Test
public void testCheckForDecommissioningNodesRequest() throws Exception {
// null request1.
LambdaTestUtils.intercept(YarnException.class, "Missing checkForDecommissioningNodes request.",
() -> interceptor.checkForDecommissioningNodes(null));
// null request2.
CheckForDecommissioningNodesRequest request =
CheckForDecommissioningNodesRequest.newInstance(null);
LambdaTestUtils.intercept(YarnException.class,
"Missing checkForDecommissioningNodes SubClusterId.",
() -> interceptor.checkForDecommissioningNodes(request));
} |
public static DeleteAclsRequest parse(ByteBuffer buffer, short version) {
return new DeleteAclsRequest(new DeleteAclsRequestData(new ByteBufferAccessor(buffer), version), version);
} | @Test
public void shouldRoundTripLiteralV0() {
final DeleteAclsRequest original = new DeleteAclsRequest.Builder(requestData(LITERAL_FILTER)).build(V0);
final ByteBuffer buffer = original.serialize();
final DeleteAclsRequest result = DeleteAclsRequest.parse(buffer, V0);
assertRequestEquals(original, result);
} |
static String getConfigValueAsString(ServiceConfiguration conf,
String configProp) throws IllegalArgumentException {
String value = getConfigValueAsStringImpl(conf, configProp);
log.info("Configuration for [{}] is [{}]", configProp, value);
return value;
} | @Test
public void testGetConfigValueAsStringReturnsDefaultIfMissing() {
Properties props = new Properties();
ServiceConfiguration config = new ServiceConfiguration();
config.setProperties(props);
String actual = ConfigUtils.getConfigValueAsString(config, "prop1", "default");
assertEquals("default", actual);
} |
public int get(final int key)
{
final int initialValue = this.initialValue;
final int[] entries = this.entries;
@DoNotSub final int mask = entries.length - 1;
@DoNotSub int index = Hashing.evenHash(key, mask);
int value;
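// Entries are stored as interleaved key/value pairs (keys at even indices, values at
// odd, hence entries[index + 1]); probing continues until an empty value slot (the
// sentinel initialValue) or the matching key is found.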
while (initialValue != (value = entries[index + 1]))
{
if (key == entries[index])
{
break;
}
index = next(index, mask);
}
return value;
} | @Test
void getShouldReturnInitialValueWhenEmpty()
{
assertEquals(INITIAL_VALUE, map.get(1));
} |
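A minimal sketch of the probing helper the loop above assumes (hypothetical; the real implementation may differ): stepping by two keeps the index on key slots, and masking wraps around the power-of-two capacity table.
// Hypothetical probe step for the interleaved key/value layout:
private static int next(final int index, final int mask)
{
return (index + 2) & mask;
}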
public static <T extends Metric> T safelyRegister(MetricRegistry metricRegistry, String name, T metric) {
try {
return metricRegistry.register(name, metric);
} catch (IllegalArgumentException ignored) {
// safely ignore already existing metric, and simply return the one registered previously.
// note that we do not guard against differing metric types here; we consider that a programming error for now.
//noinspection unchecked
return (T) metricRegistry.getMetrics().get(name);
}
} | @Test
public void safelyRegister() {
final MetricRegistry metricRegistry = new MetricRegistry();
final Gauge<Long> longGauge = new Gauge<>() {
@Override
public Long getValue() {
return 0L;
}
};
final Gauge<Long> newGauge = MetricUtils.safelyRegister(metricRegistry, "somename", longGauge);
assertSame("metric objects are identical", longGauge, newGauge);
try {
MetricUtils.safelyRegister(metricRegistry, "somename", longGauge);
} catch (Exception e) {
fail("Should not have thrown: " + e.getMessage());
}
assertThatExceptionOfType(ClassCastException.class)
.describedAs("Registering a metric with a different metric type fails on using it")
.isThrownBy(() -> {
// assignment has to be done to raise the exception
Counter c = MetricUtils.safelyRegister(metricRegistry, "somename", new Counter());
});
} |
@Override
public void checkServerTrusted(final X509Certificate[] certs, final String cipher) throws CertificateException {
if(certs != null) {
if(log.isDebugEnabled()) {
log.debug("Server certificate chain:");
for(int i = 0; i < certs.length; i++) {
log.debug(String.format("X509Certificate[%d]=%s", i, certs[i]));
}
}
}
if((certs != null) && (certs.length == 1)) {
this.verify(null, certs, cipher);
}
else {
system.checkServerTrusted(certs, cipher);
}
} | @Test(expected = CertificateExpiredException.class)
public void testCheckServerTrusted() throws Exception {
final DefaultX509TrustManager m = new DefaultX509TrustManager();
final X509Certificate cert;
try (InputStream inStream = new FileInputStream("src/test/resources/OXxlRDVcWqdPEvFm.cer")) {
CertificateFactory cf = CertificateFactory.getInstance("X.509");
cert = (X509Certificate) cf.generateCertificate(inStream);
}
m.checkServerTrusted(new X509Certificate[]{cert}, "RSA");
} |
@VisibleForTesting
public TaskAttemptEvent createContainerFinishedEvent(ContainerStatus cont,
TaskAttemptId attemptId) {
TaskAttemptEvent event;
switch (cont.getExitStatus()) {
case ContainerExitStatus.ABORTED:
case ContainerExitStatus.PREEMPTED:
case ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER:
// killed by YARN
event = new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL);
break;
default:
event = new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_COMPLETED);
}
return event;
} | @Test
public void testCompletedContainerEvent() {
RMContainerAllocator allocator = new RMContainerAllocator(
mock(ClientService.class), mock(AppContext.class),
new NoopAMPreemptionPolicy());
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(
MRBuilderUtils.newTaskId(
MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);
ApplicationId applicationId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
// ABORTED
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 1);
ContainerStatus status = ContainerStatus.newInstance(
containerId, ContainerState.RUNNING, "", 0);
ContainerStatus abortedStatus = ContainerStatus.newInstance(
containerId, ContainerState.RUNNING, "",
ContainerExitStatus.ABORTED);
TaskAttemptEvent event = allocator.createContainerFinishedEvent(status,
attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
event.getType());
TaskAttemptEvent abortedEvent = allocator.createContainerFinishedEvent(
abortedStatus, attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());
// PREEMPTED
ContainerId containerId2 =
ContainerId.newContainerId(applicationAttemptId, 2);
ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
ContainerState.RUNNING, "", 0);
ContainerStatus preemptedStatus = ContainerStatus.newInstance(containerId2,
ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);
TaskAttemptEvent event2 = allocator.createContainerFinishedEvent(status2,
attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
event2.getType());
TaskAttemptEvent abortedEvent2 = allocator.createContainerFinishedEvent(
preemptedStatus, attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType());
// KILLED_BY_CONTAINER_SCHEDULER
ContainerId containerId3 =
ContainerId.newContainerId(applicationAttemptId, 3);
ContainerStatus status3 = ContainerStatus.newInstance(containerId3,
ContainerState.RUNNING, "", 0);
ContainerStatus killedByContainerSchedulerStatus =
ContainerStatus.newInstance(containerId3, ContainerState.RUNNING, "",
ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER);
TaskAttemptEvent event3 = allocator.createContainerFinishedEvent(status3,
attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
event3.getType());
TaskAttemptEvent abortedEvent3 = allocator.createContainerFinishedEvent(
killedByContainerSchedulerStatus, attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent3.getType());
} |
@Override
public <K, T> UncommittedBundle<T> createKeyedBundle(
StructuralKey<K> key, PCollection<T> output) {
return new CloningBundle<>(underlying.createKeyedBundle(key, output));
} | @Test
public void keyedBundleEncodeFailsAddFails() {
PCollection<Record> pc = p.apply(Create.empty(new RecordNoEncodeCoder()));
UncommittedBundle<Record> bundle =
factory.createKeyedBundle(StructuralKey.of("foo", StringUtf8Coder.of()), pc);
thrown.expect(UserCodeException.class);
thrown.expectCause(isA(CoderException.class));
thrown.expectMessage("Encode not allowed");
bundle.add(WindowedValue.valueInGlobalWindow(new Record()));
} |
public int compareNodePositions() {
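// An empty path means that node is the common ancestor itself, which orders before its
// descendant; otherwise the order is decided by the first diverging child index.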
if(beginPath.length == 0 && endPath.length == 0)
return 0;
if(beginPath.length == 0)
return -1;
if(endPath.length == 0)
return 1;
return Integer.compare(beginPath[0], endPath[0]);
} | @Test
public void compareDescendantNodeToParent(){
final NodeModel parent = root();
final NodeModel node1 = new NodeModel("node1", map);
parent.insert(node1);
final int compared = new NodeRelativePath(parent, node1).compareNodePositions();
assertTrue(compared < 0);
} |
public static boolean isNormalizedPathOutsideWorkingDir(String path) {
final String normalized = FilenameUtils.normalize(path);
if (normalized == null) {
// Paths whose ".." segments escape the working directory cannot be normalized.
return false;
}
return StringUtils.isBlank(FilenameUtils.getPrefix(normalized));
} | @Test
public void shouldReturnFalseIfGivenFolderWithRelativeTakesYouOutOfSandbox() {
assertThat(FilenameUtil.isNormalizedPathOutsideWorkingDir("../tmp"), is(false));
assertThat(FilenameUtil.isNormalizedPathOutsideWorkingDir("tmp/../../../pavan"), is(false));
} |
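For context, a hedged illustration of the Commons IO behaviour the method above relies on (values per the FilenameUtils javadoc):
// FilenameUtils.normalize collapses "." and ".." segments; escaping the start yields null.
FilenameUtils.normalize("tmp/../../../pavan"); // -> null, so the method returns false
FilenameUtils.normalize("tmp/../pavan"); // -> "pavan": blank prefix, so the method returns true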
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
final ArchiveEventCode eventCode,
final MutableDirectBuffer buffer,
final int offset,
final StringBuilder builder)
{
int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
HEADER_DECODER.wrap(buffer, offset + encodedLength);
encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
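// Every command decoder below is wrapped immediately past the SBE message header;
// blockLength and version come from the decoded header so requests encoded with older
// or newer schema versions still dissect correctly.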
switch (eventCode)
{
case CMD_IN_CONNECT:
CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendConnect(builder);
break;
case CMD_IN_CLOSE_SESSION:
CLOSE_SESSION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendCloseSession(builder);
break;
case CMD_IN_START_RECORDING:
START_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording(builder);
break;
case CMD_IN_STOP_RECORDING:
STOP_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecording(builder);
break;
case CMD_IN_REPLAY:
REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplay(builder);
break;
case CMD_IN_STOP_REPLAY:
STOP_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplay(builder);
break;
case CMD_IN_LIST_RECORDINGS:
LIST_RECORDINGS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordings(builder);
break;
case CMD_IN_LIST_RECORDINGS_FOR_URI:
LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingsForUri(builder);
break;
case CMD_IN_LIST_RECORDING:
LIST_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecording(builder);
break;
case CMD_IN_EXTEND_RECORDING:
EXTEND_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording(builder);
break;
case CMD_IN_RECORDING_POSITION:
RECORDING_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendRecordingPosition(builder);
break;
case CMD_IN_TRUNCATE_RECORDING:
TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTruncateRecording(builder);
break;
case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingSubscription(builder);
break;
case CMD_IN_STOP_POSITION:
STOP_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopPosition(builder);
break;
case CMD_IN_FIND_LAST_MATCHING_RECORD:
FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendFindLastMatchingRecord(builder);
break;
case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingSubscriptions(builder);
break;
case CMD_IN_START_BOUNDED_REPLAY:
BOUNDED_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartBoundedReplay(builder);
break;
case CMD_IN_STOP_ALL_REPLAYS:
STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopAllReplays(builder);
break;
case CMD_IN_REPLICATE:
REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate(builder);
break;
case CMD_IN_STOP_REPLICATION:
STOP_REPLICATION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplication(builder);
break;
case CMD_IN_START_POSITION:
START_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartPosition(builder);
break;
case CMD_IN_DETACH_SEGMENTS:
DETACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDetachSegments(builder);
break;
case CMD_IN_DELETE_DETACHED_SEGMENTS:
DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDeleteDetachedSegments(builder);
break;
case CMD_IN_PURGE_SEGMENTS:
PURGE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeSegments(builder);
break;
case CMD_IN_ATTACH_SEGMENTS:
ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAttachSegments(builder);
break;
case CMD_IN_MIGRATE_SEGMENTS:
MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendMigrateSegments(builder);
break;
case CMD_IN_AUTH_CONNECT:
AUTH_CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAuthConnect(builder);
break;
case CMD_IN_KEEP_ALIVE:
KEEP_ALIVE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendKeepAlive(builder);
break;
case CMD_IN_TAGGED_REPLICATE:
TAGGED_REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTaggedReplicate(builder);
break;
case CMD_IN_START_RECORDING2:
START_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording2(builder);
break;
case CMD_IN_EXTEND_RECORDING2:
EXTEND_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording2(builder);
break;
case CMD_IN_STOP_RECORDING_BY_IDENTITY:
STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingByIdentity(builder);
break;
case CMD_IN_PURGE_RECORDING:
PURGE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeRecording(builder);
break;
case CMD_IN_REPLICATE2:
REPLICATE_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate2(builder);
break;
case CMD_IN_REQUEST_REPLAY_TOKEN:
REPLAY_TOKEN_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplayToken(builder);
break;
default:
builder.append(": unknown command");
}
} | @Test
void controlRequestConnect()
{
internalEncodeLogHeader(buffer, 0, 32, 64, () -> 5_600_000_000L);
final ConnectRequestEncoder requestEncoder = new ConnectRequestEncoder();
requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
.correlationId(88)
.responseStreamId(42)
.version(-10)
.responseChannel("call me maybe");
dissectControlRequest(CMD_IN_CONNECT, buffer, 0, builder);
assertEquals("[5.600000000] " + CONTEXT + ": " + CMD_IN_CONNECT.name() + " [32/64]: " +
"correlationId=88" +
" responseStreamId=42" +
" version=-10" +
" responseChannel=call me maybe",
builder.toString());
} |
public static void prepareFilesForStaging(FileStagingOptions options) {
List<String> filesToStage = options.getFilesToStage();
if (filesToStage == null || filesToStage.isEmpty()) {
filesToStage = detectClassPathResourcesToStage(ReflectHelpers.findClassLoader(), options);
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage {} files. "
+ "Enable logging at DEBUG level to see which files will be staged.",
filesToStage.size());
LOG.debug("Classpath elements: {}", filesToStage);
}
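// Directories on the classpath cannot be staged as-is; prepareFilesForStaging packs them
// into temporary jars under tmpJarLocation before returning the staged resource list.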
final String tmpJarLocation =
MoreObjects.firstNonNull(options.getTempLocation(), System.getProperty("java.io.tmpdir"));
final List<String> resourcesToStage = prepareFilesForStaging(filesToStage, tmpJarLocation);
options.setFilesToStage(resourcesToStage);
} | @Test
public void testPrepareFilesForStagingUndefinedFilesToStage() throws IOException {
String temporaryLocation = tmpFolder.newFolder().getAbsolutePath();
FileStagingOptions options = PipelineOptionsFactory.create().as(FileStagingOptions.class);
options.setTempLocation(temporaryLocation);
PipelineResources.prepareFilesForStaging(options);
List<String> result = options.getFilesToStage();
assertNotNull(result);
assertTrue(result.size() > 0);
} |
public void removeDataConnection(String name, boolean ifExists) {
if (!dataConnectionStorage.removeDataConnection(name)) {
if (!ifExists) {
throw QueryException.error("Data connection does not exist: " + name);
}
} else {
listeners.forEach(TableListener::onTableChanged);
}
} | @Test
public void when_removesNonExistingDataConnectionWithIfExists_then_succeeds() {
// given
String name = "name";
given(relationsStorage.removeDataConnection(name)).willReturn(false);
// when
// then
dataConnectionResolver.removeDataConnection(name, true);
} |
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
} | @Test
void testForwardedPojo() {
String[] forwardedFields = {"int1->int2; int3->int1; string1 "};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoType, pojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
forwardedFields[0] = "f1->int1; f0->int3 ";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, threeIntTupleType, pojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(0);
forwardedFields[0] = "int1->f2; int2->f0; int3->f1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoType, threeIntTupleType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(1);
forwardedFields[0] = "*->pojo1.*";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(4);
forwardedFields[0] = "*->pojo1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(4);
forwardedFields[0] = "int1; string1; int2->pojo1.int3";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 2)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 3)).contains(5);
forwardedFields[0] = "pojo1.*->f2.*; int1->f1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, nestedPojoType, pojoInTupleType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(4);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(5);
forwardedFields[0] = "f2.*->*";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoInTupleType, pojoType);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 5)).contains(3);
forwardedFields[0] = "pojo1->f2; int1->f1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, nestedPojoType, pojoInTupleType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(4);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(5);
forwardedFields[0] = "f2->*";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoInTupleType, pojoType);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 5)).contains(3);
forwardedFields[0] = "int2; string1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, pojoType, pojoType);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 0)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 2)).isEmpty();
forwardedFields[0] = "pojo1.int1; string1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, nestedPojoType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 5)).contains(5);
assertThat(sp.getForwardingTargetFields(0, 0)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 2)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 3)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 4)).isEmpty();
forwardedFields[0] = "pojo1.*; int1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, nestedPojoType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(4);
assertThat(sp.getForwardingTargetFields(0, 5)).isEmpty();
forwardedFields[0] = "pojo1; int1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, nestedPojoType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(4);
assertThat(sp.getForwardingTargetFields(0, 5)).isEmpty();
} |
@Override
public CustomResponse<TokenResponse> refreshToken(TokenRefreshRequest tokenRefreshRequest) {
return userServiceClient.refreshToken(tokenRefreshRequest);
} | @Test
void refreshToken_ValidTokenRefreshRequest_ReturnsCustomResponse() {
// Given
TokenRefreshRequest tokenRefreshRequest = TokenRefreshRequest.builder()
.refreshToken("validRefreshToken")
.build();
TokenResponse tokenResponse = TokenResponse.builder()
.accessToken("newAccessToken")
.accessTokenExpiresAt(System.currentTimeMillis() + 3600)
.refreshToken("newRefreshToken")
.build();
CustomResponse<TokenResponse> expectedResponse = CustomResponse.successOf(tokenResponse);
// When
when(userServiceClient.refreshToken(any(TokenRefreshRequest.class)))
.thenReturn(expectedResponse);
// Then
CustomResponse<TokenResponse> result = refreshTokenService.refreshToken(tokenRefreshRequest);
assertNotNull(result);
assertTrue(result.getIsSuccess());
assertEquals(HttpStatus.OK, result.getHttpStatus());
assertEquals(tokenResponse, result.getResponse());
// Verify
verify(userServiceClient, times(1)).refreshToken(any(TokenRefreshRequest.class));
} |
@Override
public int hashCode() {
int result = 1;
result = 31 * result + Objects.hashCode(username);
result = 31 * result + Objects.hashCode(getPasswordValue());
result = 31 * result + Objects.hashCode(getSocketAddress().get());
result = 31 * result + Boolean.hashCode(getNonProxyHostsValue());
result = 31 * result + Objects.hashCode(httpHeaders.get());
result = 31 * result + Objects.hashCode(getType());
result = 31 * result + Long.hashCode(connectTimeoutMillis);
return result;
} | @Test
void differentAddresses() {
assertThat(createProxy(ADDRESS_1, PASSWORD_1)).isNotEqualTo(createProxy(ADDRESS_2, PASSWORD_1));
assertThat(createProxy(ADDRESS_1, PASSWORD_1).hashCode()).isNotEqualTo(createProxy(ADDRESS_2, PASSWORD_1).hashCode());
} |
public static String getInstanceIdByComputeNode(final String computeNodePath) {
Pattern pattern = Pattern.compile(getComputeNodePath() + "(/status|/worker_id|/labels)" + "/([\\S]+)$", Pattern.CASE_INSENSITIVE);
Matcher matcher = pattern.matcher(computeNodePath);
return matcher.find() ? matcher.group(2) : "";
} | @Test
void assertGetInstanceIdByComputeNodePath() {
assertThat(ComputeNode.getInstanceIdByComputeNode("/nodes/compute_nodes/status/foo_instance_1"), is("foo_instance_1"));
assertThat(ComputeNode.getInstanceIdByComputeNode("/nodes/compute_nodes/worker_id/foo_instance_2"), is("foo_instance_2"));
assertThat(ComputeNode.getInstanceIdByComputeNode("/nodes/compute_nodes/labels/foo_instance_3"), is("foo_instance_3"));
} |
public void batchAckMessageAsync(
final String addr,
final long timeOut,
final AckCallback ackCallback,
final String topic,
final String consumerGroup,
final List<String> extraInfoList
) throws RemotingException, MQBrokerException, InterruptedException {
String brokerName = null;
Map<String, BatchAck> batchAckMap = new HashMap<>();
for (String extraInfo : extraInfoList) {
String[] extraInfoData = ExtraInfoUtil.split(extraInfo);
if (brokerName == null) {
brokerName = ExtraInfoUtil.getBrokerName(extraInfoData);
}
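// Acks are grouped by (retry flag, queue id, checkpoint offset, pop time) so that one
// BatchAck with a bit set can represent many individual message acks.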
String mergeKey = ExtraInfoUtil.getRetry(extraInfoData) + "@" +
ExtraInfoUtil.getQueueId(extraInfoData) + "@" +
ExtraInfoUtil.getCkQueueOffset(extraInfoData) + "@" +
ExtraInfoUtil.getPopTime(extraInfoData);
BatchAck bAck = batchAckMap.computeIfAbsent(mergeKey, k -> {
BatchAck newBatchAck = new BatchAck();
newBatchAck.setConsumerGroup(consumerGroup);
newBatchAck.setTopic(topic);
newBatchAck.setRetry(ExtraInfoUtil.getRetry(extraInfoData));
newBatchAck.setStartOffset(ExtraInfoUtil.getCkQueueOffset(extraInfoData));
newBatchAck.setQueueId(ExtraInfoUtil.getQueueId(extraInfoData));
newBatchAck.setReviveQueueId(ExtraInfoUtil.getReviveQid(extraInfoData));
newBatchAck.setPopTime(ExtraInfoUtil.getPopTime(extraInfoData));
newBatchAck.setInvisibleTime(ExtraInfoUtil.getInvisibleTime(extraInfoData));
newBatchAck.setBitSet(new BitSet());
return newBatchAck;
});
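// Mark this message as acked: the bit index is its queue offset relative to the shared
// checkpoint start offset of this group.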
bAck.getBitSet().set((int) (ExtraInfoUtil.getQueueOffset(extraInfoData) - ExtraInfoUtil.getCkQueueOffset(extraInfoData)));
}
BatchAckMessageRequestBody requestBody = new BatchAckMessageRequestBody();
requestBody.setBrokerName(brokerName);
requestBody.setAcks(new ArrayList<>(batchAckMap.values()));
batchAckMessageAsync(addr, timeOut, ackCallback, requestBody);
} | @Test
public void testBatchAckMessageAsync() throws MQBrokerException, RemotingException, InterruptedException {
AckCallback callback = mock(AckCallback.class);
List<String> extraInfoList = new ArrayList<>();
extraInfoList.add(String.format("%s %s %s %s %s %s %d %d", "1", "2", "3", "4", "5", brokerName, 7, 8));
mqClientAPI.batchAckMessageAsync(defaultBrokerAddr, defaultTimeout, callback, defaultTopic, "", extraInfoList);
} |
public static MountTo to(final String target) {
return new MountTo(checkNotNullOrEmpty(target, "Target should not be null"));
} | @Test
public void should_return_bad_request_for_nonexistence_file() throws Exception {
server.mount(MOUNT_DIR, to("/dir"));
assertThrows(HttpResponseException.class, () ->
running(server, () -> helper.get(remoteUrl("/dir/unknown.response"))));
} |
@GetMapping(params = "beta=true")
@Secured(action = ActionTypes.READ, signType = SignType.CONFIG)
public RestResult<ConfigInfo4Beta> queryBeta(@RequestParam(value = "dataId") String dataId,
@RequestParam(value = "group") String group,
@RequestParam(value = "tenant", required = false, defaultValue = StringUtils.EMPTY) String tenant) {
try {
ConfigInfo4Beta ci = configInfoBetaPersistService.findConfigInfo4Beta(dataId, group, tenant);
if (Objects.nonNull(ci)) {
String encryptedDataKey = ci.getEncryptedDataKey();
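// Beta content may be stored encrypted; decryptHandler yields a (dataKey, plaintext)
// pair and only the plaintext content is returned to the caller.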
Pair<String, String> pair = EncryptionHandler.decryptHandler(dataId, encryptedDataKey, ci.getContent());
ci.setContent(pair.getSecond());
}
return RestResultUtils.success("query beta ok", ci);
} catch (Throwable e) {
LOGGER.error("query beta data error", e);
return RestResultUtils.failed("query beta data error");
}
} | @Test
void testQueryBeta() throws Exception {
ConfigInfoBetaWrapper configInfoBetaWrapper = new ConfigInfoBetaWrapper();
configInfoBetaWrapper.setDataId("test");
configInfoBetaWrapper.setGroup("test");
configInfoBetaWrapper.setContent("test");
when(configInfoBetaPersistService.findConfigInfo4Beta("test", "test", "")).thenReturn(configInfoBetaWrapper);
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(Constants.CONFIG_CONTROLLER_PATH).param("beta", "true")
.param("dataId", "test").param("group", "test").param("tenant", "");
String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString();
String code = JacksonUtils.toObj(actualValue).get("code").toString();
String data = JacksonUtils.toObj(actualValue).get("data").toString();
ConfigInfoBetaWrapper resConfigInfoBetaWrapper = JacksonUtils.toObj(data, ConfigInfoBetaWrapper.class);
assertEquals("200", code);
assertEquals(configInfoBetaWrapper.getDataId(), resConfigInfoBetaWrapper.getDataId());
assertEquals(configInfoBetaWrapper.getGroup(), resConfigInfoBetaWrapper.getGroup());
assertEquals(configInfoBetaWrapper.getContent(), resConfigInfoBetaWrapper.getContent());
} |
@Override
public MergedResult decorate(final QueryResult queryResult, final SQLStatementContext sqlStatementContext, final MaskRule rule) {
return new MaskMergedResult(maskRule, selectStatementContext, new TransparentMergedResult(queryResult));
} | @Test
void assertDecorateMergedResult() throws SQLException {
MergedResult mergedResult = mock(MergedResult.class);
when(mergedResult.next()).thenReturn(true);
MaskDQLResultDecorator decorator = new MaskDQLResultDecorator(mock(MaskRule.class), mock(SelectStatementContext.class));
MergedResult actual = decorator.decorate(mergedResult, mock(SQLStatementContext.class), mock(MaskRule.class));
assertTrue(actual.next());
} |
public static CreateSourceProperties from(final Map<String, Literal> literals) {
try {
return new CreateSourceProperties(literals, DurationParser::parse, false);
} catch (final ConfigException e) {
final String message = e.getMessage().replace(
"configuration",
"property"
);
throw new KsqlException(message, e);
}
} | @Test
public void shouldThrowOnConstructionInvalidTimestampFormat() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> from(
of(TIMESTAMP_FORMAT_PROPERTY, new StringLiteral("invalid")))
);
// Then:
assertThat(e.getMessage(), containsString("Invalid datetime format for config:TIMESTAMP_FORMAT, reason:Unknown pattern letter: i"));
} |
public void inputWatermark(Watermark watermark, int channelIndex, DataOutput<?> output)
throws Exception {
final SubpartitionStatus subpartitionStatus;
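// An InternalWatermark carries its own subpartition index; a plain watermark is
// attributed to the channel's statically assigned subpartition.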
if (watermark instanceof InternalWatermark) {
int subpartitionStatusIndex = ((InternalWatermark) watermark).getSubpartitionIndex();
subpartitionStatus =
subpartitionStatuses.get(channelIndex).get(subpartitionStatusIndex);
} else {
subpartitionStatus =
subpartitionStatuses.get(channelIndex).get(subpartitionIndexes[channelIndex]);
}
// ignore the input watermark if its subpartition is idle, or if all subpartitions are
// idle (i.e. overall the valve is idle).
if (lastOutputWatermarkStatus.isActive() && subpartitionStatus.watermarkStatus.isActive()) {
long watermarkMillis = watermark.getTimestamp();
// also ignore the input watermark if it is not greater than the last received
// watermark for its subpartition.
if (watermarkMillis > subpartitionStatus.watermark) {
subpartitionStatus.watermark = watermarkMillis;
if (subpartitionStatus.isWatermarkAligned) {
adjustAlignedSubpartitionStatuses(subpartitionStatus);
} else if (watermarkMillis >= lastOutputWatermark) {
// previously unaligned subpartitions are now aligned if its watermark has
// caught up
markWatermarkAligned(subpartitionStatus);
}
// now, attempt to find a new min watermark across all aligned subpartitions
findAndOutputNewMinWatermarkAcrossAlignedSubpartitions(output);
}
}
} | @Test
void testSingleInputDecreasingWatermarksYieldsNoOutput() throws Exception {
StatusWatermarkOutput valveOutput = new StatusWatermarkOutput();
StatusWatermarkValve valve = new StatusWatermarkValve(1);
valve.inputWatermark(new Watermark(25), 0, valveOutput);
assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(25));
valve.inputWatermark(new Watermark(18), 0, valveOutput);
assertThat(valveOutput.popLastSeenOutput()).isNull();
valve.inputWatermark(new Watermark(42), 0, valveOutput);
assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(42));
assertThat(valveOutput.popLastSeenOutput()).isNull();
} |
@Override
public OpenstackVtap createVtap(Type type, OpenstackVtapCriterion vtapCriterion) {
checkNotNull(type, VTAP_DESC_NULL, "type");
checkNotNull(vtapCriterion, VTAP_DESC_NULL, "vtapCriterion");
Set<DeviceId> txDevices = type.isValid(Type.VTAP_TX) ?
getEdgeDevice(Type.VTAP_TX, vtapCriterion) : ImmutableSet.of();
Set<DeviceId> rxDevices = type.isValid(Type.VTAP_RX) ?
getEdgeDevice(Type.VTAP_RX, vtapCriterion) : ImmutableSet.of();
DefaultOpenstackVtap description = DefaultOpenstackVtap.builder()
.id(OpenstackVtapId.vtapId())
.type(type)
.vtapCriterion(vtapCriterion)
.txDeviceIds(txDevices)
.rxDeviceIds(rxDevices)
.build();
return store.createVtap(description);
} | @Test(expected = NullPointerException.class)
public void testCreateNullVtap() {
target.createVtap(null, null);
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() +
mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() +
mxBean.getNonHeapMemoryUsage().getUsed());
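// MemoryUsage reports -1 when no maximum is defined; propagate that sentinel for
// total.max instead of summing it into a misleading value.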
gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ?
-1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() +
mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
gauges.put("heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax());
}
});
gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("non-heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
for (final MemoryPoolMXBean pool : memoryPools) {
final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
gauges.put(name(poolName, "usage"), new RatioGauge() {
@Override
protected Ratio getRatio() {
MemoryUsage usage = pool.getUsage();
return Ratio.of(usage.getUsed(),
usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
// Only register GC usage metrics if the memory pool supports usage statistics.
if (pool.getCollectionUsage() != null) {
gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () ->
pool.getCollectionUsage().getUsed());
}
gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
}
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasAGaugeForHeapCommitted() {
final Gauge gauge = (Gauge) gauges.getMetrics().get("heap.committed");
assertThat(gauge.getValue())
.isEqualTo(10L);
} |
public static <T> Read<T> read() {
return new AutoValue_JdbcIO_Read.Builder<T>()
.setFetchSize(DEFAULT_FETCH_SIZE)
.setOutputParallelization(true)
.build();
} | @Test
public void testReadWithCoderInference() {
PCollection<TestRow> rows =
pipeline.apply(
JdbcIO.<TestRow>read()
.withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION)
.withQuery(String.format("select name,id from %s where name = ?", READ_TABLE_NAME))
.withStatementPreparator(
preparedStatement -> preparedStatement.setString(1, TestRow.getNameForSeed(1)))
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId()));
PAssert.thatSingleton(rows.apply("Count All", Count.globally())).isEqualTo(1L);
Iterable<TestRow> expectedValues = Collections.singletonList(TestRow.fromSeed(1));
PAssert.that(rows).containsInAnyOrder(expectedValues);
pipeline.run();
} |
@Override
public int size() {
return get(sizeAsync());
} | @Test
public void testSize() {
RScoredSortedSet<Integer> set = redisson.getScoredSortedSet("simple");
set.add(0, 1);
set.add(1, 2);
set.add(2, 3);
set.add(2, 3);
set.add(3, 4);
set.add(4, 5);
set.add(4, 5);
Assertions.assertEquals(5, set.size());
} |
public B filter(String filter) {
this.filter = filter;
return getThis();
} | @Test
void filter() {
InterfaceBuilder builder = new InterfaceBuilder();
builder.filter("mockfilter");
Assertions.assertEquals("mockfilter", builder.build().getFilter());
} |
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
if (statsEnabled) {
stats.log(deviceStateServiceMsg);
}
stateService.onQueueMsg(deviceStateServiceMsg, callback);
} | @Test
public void givenStatsDisabled_whenForwardingInactivityMsgToStateService_thenStatsAreNotRecorded() {
// GIVEN
ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "stats", statsMock);
ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "statsEnabled", false);
var inactivityMsg = TransportProtos.DeviceInactivityProto.newBuilder()
.setTenantIdMSB(tenantId.getId().getMostSignificantBits())
.setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
.setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
.setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
.setLastInactivityTime(time)
.build();
doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(inactivityMsg, tbCallbackMock);
// WHEN
defaultTbCoreConsumerServiceMock.forwardToStateService(inactivityMsg, tbCallbackMock);
// THEN
then(statsMock).should(never()).log(inactivityMsg);
} |
@Override
public void upgrade() {
migrationHelpers.removeBuiltinRole("Field Type Mappings Manager");
} | @Test
void testRemovesProperRole() {
toTest.upgrade();
verify(migrationHelpers).removeBuiltinRole("Field Type Mappings Manager");
verifyNoMoreInteractions(migrationHelpers);
} |
public static KeyPair loadKey(File f, String passwd) throws IOException, GeneralSecurityException {
return loadKey(readPemFile(f), passwd);
} | @Test
public void loadKeyBroken() throws IOException, GeneralSecurityException {
File file = new File(this.getClass().getResource("openssh-broken").getFile());
String password = "password";
assertThrows(IllegalArgumentException.class, () -> PrivateKeyProvider.loadKey(file, password));
} |
protected static Map<String, String> appendParameters(
Map<String, String> parameters, Map<String, String> appendParameters) {
if (parameters == null) {
parameters = new HashMap<>();
}
parameters.putAll(appendParameters);
return parameters;
} | @Test
void appendParameters() {
Map<String, String> source = null;
source = AbstractBuilder.appendParameter(source, "default.num", "one");
source = AbstractBuilder.appendParameter(source, "num", "ONE");
Assertions.assertTrue(source.containsKey("default.num"));
Assertions.assertEquals("ONE", source.get("num"));
} |
public static Write write(String url, String token) {
checkNotNull(url, "url is required.");
checkNotNull(token, "token is required.");
return write(StaticValueProvider.of(url), StaticValueProvider.of(token));
} | @Test
@Category(NeedsRunner.class)
public void successfulSplunkIOSingleBatchParallelismTest() {
// Create server expectation for success.
mockServerListening(200);
int testPort = mockServerRule.getPort();
int testParallelism = 2;
String url = Joiner.on(':').join("http://localhost", testPort);
String token = "test-token";
List<SplunkEvent> testEvents =
ImmutableList.of(
SplunkEvent.newBuilder()
.withEvent("test-event-1")
.withHost("test-host-1")
.withIndex("test-index-1")
.withSource("test-source-1")
.withSourceType("test-source-type-1")
.withTime(12345L)
.create(),
SplunkEvent.newBuilder()
.withEvent("test-event-2")
.withHost("test-host-2")
.withIndex("test-index-2")
.withSource("test-source-2")
.withSourceType("test-source-type-2")
.withTime(12345L)
.create());
PCollection<SplunkWriteError> actual =
pipeline
.apply("Create Input data", Create.of(testEvents))
.apply(
"SplunkIO",
SplunkIO.write(url, token).withParallelism(testParallelism).withBatchCount(1));
// All successful responses.
PAssert.that(actual).empty();
pipeline.run();
// Server received exactly 1 post request per SplunkEvent
mockServerClient.verify(
HttpRequest.request(EXPECTED_PATH), VerificationTimes.exactly(testEvents.size()));
} |
public static String getGroupName(final String serviceNameWithGroup) {
if (StringUtils.isBlank(serviceNameWithGroup)) {
return StringUtils.EMPTY;
}
if (!serviceNameWithGroup.contains(Constants.SERVICE_INFO_SPLITER)) {
return Constants.DEFAULT_GROUP;
}
return serviceNameWithGroup.split(Constants.SERVICE_INFO_SPLITER)[0];
} | @Test
void testGetGroupNameWithEmpty() {
assertEquals(StringUtils.EMPTY, NamingUtils.getGroupName(null));
} |
@Override
protected String convertToString(final String value) {
return value;
} | @Test
void testConvertToString() throws Exception {
final String expected = "test.jar";
final String toString = jarIdPathParameter.convertToString(expected);
assertThat(toString).isEqualTo(expected);
} |
public void goOnlineFromConsuming(SegmentZKMetadata segmentZKMetadata)
throws InterruptedException {
_serverMetrics.setValueOfTableGauge(_clientId, ServerGauge.LLC_PARTITION_CONSUMING, 0);
try {
// Remove the segment file before we do anything else.
removeSegmentFile();
_leaseExtender.removeSegment(_segmentNameStr);
StreamPartitionMsgOffset endOffset = _streamPartitionMsgOffsetFactory.create(segmentZKMetadata.getEndOffset());
_segmentLogger.info("State: {}, transitioning from CONSUMING to ONLINE (startOffset: {}, endOffset: {})", _state,
_startOffset, endOffset);
stop();
_segmentLogger.info("Consumer thread stopped in state {}", _state);
switch (_state) {
case COMMITTED:
case RETAINED:
// Nothing to do. we already built local segment and swapped it with in-memory data.
_segmentLogger.info("State {}. Nothing to do", _state.toString());
break;
case DISCARDED:
case ERROR:
_segmentLogger.info("State {}. Downloading to replace", _state.toString());
downloadSegmentAndReplace(segmentZKMetadata);
break;
case CATCHING_UP:
case HOLDING:
case INITIAL_CONSUMING:
switch (_segmentCompletionMode) {
case DOWNLOAD:
_segmentLogger.info("State {}. CompletionMode {}. Downloading to replace", _state.toString(),
_segmentCompletionMode);
downloadSegmentAndReplace(segmentZKMetadata);
break;
case DEFAULT:
// Allow catching up to the final offset, and then replace.
if (_currentOffset.compareTo(endOffset) > 0) {
// We moved ahead of the offset that is committed in ZK.
_segmentLogger
.warn("Current offset {} ahead of the offset in zk {}. Downloading to replace", _currentOffset,
endOffset);
downloadSegmentAndReplace(segmentZKMetadata);
} else if (_currentOffset.compareTo(endOffset) == 0) {
_segmentLogger
.info("Current offset {} matches offset in zk {}. Replacing segment", _currentOffset, endOffset);
buildSegmentAndReplace();
} else {
_segmentLogger.info("Attempting to catch up from offset {} to {} ", _currentOffset, endOffset);
boolean success = catchupToFinalOffset(endOffset,
TimeUnit.MILLISECONDS.convert(MAX_TIME_FOR_CONSUMING_TO_ONLINE_IN_SECONDS, TimeUnit.SECONDS));
if (success) {
_segmentLogger.info("Caught up to offset {}", _currentOffset);
buildSegmentAndReplace();
} else {
_segmentLogger
.info("Could not catch up to offset (current = {}). Downloading to replace", _currentOffset);
downloadSegmentAndReplace(segmentZKMetadata);
}
}
break;
default:
break;
}
break;
default:
_segmentLogger.info("Downloading to replace segment while in state {}", _state.toString());
downloadSegmentAndReplace(segmentZKMetadata);
break;
}
} catch (Exception e) {
Utils.rethrowException(e);
} finally {
_serverMetrics.setValueOfTableGauge(_clientId, ServerGauge.LLC_PARTITION_CONSUMING, 0);
}
} | @Test
public void testFileRemovedDuringOnlineTransition()
throws Exception {
FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager();
SegmentCompletionProtocol.Response.Params params = new SegmentCompletionProtocol.Response.Params();
params.withStatus(SegmentCompletionProtocol.ControllerResponseStatus.FAILED);
SegmentCompletionProtocol.Response commitFailed = new SegmentCompletionProtocol.Response(params);
// Set up the responses so that we get a failed response first and then a success response.
segmentDataManager._responses.add(commitFailed);
final long leaseTime = 50000L;
final long finalOffset = START_OFFSET_VALUE + 600;
segmentDataManager.setCurrentOffset(finalOffset);
// We have set up commit to fail, so we should carry over the segment file.
File segmentTarFile = segmentDataManager.invokeBuildForCommit(leaseTime).getSegmentTarFile();
Assert.assertNotNull(segmentTarFile);
Assert.assertTrue(segmentDataManager._buildSegmentCalled);
Assert.assertFalse(segmentDataManager.invokeCommit());
Assert.assertTrue(segmentTarFile.exists());
// Now let the segment go ONLINE from CONSUMING, and ensure that the file is removed.
SegmentZKMetadata metadata = new SegmentZKMetadata(SEGMENT_NAME_STR);
metadata.setEndOffset(new LongMsgOffset(finalOffset).toString());
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.HOLDING);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertFalse(segmentTarFile.exists());
segmentDataManager.close();
} |
public synchronized @Nullable WorkItemServiceState reportError(Throwable e) throws IOException {
checkState(!finalStateSent, "cannot reportUpdates after sending a final state");
if (wasAskedToAbort) {
LOG.info("Service already asked to abort work item, not reporting ignored progress.");
return null;
}
WorkItemStatus status = createStatusUpdate(true);
// TODO: Provide a more structured representation of the error, e.g., the serialized exception object.
// TODO: Look into moving the stack trace thinning into the client.
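// Unwrap UserCodeException so the reported error points at the user's underlying failure.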
Throwable t = e instanceof UserCodeException ? e.getCause() : e;
Status error = new Status();
error.setCode(2); // Code.UNKNOWN. TODO: Replace with a generated definition.
// TODO: Attach the stack trace as exception details, not to the message.
String logPrefix = String.format("Failure processing work item %s", uniqueWorkId());
if (isOutOfMemoryError(t)) {
String message =
"An OutOfMemoryException occurred. Consider specifying higher memory "
+ "instances in PipelineOptions.\n";
LOG.error("{}: {}", logPrefix, message);
error.setMessage(message + DataflowWorkerLoggingHandler.formatException(t));
} else if (isReadLoopAbortedError(t)) {
LOG.debug("Read loop aborted error occurred during work unit execution", t);
} else {
LOG.error(
"{}: Uncaught exception occurred during work unit execution. This will be retried.",
logPrefix,
t);
error.setMessage(DataflowWorkerLoggingHandler.formatException(t));
}
status.setErrors(ImmutableList.of(error));
return execute(status);
} | @Test
public void reportError() throws IOException {
RuntimeException error = new RuntimeException();
error.fillInStackTrace();
statusClient.reportError(error);
verify(workUnitClient).reportWorkItemStatus(statusCaptor.capture());
WorkItemStatus workStatus = statusCaptor.getValue();
assertThat(workStatus.getWorkItemId(), equalTo(Long.toString(WORK_ID)));
assertThat(workStatus.getCompleted(), equalTo(true));
assertThat(workStatus.getReportIndex(), equalTo(INITIAL_REPORT_INDEX));
assertThat(workStatus.getErrors(), hasSize(1));
Status status = workStatus.getErrors().get(0);
assertThat(status.getCode(), equalTo(2));
assertThat(status.getMessage(), containsString("WorkItemStatusClientTest"));
} |
public List<JobVertex> getVerticesSortedTopologicallyFromSources()
throws InvalidProgramException {
// early out on empty lists
if (this.taskVertices.isEmpty()) {
return Collections.emptyList();
}
List<JobVertex> sorted = new ArrayList<JobVertex>(this.taskVertices.size());
Set<JobVertex> remaining = new LinkedHashSet<JobVertex>(this.taskVertices.values());
// start by finding the vertices with no input edges
// and the ones with disconnected inputs (that refer to some standalone data set)
{
Iterator<JobVertex> iter = remaining.iterator();
while (iter.hasNext()) {
JobVertex vertex = iter.next();
if (vertex.hasNoConnectedInputs()) {
sorted.add(vertex);
iter.remove();
}
}
}
int startNodePos = 0;
// traverse from the nodes that were added until we found all elements
while (!remaining.isEmpty()) {
// first check if we have more candidates to start traversing from. if not, then the
// graph is cyclic, which is not permitted
if (startNodePos >= sorted.size()) {
throw new InvalidProgramException("The job graph is cyclic.");
}
JobVertex current = sorted.get(startNodePos++);
addNodesThatHaveNoNewPredecessors(current, sorted, remaining);
}
return sorted;
} | @Test
public void testTopologicalSort1() {
JobVertex source1 = new JobVertex("source1");
JobVertex source2 = new JobVertex("source2");
JobVertex target1 = new JobVertex("target1");
JobVertex target2 = new JobVertex("target2");
JobVertex intermediate1 = new JobVertex("intermediate1");
JobVertex intermediate2 = new JobVertex("intermediate2");
target1.connectNewDataSetAsInput(
source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
target2.connectNewDataSetAsInput(
source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
target2.connectNewDataSetAsInput(
intermediate2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
intermediate2.connectNewDataSetAsInput(
intermediate1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
intermediate1.connectNewDataSetAsInput(
source2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
JobGraph graph =
JobGraphTestUtils.streamingJobGraph(
source1, source2, intermediate1, intermediate2, target1, target2);
List<JobVertex> sorted = graph.getVerticesSortedTopologicallyFromSources();
assertEquals(6, sorted.size());
assertBefore(source1, target1, sorted);
assertBefore(source1, target2, sorted);
assertBefore(source2, target2, sorted);
assertBefore(source2, intermediate1, sorted);
assertBefore(source2, intermediate2, sorted);
assertBefore(intermediate1, target2, sorted);
assertBefore(intermediate2, target2, sorted);
} |
@Override
public Integer doCall() throws Exception {
List<Row> rows = new ArrayList<>();
JsonObject plugins = loadConfig().getMap("plugins");
plugins.forEach((key, value) -> {
JsonObject details = (JsonObject) value;
String name = details.getStringOrDefault("name", key);
String command = details.getStringOrDefault("command", name);
String dependency = details.getStringOrDefault("dependency",
"org.apache.camel:camel-jbang-plugin-%s".formatted(command));
String description
= details.getStringOrDefault("description", "Plugin %s called with command %s".formatted(name, command));
rows.add(new Row(name, command, dependency, description));
});
printRows(rows);
if (all) {
rows.clear();
for (PluginType camelPlugin : PluginType.values()) {
if (plugins.get(camelPlugin.getName()) == null) {
String dependency = "org.apache.camel:camel-jbang-plugin-%s".formatted(camelPlugin.getCommand());
rows.add(new Row(
camelPlugin.getName(), camelPlugin.getCommand(), dependency,
camelPlugin.getDescription()));
}
}
if (!rows.isEmpty()) {
printer().println();
printer().println("Supported plugins:");
printer().println();
printRows(rows);
}
}
return 0;
} | @Test
public void shouldGetPlugin() throws Exception {
PluginHelper.enable(PluginType.CAMEL_K);
PluginGet command = new PluginGet(new CamelJBangMain().withPrinter(printer));
command.doCall();
List<String> output = printer.getLines();
Assertions.assertEquals(2, output.size());
Assertions.assertEquals("NAME COMMAND DEPENDENCY DESCRIPTION", output.get(0));
Assertions.assertEquals(
"camel-k k org.apache.camel:camel-jbang-plugin-k %s".formatted(PluginType.CAMEL_K.getDescription()),
output.get(1));
} |
public synchronized Schema create(URI id, String refFragmentPathDelimiters) {
URI normalizedId = id.normalize();
if (!schemas.containsKey(normalizedId)) {
URI baseId = removeFragment(id).normalize();
if (!schemas.containsKey(baseId)) {
logger.debug("Reading schema: " + baseId);
final JsonNode baseContent = contentResolver.resolve(baseId);
schemas.put(baseId, new Schema(baseId, baseContent, null));
}
final Schema baseSchema = schemas.get(baseId);
if (normalizedId.toString().contains("#")) {
JsonNode childContent = fragmentResolver.resolve(baseSchema.getContent(), '#' + id.getFragment(), refFragmentPathDelimiters);
schemas.put(normalizedId, new Schema(normalizedId, childContent, baseSchema));
}
}
return schemas.get(normalizedId);
} | @Test
public void createWithAbsolutePath() throws URISyntaxException {
URI schemaUri = getClass().getResource("/schema/address.json").toURI();
Schema schema = new SchemaStore().create(schemaUri, "#/.");
assertThat(schema, is(notNullValue()));
assertThat(schema.getId(), is(equalTo(schemaUri)));
assertThat(schema.getContent().has("description"), is(true));
assertThat(schema.getContent().get("description").asText(), is(equalTo("An Address following the convention of http://microformats.org/wiki/hcard")));
} |
public static String stripTrailingSlash(String path) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(path), "path must not be null or empty");
String result = path;
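// Trim trailing slashes, but never strip a bare scheme such as "s3://" any further.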
while (!result.endsWith("://") && result.endsWith("/")) {
result = result.substring(0, result.length() - 1);
}
return result;
} | @Test
public void testStripTrailingSlashWithInvalidPath() {
String[] invalidPaths = new String[] {null, ""};
for (String invalidPath : invalidPaths) {
assertThatThrownBy(() -> LocationUtil.stripTrailingSlash(invalidPath))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("path must not be null or empty");
}
} |
public static void registerExternalConfigLoader(ExternalConfigLoader configLoader) {
wLock.lock();
try {
CONFIG_LOADERS.add(configLoader);
CONFIG_LOADERS.sort(Comparator.comparingInt(ExternalConfigLoader::getOrder));
} finally {
wLock.unlock();
}
} | @Test
public void registerExternalConfigLoader() throws Exception {
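// Empty test body in the source: no calls are made and nothing is asserted.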
} |
String getPropertyName(String metricsName) {
return new StringBuilder("timer")
.append(":")
.append(metricsName)
.toString();
} | @Test
public void testGetPropertyName() {
assertThat(producer.getPropertyName(METRICS_NAME), is("timer" + ":" + METRICS_NAME));
} |
@Override
public void delete(K key) {
long startNanos = Timer.nanos();
try {
delegate.delete(key);
} finally {
deleteProbe.recordValue(Timer.nanosElapsed(startNanos));
}
} | @Test
public void delete() {
String key = "somekey";
cacheStore.delete(key);
verify(delegate).delete(key);
assertProbeCalledOnce("delete");
} |
@Override
protected void onMessage(RedissonCountDownLatchEntry value, Long message) {
if (message.equals(ZERO_COUNT_MESSAGE)) {
Runnable runnableToExecute = value.getListeners().poll();
while (runnableToExecute != null) {
runnableToExecute.run();
runnableToExecute = value.getListeners().poll();
}
value.getLatch().open();
}
if (message.equals(NEW_COUNT_MESSAGE)) {
value.getLatch().close();
}
} | @Test
public void testOnZeroMessageAllListenersExecuted(@Mocked Runnable listener) {
int listenCount = 10;
RedissonCountDownLatchEntry entry = new RedissonCountDownLatchEntry(null);
for (int i = 0; i < listenCount; i++) {
entry.addListener(listener);
}
countDownLatchPubSub.onMessage(entry, CountDownLatchPubSub.ZERO_COUNT_MESSAGE);
new Verifications() {{
listener.run();
times = listenCount;
}};
} |
@Override
public boolean checkCredentials(String username, String password) {
if (username == null || password == null) {
return false;
}
Credentials credentials = new Credentials(username, password);
if (validCredentialsCache.contains(credentials)) {
return true;
} else if (invalidCredentialsCache.contains(credentials)) {
return false;
}
boolean isValid =
this.username.equals(username)
&& this.passwordHash.equals(
generatePasswordHash(
algorithm, salt, iterations, keyLength, password));
if (isValid) {
validCredentialsCache.add(credentials);
} else {
invalidCredentialsCache.add(credentials);
}
return isValid;
} | @Test
public void testPBKDF2WithHmacSHA1_lowerCaseWithoutColon() throws Exception {
String algorithm = "PBKDF2WithHmacSHA1";
int iterations = 1000;
int keyLength = 128;
String hash =
"17:87:CA:B9:14:73:60:36:8B:20:82:87:92:58:43:B8:A3:85:66:BC:C1:6D:C3:31:6C:1D:47:48:C7:F2:E4:1D:96"
+ ":00:11:F8:4D:94:63:2F:F2:7A:F0:3B:72:63:16:5D:EF:5C:97:CC:EC:59:CB:18:4A:AA:F5:23:63:0B:6E:3B:65"
+ ":E0:72:6E:69:7D:EB:83:05:05:E5:D6:F2:19:99:49:3F:89:DA:DE:83:D7:2B:5B:7D:C9:56:B4:F2:F6:A5:61:29"
+ ":29:ED:DF:4C:4E:8D:EA:DF:47:A2:B0:89:11:86:D4:77:A1:02:E9:0C:26:A4:1E:2A:C1:A8:71:E0:93:8F:A4";
hash = hash.toLowerCase().replace(":", "");
PBKDF2Authenticator PBKDF2Authenticator =
new PBKDF2Authenticator(
"/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);
for (String username : TEST_USERNAMES) {
for (String password : TEST_PASSWORDS) {
boolean expectedIsAuthenticated =
VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
boolean actualIsAuthenticated =
PBKDF2Authenticator.checkCredentials(username, password);
assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
}
}
} |
@Override
public int positionedRead(long pos, byte[] b, int off, int len) throws IOException {
if (!CachePerThreadContext.get().getCacheEnabled()) {
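// Page cache disabled for this thread: record external-read metrics and read straight from the external stream.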
MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_REQUESTED_EXTERNAL.getName())
.mark(len);
MetricsSystem.counter(MetricKey.CLIENT_CACHE_EXTERNAL_REQUESTS.getName()).inc();
len = getExternalFileInStream().positionedRead(pos, b, off, len);
MultiDimensionalMetricsSystem.EXTERNAL_DATA_READ.inc(len);
return len;
}
try {
return readInternal(new ByteArrayTargetBuffer(b, off), off, len,
ReadType.READ_INTO_BYTE_ARRAY, pos, true);
} catch (IOException | RuntimeException e) {
LOG.warn("Failed to read from Alluxio's page cache.", e);
if (mFallbackEnabled) {
MetricsSystem.counter(MetricKey.CLIENT_CACHE_POSITION_READ_FALLBACK.getName()).inc();
len = getExternalFileInStream().positionedRead(pos, b, off, len);
MultiDimensionalMetricsSystem.EXTERNAL_DATA_READ.inc(len);
return len;
}
throw e;
}
} | @Test
public void positionedReadPartialPage() throws Exception {
int fileSize = mPageSize;
byte[] testData = BufferUtils.getIncreasingByteArray(fileSize);
ByteArrayCacheManager manager = new ByteArrayCacheManager();
LocalCacheFileInStream stream = setupWithSingleFile(testData, manager);
int partialReadSize = fileSize / 5;
int offset = fileSize / 5;
// cache miss
byte[] cacheMiss = new byte[partialReadSize];
Assert.assertEquals(partialReadSize,
stream.positionedRead(offset, cacheMiss, 0, cacheMiss.length));
Assert.assertArrayEquals(
Arrays.copyOfRange(testData, offset, offset + partialReadSize), cacheMiss);
Assert.assertEquals(0, manager.mPagesServed);
Assert.assertEquals(1, manager.mPagesCached);
Assert.assertEquals(1,
MetricsSystem.counter(MetricKey.CLIENT_CACHE_EXTERNAL_REQUESTS.getName()).getCount());
Assert.assertEquals(0,
MetricsSystem.counter(MetricKey.CLIENT_CACHE_HIT_REQUESTS.getName()).getCount());
// cache hit
byte[] cacheHit = new byte[partialReadSize];
Assert.assertEquals(partialReadSize,
stream.positionedRead(offset, cacheHit, 0, cacheHit.length));
Assert.assertArrayEquals(
Arrays.copyOfRange(testData, offset, offset + partialReadSize), cacheHit);
Assert.assertEquals(1, manager.mPagesServed);
Assert.assertEquals(1,
MetricsSystem.counter(MetricKey.CLIENT_CACHE_EXTERNAL_REQUESTS.getName()).getCount());
Assert.assertEquals(1,
MetricsSystem.counter(MetricKey.CLIENT_CACHE_HIT_REQUESTS.getName()).getCount());
} |
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
if (null == source) {
return null;
}
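// Instantiate the target type reflectively, then copy every property not listed in ignoreProperties.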
T target = ReflectUtil.newInstanceIfPossible(tClass);
copyProperties(source, target, CopyOptions.create().setIgnoreProperties(ignoreProperties));
return target;
} | @Test
public void copyPropertiesBeanToMapTest() {
// Test copying bean properties into a Map
final SubPerson p1 = new SubPerson();
p1.setSlow(true);
p1.setName("测试");
p1.setSubName("sub测试");
final Map<String, Object> map = MapUtil.newHashMap();
BeanUtil.copyProperties(p1, map);
assertTrue((Boolean) map.get("slow"));
assertEquals("测试", map.get("name"));
assertEquals("sub测试", map.get("subName"));
} |
@SneakyThrows
public static Optional<Date> nextExecutionDate(
TimeTrigger trigger, Date startDate, String uniqueId) {
CronTimeTrigger cronTimeTrigger = getCronTimeTrigger(trigger);
if (cronTimeTrigger != null) {
CronExpression cronExpression =
TriggerHelper.buildCron(cronTimeTrigger.getCron(), cronTimeTrigger.getTimezone());
Date nextTime = cronExpression.getNextValidTimeAfter(startDate);
if (nextTime != null) {
nextTime.setTime(
nextTime.getTime()
+ getDelayInSeconds(cronTimeTrigger, uniqueId) * TimeTrigger.MS_IN_SECONDS);
}
return Optional.ofNullable(nextTime);
}
throw new UnsupportedOperationException(
"TimeTrigger nextExecutionDate is not implemented for type: " + trigger.getType());
} | @Test
public void testNextExecutionDateForPredefined() throws Exception {
TimeTrigger trigger =
loadObject("fixtures/time_triggers/sample-predefined-time-trigger.json", TimeTrigger.class);
Optional<Date> actual =
TriggerHelper.nextExecutionDate(trigger, Date.from(Instant.EPOCH), "test-id");
assertEquals(Optional.of(Date.from(Instant.ofEpochSecond(28800))), actual);
} |
public void write(CruiseConfig configForEdit, OutputStream output, boolean skipPreprocessingAndValidation) throws Exception {
LOGGER.debug("[Serializing Config] Starting to write. Validation skipped? {}", skipPreprocessingAndValidation);
MagicalGoConfigXmlLoader loader = new MagicalGoConfigXmlLoader(configCache, registry);
if (!configForEdit.getOrigin().isLocal()) {
throw new GoConfigInvalidException(configForEdit, "Attempted to save merged configuration with partials");
}
if (!skipPreprocessingAndValidation) {
loader.preprocessAndValidate(configForEdit);
LOGGER.debug("[Serializing Config] Done with cruise config validators.");
}
Document document = createEmptyCruiseConfigDocument();
write(configForEdit, document.getRootElement(), configCache, registry);
LOGGER.debug("[Serializing Config] XSD and DOM validation.");
verifyXsdValid(document);
MagicalGoConfigXmlLoader.validateDom(document.getRootElement(), registry);
LOGGER.info("[Serializing Config] Generating config partial.");
XmlUtils.writeXml(document, output);
LOGGER.debug("[Serializing Config] Finished writing config partial.");
} | @Test
public void shouldNotWriteDuplicatedPipelines() {
String xml = ConfigFileFixture.TWO_PIPELINES;
CruiseConfig cruiseConfig = ConfigMigrator.loadWithMigration(xml).config;
cruiseConfig.addPipeline("someGroup", PipelineConfigMother.pipelineConfig("pipeline1"));
try {
xmlWriter.write(cruiseConfig, output, false);
fail("Should not be able to save config when there are 2 pipelines with same name");
} catch (Exception e) {
assertThat(e.getMessage(), containsString("You have defined multiple pipelines named 'pipeline1'. Pipeline names must be unique. Source(s): [cruise-config.xml]"));
}
} |
@VisibleForTesting
void validateDeptNameUnique(Long id, Long parentId, String name) {
DeptDO dept = deptMapper.selectByParentIdAndName(parentId, name);
if (dept == null) {
return;
}
// If id is null, there is no need to check whether the match is the same department
if (id == null) {
throw exception(DEPT_NAME_DUPLICATE);
}
if (ObjectUtil.notEqual(dept.getId(), id)) {
throw exception(DEPT_NAME_DUPLICATE);
}
} | @Test
public void testValidateNameUnique_duplicate() {
// Mock data
DeptDO deptDO = randomPojo(DeptDO.class);
deptMapper.insert(deptDO);
// Prepare parameters
Long id = randomLongId();
Long parentId = deptDO.getParentId();
String name = deptDO.getName();
// Invoke and assert the expected exception
assertServiceException(() -> deptService.validateDeptNameUnique(id, parentId, name),
DEPT_NAME_DUPLICATE);
} |
@Override
public Proxy find(final String target) {
final String route = this.findNative(target);
if(null == route) {
if(log.isInfoEnabled()) {
log.info(String.format("No proxy configuration found for target %s", target));
}
// Direct
return Proxy.DIRECT;
}
final URI proxy;
try {
proxy = new URI(route);
try {
// User info is never populated. Would have to look it up in the keychain, but we are unaware of the username
return new Proxy(Proxy.Type.valueOf(StringUtils.upperCase(proxy.getScheme())),
proxy.getHost(), proxy.getPort());
}
catch(IllegalArgumentException e) {
log.warn(String.format("Unsupported scheme for proxy %s", proxy));
}
}
catch(URISyntaxException e) {
log.warn(String.format("Invalid proxy configuration %s", route));
}
return Proxy.DIRECT;
} | @Test
public void testExcludedLocalHost() {
final SystemConfigurationProxy proxy = new SystemConfigurationProxy();
assertEquals(Proxy.Type.DIRECT, proxy.find("http://cyberduck.local").getType());
} |
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
if (!boss.equals(renamedBoss))
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
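// Prefer the explicit personal-best group when present, otherwise fall back to the raw time.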
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
} | @Test
public void testGauntletNewPersonalBest()
{
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Challenge duration: <col=ff0000>10:24</col> (new personal best).", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Gauntlet completion count is: <col=ff0000>124</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "gauntlet", 10 * 60 + 24.0);
verify(configManager).setRSProfileConfiguration("killcount", "gauntlet", 124);
// Precise times
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Challenge duration: <col=ff0000>10:24.40</col> (new personal best).", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Gauntlet completion count is: <col=ff0000>124</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "gauntlet", 10 * 60 + 24.4);
} |
@Override
public final ChannelHandler get(String name) {
ChannelHandlerContext ctx = context(name);
if (ctx == null) {
return null;
} else {
return ctx.handler();
}
} | @Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testChannelInitializerException() throws Exception {
final IllegalStateException exception = new IllegalStateException();
final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
final CountDownLatch latch = new CountDownLatch(1);
EmbeddedChannel channel = new EmbeddedChannel(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
throw exception;
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
super.exceptionCaught(ctx, cause);
error.set(cause);
latch.countDown();
}
});
latch.await();
assertFalse(channel.isActive());
assertSame(exception, error.get());
} |
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
} | @Test
public void usingExactEquality_contains_success() {
assertThat(array(1.0f, 2.0f, 3.0f)).usingExactEquality().contains(2.0f);
} |
public static void copyBytes(InputStream in, OutputStream out,
int buffSize, boolean close)
throws IOException {
try {
copyBytes(in, out, buffSize);
if(close) {
out.close();
out = null;
in.close();
in = null;
}
} finally {
if(close) {
closeStream(out);
closeStream(in);
}
}
} | @Test
public void testCopyBytesShouldCloseInputSteamWhenInputStreamCloseThrowsRunTimeException()
throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[1]);
Mockito.doThrow(new RuntimeException()).when(inputStream).close();
try {
IOUtils.copyBytes(inputStream, outputStream, 1, true);
fail("Didn't throw exception");
} catch (RuntimeException e) {
}
Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
} |
public void writeBigSmart(int value)
{
Preconditions.checkArgument(value >= 0);
if (value >= 32768)
{
ensureRemaining(4);
this.writeInt((1 << 31) | value);
}
else
{
ensureRemaining(2);
this.writeShort(value);
}
} | @Test
public void testWriteBigSmart()
{
OutputStream os = new OutputStream();
os.writeBigSmart(42);
os.writeBigSmart(70000);
os.writeBigSmart(65535);
InputStream is = new InputStream(os.getArray());
assertEquals(42, is.readBigSmart());
assertEquals(70000, is.readBigSmart());
assertEquals(65535, is.readBigSmart());
} |
public static PartitionKey createPartitionKey(List<String> values, List<Column> columns) throws AnalysisException {
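// Convenience overload that defaults the table type to HIVE.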
return createPartitionKey(values, columns, Table.TableType.HIVE);
} | @Test
public void testCreateJDBCPartitionKey() throws AnalysisException {
PartitionKey partitionKey = createPartitionKey(
Lists.newArrayList("1", "a", "3.0", JDBCTable.PARTITION_NULL_VALUE), partColumns, Table.TableType.JDBC);
Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql());
} |
public static <T> Opt<T> ofBlankAble(T value) {
return StrUtil.isBlankIfStr(value) ? empty() : new Opt<>(value);
} | @Test
public void ofBlankAbleTest() {
// Unlike ofNullable, ofBlankAble also treats a blank string as empty
String hutool = Opt.ofBlankAble("").orElse("hutool");
assertEquals("hutool", hutool);
} |
public static Integer stringToInteger(String in) {
if (in == null) {
return null;
}
in = in.trim();
if (in.length() == 0) {
return null;
}
try {
return Integer.parseInt(in);
} catch (NumberFormatException e) {
LOG.warn("stringToInteger fail,string=" + in, e);
return null;
}
} | @Test
public void testStringToInteger() {
Assert.assertNull(StringUtils.stringToInteger(""));
Assert.assertNull(StringUtils.stringToInteger(null));
Assert.assertNull(StringUtils.stringToInteger("a"));
Assert.assertEquals(Integer.valueOf(3), StringUtils.stringToInteger("3"));
} |
@Override
public List<IndexSegment> prune(List<IndexSegment> segments, QueryContext query) {
if (segments.isEmpty()) {
return segments;
}
// For LIMIT 0 case, keep one segment to create the schema
int limit = query.getLimit();
if (limit == 0) {
return Collections.singletonList(segments.get(0));
}
// Skip pruning segments for upsert table because valid doc index is equivalent to a filter
if (segments.get(0).getValidDocIds() != null) {
return segments;
}
if (query.getOrderByExpressions() == null) {
return pruneSelectionOnly(segments, query);
} else {
return pruneSelectionOrderBy(segments, query);
}
} | @Test
public void testSelectionOnly() {
List<IndexSegment> indexSegments =
Arrays.asList(getIndexSegment(null, null, 10), getIndexSegment(0L, 10L, 10), getIndexSegment(-5L, 5L, 15));
// Should keep enough documents to fulfill the LIMIT requirement
QueryContext queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable LIMIT 5");
List<IndexSegment> result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 1);
assertSame(result.get(0), indexSegments.get(0));
queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable LIMIT 10");
result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 1);
assertSame(result.get(0), indexSegments.get(0));
queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable LIMIT 15");
result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 2);
assertSame(result.get(0), indexSegments.get(0));
assertSame(result.get(1), indexSegments.get(1));
queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable LIMIT 25");
result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 3);
assertSame(result.get(0), indexSegments.get(0));
assertSame(result.get(1), indexSegments.get(1));
assertSame(result.get(2), indexSegments.get(2));
queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable LIMIT 100");
result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 3);
assertSame(result.get(0), indexSegments.get(0));
assertSame(result.get(1), indexSegments.get(1));
assertSame(result.get(2), indexSegments.get(2));
} |
public static IntStream allLinesFor(DefaultIssue issue, String componentUuid) {
DbIssues.Locations locations = issue.getLocations();
if (locations == null) {
return IntStream.empty();
}
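// Union the issue's primary text range with the text ranges of all flow locations on the given component.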
Stream<DbCommons.TextRange> textRanges = Stream.concat(
locations.hasTextRange() ? Stream.of(locations.getTextRange()) : Stream.empty(),
locations.getFlowList().stream()
.flatMap(f -> f.getLocationList().stream())
.filter(l -> Objects.equals(componentIdOf(issue, l), componentUuid))
.map(DbIssues.Location::getTextRange));
return textRanges.flatMapToInt(range -> IntStream.rangeClosed(range.getStartLine(), range.getEndLine()));
} | @Test
public void allLinesFor_returns_empty_if_no_locations_are_set() {
DefaultIssue issue = new DefaultIssue().setLocations(null);
assertThat(IssueLocations.allLinesFor(issue, "file1")).isEmpty();
} |
public static DATA_TYPE getDATA_TYPE(final List<Field<?>> fields, String fieldName) {
Optional<DATA_TYPE> toReturn = fields.stream()
.filter(fld -> Objects.equals(fieldName, fld.getName()))
.findFirst()
.map(dataField -> DATA_TYPE.byName(dataField.getDataType().value()));
return toReturn.orElseThrow(() -> new KiePMMLInternalException(String.format("Failed to find DATA_TYPE for " +
"field %s",
fieldName)));
} | @Test
void getDataTypeNotFound() {
assertThatExceptionOfType(KiePMMLInternalException.class).isThrownBy(() -> {
final DataDictionary dataDictionary = new DataDictionary();
IntStream.range(0, 3).forEach(i -> {
String fieldName = "field" + i;
final DataField dataField = getRandomDataField();
dataField.setName(fieldName);
dataDictionary.addDataFields(dataField);
});
org.kie.pmml.compiler.api.utils.ModelUtils.getDATA_TYPE(getFieldsFromDataDictionary(dataDictionary),
"NOT_EXISTING");
});
} |
public long computeMemorySize(double fraction) {
validateFraction(fraction);
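// Round down so the result never exceeds the requested fraction of the total memory budget.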
return (long) Math.floor(memoryBudget.getTotalMemorySize() * fraction);
} | @Test
void testComputeMemorySizeFailForZeroFraction() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> memoryManager.computeMemorySize(0.0));
} |
@Override
public boolean canRescaleMaxParallelism(int desiredMaxParallelism) {
// Technically a valid parallelism value, but one that cannot be rescaled to
if (desiredMaxParallelism == JobVertex.MAX_PARALLELISM_DEFAULT) {
return false;
}
return !rescaleMaxValidator
.apply(normalizeAndCheckMaxParallelism(desiredMaxParallelism))
.isPresent();
} | @Test
void canRescaleMaxAuto() {
DefaultVertexParallelismInfo info = new DefaultVertexParallelismInfo(1, 1, ALWAYS_VALID);
assertThat(info.canRescaleMaxParallelism(ExecutionConfig.PARALLELISM_AUTO_MAX)).isTrue();
} |
@Override
public boolean encode(
@NonNull Resource<GifDrawable> resource, @NonNull File file, @NonNull Options options) {
GifDrawable drawable = resource.get();
Transformation<Bitmap> transformation = drawable.getFrameTransformation();
boolean isTransformed = !(transformation instanceof UnitTransformation);
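// Re-encode frame by frame only when a real transformation was applied and transformation encoding is enabled;
// otherwise write the original GIF bytes directly.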
if (isTransformed && options.get(ENCODE_TRANSFORMATION)) {
return encodeTransformedToFile(drawable, file);
} else {
return writeDataDirect(drawable.getBuffer(), file);
}
} | @Test
public void testWritesBytesDirectlyToDiskIfTransformationIsUnitTransformation() {
when(gifDrawable.getFrameTransformation()).thenReturn(UnitTransformation.<Bitmap>get());
String expected = "expected";
when(gifDrawable.getBuffer()).thenReturn(ByteBuffer.wrap(expected.getBytes()));
encoder.encode(resource, file, options);
assertThat(getEncodedData()).isEqualTo(expected);
verify(gifEncoder, never()).start(any(OutputStream.class));
verify(parser, never()).setData(any(byte[].class));
verify(parser, never()).parseHeader();
} |
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
List<ResourceRequest> oppResourceReqs,
ApplicationAttemptId applicationAttemptId,
OpportunisticContainerContext opportContext, long rmIdentifier,
String appSubmitter) throws YarnException {
// Update black list.
updateBlacklist(blackList, opportContext);
// Add OPPORTUNISTIC requests to the outstanding ones.
opportContext.addToOutstandingReqs(oppResourceReqs);
Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
Set<String> allocatedNodes = new HashSet<>();
List<Container> allocatedContainers = new ArrayList<>();
// Satisfy the outstanding OPPORTUNISTIC requests.
boolean continueLoop = true;
while (continueLoop) {
continueLoop = false;
List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
for (SchedulerRequestKey schedulerKey :
opportContext.getOutstandingOpReqs().descendingKeySet()) {
// Allocated containers :
// Key = Requested Capability,
// Value = List of Containers of given cap (the actual container size
// might be different than what is requested, which is why
// we need the requested capability (key) to match against
// the outstanding reqs)
int remAllocs = -1;
int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
if (maxAllocationsPerAMHeartbeat > 0) {
remAllocs =
maxAllocationsPerAMHeartbeat - allocatedContainers.size()
- getTotalAllocations(allocations);
if (remAllocs <= 0) {
LOG.info("Not allocating more containers as we have reached max "
+ "allocations per AM heartbeat {}",
maxAllocationsPerAMHeartbeat);
break;
}
}
Map<Resource, List<Allocation>> allocation = allocate(
rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
if (allocation.size() > 0) {
allocations.add(allocation);
continueLoop = true;
}
}
matchAllocation(allocations, allocatedContainers, opportContext);
}
return allocatedContainers;
} | @Test
public void testRoundRobinSimpleAllocation() throws Exception {
ResourceBlacklistRequest blacklistRequest =
ResourceBlacklistRequest.newInstance(
new ArrayList<>(), new ArrayList<>());
List<ResourceRequest> reqs =
Arrays.asList(
ResourceRequest.newBuilder().allocationRequestId(1)
.priority(PRIORITY_NORMAL)
.resourceName(ResourceRequest.ANY)
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(2)
.priority(PRIORITY_NORMAL)
.resourceName(ResourceRequest.ANY)
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(3)
.priority(PRIORITY_NORMAL)
.resourceName(ResourceRequest.ANY)
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build());
ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0L, 1), 1);
oppCntxt.updateNodeList(
Arrays.asList(
RemoteNode.newInstance(
NodeId.newInstance("h1", 1234), "h1:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h2", 1234), "h2:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h3", 1234), "h3:1234", "/r1")));
List<Container> containers = allocator.allocateContainers(
blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser");
LOG.info("Containers: {}", containers);
Set<String> allocatedHosts = new HashSet<>();
for (Container c : containers) {
allocatedHosts.add(c.getNodeHttpAddress());
}
Assert.assertTrue(allocatedHosts.contains("h1:1234"));
Assert.assertTrue(allocatedHosts.contains("h2:1234"));
Assert.assertTrue(allocatedHosts.contains("h3:1234"));
Assert.assertEquals(3, containers.size());
} |
public static TableRebalanceProgressStats.RebalanceStateStats getDifferenceBetweenTableRebalanceStates(
Map<String, Map<String, String>> targetState, Map<String, Map<String, String>> sourceState) {
TableRebalanceProgressStats.RebalanceStateStats rebalanceStats =
new TableRebalanceProgressStats.RebalanceStateStats();
for (Map.Entry<String, Map<String, String>> entry : targetState.entrySet()) {
String segmentName = entry.getKey();
Map<String, String> sourceInstanceStateMap = sourceState.get(segmentName);
if (sourceInstanceStateMap == null) {
// Skip the missing segment
rebalanceStats._segmentsMissing++;
rebalanceStats._segmentsToRebalance++;
continue;
}
Map<String, String> targetStateInstanceStateMap = entry.getValue();
boolean hasSegmentConverged = true;
for (Map.Entry<String, String> instanceStateEntry : targetStateInstanceStateMap.entrySet()) {
// Ignore OFFLINE state in target state
String targetStateInstanceState = instanceStateEntry.getValue();
if (targetStateInstanceState.equals(CommonConstants.Helix.StateModel.SegmentStateModel.OFFLINE)) {
continue;
}
// Check whether the instance state in source matches the target
String instanceName = instanceStateEntry.getKey();
String sourceInstanceState = sourceInstanceStateMap.get(instanceName);
if (!targetStateInstanceState.equals(sourceInstanceState)) {
rebalanceStats._replicasToRebalance++;
hasSegmentConverged = false;
}
}
if (!hasSegmentConverged) {
rebalanceStats._segmentsToRebalance++;
}
}
int totalSegments = targetState.size();
rebalanceStats._percentSegmentsToRebalance =
(totalSegments == 0) ? 0 : ((double) rebalanceStats._segmentsToRebalance / totalSegments) * 100.0;
return rebalanceStats;
} | @Test
void testDifferenceBetweenTableRebalanceStates() {
Map<String, Map<String, String>> target = new TreeMap<>();
target.put("segment1",
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE));
target.put("segment2",
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2", "host3", "host4"), ONLINE));
// Stats when there's nothing to rebalance
TableRebalanceProgressStats.RebalanceStateStats stats =
ZkBasedTableRebalanceObserver.getDifferenceBetweenTableRebalanceStates(target, target);
assertEquals(stats._segmentsToRebalance, 0);
assertEquals(stats._segmentsMissing, 0);
assertEquals(stats._percentSegmentsToRebalance, 0.0);
// Stats when there's something to converge
Map<String, Map<String, String>> current = new TreeMap<>();
current.put("segment1", SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1"), ONLINE));
current.put("segment2", SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2"), ONLINE));
stats = ZkBasedTableRebalanceObserver.getDifferenceBetweenTableRebalanceStates(target, current);
assertEquals(stats._segmentsToRebalance, 2);
assertEquals(stats._percentSegmentsToRebalance, 100.0);
assertEquals(stats._replicasToRebalance, 4);
// Stats when there are errors
current = new TreeMap<>();
current.put("segment1", SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1"), ERROR));
stats = ZkBasedTableRebalanceObserver.getDifferenceBetweenTableRebalanceStates(target, current);
assertEquals(stats._segmentsToRebalance, 2);
assertEquals(stats._segmentsMissing, 1);
assertEquals(stats._replicasToRebalance, 3);
// Stats when partially converged
current = new TreeMap<>();
current.put("segment1",
SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE));
current.put("segment2", SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2", "host3"), ONLINE));
stats = ZkBasedTableRebalanceObserver.getDifferenceBetweenTableRebalanceStates(target, current);
assertEquals(stats._percentSegmentsToRebalance, 50.0);
} |
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
payload.writeInt8(((Number) value).longValue());
} | @Test
void assertWrite() {
byte[] actual = new byte[24];
PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(Unpooled.wrappedBuffer(actual).writerIndex(0), StandardCharsets.UTF_8);
new PostgreSQLInt8BinaryProtocolValue().write(payload, -1);
new PostgreSQLInt8BinaryProtocolValue().write(payload, Long.MAX_VALUE);
new PostgreSQLInt8BinaryProtocolValue().write(payload, BigDecimal.valueOf(Long.MIN_VALUE));
byte[] expected = new byte[]{
(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
(byte) 0x7F, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
(byte) 0x80, 0, 0, 0, 0, 0, 0, 0};
assertThat(actual, is(expected));
} |
@Override public HashSlotCursor16byteKey cursor() {
return new CursorLongKey2();
} | @Test
public void testCursor_withManyValues() {
final long factor = 123456;
final int k = 1000;
for (int i = 1; i <= k; i++) {
long key1 = (long) i;
long key2 = key1 * factor;
insert(key1, key2);
}
boolean[] verifiedKeys = new boolean[k];
HashSlotCursor16byteKey cursor = hsa.cursor();
while (cursor.advance()) {
long key1 = cursor.key1();
long key2 = cursor.key2();
long valueAddress = cursor.valueAddress();
assertEquals(key1 * factor, key2);
verifyValue(key1, key2, valueAddress);
verifiedKeys[((int) key1) - 1] = true;
}
for (int i = 0; i < k; i++) {
assertTrue("Failed to encounter key " + i, verifiedKeys[i]);
}
} |
public static Optional<JksOptions> buildJksKeyStoreOptions(
final Map<String, String> props,
final Optional<String> alias
) {
final String location = getKeyStoreLocation(props);
final String keyStorePassword = getKeyStorePassword(props);
final String keyPassword = getKeyPassword(props);
if (!Strings.isNullOrEmpty(location)) {
final JksOptions jksOptions;
if (alias.isPresent() && !alias.get().isEmpty()) {
jksOptions = buildJksOptions(
loadJksKeyStore(location, keyStorePassword, keyPassword, alias.get()),
keyStorePassword
);
} else {
jksOptions = buildJksOptions(location, keyStorePassword);
}
return Optional.of(jksOptions);
}
return Optional.empty();
} | @Test
public void shouldReturnEmptyKeyStoreJksOptionsIfLocationIsEmpty() {
// When
final Optional<JksOptions> jksOptions = VertxSslOptionsFactory.buildJksKeyStoreOptions(
ImmutableMap.of(),
Optional.empty()
);
// Then
assertThat(jksOptions, is(Optional.empty()));
} |
@Override
@NotNull
public List<PartitionStatistics> sort(@NotNull List<PartitionStatistics> partitionStatistics) {
return partitionStatistics.stream()
.filter(p -> p.getCompactionScore() != null)
.sorted(Comparator.comparingInt((PartitionStatistics stats) -> stats.getPriority().getValue()).reversed()
.thenComparing(Comparator.comparing(PartitionStatistics::getCompactionScore).reversed()))
.collect(Collectors.toList());
} | @Test
public void testPriority() {
List<PartitionStatistics> statisticsList = new ArrayList<>();
PartitionStatistics statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 3));
statistics.setCompactionScore(Quantiles.compute(Arrays.asList(0.0, 0.0, 0.0)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 4));
statistics.setCompactionScore(Quantiles.compute(Arrays.asList(1.1, 1.1, 1.2)));
statisticsList.add(statistics);
ScoreSorter sorter = new ScoreSorter();
List<PartitionStatistics> sortedList = sorter.sort(statisticsList);
Assert.assertEquals(4, sortedList.get(0).getPartition().getPartitionId());
Assert.assertEquals(3, sortedList.get(1).getPartition().getPartitionId());
// sort by priority first
statisticsList.get(0).setPriority(PartitionStatistics.CompactionPriority.MANUAL_COMPACT);
sortedList = sorter.sort(statisticsList);
Assert.assertEquals(3, sortedList.get(0).getPartition().getPartitionId());
Assert.assertEquals(4, sortedList.get(1).getPartition().getPartitionId());
// When priorities are equal, fall back to comparing compaction scores
statisticsList.get(1).setPriority(PartitionStatistics.CompactionPriority.MANUAL_COMPACT);
sortedList = sorter.sort(statisticsList);
Assert.assertEquals(4, sortedList.get(0).getPartition().getPartitionId());
Assert.assertEquals(3, sortedList.get(1).getPartition().getPartitionId());
} |
public String toCompactListString() {
return id + COMMA + locType + COMMA + latOrY + COMMA + longOrX;
} | @Test
public void toCompactListStringEmptyArray() {
String s = toCompactListString();
assertEquals("not empty string", "", s);
} |
@Override
public Set<String> filterCatalogs(Identity identity, AccessControlContext context, Set<String> catalogs)
{
ImmutableSet.Builder<String> filteredCatalogs = ImmutableSet.builder();
for (String catalog : catalogs) {
if (canAccessCatalog(identity, catalog, READ_ONLY)) {
filteredCatalogs.add(catalog);
}
}
return filteredCatalogs.build();
} | @Test
public void testCatalogOperationsReadOnly() throws IOException
{
TransactionManager transactionManager = createTestTransactionManager();
AccessControlManager accessControlManager = newAccessControlManager(transactionManager, "catalog_read_only.json");
transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
assertEquals(accessControlManager.filterCatalogs(admin, context, allCatalogs), allCatalogs);
Set<String> aliceCatalogs = ImmutableSet.of("open-to-all", "alice-catalog", "all-allowed");
assertEquals(accessControlManager.filterCatalogs(alice, context, allCatalogs), aliceCatalogs);
Set<String> bobCatalogs = ImmutableSet.of("open-to-all", "all-allowed");
assertEquals(accessControlManager.filterCatalogs(bob, context, allCatalogs), bobCatalogs);
Set<String> nonAsciiUserCatalogs = ImmutableSet.of("open-to-all", "all-allowed", "\u0200\u0200\u0200");
assertEquals(accessControlManager.filterCatalogs(nonAsciiUser, context, allCatalogs), nonAsciiUserCatalogs);
});
} |
Packet toBackupAckPacket(long callId, boolean urgent) {
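// The packet body is just the fixed-size response prologue (response type, call id, urgent flag).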
byte[] bytes = new byte[BACKUP_RESPONSE_SIZE_IN_BYTES];
writeResponsePrologueBytes(bytes, BACKUP_ACK_RESPONSE, callId, urgent);
return newResponsePacket(bytes, urgent);
} | @Test
public void toBackupAckPacket() {
testToBackupAckPacket(1, false);
testToBackupAckPacket(2, true);
} |
@Override
public V load(K key) {
long startNanos = Timer.nanos();
try {
return delegate.load(key);
} finally {
loadProbe.recordValue(Timer.nanosElapsed(startNanos));
}
} | @Test
public void load() {
String key = "key";
String value = "value";
when(delegate.load(key)).thenReturn(value);
String result = cacheLoader.load(key);
assertSame(value, result);
assertProbeCalledOnce("load");
} |
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
if (StringUtils.isEmpty(spelExpression)) {
return spelExpression;
}
if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
return stringValueResolver.resolveStringValue(spelExpression);
}
if (spelExpression.matches(METHOD_SPEL_REGEX)) {
SpelRootObject rootObject = new SpelRootObject(method, arguments);
MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
return (String) evaluated;
}
if (spelExpression.matches(BEAN_SPEL_REGEX)) {
SpelRootObject rootObject = new SpelRootObject(method, arguments);
MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
return (String) evaluated;
}
return spelExpression;
} | @Test
public void testP0() throws Exception {
String testExpression = "#p0";
String firstArgument = "test";
DefaultSpelResolverTest target = new DefaultSpelResolverTest();
Method testMethod = target.getClass().getMethod("testMethod", String.class);
String result = sut.resolve(testMethod, new Object[]{firstArgument}, testExpression);
assertThat(result).isEqualTo(firstArgument);
} |
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
return newGetter(object, parent, modifier, field.getType(), field::get,
(t, et) -> new FieldGetter(parent, field, modifier, t, et));
} | @Test
public void newFieldGetter_whenExtractingFromNull_Collection_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", InnerObject.nullInner("inner"));
Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]");
Getter innerObjectNameGetter
= GetterFactory.newFieldGetter(object, parentGetter, innerAttributesCollectionField, "[any]");
assertSame(NullMultiValueGetter.NULL_MULTIVALUE_GETTER, innerObjectNameGetter);
} |
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
if(status.isExists()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", target, file));
}
new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(target), callback, new Delete.DisabledCallback());
}
if(file.isDirectory()) {
return target.withAttributes(new BoxAttributesFinderFeature(session, fileid).toAttributes(
new FoldersApi(new BoxApiClient(session.getClient())).postFoldersIdCopy(
fileid.getFileId(file),
new FolderIdCopyBody().name(target.getName()).parent(new FoldersfolderIdcopyParent().id(fileid.getFileId(target.getParent()))),
BoxAttributesFinderFeature.DEFAULT_FIELDS)
));
}
return target.withAttributes(new BoxAttributesFinderFeature(session, fileid).toAttributes(
new FilesApi(new BoxApiClient(session.getClient())).postFilesIdCopy(
fileid.getFileId(file),
new FileIdCopyBody()
.name(target.getName())
.parent(new FilesfileIdcopyParent().id(fileid.getFileId(target.getParent()))),
null, BoxAttributesFinderFeature.DEFAULT_FIELDS)
));
}
catch(ApiException e) {
throw new BoxExceptionMappingService(fileid).map("Cannot copy {0}", e, file);
}
} | @Test
public void testCopyDirectory() throws Exception {
final BoxFileidProvider fileid = new BoxFileidProvider(session);
final Path directory = new BoxDirectoryFeature(session, fileid).mkdir(new Path(new DefaultHomeFinderService(session).find(),
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final String name = new AlphanumericRandomStringService().random();
final Path file = new BoxTouchFeature(session, fileid).touch(new Path(directory, name, EnumSet.of(Path.Type.file)), new TransferStatus());
final Path copy = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new BoxCopyFeature(session, fileid).copy(directory, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new BoxFindFeature(session, fileid).find(file));
assertTrue(new BoxFindFeature(session, fileid).find(copy));
assertTrue(new BoxFindFeature(session, fileid).find(new Path(copy, name, EnumSet.of(Path.Type.file))));
new BoxDeleteFeature(session, fileid).delete(Arrays.asList(copy, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public TrackerClient getClient(String serviceName, URI uri)
{
Map<URI, TrackerClient> trackerClients = _trackerClients.get(serviceName);
TrackerClient trackerClient = null;
if (trackerClients != null)
{
trackerClient = trackerClients.get(uri);
}
else
{
warn(_log, "get client called on unknown service ", serviceName, ": ", uri);
}
return trackerClient;
} | @Test(groups = { "small", "back-end" })
public void testGetClient() throws URISyntaxException
{
reset();
URI uri = URI.create("http://cluster-1/test");
List<String> schemes = new ArrayList<>();
Map<Integer, PartitionData> partitionData = new HashMap<>(1);
partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>();
uriData.put(uri, partitionData);
schemes.add("http");
assertNull(_state.getClient("service-1", uri));
// set up state
_state.listenToCluster("cluster-1", new NullStateListenerCallback());
assertNull(_state.getClient("service-1", uri));
_state.listenToService("service-1", new NullStateListenerCallback());
assertNull(_state.getClient("service-1", uri));
_serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1",
"/test", Arrays.asList("random"),
Collections.<String, Object>emptyMap(),
null, null, schemes, null));
assertNull(_state.getClient("service-1", uri));
_uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
TrackerClient client = _state.getClient("service-1", uri);
assertNotNull(client);
assertEquals(client.getUri(), uri);
} |
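A framework-free sketch of the two-level lookup the method performs (names are illustrative, not rest.li internals):
// serviceName -> (uri -> client); both an unknown service and an unannounced URI yield null rather than throwing.
final Map<String, Map<URI, TrackerClient>> byService = new ConcurrentHashMap<>();
TrackerClient lookup(final String serviceName, final URI uri) {
    final Map<URI, TrackerClient> byUri = byService.get(serviceName);
    return byUri == null ? null : byUri.get(uri);
}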
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
if (bytes == null) {
return null;
}
// don't use the JsonSchemaConverter to read this data because
// we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
// which is not currently available in the standard converters
final JsonNode value = isJsonSchema
? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
: MAPPER.readTree(bytes);
final Object coerced = enforceFieldType(
"$",
new JsonValueContext(value, schema)
);
if (LOG.isTraceEnabled()) {
LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
}
return SerdeUtils.castToTargetType(coerced, targetType);
} catch (final Exception e) {
// Clear location in order to avoid logging data, for security reasons
if (e instanceof JsonParseException) {
((JsonParseException) e).clearLocation();
}
throw new SerializationException(
"Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
}
} | @Test
public void shouldThrowIfNotAnObject() {
// Given:
final byte[] bytes = serializeJson(BooleanNode.valueOf(true));
// When:
final Exception e = assertThrows(
SerializationException.class,
() -> deserializer.deserialize(SOME_TOPIC, bytes)
);
// Then:
assertThat(e.getCause(), (hasMessage(containsString(
"Can't convert type. sourceType: BooleanNode, requiredType: STRUCT<ORDERTIME BIGINT"))));
} |
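Two boundary behaviors follow directly from the method body and can be pinned down with assertions in the same test class (SOME_TOPIC and deserializer as above):
// A null payload short-circuits to null before any parsing happens.
assertNull(deserializer.deserialize(SOME_TOPIC, null));
// Malformed bytes surface as SerializationException with the parse location cleared, so no payload data leaks into logs.
assertThrows(SerializationException.class,
    () -> deserializer.deserialize(SOME_TOPIC, "not-json".getBytes(StandardCharsets.UTF_8)));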
public void start() {
// Last minute init. Neither properties not annotations provided an
// injector source.
if (injector == null) {
injector = createDefaultScenarioModuleInjectorSource().getInjector();
}
scenarioScope = injector.getInstance(ScenarioScope.class);
scenarioScope.enterScope();
} | @Test
void factoryStartFailsIfScenarioScopeIsNotBound() {
initFactory(Guice.createInjector());
ConfigurationException actualThrown = assertThrows(ConfigurationException.class, () -> factory.start());
assertThat("Unexpected exception message", actualThrown.getMessage(),
containsString("1) [Guice/MissingImplementation]: No implementation for ScenarioScope was bound."));
} |
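The passing counterpart, as a sketch: start() succeeds once ScenarioScope has a binding. StubScenarioScope is a hypothetical stand-in, not part of cucumber-guice:
final Injector injector = Guice.createInjector(
        binder -> binder.bind(ScenarioScope.class).to(StubScenarioScope.class));
initFactory(injector);
factory.start(); // enters the scenario scope instead of throwing ConfigurationException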
@VisibleForTesting
public void validateDictTypeExists(String type) {
DictTypeDO dictType = dictTypeService.getDictType(type);
if (dictType == null) {
throw exception(DICT_TYPE_NOT_EXISTS);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(dictType.getStatus())) {
throw exception(DICT_TYPE_NOT_ENABLE);
}
} | @Test
public void testValidateDictTypeExists_notExists() {
assertServiceException(() -> dictDataService.validateDictTypeExists(randomString()), DICT_TYPE_NOT_EXISTS);
} |
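For contrast, a sketch of the disabled-type branch (mocked dictTypeService; randomPojo is assumed to be the project's usual test helper):
// A dict type that exists but is disabled must raise DICT_TYPE_NOT_ENABLE, not DICT_TYPE_NOT_EXISTS.
DictTypeDO disabled = randomPojo(DictTypeDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
when(dictTypeService.getDictType(disabled.getType())).thenReturn(disabled);
assertServiceException(() -> dictDataService.validateDictTypeExists(disabled.getType()), DICT_TYPE_NOT_ENABLE);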
@Override
public ResultSet getVersionColumns(final String catalog, final String schema, final String table) throws SQLException {
return createDatabaseMetaDataResultSet(getDatabaseMetaData().getVersionColumns(getActualCatalog(catalog), getActualSchema(schema), getActualTable(getActualCatalog(catalog), table)));
} | @Test
void assertGetVersionColumns() throws SQLException {
when(databaseMetaData.getVersionColumns("test", null, null)).thenReturn(resultSet);
assertThat(shardingSphereDatabaseMetaData.getVersionColumns("test", null, null), instanceOf(DatabaseMetaDataResultSet.class));
} |
@Override
public String toString() {
if (stringified == null) {
stringified = formatToString();
}
return stringified;
} | @Test
void testStandardUtils() throws IOException {
final MemorySize size = new MemorySize(1234567890L);
final MemorySize cloned = CommonTestUtils.createCopySerializable(size);
assertThat(cloned).isEqualTo(size);
assertThat(cloned).hasSameHashCodeAs(size);
assertThat(cloned).hasToString(size.toString());
} |
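The caching idiom in miniature, with its thread-safety caveat spelled out (generic sketch, independent of MemorySize):
class CachedToString {
    // Benign data race: formatToString() is deterministic, so concurrent callers
    // at worst compute the same value once each before the field settles.
    private String stringified;
    @Override public String toString() {
        if (stringified == null) {
            stringified = formatToString();
        }
        return stringified;
    }
    private String formatToString() { return "...expensive formatting..."; }
}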
@InvokeOnHeader(Web3jConstants.DB_PUT_STRING)
void dbPutString(Message message) throws IOException {
String databaseName = message.getHeader(Web3jConstants.DATABASE_NAME, configuration::getDatabaseName, String.class);
String keyName = message.getHeader(Web3jConstants.KEY_NAME, configuration::getKeyName, String.class);
Request<?, DbPutString> request = web3j.dbPutString(databaseName, keyName, message.getBody(String.class));
setRequestId(message, request);
DbPutString response = request.send();
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.valueStored());
}
} | @Test
public void dbPutStringTest() throws Exception {
DbPutString response = Mockito.mock(DbPutString.class);
Mockito.when(mockWeb3j.dbPutString(any(), any(), any())).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.valueStored()).thenReturn(Boolean.TRUE);
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.DB_PUT_STRING);
template.send(exchange);
Boolean body = exchange.getIn().getBody(Boolean.class);
assertTrue(body);
} |
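A hedged usage sketch of the same handler with explicit headers overriding the endpoint configuration (database and key names are illustrative):
Exchange exchange = createExchangeWithBodyAndHeader("stored-value", OPERATION, Web3jConstants.DB_PUT_STRING);
exchange.getIn().setHeader(Web3jConstants.DATABASE_NAME, "testDb");
exchange.getIn().setHeader(Web3jConstants.KEY_NAME, "myKey");
template.send(exchange);
// On success the body becomes response.valueStored(); on an RPC error the body is left untouched.
assertTrue(exchange.getIn().getBody(Boolean.class));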
@Override
public void deletePost(Long id) {
// Verify the post exists
validatePostExists(id);
// Delete the post
postMapper.deleteById(id);
} | @Test
public void testDeletePost_success() {
// mock data
PostDO postDO = randomPostDO();
postMapper.insert(postDO);
// prepare the parameter
Long id = postDO.getId();
// invoke
postService.deletePost(id);
assertNull(postMapper.selectById(id));
} |
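The complementary failing case, sketched in the same style (the POST_NOT_EXISTS error code is assumed by analogy with validatePostExists):
// Deleting an id that was never inserted should fail validation before any mapper call.
assertServiceException(() -> postService.deletePost(randomLongId()), POST_NOT_EXISTS);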
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
} | @Test
public void shouldCastDecimalNoOp() {
// Given:
final BigDecimal source = new BigDecimal("1.1");
// When:
final BigDecimal decimal = DecimalUtil.cast(source, 2, 1);
// Then: casting to the value's existing precision and scale is a no-op, returning the same instance
assertThat(decimal, sameInstance(source));
} |
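The Integer overload above only widens and delegates, so its two observable behaviors can be sketched directly (the expected value assumes the long overload applies the requested scale):
// Null passes through untouched.
assertThat(DecimalUtil.cast((Integer) null, 2, 1), is(nullValue()));
// 5 widens to 5L and comes back rescaled to precision 2 / scale 1.
assertThat(DecimalUtil.cast(5, 2, 1), is(new BigDecimal("5.0")));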
@Override
public void updateMember(ConsumerGroupMember newMember) {
if (newMember == null) {
throw new IllegalArgumentException("newMember cannot be null.");
}
ConsumerGroupMember oldMember = members.put(newMember.memberId(), newMember);
maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, newMember);
maybeUpdateServerAssignors(oldMember, newMember);
maybeUpdatePartitionEpoch(oldMember, newMember);
updateStaticMember(newMember);
maybeUpdateGroupState();
maybeUpdateNumClassicProtocolMembers(oldMember, newMember);
maybeUpdateClassicProtocolMembersSupportedProtocols(oldMember, newMember);
} | @Test
public void testUpdateSubscriptionMetadata() {
Uuid fooTopicId = Uuid.randomUuid();
Uuid barTopicId = Uuid.randomUuid();
Uuid zarTopicId = Uuid.randomUuid();
MetadataImage image = new MetadataImageBuilder()
.addTopic(fooTopicId, "foo", 1)
.addTopic(barTopicId, "bar", 2)
.addTopic(zarTopicId, "zar", 3)
.addRacks()
.build();
ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
.setSubscribedTopicNames(Collections.singletonList("foo"))
.build();
ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2")
.setSubscribedTopicNames(Collections.singletonList("bar"))
.build();
ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3")
.setSubscribedTopicNames(Collections.singletonList("zar"))
.build();
ConsumerGroup consumerGroup = createConsumerGroup("group-foo");
// It should be empty by default.
assertEquals(
Collections.emptyMap(),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account member 1.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, member1),
image.topics(),
image.cluster()
)
);
// Updating the group with member1.
consumerGroup.updateMember(member1);
// It should return foo now.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 1.
assertEquals(
Collections.emptyMap(),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(member1, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account member 2.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, member2),
image.topics(),
image.cluster()
)
);
// Updating the group with member2.
consumerGroup.updateMember(member2);
// It should return foo and bar.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 2.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(member2, null),
image.topics(),
image.cluster()
)
);
// Removing member1 results in returning bar.
assertEquals(
mkMap(
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(member1, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account member 3.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, member3),
image.topics(),
image.cluster()
)
);
// Updating group with member3.
consumerGroup.updateMember(member3);
// It should return foo, bar and zar.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 1, member 2 and member 3
assertEquals(
Collections.emptyMap(),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(new HashSet<>(Arrays.asList(member1, member2, member3))),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 2 and member 3.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(new HashSet<>(Arrays.asList(member2, member3))),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 1.
assertEquals(
mkMap(
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(Collections.singleton(member1)),
image.topics(),
image.cluster()
)
);
// It should return foo, bar and zar.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
consumerGroup.computeSubscriptionMetadata(
consumerGroup.computeSubscribedTopicNames(Collections.emptySet()),
image.topics(),
image.cluster()
)
);
} |
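A framework-free miniature of the delta pattern the test leans on: preview the subscribed-topic set as if one member left and another joined, without mutating the group (illustrative names, not Kafka's API):
static Set<String> previewSubscribedTopics(final Map<String, Set<String>> topicsByMember,
                                           final String leavingMemberId,
                                           final Set<String> joiningTopics) {
    final Set<String> preview = new HashSet<>();
    topicsByMember.forEach((memberId, topics) -> {
        if (!memberId.equals(leavingMemberId)) {
            preview.addAll(topics);
        }
    });
    if (joiningTopics != null) {
        preview.addAll(joiningTopics);
    }
    // The group's own state is untouched, mirroring computeSubscribedTopicNames(oldMember, newMember).
    return preview;
}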
public static void writeIdlProtocol(Writer writer, Protocol protocol) throws IOException {
final String protocolFullName = protocol.getName();
final int lastDotPos = protocolFullName.lastIndexOf(".");
final String protocolNameSpace;
if (lastDotPos < 0) {
protocolNameSpace = protocol.getNamespace();
} else if (lastDotPos > 0) {
protocolNameSpace = protocolFullName.substring(0, lastDotPos);
} else {
protocolNameSpace = null;
}
writeIdlProtocol(writer, protocol, protocolNameSpace, protocolFullName.substring(lastDotPos + 1),
protocol.getTypes(), protocol.getMessages().values());
} | @Test
public void validateHappyFlowForProtocol() throws IOException {
Protocol protocol = parseIdlResource("idl_utils_test_protocol.avdl").getProtocol();
StringWriter buffer = new StringWriter();
IdlUtils.writeIdlProtocol(buffer, protocol);
assertEquals(getResourceAsString("idl_utils_test_protocol.avdl"), buffer.toString());
} |
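The name-splitting rule above, worked through as comments (each outcome follows directly from lastIndexOf('.')):
// "com.example.Proto" -> lastDotPos > 0:  namespace "com.example", simple name "Proto"
// "Proto"             -> lastDotPos < 0:  namespace falls back to protocol.getNamespace(), simple name "Proto"
// ".Proto"            -> lastDotPos == 0: namespace null, simple name "Proto"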
public static void ensureCorrectArgs(
final FunctionName functionName, final Object[] args, final Class<?>... argTypes
) {
if (args == null) {
throw new KsqlFunctionException("Null argument list for " + functionName.text() + ".");
}
if (args.length != argTypes.length) {
throw new KsqlFunctionException("Incorrect arguments for " + functionName.text() + ".");
}
for (int i = 0; i < argTypes.length; i++) {
if (args[i] == null) {
continue;
}
if (!argTypes[i].isAssignableFrom(args[i].getClass())) {
throw new KsqlFunctionException(
String.format(
"Incorrect arguments type for %s. "
+ "Expected %s for arg number %d but found %s.",
functionName.text(),
argTypes[i].getCanonicalName(),
i,
args[i].getClass().getCanonicalName()
));
}
}
} | @Test(expected = KsqlException.class)
public void shouldFailIfArgCountIsTooFew() {
final Object[] args = new Object[]{"TtestArg1", 10L};
UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class, Boolean.class, String.class);
} |
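Two behaviors worth pinning down with a sketch: null elements are skipped, and assignability rather than type equality is what is checked:
// Passes: the null slot is skipped entirely, and Integer is assignable to Number.
UdfUtil.ensureCorrectArgs(FUNCTION_NAME, new Object[]{null, 10}, String.class, Number.class);
// Throws KsqlFunctionException: Long is not assignable to Integer.
UdfUtil.ensureCorrectArgs(FUNCTION_NAME, new Object[]{10L}, Integer.class);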
public synchronized String get() {
ConfidentialStore cs = ConfidentialStore.get();
if (secret == null || cs != lastCS) {
lastCS = cs;
try {
byte[] payload = load();
if (payload == null) {
payload = cs.randomBytes(length / 2);
store(payload);
}
secret = Util.toHexString(payload).substring(0, length);
} catch (IOException e) {
throw new Error("Failed to load the key: " + getId(), e);
}
}
return secret;
} | @Test
public void specifyLengthAndMakeSureItTakesEffect() {
for (int n : new int[] {8, 16, 32, 256}) {
assertEquals(n, new HexStringConfidentialKey("test" + n, n).get().length());
}
} |
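Why randomBytes(length / 2) is enough, as a short check (generic sketch around hudson.Util):
byte[] payload = {(byte) 0xAB, (byte) 0xCD};  // length / 2 bytes...
String hex = Util.toHexString(payload);       // ...render as "abcd": two hex characters per byte
assert hex.length() == payload.length * 2;    // so substring(0, length) always has enough characters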
public void replayCreateResource(Resource resource) throws DdlException {
if (resource.needMappingCatalog()) {
String type = resource.getType().name().toLowerCase(Locale.ROOT);
String catalogName = getResourceMappingCatalogName(resource.getName(), type);
if (nameToResource.containsKey(resource.name)) {
DropCatalogStmt dropCatalogStmt = new DropCatalogStmt(catalogName);
GlobalStateMgr.getCurrentState().getCatalogMgr().dropCatalog(dropCatalogStmt);
}
Map<String, String> properties = Maps.newHashMap(resource.getProperties());
properties.put("type", type);
properties.put(HIVE_METASTORE_URIS, resource.getHiveMetastoreURIs());
GlobalStateMgr.getCurrentState().getCatalogMgr().createCatalog(type, catalogName, "mapping catalog", properties);
}
this.writeLock();
try {
nameToResource.put(resource.getName(), resource);
} finally {
this.writeUnLock();
}
LOG.info("replay create/alter resource log success. resource name: {}", resource.getName());
} | @Test
public void testReplayCreateResource(@Injectable EditLog editLog, @Mocked GlobalStateMgr globalStateMgr)
throws UserException {
ResourceMgr mgr = new ResourceMgr();
type = "hive";
name = "hive0";
addHiveResource(mgr, editLog, globalStateMgr);
Resource hiveRes = new HiveResource(name);
Map<String, String> properties = new HashMap<>();
String newUris = "thrift://10.10.44.xxx:9083";
properties.put("hive.metastore.uris", newUris);
hiveRes.setProperties(properties);
mgr.replayCreateResource(hiveRes);
Assert.assertNotNull(mgr.getResource(name));
} |
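A usage sketch of the replay path applied twice, highlighting the drop-and-recreate of the mapping catalog (the URI is illustrative):
Resource updated = new HiveResource("hive0");
Map<String, String> props = new HashMap<>();
props.put("hive.metastore.uris", "thrift://10.10.44.yyy:9083");
updated.setProperties(props);
mgr.replayCreateResource(updated);
// Replaying the same edit-log entry again is safe: the existing mapping catalog is dropped
// and recreated before nameToResource is overwritten under the write lock.
mgr.replayCreateResource(updated);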
Map<ExecNode<?>, Integer> calculateMaximumDistance() {
Map<ExecNode<?>, Integer> result = new HashMap<>();
Map<TopologyNode, Integer> inputsVisitedMap = new HashMap<>();
Queue<TopologyNode> queue = new LinkedList<>();
for (TopologyNode node : nodes.values()) {
if (node.inputs.size() == 0) {
queue.offer(node);
}
}
while (!queue.isEmpty()) {
TopologyNode node = queue.poll();
int dist = -1;
for (TopologyNode input : node.inputs) {
dist =
Math.max(
dist,
Preconditions.checkNotNull(
result.get(input.execNode),
"The distance of an input node is not calculated. This is a bug."));
}
dist++;
result.put(node.execNode, dist);
for (TopologyNode output : node.outputs) {
int inputsVisited =
inputsVisitedMap.compute(output, (k, v) -> v == null ? 1 : v + 1);
if (inputsVisited == output.inputs.size()) {
queue.offer(output);
}
}
}
return result;
} | @Test
void testBoundedCalculateMaximumDistance() {
Tuple2<TopologyGraph, TestingBatchExecNode[]> tuple2 = buildBoundedTopologyGraph();
TopologyGraph graph = tuple2.f0;
TestingBatchExecNode[] nodes = tuple2.f1;
Map<ExecNode<?>, Integer> result = graph.calculateMaximumDistance();
assertThat(result).hasSize(6);
assertThat(result.get(nodes[2]).intValue()).isEqualTo(0);
assertThat(result.get(nodes[3]).intValue()).isEqualTo(0);
assertThat(result.get(nodes[4]).intValue()).isEqualTo(1);
assertThat(result.get(nodes[6]).intValue()).isEqualTo(1);
assertThat(result.get(nodes[5]).intValue()).isEqualTo(2);
assertThat(result.get(nodes[7]).intValue()).isEqualTo(2);
} |
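The same longest-distance layering as a self-contained sketch on a toy DAG (plain Java, independent of Flink's ExecNode types):
static Map<String, Integer> maximumDistance(final Map<String, List<String>> outEdges) {
    final Map<String, Integer> inDegree = new HashMap<>();
    outEdges.keySet().forEach(n -> inDegree.putIfAbsent(n, 0));
    outEdges.values().forEach(outs -> outs.forEach(o -> inDegree.merge(o, 1, Integer::sum)));
    final Deque<String> queue = new ArrayDeque<>();
    final Map<String, Integer> dist = new HashMap<>();
    inDegree.forEach((n, d) -> { if (d == 0) { queue.add(n); dist.put(n, 0); } });
    while (!queue.isEmpty()) { // Kahn-style topological sweep
        final String n = queue.poll();
        for (final String o : outEdges.getOrDefault(n, List.of())) {
            dist.merge(o, dist.get(n) + 1, Math::max); // distance = longest path from any source
            if (inDegree.merge(o, -1, Integer::sum) == 0) {
                queue.add(o);
            }
        }
    }
    return dist;
}
// maximumDistance(Map.of("a", List.of("b"), "b", List.of("c"), "c", List.of())) -> {a=0, b=1, c=2}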