focal_method | test_case
---|---|
public String getJobCompletionRequestBody(String elasticAgentId, JobIdentifier jobIdentifier) {
JsonObject jsonObject = new JsonObject();
jsonObject.addProperty("elastic_agent_id", elasticAgentId);
jsonObject.add("job_identifier", jobIdentifierJson(jobIdentifier));
return FORCED_EXPOSE_GSON.toJson(jsonObject);
} | @Test
public void shouldJSONizeJobCompletionRequestBody() throws Exception {
String actual = new ElasticAgentExtensionConverterV4().getJobCompletionRequestBody("ea1", jobIdentifier);
String expected = """
{ "elastic_agent_id":"ea1", "job_identifier": {
"pipeline_name": "test-pipeline",
"pipeline_counter": 1,
"pipeline_label": "Test Pipeline",
"stage_name": "test-stage",
"stage_counter": "1",
"job_name": "test-job",
"job_id": 100
}
}""";
assertThatJson(expected).isEqualTo(actual);
} |
@Override
public boolean match(Message msg, StreamRule rule) {
final boolean inverted = rule.getInverted();
final Object field = msg.getField(rule.getField());
if (field != null) {
final String value = field.toString();
return inverted ^ value.contains(rule.getValue());
} else {
return inverted;
}
} | @Test
public void testNonExistentFieldInverted() {
rule.setInverted(true);
msg.addField("someother", "hello foo");
StreamRuleMatcher matcher = getMatcher(rule);
assertTrue(matcher.match(msg, rule));
} |
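A minimal standalone sketch (hypothetical names, not Graylog's API) of why the single `inverted ^ contains` expression covers all four matcher cases, including the missing-field fallback the test above exercises:
public class ContainsMatchSketch {
    static boolean match(String fieldValue, String ruleValue, boolean inverted) {
        if (fieldValue == null) {
            return inverted; // a missing field only matches when the rule is inverted
        }
        return inverted ^ fieldValue.contains(ruleValue); // XOR flips the result for inverted rules
    }

    public static void main(String[] args) {
        System.out.println(match("hello foo", "foo", false)); // true:  present, not inverted
        System.out.println(match("hello foo", "foo", true));  // false: present, inverted
        System.out.println(match(null, "foo", true));         // true:  missing field, inverted rule
    }
}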
@Override
public ConcurrentJobModificationResolveResult resolve(Job localJob, Job storageProviderJob) {
if (localJob.getState() == StateName.DELETED && storageProviderJob.getState() == StateName.DELETED) {
throw shouldNotHappenException("Should not happen as matches filter should be filtering out this StateChangeFilter");
} else if (localJob.getState() == StateName.PROCESSING && storageProviderJob.getState() == StateName.DELETED) {
localJob.delete("Job is already deleted in StorageProvider");
final Thread threadProcessingJob = jobSteward.getThreadProcessingJob(localJob);
if (threadProcessingJob != null) {
threadProcessingJob.interrupt();
}
}
return ConcurrentJobModificationResolveResult.succeeded(localJob);
} | @Test
void ifJobDeletedWhileInProgress() {
final Job jobInProgress = aJobInProgress().build();
final Job jobInProgressWithUpdate = aCopyOf(jobInProgress).withMetadata("extra", "metadata").build();
final Job deletedJob = aCopyOf(jobInProgress).withDeletedState().build();
Thread mockThread = mock(Thread.class);
when(jobSteward.getThreadProcessingJob(jobInProgressWithUpdate)).thenReturn(mockThread);
final ConcurrentJobModificationResolveResult resolveResult = allowedStateChange.resolve(jobInProgressWithUpdate, deletedJob);
assertThat(resolveResult.failed()).isFalse();
verify(mockThread).interrupt();
} |
public void writeTo(T object, DataWriter writer) throws IOException {
writeTo(object, 0, writer);
} | @Test
public void merge() throws Exception {
StringWriter sw = new StringWriter();
builder.get(B.class).writeTo(b, Flavor.JSON.createDataWriter(b, sw, config));
// B.x should mask C.x, so x should be 40
// but C.y should be printed as merged
assertEquals("{'_class':'B','y':20,'z':30,'x':40}", sw.toString().replace('"','\''));
} |
@Override
public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff)
throws IOException, InterruptedException {
IOException lastException = null;
do {
try {
// Match inputPath, which may contain a glob
Collection<Metadata> files =
Iterables.getOnlyElement(FileSystems.match(Collections.singletonList(filePattern)))
.metadata();
LOG.debug("Found {} file(s) by matching the path: {}", files.size(), filePattern);
if (files.isEmpty() || !checkTotalNumOfFiles(files)) {
continue;
}
// Read data from file paths
return readLines(files);
} catch (IOException e) {
// Ignore and retry
lastException = e;
LOG.warn("Error in file reading. Ignore and retry.");
}
} while (BackOffUtils.next(sleeper, backOff));
// Failed after max retries
throw new IOException(
String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES),
lastException);
} | @Test
public void testReadWithRetriesFailsWhenOutputDirEmpty() throws Exception {
NumberedShardedFile shardedFile = new NumberedShardedFile(filePattern);
thrown.expect(IOException.class);
thrown.expectMessage(
containsString(
"Unable to read file(s) after retrying " + NumberedShardedFile.MAX_READ_RETRIES));
shardedFile.readFilesWithRetries(fastClock, backOff);
} |
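One control-flow subtlety in the focal method: `continue` inside a do/while jumps to the loop condition, so an empty match still passes through BackOffUtils.next(...) and consumes a backoff retry instead of spinning. A tiny sketch of that semantics (hypothetical counter, not the Beam API):
public class DoWhileContinueSketch {
    public static void main(String[] args) {
        int attempts = 0;
        do {
            attempts++;
            if (attempts < 3) {
                continue; // jumps to the while condition below, not to the top of the body
            }
        } while (attempts < 3);
        System.out.println(attempts); // 3: each "continue" consumed one loop iteration
    }
}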
static void checkValidCollectionName(String databaseName, String collectionName) {
String fullCollectionName = databaseName + "." + collectionName;
if (collectionName.length() < MIN_COLLECTION_NAME_LENGTH) {
throw new IllegalArgumentException("Collection name cannot be empty.");
}
if (fullCollectionName.length() > MAX_COLLECTION_NAME_LENGTH) {
throw new IllegalArgumentException(
"Collection name "
+ fullCollectionName
+ " cannot be longer than "
+ MAX_COLLECTION_NAME_LENGTH
+ " characters, including the database name and dot.");
}
if (ILLEGAL_COLLECTION_CHARS.matcher(collectionName).find()) {
throw new IllegalArgumentException(
"Collection name "
+ collectionName
+ " is not a valid name. Only letters, numbers, hyphens, underscores and exclamation points are allowed.");
}
if (collectionName.charAt(0) != '_' && !Character.isLetter(collectionName.charAt(0))) {
throw new IllegalArgumentException(
"Collection name " + collectionName + " must start with a letter or an underscore.");
}
String illegalKeyword = "system.";
if (collectionName.startsWith(illegalKeyword)) {
throw new IllegalArgumentException(
"Collection name "
+ collectionName
+ " cannot start with the prefix \""
+ illegalKeyword
+ "\".");
}
} | @Test
public void testCheckValidCollectionNameDoesNotThrowErrorWhenNameIsValid() {
checkValidCollectionName("test-database", "a collection-name_valid.Test1");
checkValidCollectionName("test-database", "_a collection-name_valid.Test1");
} |
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
final String... stateStoreNames) {
process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
} | @Test
public void shouldNotAllowNullStoreNameOnProcessWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.process(processorSupplier, Named.as("processor"), (String) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't be null"));
} |
public void tick() {
// The main loop does two primary things: 1) drive the group membership protocol, responding to rebalance events
// as they occur, and 2) handle external requests targeted at the leader. All the "real" work of the herder is
// performed in this thread, which keeps synchronization straightforward at the cost of some operations possibly
// blocking up this thread (especially those in callbacks due to rebalance events).
try {
// if we failed to read to end of log before, we need to make sure the issue was resolved before joining group
// Joining and immediately leaving for failure to read configs is exceedingly impolite
if (!canReadConfigs) {
if (readConfigToEnd(workerSyncTimeoutMs)) {
canReadConfigs = true;
} else {
return; // Safe to return and tick immediately because readConfigToEnd will do the backoff for us
}
}
log.debug("Ensuring group membership is still active");
String stageDescription = "ensuring membership in the cluster";
member.ensureActive(() -> new TickThreadStage(stageDescription));
completeTickThreadStage();
// Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin
if (!handleRebalanceCompleted()) return;
} catch (WakeupException e) {
// May be due to a request from another thread, or might be stopping. If the latter, we need to check the
// flag immediately. If the former, we need to re-run the ensureActive call since we can't handle requests
// unless we're in the group.
log.trace("Woken up while ensure group membership is still active");
return;
}
if (fencedFromConfigTopic) {
if (isLeader()) {
// We were accidentally fenced out, possibly by a zombie leader
try {
log.debug("Reclaiming write privileges for config topic after being fenced out");
try (TickThreadStage stage = new TickThreadStage("reclaiming write privileges for the config topic")) {
configBackingStore.claimWritePrivileges();
}
fencedFromConfigTopic = false;
log.debug("Successfully reclaimed write privileges for config topic after being fenced out");
} catch (Exception e) {
log.warn("Unable to claim write privileges for config topic. Will backoff and possibly retry if still the leader", e);
backoff(CONFIG_TOPIC_WRITE_PRIVILEGES_BACKOFF_MS);
return;
}
} else {
log.trace("Relinquished write privileges for config topic after being fenced out, since worker is no longer the leader of the cluster");
// We were meant to be fenced out because we fell out of the group and a new leader was elected
fencedFromConfigTopic = false;
}
}
long now = time.milliseconds();
if (checkForKeyRotation(now)) {
log.debug("Distributing new session key");
keyExpiration = Long.MAX_VALUE;
try {
SessionKey newSessionKey = new SessionKey(keyGenerator.generateKey(), now);
writeToConfigTopicAsLeader(
"writing a new session key to the config topic",
() -> configBackingStore.putSessionKey(newSessionKey)
);
} catch (Exception e) {
log.info("Failed to write new session key to config topic; forcing a read to the end of the config topic before possibly retrying", e);
canReadConfigs = false;
return;
}
}
// Process any external requests
// TODO: Some of these can be performed concurrently or even optimized away entirely.
// For example, if three different connectors are slated to be restarted, it's fine to
// restart all three at the same time instead.
// Another example: if multiple configurations are submitted for the same connector,
// the only one that actually has to be written to the config topic is the
// most recent one.
Long scheduledTick = null;
while (true) {
final DistributedHerderRequest next = peekWithoutException();
if (next == null) {
break;
} else if (now >= next.at) {
currentRequest = requests.pollFirst();
} else {
scheduledTick = next.at;
break;
}
runRequest(next.action(), next.callback());
}
// Process all pending connector restart requests
processRestartRequests();
if (scheduledRebalance < Long.MAX_VALUE) {
scheduledTick = scheduledTick != null ? Math.min(scheduledTick, scheduledRebalance) : scheduledRebalance;
rebalanceResolved = false;
log.debug("Scheduled rebalance at: {} (now: {} scheduledTick: {}) ",
scheduledRebalance, now, scheduledTick);
}
if (isLeader() && internalRequestValidationEnabled() && keyExpiration < Long.MAX_VALUE) {
scheduledTick = scheduledTick != null ? Math.min(scheduledTick, keyExpiration) : keyExpiration;
log.debug("Scheduled next key rotation at: {} (now: {} scheduledTick: {}) ",
keyExpiration, now, scheduledTick);
}
// Process any configuration updates
AtomicReference<Set<String>> connectorConfigUpdatesCopy = new AtomicReference<>();
AtomicReference<Set<String>> connectorTargetStateChangesCopy = new AtomicReference<>();
AtomicReference<Set<ConnectorTaskId>> taskConfigUpdatesCopy = new AtomicReference<>();
boolean shouldReturn;
if (member.currentProtocolVersion() == CONNECT_PROTOCOL_V0) {
shouldReturn = updateConfigsWithEager(connectorConfigUpdatesCopy,
connectorTargetStateChangesCopy);
// With eager protocol we should return immediately if needsReconfigRebalance has
// been set to retain the old workflow
if (shouldReturn) {
return;
}
if (connectorConfigUpdatesCopy.get() != null) {
processConnectorConfigUpdates(connectorConfigUpdatesCopy.get());
}
if (connectorTargetStateChangesCopy.get() != null) {
processTargetStateChanges(connectorTargetStateChangesCopy.get());
}
} else {
shouldReturn = updateConfigsWithIncrementalCooperative(connectorConfigUpdatesCopy,
connectorTargetStateChangesCopy, taskConfigUpdatesCopy);
if (connectorConfigUpdatesCopy.get() != null) {
processConnectorConfigUpdates(connectorConfigUpdatesCopy.get());
}
if (connectorTargetStateChangesCopy.get() != null) {
processTargetStateChanges(connectorTargetStateChangesCopy.get());
}
if (taskConfigUpdatesCopy.get() != null) {
processTaskConfigUpdatesWithIncrementalCooperative(taskConfigUpdatesCopy.get());
}
if (shouldReturn) {
return;
}
}
// Let the group take any actions it needs to
try {
long nextRequestTimeoutMs = scheduledTick != null ? Math.max(scheduledTick - time.milliseconds(), 0L) : Long.MAX_VALUE;
log.trace("Polling for group activity; will wait for {}ms or until poll is interrupted by "
+ "either config backing store updates or a new external request",
nextRequestTimeoutMs);
String pollDurationDescription = scheduledTick != null ? "for up to " + nextRequestTimeoutMs + "ms or " : "";
String stageDescription = "polling the group coordinator " + pollDurationDescription + "until interrupted";
member.poll(nextRequestTimeoutMs, () -> new TickThreadStage(stageDescription));
completeTickThreadStage();
// Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin
handleRebalanceCompleted();
} catch (WakeupException e) { // FIXME should not be WakeupException
log.trace("Woken up while polling for group activity");
// Ignore. Just indicates we need to check the exit flag, for requested actions, etc.
}
} | @Test
public void testConnectorResumedRunningTaskOnly() {
// even if we don't own the connector, we should still propagate target state
// changes to the worker so that tasks will transition correctly
when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE);
when(member.memberId()).thenReturn("member");
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
// join
expectRebalance(1, Collections.emptyList(), singletonList(TASK0));
expectConfigRefreshAndSnapshot(SNAPSHOT_PAUSED_CONN1);
expectMemberPoll();
when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.PAUSED))).thenReturn(true);
herder.tick(); // join
// handle the state change
expectMemberEnsureActive();
when(configBackingStore.snapshot()).thenReturn(SNAPSHOT);
ArgumentCaptor<Callback<TargetState>> onStart = ArgumentCaptor.forClass(Callback.class);
doAnswer(invocation -> {
onStart.getValue().onCompletion(null, TargetState.PAUSED);
return null;
}).when(worker).setTargetState(eq(CONN1), eq(TargetState.STARTED), onStart.capture());
configUpdateListener.onConnectorTargetStateChange(CONN1); // state changes to paused
herder.tick(); // apply state change
herder.tick();
verify(worker).setTargetState(eq(CONN1), eq(TargetState.STARTED), any(Callback.class));
verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore);
} |
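A simplified sketch, using hypothetical deadline values, of how tick() folds the earliest queued-request deadline, a scheduled rebalance, and key expiration into the single poll timeout computed near the end of the method:
public class TickTimeoutSketch {
    public static void main(String[] args) {
        long now = 1_000L;
        Long scheduledTick = 1_800L;      // earliest queued request deadline (may be null)
        long scheduledRebalance = 1_500L; // pending delayed rebalance
        long keyExpiration = 2_500L;      // next session key rotation

        scheduledTick = scheduledTick != null ? Math.min(scheduledTick, scheduledRebalance) : scheduledRebalance;
        scheduledTick = scheduledTick != null ? Math.min(scheduledTick, keyExpiration) : keyExpiration;

        long nextRequestTimeoutMs = scheduledTick != null ? Math.max(scheduledTick - now, 0L) : Long.MAX_VALUE;
        System.out.println(nextRequestTimeoutMs); // 500: the earliest deadline wins
    }
}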
@Override
public void createNetwork(KubevirtNetwork network) {
checkNotNull(network, ERR_NULL_NETWORK);
checkArgument(!Strings.isNullOrEmpty(network.networkId()), ERR_NULL_NETWORK_ID);
networkStore.createNetwork(network);
log.info(String.format(MSG_NETWORK, network.name(), MSG_CREATED));
} | @Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateNetwork() {
target.createNetwork(NETWORK);
target.createNetwork(NETWORK);
} |
@Before(value = "@annotation(org.apache.bigtop.manager.server.annotations.Audit)")
public void before(JoinPoint joinPoint) {
MethodSignature ms = (MethodSignature) joinPoint.getSignature();
Long userId = SessionUserHolder.getUserId();
ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
if (attributes != null && userId != null) {
// obtain request uri
HttpServletRequest request = attributes.getRequest();
String uri = request.getRequestURI();
// obtain controller name
Class<?> controller = joinPoint.getThis().getClass();
Tag annotation = controller.getAnnotation(Tag.class);
String apiName = "";
String apiDesc = "";
if (annotation != null) {
apiName = annotation.name();
apiDesc = annotation.description();
}
// obtain method name
String methodName = ms.getName();
// obtain method desc
String operationSummary = "";
String operationDesc = "";
Operation operation = ms.getMethod().getDeclaredAnnotation(Operation.class);
if (operation != null) {
operationSummary = operation.summary();
operationDesc = operation.description();
}
AuditLogPO auditLogPO = new AuditLogPO();
auditLogPO.setUserId(userId);
auditLogPO.setUri(uri);
auditLogPO.setTagName(apiName);
auditLogPO.setTagDesc(apiDesc);
auditLogPO.setOperationSummary(operationSummary);
auditLogPO.setOperationDesc(operationDesc);
auditLogPO.setArgs(JsonUtils.writeAsString(joinPoint.getArgs()));
log.debug("auditLog: {}", auditLogPO);
log.debug("request method:{}.{}", joinPoint.getSignature().getDeclaringTypeName(), methodName);
auditLogRepository.save(auditLogPO);
}
} | @Test
void before_NullRequestAttributes_DoesNotSaveAuditLog() {
SessionUserHolder.setUserId(1L);
auditAspect.before(joinPoint);
verify(auditLogRepository, never()).save(any(AuditLogPO.class));
} |
public static <T extends EurekaEndpoint> boolean identical(List<T> firstList, List<T> secondList) {
if (firstList.size() != secondList.size()) {
return false;
}
HashSet<T> compareSet = new HashSet<>(firstList);
compareSet.removeAll(secondList);
return compareSet.isEmpty();
} | @Test
public void testIdentical() throws Exception {
List<AwsEndpoint> firstList = SampleCluster.UsEast1a.builder().withServerPool(10).build();
List<AwsEndpoint> secondList = ResolverUtils.randomize(firstList);
assertThat(ResolverUtils.identical(firstList, secondList), is(true));
secondList.set(0, SampleCluster.UsEast1b.build().get(0));
assertThat(ResolverUtils.identical(firstList, secondList), is(false));
} |
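Because identical() is a size check plus a one-directional HashSet.removeAll, duplicate multiplicity is invisible to it. A sketch with plain strings standing in for EurekaEndpoint showing that caveat:
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class IdenticalSketch {
    static <T> boolean identical(List<T> first, List<T> second) {
        if (first.size() != second.size()) {
            return false;
        }
        Set<T> compareSet = new HashSet<>(first);
        compareSet.removeAll(second);
        return compareSet.isEmpty();
    }

    public static void main(String[] args) {
        // Equal sizes, same distinct elements, different duplicate counts:
        System.out.println(identical(List.of("a", "a", "b"),
                List.of("a", "b", "b"))); // true, although the lists differ as multisets
    }
}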
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
if (opCode != OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE && opCode != OP_CODE_RESPONSE_CODE) {
onInvalidDataReceived(device, data);
return;
}
final int operator = data.getIntValue(Data.FORMAT_UINT8, 1);
if (operator != OPERATOR_NULL) {
onInvalidDataReceived(device, data);
return;
}
switch (opCode) {
case OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE -> {
// Field size is defined per service
int numberOfRecords;
switch (data.size() - 2) {
case 1 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT8, 2);
case 2 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
case 4 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT32_LE, 2);
default -> {
// Other field sizes are not supported
onInvalidDataReceived(device, data);
return;
}
}
onNumberOfRecordsReceived(device, numberOfRecords);
}
case OP_CODE_RESPONSE_CODE -> {
if (data.size() != 4) {
onInvalidDataReceived(device, data);
return;
}
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 2);
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 3);
if (responseCode == RACP_RESPONSE_SUCCESS) {
onRecordAccessOperationCompleted(device, requestCode);
} else if (responseCode == RACP_ERROR_NO_RECORDS_FOUND) {
onRecordAccessOperationCompletedWithNoRecordsFound(device, requestCode);
} else {
onRecordAccessOperationError(device, requestCode, responseCode);
}
}
}
} | @Test
public void onRecordAccessOperationError_invalidOperator() {
final Data data = new Data(new byte[] { 6, 0, 1, 3 });
callback.onDataReceived(null, data);
assertEquals(error, 3);
assertEquals(1, requestCode);
} |
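The four bytes the test feeds in decode as follows under the Record Access Control Point layout parsed above (constant values assumed from the Bluetooth RACP response-code table); a standalone decoding sketch:
public class RacpResponseSketch {
    public static void main(String[] args) {
        byte[] data = { 6, 0, 1, 3 };
        int opCode = data[0] & 0xFF;       // 6 = response code (OP_CODE_RESPONSE_CODE)
        int operator = data[1] & 0xFF;     // 0 = null operator, the only operator accepted here
        int requestCode = data[2] & 0xFF;  // 1 = op code of the original request, echoed back
        int responseCode = data[3] & 0xFF; // 3 = "Invalid Operator" per the spec table: neither
                                           // success nor "no records found", so the error callback fires
        System.out.printf("op=%d operator=%d request=%d response=%d%n",
                opCode, operator, requestCode, responseCode);
    }
}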
@Bean
public RateLimiterRegistry rateLimiterRegistry(
RateLimiterConfigurationProperties rateLimiterProperties,
EventConsumerRegistry<RateLimiterEvent> rateLimiterEventsConsumerRegistry,
RegistryEventConsumer<RateLimiter> rateLimiterRegistryEventConsumer,
@Qualifier("compositeRateLimiterCustomizer") CompositeCustomizer<RateLimiterConfigCustomizer> compositeRateLimiterCustomizer) {
RateLimiterRegistry rateLimiterRegistry = createRateLimiterRegistry(rateLimiterProperties,
rateLimiterRegistryEventConsumer, compositeRateLimiterCustomizer);
registerEventConsumer(rateLimiterRegistry, rateLimiterEventsConsumerRegistry,
rateLimiterProperties);
initRateLimiterRegistry(rateLimiterProperties, compositeRateLimiterCustomizer, rateLimiterRegistry);
return rateLimiterRegistry;
} | @Test
public void testCreateRateLimiterRegistryWithUnknownConfig() {
RateLimiterConfigurationProperties rateLimiterConfigurationProperties = new RateLimiterConfigurationProperties();
io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties = new io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties();
instanceProperties.setBaseConfig("unknownConfig");
rateLimiterConfigurationProperties.getInstances().put("backend", instanceProperties);
RateLimiterConfiguration rateLimiterConfiguration = new RateLimiterConfiguration();
DefaultEventConsumerRegistry<RateLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
assertThatThrownBy(() -> rateLimiterConfiguration
.rateLimiterRegistry(rateLimiterConfigurationProperties, eventConsumerRegistry,
new CompositeRegistryEventConsumer<>(emptyList()),
compositeRateLimiterCustomizerTest()))
.isInstanceOf(ConfigurationNotFoundException.class)
.hasMessage("Configuration with name 'unknownConfig' does not exist");
} |
@Override
public Map<String, Object> processCsvFile(String encodedCsvData, boolean dryRun) throws JsonProcessingException {
services = new HashMap<>();
serviceParentChildren = new HashMap<>();
Map<String, Object> result = super.processCsvFile(encodedCsvData, dryRun);
if (!services.isEmpty()) {
retrieveLegacyServiceIds();
saveAll(dryRun);
processServiceParentChildren(serviceParentChildren, dryRun);
}
return result;
} | @Test
void processCsvFileFailInvalidDuplicateUUIDTest() throws JsonProcessingException, UnsupportedEncodingException {
// Contains 2 duplicate UUID, 2 empty UUID
String csvData = """SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS""";
List<String> failedArray = new ArrayList<>();
failedArray.add("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
Map<String, Object> resultMap = csvService.processCsvFile(encodeCsv(csvData), false);
assertEquals("Bestand niet verwerkt", resultMap.get("result"));
assertTrue(((List) resultMap.get("failed")).contains("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"));
} |
@Override
public void run() {
try {
PushDataWrapper wrapper = generatePushData();
ClientManager clientManager = delayTaskEngine.getClientManager();
for (String each : getTargetClientIds()) {
Client client = clientManager.getClient(each);
if (null == client) {
// means this client has disconnected
continue;
}
Subscriber subscriber = client.getSubscriber(service);
// skip if null
if (subscriber == null) {
continue;
}
delayTaskEngine.getPushExecutor().doPushWithCallback(each, subscriber, wrapper,
new ServicePushCallback(each, subscriber, wrapper.getOriginalData(), delayTask.isPushToAll()));
}
} catch (Exception e) {
Loggers.PUSH.error("Push task for service" + service.getGroupedServiceName() + " execute failed ", e);
delayTaskEngine.addTask(service, new PushDelayTask(service, 1000L));
}
} | @Test
void testRunSuccessForPushAll() {
PushDelayTask delayTask = new PushDelayTask(service, 0L);
PushExecuteTask executeTask = new PushExecuteTask(service, delayTaskExecuteEngine, delayTask);
executeTask.run();
assertEquals(1, MetricsMonitor.getTotalPushMonitor().get());
} |
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
} | @Test
public void shouldAddStreamWithCorrectSql() {
// Given:
givenCreateStream();
// When:
cmdExec.execute(SQL_TEXT, createStream, false, NO_QUERY_SOURCES);
// Then:
assertThat(metaStore.getSource(STREAM_NAME).getSqlExpression(), is(SQL_TEXT));
} |
public static String encodeBase64(final String what) {
return BaseEncoding.base64().encode(what.getBytes(StandardCharsets.UTF_8));
} | @Test
public void testEncodeBase64() {
assertEquals("bG9sd2F0LmVuY29kZWQ=", Tools.encodeBase64("lolwat.encoded"));
} |
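The same encoding with the JDK's java.util.Base64 (the focal method uses Guava's BaseEncoding; both emit standard RFC 4648 output for this input):
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64Sketch {
    public static void main(String[] args) {
        String encoded = Base64.getEncoder()
                .encodeToString("lolwat.encoded".getBytes(StandardCharsets.UTF_8));
        System.out.println(encoded); // bG9sd2F0LmVuY29kZWQ=
    }
}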
@Override
public <T> void execute(URI uri, String httpMethod, RequestHttpEntity requestHttpEntity,
final ResponseHandler<T> responseHandler, final Callback<T> callback) throws Exception {
HttpRequestBase httpRequestBase = DefaultHttpClientRequest.build(uri, httpMethod, requestHttpEntity, defaultConfig);
try {
asyncClient.execute(httpRequestBase, new FutureCallback<HttpResponse>() {
@Override
public void completed(HttpResponse result) {
DefaultClientHttpResponse response = new DefaultClientHttpResponse(result);
try {
HttpRestResult<T> httpRestResult = responseHandler.handle(response);
callback.onReceive(httpRestResult);
} catch (Exception e) {
callback.onError(e);
} finally {
HttpClientUtils.closeQuietly(result);
}
}
@Override
public void failed(Exception ex) {
callback.onError(ex);
}
@Override
public void cancelled() {
callback.onCancel();
}
});
} catch (IllegalStateException e) {
final List<ExceptionEvent> events = ioreactor.getAuditLog();
if (events != null) {
for (ExceptionEvent event : events) {
if (event != null) {
LOGGER.error("[DefaultAsyncHttpClientRequest] IllegalStateException! I/O Reactor error time: {}",
event.getTimestamp(), event.getCause());
}
}
}
throw e;
}
} | @Test
void testExecuteOnFail() throws Exception {
Header header = Header.newInstance();
Map<String, String> body = new HashMap<>();
body.put("test", "test");
RequestHttpEntity httpEntity = new RequestHttpEntity(header, Query.EMPTY, body);
RuntimeException exception = new RuntimeException("test");
when(client.execute(any(), any())).thenAnswer(invocationOnMock -> {
((FutureCallback) invocationOnMock.getArgument(1)).failed(exception);
return null;
});
httpClientRequest.execute(uri, "PUT", httpEntity, responseHandler, callback);
verify(callback).onError(exception);
} |
@Nullable
public InetSocketAddress getPeer() {
return nextPeer();
} | @Test
public void getPeer_all() throws Exception {
SeedPeers seedPeers = new SeedPeers(MAINNET);
for (int i = 0; i < MAINNET.getAddrSeeds().length; ++i) {
assertThat("Failed on index: "+i, seedPeers.getPeer(), notNullValue());
}
assertThat(seedPeers.getPeer(), equalTo(null));
} |
public static MetricName name(Class<?> klass, String... names) {
return name(klass.getName(), names);
} | @Test
public void concatenatesClassNamesWithStringsToFormADottedName() throws Exception {
assertThat(name(MetricRegistryTest.class, "one", "two"))
.isEqualTo(MetricName.build("io.dropwizard.metrics.MetricRegistryTest.one.two"));
} |
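The dotted concatenation the test asserts, restated as a plain-string sketch (MetricName.build is Dropwizard's; the helper below is hypothetical):
public class DottedNameSketch {
    static String name(String base, String... parts) {
        StringBuilder sb = new StringBuilder(base);
        for (String part : parts) {
            sb.append('.').append(part);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(name("io.dropwizard.metrics.MetricRegistryTest", "one", "two"));
        // io.dropwizard.metrics.MetricRegistryTest.one.two
    }
}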
public String generatePushDownFilter(List<String> writtenPartitions, List<FieldSchema> partitionFields, HiveSyncConfig config) {
PartitionValueExtractor partitionValueExtractor = ReflectionUtils
.loadClass(config.getStringOrDefault(META_SYNC_PARTITION_EXTRACTOR_CLASS));
List<Partition> partitions = writtenPartitions.stream().map(s -> {
List<String> values = partitionValueExtractor.extractPartitionValuesInPath(s);
if (values.size() != partitionFields.size()) {
throw new HoodieHiveSyncException("Partition fields and values should be same length"
+ ", but got partitionFields: " + partitionFields + " with values: " + values);
}
return new Partition(values, null);
}).collect(Collectors.toList());
Expression filter;
int estimateSize = partitionFields.size() * partitions.size();
if (estimateSize > config.getIntOrDefault(HIVE_SYNC_FILTER_PUSHDOWN_MAX_SIZE)) {
filter = buildMinMaxPartitionExpression(partitions, partitionFields);
} else {
filter = buildPartitionExpression(partitions, partitionFields);
}
if (filter != null) {
return generateFilterString(filter);
}
return "";
} | @Test
public void testPushDownFilterIfExceedLimit() {
Properties props = new Properties();
props.put(HIVE_SYNC_FILTER_PUSHDOWN_MAX_SIZE.key(), "0");
HiveSyncConfig config = new HiveSyncConfig(props);
List<FieldSchema> partitionFieldSchemas = new ArrayList<>(4);
partitionFieldSchemas.add(new FieldSchema("date", "date"));
partitionFieldSchemas.add(new FieldSchema("year", "string"));
partitionFieldSchemas.add(new FieldSchema("month", "int"));
partitionFieldSchemas.add(new FieldSchema("day", "bigint"));
List<String> writtenPartitions = new ArrayList<>();
writtenPartitions.add("2022-09-01/2022/9/1");
assertEquals("(((date = 2022-09-01 AND year = \"2022\") AND month = 9) AND day = 1)",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
writtenPartitions.add("2022-09-02/2022/9/2");
writtenPartitions.add("2022-09-03/2022/9/2");
writtenPartitions.add("2022-09-04/2022/9/2");
assertEquals(
"((((date >= 2022-09-01 AND date <= 2022-09-04) AND year = \"2022\") AND month = 9) AND (day >= 1 AND day <= 2))",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
// If some partition fields have types that cannot be converted to filters
partitionFieldSchemas.clear();
writtenPartitions.clear();
partitionFieldSchemas.add(new FieldSchema("date", "date"));
partitionFieldSchemas.add(new FieldSchema("finished", "boolean"));
writtenPartitions.add("2022-09-01/true");
assertEquals("date = 2022-09-01",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
writtenPartitions.add("2022-09-02/true");
writtenPartitions.add("2022-09-03/false");
writtenPartitions.add("2022-09-04/false");
assertEquals("(date >= 2022-09-01 AND date <= 2022-09-04)",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
// If no partition fields have types that can be converted to filters
partitionFieldSchemas.clear();
writtenPartitions.clear();
partitionFieldSchemas.add(new FieldSchema("finished", "boolean"));
writtenPartitions.add("true");
assertEquals("",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
writtenPartitions.add("false");
writtenPartitions.add("false");
writtenPartitions.add("false");
assertEquals("",
partitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFieldSchemas, config));
} |
static KryoState get(KryoCoder<?> coder) {
return STORAGE.getOrCreate(coder);
} | @Test
public void testSameKryoAfterDeserialization() throws IOException, ClassNotFoundException {
final KryoCoder<?> coder = KryoCoder.of(k -> k.register(TestClass.class));
final KryoState firstKryo = KryoState.get(coder);
final ByteArrayOutputStream outStr = new ByteArrayOutputStream();
final ObjectOutputStream oss = new ObjectOutputStream(outStr);
oss.writeObject(coder);
oss.flush();
oss.close();
final ObjectInputStream ois =
new ObjectInputStream(new ByteArrayInputStream(outStr.toByteArray()));
@SuppressWarnings("unchecked")
final KryoCoder<?> deserializedCoder = (KryoCoder) ois.readObject();
final KryoState secondKryo = KryoState.get(deserializedCoder);
assertSame(firstKryo, secondKryo);
} |
@Override
public List<String> listSchemaNames(ConnectorSession session)
{
return ImmutableList.copyOf(jdbcClient.getSchemaNames(session, JdbcIdentity.from(session)));
} | @Test
public void testListSchemaNames()
{
assertTrue(metadata.listSchemaNames(SESSION).containsAll(ImmutableSet.of("example", "tpch")));
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<TerminateQuery> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final TerminateQuery terminateQuery = statement.getStatement();
// do default behaviour for TERMINATE ALL
if (!terminateQuery.getQueryId().isPresent()) {
return StatementExecutorResponse.notHandled();
}
final QueryId queryId = terminateQuery.getQueryId().get();
final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create(
statement,
sessionProperties,
executionContext,
serviceContext.getKsqlClient()
);
if (executionContext.getPersistentQuery(queryId).isPresent()
|| statement.getUnMaskedStatementText().equals(
TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT)) {
// do default behaviour for terminating persistent queries
return StatementExecutorResponse.notHandled();
} else {
// Check are we running this push query locally, if yes then terminate, otherwise
// propagate terminate query to other nodes
if (executionContext.getQuery(queryId).isPresent()) {
executionContext.getQuery(queryId).get().close();
} else {
final boolean wasTerminatedRemotely = remoteHostExecutor.fetchAllRemoteResults().getLeft()
.values()
.stream()
.map(TerminateQueryEntity.class::cast)
.map(TerminateQueryEntity::getWasTerminated)
.anyMatch(b -> b.equals(true));
if (!wasTerminatedRemotely) {
throw new KsqlException(String.format(
"Failed to terminate query with query ID: '%s'",
queryId));
}
}
return StatementExecutorResponse.handled(Optional.of(
new TerminateQueryEntity(statement.getMaskedStatementText(), queryId.toString(), true)
));
}
} | @Test
public void shouldDefaultToDistributorForPersistentQuery() {
// Given:
final ConfiguredStatement<?> terminatePersistent = engine.configure("TERMINATE PERSISTENT_QUERY;");
final PersistentQueryMetadata persistentQueryMetadata = givenPersistentQuery("PERSISTENT_QUERY", RUNNING_QUERY_STATE);
final QueryId persistentQueryId = persistentQueryMetadata.getQueryId();
final KsqlEngine engine = mock(KsqlEngine.class);
when(engine.getPersistentQuery(persistentQueryId)).thenReturn(Optional.of(persistentQueryMetadata));
// When:
final Optional<KsqlEntity> ksqlEntity = CustomExecutors.TERMINATE_QUERY.execute(
terminatePersistent,
mock(SessionProperties.class),
engine,
this.engine.getServiceContext()
).getEntity();
// Then:
assertThat(ksqlEntity, is(Optional.empty()));
} |
@Override
public long getMailTemplateCountByAccountId(Long accountId) {
return mailTemplateMapper.selectCountByAccountId(accountId);
} | @Test
public void testCountByAccountId() {
// mock 数据
MailTemplateDO dbMailTemplate = randomPojo(MailTemplateDO.class);
mailTemplateMapper.insert(dbMailTemplate);
// 测试 accountId 不匹配
mailTemplateMapper.insert(cloneIgnoreId(dbMailTemplate, o -> o.setAccountId(2L)));
// 准备参数
Long accountId = dbMailTemplate.getAccountId();
// 调用
long count = mailTemplateService.getMailTemplateCountByAccountId(accountId);
// 断言
assertEquals(1, count);
} |
public void retrieveDocuments() throws DocumentRetrieverException {
boolean first = true;
String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster);
MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route);
documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams);
session = documentAccess.createSyncSession(new SyncParameters.Builder().build());
int trace = params.traceLevel;
if (trace > 0) {
session.setTraceLevel(trace);
}
Iterator<String> iter = params.documentIds;
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println('[');
}
while (iter.hasNext()) {
if (params.jsonOutput && !params.printIdsOnly) {
if (!first) {
System.out.println(',');
} else {
first = false;
}
}
String docid = iter.next();
Message msg = createDocumentRequest(docid);
Reply reply = session.syncSend(msg);
printReply(reply);
}
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println(']');
}
} | @Test
void testTrace() throws DocumentRetrieverException {
final int traceLevel = 9;
ClientParameters params = createParameters()
.setDocumentIds(asIterator(DOC_ID_1))
.setTraceLevel(traceLevel)
.build();
GetDocumentReply reply = new GetDocumentReply(new Document(DataType.DOCUMENT, new DocumentId(DOC_ID_1)));
reply.getTrace().getRoot().addChild("childnode");
when(mockedSession.syncSend(any())).thenReturn(reply);
DocumentRetriever documentRetriever = createDocumentRetriever(params);
documentRetriever.retrieveDocuments();
verify(mockedSession, times(1)).setTraceLevel(traceLevel);
assertTrue(outContent.toString().contains("<trace>"));
} |
@Override
public MaintenanceDomain decode(ObjectNode json, CodecContext context) {
if (json == null || !json.isObject()) {
return null;
}
JsonNode mdNode = json.get(MD);
String mdName = nullIsIllegal(mdNode.get(MD_NAME), "mdName is required").asText();
String mdNameType = MdId.MdNameType.CHARACTERSTRING.name();
if (mdNode.get(MD_NAME_TYPE) != null) {
mdNameType = mdNode.get(MD_NAME_TYPE).asText();
}
try {
MdId mdId = MdMaNameUtil.parseMdName(mdNameType, mdName);
MaintenanceDomain.MdBuilder builder = DefaultMaintenanceDomain.builder(mdId);
JsonNode mdLevelNode = mdNode.get(MD_LEVEL);
if (mdLevelNode != null) {
MdLevel mdLevel = MdLevel.valueOf(mdLevelNode.asText());
builder = builder.mdLevel(mdLevel);
}
JsonNode mdNumericIdNode = mdNode.get(MD_NUMERIC_ID);
if (mdNumericIdNode != null) {
short mdNumericId = (short) mdNumericIdNode.asInt();
builder = builder.mdNumericId(mdNumericId);
}
return builder.build();
} catch (CfmConfigException e) {
throw new IllegalArgumentException(e);
}
} | @Test
public void testDecodeMd1() throws IOException {
String mdString = "{\"md\": { \"mdName\": \"test-1\"," +
"\"mdNameType\": \"CHARACTERSTRING\"," +
"\"mdLevel\": \"LEVEL1\", \"mdNumericId\": 1}}";
InputStream input = new ByteArrayInputStream(
mdString.getBytes(StandardCharsets.UTF_8));
JsonNode cfg = mapper.readTree(input);
MaintenanceDomain mdDecode1 = context
.codec(MaintenanceDomain.class).decode((ObjectNode) cfg, context);
assertEquals(MDID1_CHAR, mdDecode1.mdId());
assertEquals(MaintenanceDomain.MdLevel.LEVEL1, mdDecode1.mdLevel());
assertEquals(1, mdDecode1.mdNumericId());
} |
public static NamenodeRole convert(NamenodeRoleProto role) {
switch (role) {
case NAMENODE:
return NamenodeRole.NAMENODE;
case BACKUP:
return NamenodeRole.BACKUP;
case CHECKPOINT:
return NamenodeRole.CHECKPOINT;
}
return null;
} | @Test
public void testConvertExportedBlockKeys() {
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
compare(expKeys, expKeys1);
} |
public static boolean validate(int[] replicas) {
if (replicas.length == 0) return true;
int[] sortedReplicas = clone(replicas);
Arrays.sort(sortedReplicas);
int prev = sortedReplicas[0];
if (prev < 0) return false;
for (int i = 1; i < sortedReplicas.length; i++) {
int replica = sortedReplicas[i];
if (prev == replica) return false;
prev = replica;
}
return true;
} | @Test
public void testValidate() {
assertTrue(Replicas.validate(new int[] {}));
assertTrue(Replicas.validate(new int[] {3}));
assertTrue(Replicas.validate(new int[] {3, 1, 2, 6}));
assertFalse(Replicas.validate(new int[] {3, 3}));
assertFalse(Replicas.validate(new int[] {4, -1, 3}));
assertFalse(Replicas.validate(new int[] {-1}));
assertFalse(Replicas.validate(new int[] {3, 1, 2, 6, 1}));
assertTrue(Replicas.validate(new int[] {1, 100}));
} |
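Restating the validation rule as a self-contained sketch: an assignment is valid iff it contains no negative broker IDs and no duplicates, and sorting reduces both checks to one adjacent-pair scan:
import java.util.Arrays;

public class ReplicasValidateSketch {
    static boolean validate(int[] replicas) {
        int[] sorted = replicas.clone();
        Arrays.sort(sorted);
        for (int i = 0; i < sorted.length; i++) {
            if (sorted[i] < 0) {
                return false; // negatives sort to the front, so this fires early
            }
            if (i > 0 && sorted[i] == sorted[i - 1]) {
                return false; // duplicates become adjacent after sorting
            }
        }
        return true; // also covers the empty array
    }

    public static void main(String[] args) {
        System.out.println(validate(new int[] {3, 1, 2, 6}));    // true
        System.out.println(validate(new int[] {3, 1, 2, 6, 1})); // false: duplicate 1
        System.out.println(validate(new int[] {4, -1, 3}));      // false: negative id
    }
}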
@Override
public CloseableIterator<String> readLines(Component file) {
requireNonNull(file, "Component should not be null");
checkArgument(file.getType() == FILE, "Component '%s' is not a file", file);
Optional<CloseableIterator<String>> linesIteratorOptional = reportReader.readFileSource(file.getReportAttributes().getRef());
checkState(linesIteratorOptional.isPresent(), "File '%s' has no source code", file);
CloseableIterator<String> lineIterator = linesIteratorOptional.get();
return new ComponentLinesCloseableIterator(file, lineIterator, file.getFileAttributes().getLines());
} | @Test
public void read_lines_throws_ISE_when_sourceLine_has_less_elements_then_lineCount_minus_1() {
reportReader.putFileSourceLines(FILE_REF, "line1", "line2");
assertThatThrownBy(() -> consume(underTest.readLines(createComponent(10))))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Source of file 'ReportComponent{ref=2, key='FILE_KEY', type=FILE}' has less lines (2) than the expected number (10)");
} |
@Override
public Iterator<QueryableEntry> iterator() {
return new It();
} | @Test
public void testIterator_notEmpty_iteratorReused() {
QueryableEntry entry = entry(data());
addEntry(entry);
Iterator<QueryableEntry> it = result.iterator();
assertThat(it.hasNext()).isTrue();
assertThat(it.next()).isEqualTo(entry);
} |
public boolean hasStaticFieldName() {
return staticFieldName != null;
} | @Test
void testJobDetailsFromJobRequest() {
final TestJobRequest jobRequest = new TestJobRequest("some input");
JobDetails jobDetails = new JobDetails(jobRequest);
assertThat(jobDetails)
.hasClass(TestJobRequestHandler.class)
.hasStaticFieldName(null)
.hasMethodName("run")
.hasArgs(jobRequest)
.isCacheable();
} |
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
} | @Test
public void testSomeNulls() {
boolean shouldRead =
new StrictMetricsEvaluator(SCHEMA, lessThan("some_nulls", "ggg")).eval(FILE_2);
assertThat(shouldRead).as("Should not match: lessThan on some nulls column").isFalse();
shouldRead =
new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("some_nulls", "eee")).eval(FILE_2);
assertThat(shouldRead).as("Should not match: lessThanOrEqual on some nulls column").isFalse();
shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("some_nulls", "aaa")).eval(FILE_2);
assertThat(shouldRead).as("Should not match: greaterThan on some nulls column").isFalse();
shouldRead =
new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("some_nulls", "bbb")).eval(FILE_2);
assertThat(shouldRead)
.as("Should not match: greaterThanOrEqual on some nulls column")
.isFalse();
shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("some_nulls", "bbb")).eval(FILE_3);
assertThat(shouldRead).as("Should not match: equal on some nulls column").isFalse();
} |
@Override public String method() {
return GrpcParser.method(call.getMethodDescriptor().getFullMethodName());
} | @Test void method() {
when(call.getMethodDescriptor()).thenReturn(methodDescriptor);
assertThat(request.service()).isEqualTo("helloworld.Greeter");
} |
@Override
public void execute(ComputationStep.Context context) {
new DepthTraversalTypeAwareCrawler(
new TypeAwareVisitorAdapter(CrawlerDepthLimit.PROJECT, PRE_ORDER) {
@Override
public void visitProject(Component project) {
executeForProject(project);
}
}).visit(treeRootHolder.getRoot());
} | @Test
void new_measures_are_created_even_if_there_is_no_rawMeasure_for_metric_of_condition() {
Condition equals2Condition = createLessThanCondition(INT_METRIC_1, "2");
qualityGateHolder.setQualityGate(new QualityGate(SOME_QG_UUID, SOME_QG_NAME, of(equals2Condition)));
underTest.execute(new TestComputationStepContext());
Optional<Measure> addedRawMeasure = measureRepository.getAddedRawMeasure(PROJECT_COMPONENT, INT_METRIC_1_KEY);
assertThat(addedRawMeasure).isAbsent();
assertThat(getAlertStatusMeasure())
.hasQualityGateLevel(OK)
.hasQualityGateText("");
assertThat(getQGDetailsMeasure())
.hasValue(new QualityGateDetailsData(OK, Collections.emptyList(), false).toJson());
QualityGateStatusHolderAssertions.assertThat(qualityGateStatusHolder)
.hasStatus(QualityGateStatus.OK)
.hasConditionCount(1)
.hasCondition(equals2Condition, ConditionStatus.EvaluationStatus.NO_VALUE, null);
} |
public static String getServiceName(final String serviceNameWithGroup) {
if (StringUtils.isBlank(serviceNameWithGroup)) {
return StringUtils.EMPTY;
}
if (!serviceNameWithGroup.contains(Constants.SERVICE_INFO_SPLITER)) {
return serviceNameWithGroup;
}
return serviceNameWithGroup.split(Constants.SERVICE_INFO_SPLITER)[1];
} | @Test
void testGetServiceNameWithoutGroup() {
String serviceName = "serviceName";
assertEquals(serviceName, NamingUtils.getServiceName(serviceName));
} |
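A sketch of the split, assuming Constants.SERVICE_INFO_SPLITER is "@@" (Nacos's grouped-name separator): a grouped name yields the part after the separator, a bare name passes through unchanged:
public class ServiceNameSketch {
    static String getServiceName(String serviceNameWithGroup) {
        if (serviceNameWithGroup == null || serviceNameWithGroup.trim().isEmpty()) {
            return "";
        }
        if (!serviceNameWithGroup.contains("@@")) {
            return serviceNameWithGroup;
        }
        return serviceNameWithGroup.split("@@")[1];
    }

    public static void main(String[] args) {
        System.out.println(getServiceName("DEFAULT_GROUP@@serviceName")); // serviceName
        System.out.println(getServiceName("serviceName"));                // serviceName
    }
}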
public synchronized Map<GroupId, Long> getTabletsNumInScheduleForEachCG() {
Map<GroupId, Long> result = Maps.newHashMap();
List<Stream<TabletSchedCtx>> streams = Lists.newArrayList(pendingTablets.stream(),
runningTablets.values().stream());
// Exclude VERSION_INCOMPLETE tablets, since those are not added because of relocation.
streams.forEach(s -> s.filter(t ->
(t.getColocateGroupId() != null && t.getTabletHealthStatus() != TabletHealthStatus.VERSION_INCOMPLETE)
).forEach(t -> result.merge(t.getColocateGroupId(), 1L, Long::sum))
);
return result;
} | @Test
public void testGetTabletsNumInScheduleForEachCG() {
TabletScheduler tabletScheduler = new TabletScheduler(tabletSchedulerStat);
Map<Long, ColocateTableIndex.GroupId> tabletIdToCGIdForPending = Maps.newHashMap();
tabletIdToCGIdForPending.put(101L, new ColocateTableIndex.GroupId(200L, 300L));
tabletIdToCGIdForPending.put(102L, new ColocateTableIndex.GroupId(200L, 300L));
tabletIdToCGIdForPending.put(103L, new ColocateTableIndex.GroupId(200L, 301L));
tabletIdToCGIdForPending.forEach((k, v) -> {
TabletSchedCtx ctx = new TabletSchedCtx(TabletSchedCtx.Type.REPAIR, 200L, 201L, 202L,
203L, k, System.currentTimeMillis());
ctx.setColocateGroupId(v);
ctx.setOrigPriority(TabletSchedCtx.Priority.LOW);
Deencapsulation.invoke(tabletScheduler, "addToPendingTablets", ctx);
});
Map<Long, ColocateTableIndex.GroupId> tabletIdToCGIdForRunning = Maps.newHashMap();
tabletIdToCGIdForRunning.put(104L, new ColocateTableIndex.GroupId(200L, 300L));
tabletIdToCGIdForRunning.put(105L, new ColocateTableIndex.GroupId(200L, 300L));
tabletIdToCGIdForRunning.put(106L, new ColocateTableIndex.GroupId(200L, 301L));
tabletIdToCGIdForRunning.forEach((k, v) -> {
TabletSchedCtx ctx = new TabletSchedCtx(TabletSchedCtx.Type.REPAIR, 200L, 201L, 202L,
203L, k, System.currentTimeMillis());
ctx.setColocateGroupId(v);
ctx.setOrigPriority(TabletSchedCtx.Priority.LOW);
if (k == 104L) {
ctx.setTabletStatus(LocalTablet.TabletHealthStatus.VERSION_INCOMPLETE);
}
Deencapsulation.invoke(tabletScheduler, "addToRunningTablets", ctx);
});
Map<ColocateTableIndex.GroupId, Long> result = tabletScheduler.getTabletsNumInScheduleForEachCG();
Assert.assertEquals(Optional.of(3L).get(),
result.get(new ColocateTableIndex.GroupId(200L, 300L)));
Assert.assertEquals(Optional.of(2L).get(),
result.get(new ColocateTableIndex.GroupId(200L, 301L)));
} |
@Override
public int partition(RowData row, int numPartitions) {
// reuse the sortKey and rowDataWrapper
sortKey.wrap(rowDataWrapper.wrap(row));
return SketchUtil.partition(sortKey, numPartitions, rangeBounds, comparator);
} | @Test
public void testRangePartitioningWithRangeBounds() {
SketchRangePartitioner partitioner =
new SketchRangePartitioner(TestFixtures.SCHEMA, SORT_ORDER, RANGE_BOUNDS);
GenericRowData row =
GenericRowData.of(StringData.fromString("data"), 0L, StringData.fromString("2023-06-20"));
for (long id = 0; id < MAX_ID; ++id) {
row.setField(1, id);
int partition = partitioner.partition(row, NUM_PARTITIONS);
assertThat(partition).isGreaterThanOrEqualTo(0).isLessThan(NUM_PARTITIONS);
int expectedPartition = id == 0L ? 0 : (int) ((id - 1) / RANGE_STEP);
assertThat(partition).isEqualTo(expectedPartition);
}
} |
@Override
public void setDate( Date date ) {
this.string = LOCAL_SIMPLE_DATE_PARSER.get().format( date );
} | @Test
public void testSetDate() throws ParseException {
ValueString vs = new ValueString();
SimpleDateFormat format = new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss.SSS", Locale.US );
try {
vs.setDate( null );
// assertNull(vs.getString());
fail( "expected NullPointerException" );
} catch ( NullPointerException ex ) {
// This is the original behaviour
}
vs.setDate( format.parse( "2006/06/07 01:02:03.004" ) );
assertEquals( "2006/06/07 01:02:03.004", vs.getString() );
} |
List<GcpAddress> getAddresses() {
try {
return RetryUtils.retry(this::fetchGcpAddresses, RETRIES, NON_RETRYABLE_KEYWORDS);
} catch (RestClientException e) {
handleKnownException(e);
return emptyList();
}
} | @Test
public void getAddressesForbidden() {
// given
Label label = null;
String forbiddenMessage = "\"reason\":\"Request had insufficient authentication scopes\"";
RestClientException exception = new RestClientException(forbiddenMessage, HttpURLConnection.HTTP_FORBIDDEN);
given(gcpComputeApi.instances(CURRENT_PROJECT, CURRENT_ZONE, label, ACCESS_TOKEN)).willThrow(exception);
GcpConfig gcpConfig = GcpConfig.builder().setLabel(label).build();
GcpClient gcpClient = new GcpClient(gcpMetadataApi, gcpComputeApi, gcpAuthenticator, gcpConfig);
// when
List<GcpAddress> result = gcpClient.getAddresses();
// then
assertEquals(emptyList(), result);
} |
public static Object deserialize(JavaBeanDescriptor beanDescriptor) {
return deserialize(beanDescriptor, Thread.currentThread().getContextClassLoader());
} | @Test
void testDeserialize_Primitive() {
JavaBeanDescriptor descriptor = new JavaBeanDescriptor(long.class.getName(), JavaBeanDescriptor.TYPE_PRIMITIVE);
descriptor.setPrimitiveProperty(Long.MAX_VALUE);
Assertions.assertEquals(Long.MAX_VALUE, JavaBeanSerializeUtil.deserialize(descriptor));
BigDecimal decimal = BigDecimal.TEN;
Assertions.assertEquals(Long.MAX_VALUE, descriptor.setPrimitiveProperty(decimal));
Assertions.assertEquals(decimal, JavaBeanSerializeUtil.deserialize(descriptor));
String string = UUID.randomUUID().toString();
Assertions.assertEquals(decimal, descriptor.setPrimitiveProperty(string));
Assertions.assertEquals(string, JavaBeanSerializeUtil.deserialize(descriptor));
} |
@Override
public void cancel() {
isRunning = false;
// we need to close the socket as well, because the Thread.interrupt() function will
// not wake the thread in the socketStream.read() method when blocked.
Socket theSocket = this.currentSocket;
if (theSocket != null) {
IOUtils.closeSocket(theSocket);
}
} | @Test
void testSocketSourceOutputInfiniteRetries() throws Exception {
ServerSocket server = new ServerSocket(0);
Socket channel = null;
try {
SocketTextStreamFunction source =
new SocketTextStreamFunction(LOCALHOST, server.getLocalPort(), "\n", -1, 100);
SocketSourceThread runner = new SocketSourceThread(source, "test1", "check");
runner.start();
// first connection: nothing
channel = NetUtils.acceptWithoutTimeout(server);
channel.close();
// second connection: first string
channel = NetUtils.acceptWithoutTimeout(server);
OutputStreamWriter writer = new OutputStreamWriter(channel.getOutputStream());
writer.write("test1\n");
writer.close();
channel.close();
// third connection: nothing
channel = NetUtils.acceptWithoutTimeout(server);
channel.close();
// forth connection: second string
channel = NetUtils.acceptWithoutTimeout(server);
writer = new OutputStreamWriter(channel.getOutputStream());
writer.write("check\n");
writer.flush();
runner.waitForNumElements(2);
runner.cancel();
runner.waitUntilDone();
} finally {
if (channel != null) {
IOUtils.closeQuietly(channel);
}
IOUtils.closeQuietly(server);
}
} |
@Override
public Processor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>> get() {
return new ContextualProcessor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>>() {
private KTableValueGetter<KO, VO> foreignValues;
@Override
public void init(final ProcessorContext<K, SubscriptionResponseWrapper<VO>> context) {
super.init(context);
foreignValues = foreignValueGetterSupplier.get();
foreignValues.init(context);
}
@Override
public void process(final Record<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> record) {
Objects.requireNonNull(record.key(), "This processor should never see a null key.");
Objects.requireNonNull(record.value(), "This processor should never see a null value.");
final ValueAndTimestamp<SubscriptionWrapper<K>> valueAndTimestamp = record.value().newValue;
Objects.requireNonNull(valueAndTimestamp, "This processor should never see a null newValue.");
final SubscriptionWrapper<K> value = valueAndTimestamp.value();
if (value.getVersion() > SubscriptionWrapper.CURRENT_VERSION) {
//Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility
//with previous versions to enable rolling upgrades. Must develop a strategy for upgrading
//from older SubscriptionWrapper versions to newer versions.
throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
}
final ValueAndTimestamp<VO> foreignValueAndTime =
record.key().getForeignKey() == null ?
null :
foreignValues.get(record.key().getForeignKey());
final long resultTimestamp =
foreignValueAndTime == null ?
valueAndTimestamp.timestamp() :
Math.max(valueAndTimestamp.timestamp(), foreignValueAndTime.timestamp());
switch (value.getInstruction()) {
case DELETE_KEY_AND_PROPAGATE:
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<VO>(
value.getHash(),
null,
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
break;
case PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE:
//This one needs to go through regardless of LEFT or INNER join, since the extracted FK was
//changed and there is no match for it. We must propagate the (key, null) to ensure that the
//downstream consumers are alerted to this fact.
final VO valueToSend = foreignValueAndTime == null ? null : foreignValueAndTime.value();
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<>(
value.getHash(),
valueToSend,
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
break;
case PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE:
if (foreignValueAndTime != null) {
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<>(
value.getHash(),
foreignValueAndTime.value(),
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
}
break;
case DELETE_KEY_NO_PROPAGATE:
break;
default:
throw new IllegalStateException("Unhandled instruction: " + value.getInstruction());
}
}
};
} | @Test
public void shouldDeleteKeyAndPropagateFKV1() {
final MockProcessorContext<String, SubscriptionResponseWrapper<String>> context = new MockProcessorContext<>();
processor.init(context);
final SubscriptionWrapper<String> newValue = new SubscriptionWrapper<>(
new long[]{1L},
Instruction.DELETE_KEY_AND_PROPAGATE,
"pk1",
SubscriptionWrapper.VERSION_1,
12
);
final Record<CombinedKey<String, String>, Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> record =
new Record<>(
new CombinedKey<>("fk1", "pk1"),
new Change<>(ValueAndTimestamp.make(newValue, 1L), null),
1L
);
processor.process(record);
final List<CapturedForward<? extends String, ? extends SubscriptionResponseWrapper<String>>> forwarded = context.forwarded();
assertEquals(1, forwarded.size());
assertEquals(
new Record<>(
"pk1",
new SubscriptionResponseWrapper<>(
newValue.getHash(),
null,
12
),
1L
),
forwarded.get(0).record()
);
} |
@ConstantFunction.List(list = {
@ConstantFunction(name = "unix_timestamp", argTypes = {DATETIME}, returnType = BIGINT),
@ConstantFunction(name = "unix_timestamp", argTypes = {DATE}, returnType = BIGINT)
})
public static ConstantOperator unixTimestamp(ConstantOperator arg) {
LocalDateTime dt = arg.getDatetime();
ZonedDateTime zdt = ZonedDateTime.of(dt, TimeUtils.getTimeZone().toZoneId());
long value = zdt.toEpochSecond();
if (value < 0 || value > TimeUtils.MAX_UNIX_TIMESTAMP) {
value = 0;
}
return ConstantOperator.createBigint(value);
} | @Test
public void unixTimestamp() {
ConstantOperator codt = ConstantOperator.createDatetime(LocalDateTime.of(2050, 3, 23, 9, 23, 55));
assertEquals(2531611435L,
ScalarOperatorFunctions.unixTimestamp(codt).getBigint());
assertEquals(1427073835L,
ScalarOperatorFunctions.unixTimestamp(O_DT_20150323_092355).getBigint());
} |
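The clamping rule isolated as a sketch: any epoch-second value outside [0, MAX_UNIX_TIMESTAMP] collapses to 0. The upper bound below is a placeholder assumption, not necessarily StarRocks's constant:
import java.time.LocalDateTime;
import java.time.ZoneOffset;

public class UnixTimestampClampSketch {
    static long clamp(long epochSecond, long maxUnixTimestamp) {
        return (epochSecond < 0 || epochSecond > maxUnixTimestamp) ? 0 : epochSecond;
    }

    public static void main(String[] args) {
        long maxUnixTimestamp = 253_402_271_999L; // placeholder upper bound, an assumption
        long pre1970 = LocalDateTime.of(1960, 1, 1, 0, 0).toEpochSecond(ZoneOffset.UTC); // negative
        System.out.println(clamp(pre1970, maxUnixTimestamp));        // 0: clamped
        System.out.println(clamp(2_531_611_435L, maxUnixTimestamp)); // 2531611435: in range, kept
    }
}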
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
if ( val == null ) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( TypeUtil.formatValue(val, false) );
}
} | @Test
void invokePeriodYears() {
FunctionTestUtil.assertResult(stringFunction.invoke(Period.ofYears(24)), "P24Y");
FunctionTestUtil.assertResult(stringFunction.invoke(Period.ofYears(-24)), "-P24Y");
} |
@Override
public String getName() {
return FUNCTION_NAME;
} | @Test
public void testSubtractionTransformFunction() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("sub(%s,%s)", INT_SV_COLUMN, LONG_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
Assert.assertEquals(transformFunction.getName(), SubtractionTransformFunction.FUNCTION_NAME);
double[] expectedValues = new double[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = (double) _intSVValues[i] - (double) _longSVValues[i];
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("sub(%s,%s)", LONG_SV_COLUMN, FLOAT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = (double) _longSVValues[i] - (double) _floatSVValues[i];
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("sub(%s,%s)", FLOAT_SV_COLUMN, DOUBLE_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = (double) _floatSVValues[i] - _doubleSVValues[i];
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("sub(%s,%s)", DOUBLE_SV_COLUMN, STRING_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = _doubleSVValues[i] - Double.parseDouble(_stringSVValues[i]);
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("sub(%s,%s)", STRING_SV_COLUMN, INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = Double.parseDouble(_stringSVValues[i]) - (double) _intSVValues[i];
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(
String.format("sub(sub(sub(sub(sub(12,%s),%s),sub(sub(%s,%s),0.34)),%s),%s)", STRING_SV_COLUMN,
DOUBLE_SV_COLUMN, FLOAT_SV_COLUMN, LONG_SV_COLUMN, INT_SV_COLUMN, DOUBLE_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = (((((12d - Double.parseDouble(_stringSVValues[i])) - _doubleSVValues[i]) - (
((double) _floatSVValues[i] - (double) _longSVValues[i]) - 0.34)) - (double) _intSVValues[i])
- _doubleSVValues[i]);
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(
String.format("sub(sub(sub(sub(sub(12,%s),%s),sub(sub(%s,%s),0.34)),%s),%s)", STRING_SV_COLUMN,
DOUBLE_SV_COLUMN, FLOAT_SV_COLUMN, LONG_SV_COLUMN, INT_SV_COLUMN, BIG_DECIMAL_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
BigDecimal[] expectedBigDecimalValues = new BigDecimal[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedBigDecimalValues[i] = (BigDecimal.valueOf(
(((12d - Double.parseDouble(_stringSVValues[i])) - _doubleSVValues[i]) - (
((double) _floatSVValues[i] - (double) _longSVValues[i]) - 0.34)) - (double) _intSVValues[i])
.subtract(_bigDecimalSVValues[i]));
}
testTransformFunction(transformFunction, expectedBigDecimalValues);
} |
public static Schema project(Schema schema, Set<Integer> fieldIds) {
Preconditions.checkNotNull(schema, "Schema cannot be null");
Types.StructType result = project(schema.asStruct(), fieldIds);
if (schema.asStruct().equals(result)) {
return schema;
} else if (result != null) {
if (schema.getAliases() != null) {
return new Schema(result.fields(), schema.getAliases());
} else {
return new Schema(result.fields());
}
}
return new Schema(Collections.emptyList(), schema.getAliases());
} | @Test
public void testProjectEmpty() {
Schema schema =
new Schema(
Lists.newArrayList(
required(10, "a", Types.IntegerType.get()),
required(11, "A", Types.IntegerType.get()),
required(
12,
"someStruct",
Types.StructType.of(
required(13, "b", Types.IntegerType.get()),
required(14, "B", Types.IntegerType.get()),
required(
15,
"anotherStruct",
Types.StructType.of(
required(16, "c", Types.IntegerType.get()),
required(17, "C", Types.IntegerType.get())))))));
Schema expectedDepthOne =
new Schema(Lists.newArrayList(required(12, "someStruct", Types.StructType.of())));
Schema actualDepthOne = TypeUtil.project(schema, Sets.newHashSet(12));
assertThat(actualDepthOne.asStruct()).isEqualTo(expectedDepthOne.asStruct());
Schema expectedDepthTwo =
new Schema(
Lists.newArrayList(
required(
12,
"someStruct",
Types.StructType.of(required(15, "anotherStruct", Types.StructType.of())))));
Schema actualDepthTwo = TypeUtil.project(schema, Sets.newHashSet(12, 15));
assertThat(actualDepthTwo.asStruct()).isEqualTo(expectedDepthTwo.asStruct());
} |
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
return usage(args);
}
String action = args[0];
String name = args[1];
int result;
if (A_LOAD.equals(action)) {
result = loadClass(name);
} else if (A_CREATE.equals(action)) {
//first load to separate load errors from create
result = loadClass(name);
if (result == SUCCESS) {
//class loads, so instantiate it
result = createClassInstance(name);
}
} else if (A_RESOURCE.equals(action)) {
result = loadResource(name);
} else if (A_PRINTRESOURCE.equals(action)) {
result = dumpResource(name);
} else {
result = usage(args);
}
return result;
} | @Test
public void testLoadFindsLog4J() throws Throwable {
run(FindClass.SUCCESS, FindClass.A_RESOURCE, LOG4J_PROPERTIES);
} |
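
A minimal usage sketch, relying only on members the snippets above already reference (FindClass.A_CREATE, FindClass.SUCCESS, the public run method): the "create" action loads the class first, so load failures are reported separately from instantiation failures.

public static void main(String[] args) throws Exception {
    FindClass tool = new FindClass();
    int rc = tool.run(new String[] {FindClass.A_CREATE, "java.util.ArrayList"});
    // SUCCESS here means the class both loads and instantiates via a no-arg constructor
    assert rc == FindClass.SUCCESS;
}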
@Override
public void deleteTenantPackage(Long id) {
        // Validate that the tenant package exists
validateTenantPackageExists(id);
        // Validate that the package is not in use
validateTenantUsed(id);
        // Delete it
tenantPackageMapper.deleteById(id);
} | @Test
public void testDeleteTenantPackage_used() {
        // mock data
        TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class);
        tenantPackageMapper.insert(dbTenantPackage); // @Sql: insert an existing record first
        // Prepare parameters
        Long id = dbTenantPackage.getId();
        // mock that a tenant is using this package
        when(tenantService.getTenantCountByPackageId(eq(id))).thenReturn(1L);
        // Invoke and assert the exception
assertServiceException(() -> tenantPackageService.deleteTenantPackage(id), TENANT_PACKAGE_USED);
} |
public ParsedQuery parse(final String query) throws ParseException {
final TokenCollectingQueryParser parser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
parser.setSplitOnWhitespace(true);
parser.setAllowLeadingWildcard(allowLeadingWildcard);
final Query parsed = parser.parse(query);
final ParsedQuery.Builder builder = ParsedQuery.builder().query(query);
builder.tokensBuilder().addAll(parser.getTokens());
final TermCollectingQueryVisitor visitor = new TermCollectingQueryVisitor(ANALYZER, parser.getTokenLookup());
parsed.visit(visitor);
builder.termsBuilder().addAll(visitor.getParsedTerms());
return builder.build();
} | @Test
void testFuzzyQuery() throws ParseException {
final ParsedQuery query = parser.parse("fuzzy~");
assertThat(query.terms())
.hasOnlyOneElementSatisfying(term -> {
assertThat(term.field()).isEqualTo("_default_");
assertThat(term.value()).isEqualTo("fuzzy");
});
} |
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
} | @Test
public void testMissingColumn() {
assertThatThrownBy(
() -> new InclusiveMetricsEvaluator(SCHEMA, lessThan("missing", 5)).eval(FILE))
.isInstanceOf(ValidationException.class)
.hasMessageContaining("Cannot find field 'missing'");
} |
@Override
public Monitor monitor(final Host bookmark, final Callback callback) {
final String url = toURL(bookmark);
return new Reachability.Monitor() {
private final SystemConfigurationReachability.Native monitor = SystemConfigurationReachability.Native.monitorForUrl(url);
private final NotificationFilterCallback listener = new NotificationFilterCallback(callback);
@Override
public Monitor start() {
notificationCenter.addObserver(listener.id(), Foundation.selector("notify:"),
"kNetworkReachabilityChangedNotification", monitor.id());
monitor.startReachabilityMonitor();
return this;
}
@Override
public Monitor stop() {
monitor.stopReachabilityMonitor();
notificationCenter.removeObserver(listener.id());
return this;
}
};
} | @Test
public void testMonitor() {
final Reachability r = new SystemConfigurationReachability();
final Reachability.Monitor monitor = r.monitor(new Host(new TestProtocol(Scheme.https), "cyberduck.io", 80), () -> {
}).start();
assertSame(monitor, monitor.stop());
} |
public void deleteGroup(String groupName) {
Iterator<PipelineConfigs> iterator = this.iterator();
while (iterator.hasNext()) {
PipelineConfigs currentGroup = iterator.next();
if (currentGroup.isNamed(groupName)) {
if (!currentGroup.isEmpty()) {
throw new UnprocessableEntityException("Failed to delete group " + groupName + " because it was not empty.");
}
iterator.remove();
break;
}
}
} | @Test
public void shouldThrowExceptionWhenDeletingGroupWhenNotEmpty() {
PipelineConfig p1Config = createPipelineConfig("pipeline1", "stage1");
PipelineConfigs group = createGroup("group", p1Config);
PipelineGroups groups = new PipelineGroups(group);
assertThrows(UnprocessableEntityException.class, () -> groups.deleteGroup("group"));
} |
public static ValidOffsetAndEpoch snapshot(OffsetAndEpoch offsetAndEpoch) {
return new ValidOffsetAndEpoch(Kind.SNAPSHOT, offsetAndEpoch);
} | @Test
void snapshot() {
ValidOffsetAndEpoch validOffsetAndEpoch = ValidOffsetAndEpoch.snapshot(new OffsetAndEpoch(0, 0));
assertEquals(ValidOffsetAndEpoch.Kind.SNAPSHOT, validOffsetAndEpoch.kind());
} |
boolean isHeartbeatTopic(String topic) {
return replicationPolicy.isHeartbeatsTopic(topic);
} | @Test
public void testIsHeartbeatTopic() {
MirrorClient client = new FakeMirrorClient();
assertTrue(client.isHeartbeatTopic("heartbeats"));
assertTrue(client.isHeartbeatTopic("source1.heartbeats"));
assertTrue(client.isHeartbeatTopic("source2.source1.heartbeats"));
assertFalse(client.isHeartbeatTopic("heartbeats!"));
assertFalse(client.isHeartbeatTopic("!heartbeats"));
assertFalse(client.isHeartbeatTopic("source1heartbeats"));
assertFalse(client.isHeartbeatTopic("source1-heartbeats"));
} |
@Override
public SelectedFeatureSet select(Dataset<Label> dataset) {
FSMatrix data = FSMatrix.buildMatrix(dataset,numBins);
ImmutableFeatureMap fmap = data.getFeatureMap();
int max = k == -1 ? fmap.size() : Math.min(k,fmap.size());
int numFeatures = fmap.size();
boolean[] unselectedFeatures = new boolean[numFeatures];
Arrays.fill(unselectedFeatures, true);
int[] selectedFeatures = new int[max];
double[] selectedScores = new double[max];
ForkJoinPool fjp = null;
Pair<Integer,Double> maxPair;
if (numThreads > 1) {
fjp = new ForkJoinPool(numThreads);
try {
maxPair = fjp.submit(() -> IntStream.range(0, numFeatures).parallel().mapToObj(i -> new Pair<>(i, data.mi(i))).max(Comparator.comparingDouble(Pair::getB)).get()).get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
} else {
maxPair = IntStream.range(0, numFeatures).mapToObj(i -> new Pair<>(i,data.mi(i))).max(Comparator.comparingDouble(Pair::getB)).get();
}
int selectedIdx = maxPair.getA();
selectedFeatures[0] = selectedIdx;
unselectedFeatures[selectedIdx] = false;
selectedScores[0] = maxPair.getB();
logger.log(Level.INFO,"Itr 0: selected feature " + fmap.get(selectedIdx).getName() + ", score = " + selectedScores[0]);
double[] jmiScore = new double[numFeatures];
//
// Select features in max JMI order
for (int i = 1; i < max; i++) {
if (numThreads > 1) {
final int prevIdx = selectedFeatures[i-1];
try {
double[] updates = fjp.submit(() -> IntStream.range(0, numFeatures).parallel().mapToDouble(j -> unselectedFeatures[j] ? data.jmi(j, prevIdx) : 0.0).toArray()).get();
for (int j = 0; j < jmiScore.length; j++) {
jmiScore[j] += updates[j];
}
maxPair = fjp.submit(() -> IntStream.range(0, numFeatures).parallel().filter(j -> unselectedFeatures[j]).mapToObj(j -> new Pair<>(j, jmiScore[j])).max(Comparator.comparingDouble(Pair::getB)).get()).get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
} else {
int maxIndex = -1;
double maxScore = Double.NEGATIVE_INFINITY;
for (int j = 0; j < numFeatures; j++) {
if (unselectedFeatures[j]) {
int prevIdx = selectedFeatures[i-1];
jmiScore[j] += data.jmi(j,prevIdx);
if (jmiScore[j] > maxScore) {
maxScore = jmiScore[j];
maxIndex = j;
}
}
}
maxPair = new Pair<>(maxIndex,maxScore);
}
int maxIdx = maxPair.getA();
selectedFeatures[i] = maxIdx;
unselectedFeatures[maxIdx] = false;
selectedScores[i] = maxPair.getB() / i;
logger.log(Level.INFO,"Itr " + i + ": selected feature " + fmap.get(maxIdx).getName() + ", score = " + maxPair.getB() + ", average score = " + selectedScores[i]);
}
if (fjp != null) {
fjp.shutdown();
}
ArrayList<String> names = new ArrayList<>();
ArrayList<Double> scores = new ArrayList<>();
for (int i = 0; i < max; i++) {
names.add(fmap.get(selectedFeatures[i]).getName());
scores.add(selectedScores[i]);
}
FeatureSetProvenance provenance = new FeatureSetProvenance(SelectedFeatureSet.class.getName(),dataset.getProvenance(),getProvenance());
return new SelectedFeatureSet(names,scores,isOrdered(),provenance);
} | @Test
public void parallelismTest() {
Dataset<Label> dataset = MIMTest.createDataset();
JMI sequential = new JMI(-1,5,1);
SelectedFeatureSet sequentialSet = sequential.select(dataset);
JMI parallel = new JMI(-1,5,4);
SelectedFeatureSet parallelSet = parallel.select(dataset);
assertEquals(sequentialSet.featureNames(),parallelSet.featureNames());
double[] sequentialScores = Util.toPrimitiveDouble(sequentialSet.featureScores());
double[] parallelScores = Util.toPrimitiveDouble(parallelSet.featureScores());
assertArrayEquals(sequentialScores,parallelScores,1e-15);
} |
public boolean match(String left, String right) {
if (right == null || left == null) {
return false;
}
if (right.startsWith("\"") && right.endsWith("\"")) {
right = right.substring(1, right.length() - 1);
}
if (right.startsWith("%") && right.endsWith("%")) { // %keyword%
return left.contains(right.substring(1, right.length() - 1));
}
return (right.startsWith("%") && left.endsWith(right.substring(1))) // %suffix
|| (right.endsWith("%") && left.startsWith(right.substring(0, right.length() - 1))) // prefix%
;
} | @Test
public void testLike() {
assertTrue(new LikeMatch().match("MaxBlack", "%Black"));
assertTrue(new LikeMatch().match("MaxBlack", "Max%"));
assertTrue(new LikeMatch().match("MaxBlack", "%axBl%"));
assertFalse(new LikeMatch().match("CarolineChanning", "Max%"));
assertFalse(new LikeMatch().match("CarolineChanning", "%Max"));
assertTrue(new LikeMatch().match("MaxBlack", "\"%Black\""));
assertFalse(new LikeMatch().match("CarolineChanning", "\"Max%\""));
} |
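
One behavior worth calling out, as a short sketch against the LikeMatch class above: a pattern containing no '%' wildcard never matches, even on exact equality, because only the three wildcard shapes (%x, x%, %x%) are handled.

LikeMatch like = new LikeMatch();
boolean exact = like.match("MaxBlack", "MaxBlack");   // false: no wildcard, no match
boolean prefix = like.match("MaxBlack", "MaxBlack%"); // true: trailing '%' makes it a prefix test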
@Override
public CompletableFuture<ListGroupsResponseData> listGroups(
RequestContext context,
ListGroupsRequestData request
) {
if (!isActive.get()) {
return CompletableFuture.completedFuture(new ListGroupsResponseData()
.setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
);
}
final List<CompletableFuture<List<ListGroupsResponseData.ListedGroup>>> futures = FutureUtils.mapExceptionally(
runtime.scheduleReadAllOperation(
"list-groups",
(coordinator, lastCommittedOffset) -> coordinator.listGroups(
request.statesFilter(),
request.typesFilter(),
lastCommittedOffset
)
),
exception -> {
exception = Errors.maybeUnwrapException(exception);
if (exception instanceof NotCoordinatorException) {
return Collections.emptyList();
} else {
throw new CompletionException(exception);
}
}
);
return FutureUtils
.combineFutures(futures, ArrayList::new, List::addAll)
.thenApply(groups -> new ListGroupsResponseData().setGroups(groups))
.exceptionally(exception -> handleOperationException(
"list-groups",
request,
exception,
(error, __) -> new ListGroupsResponseData().setErrorCode(error.code())
));
} | @Test
public void testListGroupsWhenNotStarted() throws ExecutionException, InterruptedException {
CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
GroupCoordinatorService service = new GroupCoordinatorService(
new LogContext(),
createConfig(),
runtime,
new GroupCoordinatorMetrics(),
createConfigManager()
);
ListGroupsRequestData request = new ListGroupsRequestData();
CompletableFuture<ListGroupsResponseData> future = service.listGroups(
requestContext(ApiKeys.LIST_GROUPS),
request
);
assertEquals(
new ListGroupsResponseData()
.setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()),
future.get()
);
} |
public static @Nullable Instant fromCloudTime(String time) {
Matcher matcher = TIME_PATTERN.matcher(time);
if (!matcher.matches()) {
return null;
}
int year = Integer.parseInt(matcher.group(1));
int month = Integer.parseInt(matcher.group(2));
int day = Integer.parseInt(matcher.group(3));
int hour = Integer.parseInt(matcher.group(4));
int minute = Integer.parseInt(matcher.group(5));
int second = Integer.parseInt(matcher.group(6));
int millis = computeMillis(matcher.group(7));
return new DateTime(
year, month, day, hour, minute, second, millis, ISOChronology.getInstanceUTC())
.toInstant();
} | @Test
public void fromCloudTimeShouldParseTimeStrings() {
assertEquals(new Instant(0), fromCloudTime("1970-01-01T00:00:00Z"));
assertEquals(new Instant(1), fromCloudTime("1970-01-01T00:00:00.001Z"));
assertEquals(new Instant(1), fromCloudTime("1970-01-01T00:00:00.001000Z"));
assertEquals(new Instant(1), fromCloudTime("1970-01-01T00:00:00.001001Z"));
assertEquals(new Instant(1), fromCloudTime("1970-01-01T00:00:00.001000000Z"));
assertEquals(new Instant(1), fromCloudTime("1970-01-01T00:00:00.001000001Z"));
assertEquals(new Instant(0), fromCloudTime("1970-01-01T00:00:00.0Z"));
assertEquals(new Instant(0), fromCloudTime("1970-01-01T00:00:00.00Z"));
assertEquals(new Instant(420), fromCloudTime("1970-01-01T00:00:00.42Z"));
assertEquals(new Instant(300), fromCloudTime("1970-01-01T00:00:00.3Z"));
assertEquals(new Instant(20), fromCloudTime("1970-01-01T00:00:00.02Z"));
assertNull(fromCloudTime(""));
assertNull(fromCloudTime("1970-01-01T00:00:00"));
assertNull(fromCloudTime("1970-01-01T00:00:00.1e3Z"));
} |
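
The computeMillis helper is not shown above; a plausible minimal sketch consistent with the test expectations (".42Z" gives 420 ms, ".001000001Z" gives 1 ms) pads or truncates the captured fraction digits to exactly three. The helper name and the assumption that group(7) excludes the dot are hypothetical.

// Hypothetical stand-in for the real computeMillis(): normalize the captured
// fractional-second digits to milliseconds.
static int computeMillisSketch(String fraction) {
    if (fraction == null || fraction.isEmpty()) {
        return 0;                                        // no fractional part captured
    }
    String digits = (fraction + "000").substring(0, 3);  // "42" -> "420", "001000001" -> "001"
    return Integer.parseInt(digits);
}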
public long[] decode(String hash) {
if (hash.isEmpty()) {
return new long[0];
}
String validChars = this.alphabet + this.guards + this.seps;
for (int i = 0; i < hash.length(); i++) {
if (validChars.indexOf(hash.charAt(i)) == -1) {
return new long[0];
}
}
return this._decode(hash, this.alphabet);
} | @Test
public void test_wrong_decoding() {
final Hashids a = new Hashids("this is my pepper");
final long[] b = a.decode("NkK9");
Assert.assertEquals(b.length, 0);
} |
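
A small usage sketch, assuming the Hashids class above and its usual encode counterpart: a hash produced under one salt decodes to nothing under another, so decode doubles as an integrity check. The "NkK9" value is the reference-test encoding of 12345 under the salt "this is my salt".

Hashids encoder = new Hashids("this is my salt");
String hash = encoder.encode(12345L);        // "NkK9" in the hashids reference tests
Hashids decoder = new Hashids("this is my pepper");
long[] none = decoder.decode(hash);          // empty: salt mismatch
long[] same = encoder.decode(hash);          // [12345]: same salt round-trips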
@SafeVarargs
static <K, V> Mono<Map<K, V>> toMonoWithExceptionFilter(Map<K, KafkaFuture<V>> values,
Class<? extends KafkaException>... classes) {
if (values.isEmpty()) {
return Mono.just(Map.of());
}
List<Mono<Tuple2<K, Optional<V>>>> monos = values.entrySet().stream()
.map(e ->
toMono(e.getValue())
.map(r -> Tuples.of(e.getKey(), Optional.of(r)))
.defaultIfEmpty(Tuples.of(e.getKey(), Optional.empty())) //tracking empty Monos
.onErrorResume(
// tracking Monos with suppressible error
th -> Stream.of(classes).anyMatch(clazz -> th.getClass().isAssignableFrom(clazz)),
th -> Mono.just(Tuples.of(e.getKey(), Optional.empty()))))
.toList();
return Mono.zip(
monos,
resultsArr -> Stream.of(resultsArr)
.map(obj -> (Tuple2<K, Optional<V>>) obj)
.filter(t -> t.getT2().isPresent()) //skipping empty & suppressible-errors
.collect(Collectors.toMap(Tuple2::getT1, t -> t.getT2().get()))
);
} | @Test
void testToMonoWithExceptionFilter() {
var failedFuture = new KafkaFutureImpl<String>();
failedFuture.completeExceptionally(new UnknownTopicOrPartitionException());
var okFuture = new KafkaFutureImpl<String>();
okFuture.complete("done");
var emptyFuture = new KafkaFutureImpl<String>();
emptyFuture.complete(null);
Map<String, KafkaFuture<String>> arg = Map.of(
"failure", failedFuture,
"ok", okFuture,
"empty", emptyFuture
);
StepVerifier.create(toMonoWithExceptionFilter(arg, UnknownTopicOrPartitionException.class))
.assertNext(result -> assertThat(result).hasSize(1).containsEntry("ok", "done"))
.verifyComplete();
} |
@Override
public int hashCode()
{
return Objects.hash(_value, _status);
} | @Test(dataProvider = "testHashCodeDataProvider")
public void testHashCode(
boolean hasSameHashCode,
@Nonnull GetResult<TestRecordTemplateClass.Foo> request1,
@Nonnull GetResult<TestRecordTemplateClass.Foo> request2
)
{
if (hasSameHashCode)
{
assertEquals(request1.hashCode(), request2.hashCode());
}
else
{
assertNotEquals(request1.hashCode(), request2.hashCode());
}
} |
T call() throws IOException, RegistryException {
String apiRouteBase = "https://" + registryEndpointRequestProperties.getServerUrl() + "/v2/";
URL initialRequestUrl = registryEndpointProvider.getApiRoute(apiRouteBase);
return call(initialRequestUrl);
} | @Test
public void testCall_logErrorOnIoExceptions() throws IOException, RegistryException {
IOException ioException = new IOException("detailed exception message");
setUpRegistryResponse(ioException);
try {
endpointCaller.call();
Assert.fail();
} catch (IOException ex) {
Assert.assertSame(ioException, ex);
Mockito.verify(mockEventHandlers)
.dispatch(
LogEvent.error("\u001B[31;1mI/O error for image [serverUrl/imageName]:\u001B[0m"));
Mockito.verify(mockEventHandlers)
.dispatch(LogEvent.error("\u001B[31;1m java.io.IOException\u001B[0m"));
Mockito.verify(mockEventHandlers)
.dispatch(LogEvent.error("\u001B[31;1m detailed exception message\u001B[0m"));
Mockito.verifyNoMoreInteractions(mockEventHandlers);
}
} |
public static MatchAllQueryBuilder matchAllQuery() {
return new MatchAllQueryBuilder();
} | @Test
public void testMatchAllQuery() throws IOException {
assertEquals("{\"match_all\":{}}",
toJson(QueryBuilders.matchAllQuery()));
} |
public static CharArraySet getStopWords() {
final CharArraySet words = StopFilter.makeStopSet(ADDITIONAL_STOP_WORDS, true);
words.addAll(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
return words;
} | @Test
public void testGetStopWords() {
CharArraySet result = SearchFieldAnalyzer.getStopWords();
assertTrue(result.size() > 20);
assertTrue(result.contains("software"));
} |
public void isNotEqualTo(@Nullable Object unexpected) {
standardIsNotEqualTo(unexpected);
} | @Test
public void isNotEqualToWithDifferentTypesAndSameToString() {
Object a = "true";
Object b = true;
assertThat(a).isNotEqualTo(b);
} |
@Override
public Set<Map<String, Object>> connectorPartitions(String connectorName) {
return connectorPartitions.getOrDefault(connectorName, Collections.emptySet());
} | @Test
public void testConnectorPartitions() throws Exception {
@SuppressWarnings("unchecked")
Callback<Void> setCallback = mock(Callback.class);
// This test actually requires the offset store to track deserialized source partitions, so we can't use the member variable mock converter
JsonConverter jsonConverter = new JsonConverter();
jsonConverter.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true);
Map<ByteBuffer, ByteBuffer> serializedPartitionOffsets = new HashMap<>();
serializedPartitionOffsets.put(
serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")),
serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue"))
);
store.set(serializedPartitionOffsets, setCallback).get();
serializedPartitionOffsets.put(
serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")),
serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue2"))
);
serializedPartitionOffsets.put(
serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue2")),
serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue"))
);
serializedPartitionOffsets.put(
serializeKey(jsonConverter, "connector2", Collections.singletonMap("partitionKey", "partitionValue")),
serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue"))
);
store.set(serializedPartitionOffsets, setCallback).get();
store.stop();
// Restore into a new store to ensure correct reload from scratch
FileOffsetBackingStore restore = new FileOffsetBackingStore(jsonConverter);
restore.configure(config);
restore.start();
Set<Map<String, Object>> connectorPartitions1 = restore.connectorPartitions("connector1");
Set<Map<String, Object>> expectedConnectorPartition1 = new HashSet<>();
expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue1"));
expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue2"));
assertEquals(expectedConnectorPartition1, connectorPartitions1);
Set<Map<String, Object>> connectorPartitions2 = restore.connectorPartitions("connector2");
Set<Map<String, Object>> expectedConnectorPartition2 = Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue"));
assertEquals(expectedConnectorPartition2, connectorPartitions2);
serializedPartitionOffsets.clear();
// Null valued offset for a partition key should remove that partition for the connector
serializedPartitionOffsets.put(
serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")),
null
);
restore.set(serializedPartitionOffsets, setCallback).get();
connectorPartitions1 = restore.connectorPartitions("connector1");
assertEquals(Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue2")), connectorPartitions1);
verify(setCallback, times(3)).onCompletion(isNull(), isNull());
} |
@Override
public void writeClass() throws Exception {
com.squareup.kotlinpoet.ClassName EVM_ANNOTATION =
new com.squareup.kotlinpoet.ClassName("org.web3j", "EVMTest");
com.squareup.kotlinpoet.AnnotationSpec.Builder annotationSpec =
com.squareup.kotlinpoet.AnnotationSpec.builder(EVM_ANNOTATION);
if (JavaVersion.getJavaVersionAsDouble() < 11) {
com.squareup.kotlinpoet.ClassName gethContainer =
new com.squareup.kotlinpoet.ClassName("org.web3j", "NodeType");
annotationSpec.addMember("%T.GETH", gethContainer);
}
com.squareup.kotlinpoet.ClassName TEST_INSTANCE_ANNOTATION =
new com.squareup.kotlinpoet.ClassName("org.junit.jupiter.api", "TestInstance");
com.squareup.kotlinpoet.AnnotationSpec.Builder testAnnotationSpec =
com.squareup.kotlinpoet.AnnotationSpec.builder(TEST_INSTANCE_ANNOTATION);
com.squareup.kotlinpoet.ClassName lifeCycle =
new com.squareup.kotlinpoet.ClassName("", "TestInstance");
testAnnotationSpec.addMember("%T.Lifecycle.PER_CLASS", lifeCycle);
PropertySpec contractInit =
PropertySpec.builder(toCamelCase(theContract), theContract)
.addModifiers(KModifier.LATEINIT, KModifier.PRIVATE)
.mutable(true)
.build();
TypeSpec testClass =
TypeSpec.classBuilder(theContract.getSimpleName() + "Test")
.addFunctions(MethodFilter.generateFunctionSpecsForEachTest(theContract))
.addAnnotation((annotationSpec).build())
.addAnnotation(testAnnotationSpec.build())
.addProperty(contractInit)
.build();
FileSpec kotlinFile =
FileSpec.builder(packageName, theContract.getSimpleName() + "Test")
.addType(testClass)
.build();
kotlinFile.writeTo(new File(writePath));
} | @Test
public void testThatExceptionIsThrownWhenAClassIsNotWritten() {
assertThrows(
NullPointerException.class,
() -> new JavaClassGenerator(null, "org.com", temp.toString()).writeClass());
} |
@Override
@Transactional(rollbackFor = Exception.class) // transactional: roll back the whole import on any exception
public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) {
    // 1.1 Validate parameters
if (CollUtil.isEmpty(importUsers)) {
throw exception(USER_IMPORT_LIST_IS_EMPTY);
}
    // 1.2 The initial password must not be empty
String initPassword = configApi.getConfigValueByKey(USER_INIT_PASSWORD_KEY).getCheckedData();
if (StrUtil.isEmpty(initPassword)) {
throw exception(USER_IMPORT_INIT_PASSWORD);
}
    // 2. Iterate and create or update one by one
UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>())
.updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build();
importUsers.forEach(importUser -> {
        // 2.1.1 Validate that the fields meet the requirements
try {
ValidationUtils.validate(BeanUtils.toBean(importUser, UserSaveReqVO.class).setPassword(initPassword));
} catch (ConstraintViolationException ex){
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
        // 2.1.2 Validate and record any reason the user cannot be created or updated
try {
validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(),
importUser.getDeptId(), null);
} catch (ServiceException ex) {
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
        // 2.2.1 If the user does not exist, insert it
AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername());
if (existUser == null) {
userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class)
                    .setPassword(encodePassword(initPassword)).setPostIds(new HashSet<>())); // set the default password and an empty post-id set
respVO.getCreateUsernames().add(importUser.getUsername());
return;
}
        // 2.2.2 If the user exists, check whether updating is allowed
if (!isUpdateSupport) {
respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg());
return;
}
AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class);
updateUser.setId(existUser.getId());
userMapper.updateById(updateUser);
respVO.getUpdateUsernames().add(importUser.getUsername());
});
return respVO;
} | @Test
public void testImportUserList_02() {
    // Prepare parameters
UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
        o.setStatus(randomEle(CommonStatusEnum.values()).getStatus()); // keep status within the valid range
        o.setSex(randomEle(SexEnum.values()).getSex()); // keep sex within the valid range
o.setEmail(randomEmail());
o.setMobile(randomMobile());
});
    // mock deptService methods
DeptDO dept = randomPojo(DeptDO.class, o -> {
o.setId(importUser.getDeptId());
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
});
when(deptService.getDept(eq(dept.getId()))).thenReturn(dept);
    // mock passwordEncoder methods
when(passwordEncoder.encode(eq("yudaoyuanma"))).thenReturn("java");
    // Invoke
UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
    // Assert
assertEquals(1, respVO.getCreateUsernames().size());
AdminUserDO user = userMapper.selectByUsername(respVO.getCreateUsernames().get(0));
assertPojoEquals(importUser, user);
assertEquals("java", user.getPassword());
assertEquals(0, respVO.getUpdateUsernames().size());
assertEquals(0, respVO.getFailureUsernames().size());
} |
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
} | @Test
public void matchTcpSrcMaskedTest() {
Criterion criterion = Criteria.matchTcpSrcMasked(tpPort, tpPortMask);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
} |
static void registerStateStores(final Logger log,
final String logPrefix,
final ProcessorTopology topology,
final ProcessorStateManager stateMgr,
final StateDirectory stateDirectory,
final InternalProcessorContext processorContext) {
if (topology.stateStores().isEmpty()) {
return;
}
final TaskId id = stateMgr.taskId();
if (!stateDirectory.lock(id)) {
throw new LockException(String.format("%sFailed to lock the state directory for task %s", logPrefix, id));
}
log.debug("Acquired state directory lock");
final boolean storeDirsEmpty = stateDirectory.directoryForTaskIsEmpty(id);
stateMgr.registerStateStores(topology.stateStores(), processorContext);
log.debug("Registered state stores");
// We should only load checkpoint AFTER the corresponding state directory lock has been acquired and
// the state stores have been registered; we should not try to load at the state manager construction time.
// See https://issues.apache.org/jira/browse/KAFKA-8574
stateMgr.initializeStoreOffsetsFromCheckpoint(storeDirsEmpty);
log.debug("Initialized state stores");
} | @Test
public void testRegisterStateStores() {
final MockKeyValueStore store1 = new MockKeyValueStore("store1", false);
final MockKeyValueStore store2 = new MockKeyValueStore("store2", false);
final List<StateStore> stateStores = Arrays.asList(store1, store2);
final InOrder inOrder = inOrder(stateManager);
when(topology.stateStores()).thenReturn(stateStores);
when(stateManager.taskId()).thenReturn(taskId);
when(stateDirectory.lock(taskId)).thenReturn(true);
when(stateDirectory.directoryForTaskIsEmpty(taskId)).thenReturn(true);
when(topology.stateStores()).thenReturn(stateStores);
StateManagerUtil.registerStateStores(logger, "logPrefix:",
topology, stateManager, stateDirectory, processorContext);
inOrder.verify(stateManager).registerStateStores(stateStores, processorContext);
inOrder.verify(stateManager).initializeStoreOffsetsFromCheckpoint(true);
verifyNoMoreInteractions(stateManager);
} |
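
A complementary sketch under the same mocks as the test above (JUnit assertThrows assumed available): when the state directory lock cannot be acquired, registerStateStores() fails fast with a LockException before any store is registered.

final List<StateStore> stores = Collections.singletonList(new MockKeyValueStore("store1", false));
when(topology.stateStores()).thenReturn(stores);
when(stateManager.taskId()).thenReturn(taskId);
when(stateDirectory.lock(taskId)).thenReturn(false);
assertThrows(LockException.class, () -> StateManagerUtil.registerStateStores(
    logger, "logPrefix:", topology, stateManager, stateDirectory, processorContext));
verify(stateManager, never()).registerStateStores(any(), any());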
public String getString(String key, String _default) {
Object object = map.get(key);
return object instanceof String ? (String) object : _default;
} | @Test
public void propertyFromStringWithMultiplePropertiesCanBeRetrieved() {
PMap subject = new PMap("foo=valueA|bar=valueB");
assertEquals("valueA", subject.getString("foo", ""));
assertEquals("valueB", subject.getString("bar", ""));
} |
public static SourceOperationResponse performSplit(
SourceSplitRequest request, PipelineOptions options) throws Exception {
return performSplitWithApiLimit(
request, options, DEFAULT_NUM_BUNDLES_LIMIT, DATAFLOW_SPLIT_RESPONSE_API_SIZE_LIMIT);
} | @Test
public void testSplitAndReadBundlesBack() throws Exception {
com.google.api.services.dataflow.model.Source source =
translateIOToCloudSource(CountingSource.upTo(10L), options);
List<WindowedValue<Integer>> elems = readElemsFromSource(options, source);
assertEquals(10L, elems.size());
for (long i = 0; i < 10L; i++) {
assertEquals(valueInGlobalWindow(i), elems.get((int) i));
}
SourceSplitResponse response =
performSplit(
source,
options,
16L /*desiredBundleSizeBytes for two longs*/,
null /* numBundles limit */,
null /* API limit */);
assertEquals("SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED", response.getOutcome());
List<DerivedSource> bundles = response.getBundles();
assertEquals(5, bundles.size());
for (int i = 0; i < 5; ++i) {
DerivedSource bundle = bundles.get(i);
assertEquals("SOURCE_DERIVATION_MODE_INDEPENDENT", bundle.getDerivationMode());
com.google.api.services.dataflow.model.Source bundleSource = bundle.getSource();
assertTrue(bundleSource.getDoesNotNeedSplitting());
bundleSource.setCodec(source.getCodec());
List<WindowedValue<Integer>> xs = readElemsFromSource(options, bundleSource);
assertThat(
"Failed on bundle " + i,
xs,
contains(valueInGlobalWindow(0L + 2 * i), valueInGlobalWindow(1L + 2 * i)));
assertTrue(bundle.getSource().getMetadata().getEstimatedSizeBytes() > 0);
}
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
} | @Test
void invokeNull() {
FunctionTestUtil.assertResultError(floorFunction.invoke(null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(floorFunction.invoke((BigDecimal) null, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(floorFunction.invoke(BigDecimal.ONE, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(floorFunction.invoke(null, BigDecimal.ONE), InvalidParametersEvent.class);
} |
public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
Map<String, String> callerEnv) throws IOException {
return createJarWithClassPath(inputClassPath, pwd, pwd, callerEnv);
} | @Test (timeout = 30000)
public void testCreateJarWithClassPath() throws Exception {
// create files expected to match a wildcard
List<File> wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"),
new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"),
new File(tmp, "wildcard4.JAR"));
for (File wildcardMatch: wildcardMatches) {
Assert.assertTrue("failure creating file: " + wildcardMatch,
wildcardMatch.createNewFile());
}
// create non-jar files, which we expect to not be included in the classpath
Verify.createNewFile(new File(tmp, "text.txt"));
Verify.createNewFile(new File(tmp, "executable.exe"));
Verify.createNewFile(new File(tmp, "README"));
// create classpath jar
String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
String nonExistentSubdir = tmp.getCanonicalPath() + Path.SEPARATOR + "subdir"
+ Path.SEPARATOR;
List<String> classPaths = Arrays.asList("", "cp1.jar", "cp2.jar", wildcardPath,
"cp3.jar", nonExistentSubdir);
String inputClassPath = StringUtils.join(File.pathSeparator, classPaths);
String[] jarCp = FileUtil.createJarWithClassPath(inputClassPath + File.pathSeparator + "unexpandedwildcard/*",
new Path(tmp.getCanonicalPath()), System.getenv());
String classPathJar = jarCp[0];
assertNotEquals("Unexpanded wildcard was not placed in extra classpath", jarCp[1].indexOf("unexpanded"), -1);
// verify classpath by reading manifest from jar file
JarFile jarFile = null;
try {
jarFile = new JarFile(classPathJar);
Manifest jarManifest = jarFile.getManifest();
Assert.assertNotNull(jarManifest);
Attributes mainAttributes = jarManifest.getMainAttributes();
Assert.assertNotNull(mainAttributes);
Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
Assert.assertNotNull(classPathAttr);
List<String> expectedClassPaths = new ArrayList<String>();
for (String classPath: classPaths) {
if (classPath.length() == 0) {
continue;
}
if (wildcardPath.equals(classPath)) {
// add wildcard matches
for (File wildcardMatch: wildcardMatches) {
expectedClassPaths.add(wildcardMatch.getCanonicalFile().toURI().toURL()
.toExternalForm());
}
} else {
File fileCp = null;
if(!new Path(classPath).isAbsolute()) {
fileCp = new File(tmp, classPath).getCanonicalFile();
}
else {
fileCp = new File(classPath).getCanonicalFile();
}
if (nonExistentSubdir.equals(classPath)) {
// expect to maintain trailing path separator if present in input, even
// if directory doesn't exist yet
expectedClassPaths.add(fileCp.toURI().toURL()
.toExternalForm() + Path.SEPARATOR);
} else {
expectedClassPaths.add(fileCp.toURI().toURL()
.toExternalForm());
}
}
}
List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
Collections.sort(expectedClassPaths);
Collections.sort(actualClassPaths);
Assert.assertEquals(expectedClassPaths, actualClassPaths);
} finally {
if (jarFile != null) {
try {
jarFile.close();
} catch (IOException e) {
LOG.warn("exception closing jarFile: " + classPathJar, e);
}
}
}
} |
public static Read read() {
return Read.create();
} | @Test
public void testReadBuildsCorrectly() {
BigtableIO.Read read =
BigtableIO.read()
.withBigtableOptions(BIGTABLE_OPTIONS)
.withTableId("table")
.withInstanceId("instance")
.withProjectId("project")
.withAppProfileId("app-profile")
.withBigtableOptionsConfigurator(PORT_CONFIGURATOR);
assertEquals("options_project", read.getBigtableOptions().getProjectId());
assertEquals("options_instance", read.getBigtableOptions().getInstanceId());
assertEquals("instance", read.getBigtableConfig().getInstanceId().get());
assertEquals("project", read.getBigtableConfig().getProjectId().get());
assertEquals("app-profile", read.getBigtableConfig().getAppProfileId().get());
assertEquals("table", read.getTableId());
assertEquals(PORT_CONFIGURATOR, read.getBigtableConfig().getBigtableOptionsConfigurator());
} |
public void refresh() throws IOException {
updateLock.writeLock().lock();
try {
root = new InnerDesc(fs, fs.getFileStatus(path),
new MinFileFilter(conf.getLong(GRIDMIX_MIN_FILE, 128 * 1024 * 1024),
conf.getLong(GRIDMIX_MAX_TOTAL, 100L * (1L << 40))));
if (0 == root.getSize()) {
throw new IOException("Found no satisfactory file in " + path);
}
} finally {
updateLock.writeLock().unlock();
}
} | @Test
public void testUnsuitable() throws Exception {
try {
final Configuration conf = new Configuration();
// all files 13k or less
conf.setLong(FilePool.GRIDMIX_MIN_FILE, 14 * 1024);
final FilePool pool = new FilePool(conf, base);
pool.refresh();
} catch (IOException e) {
return;
}
fail();
} |
@Override
public ListenableFuture<HttpResponse> sendAsync(HttpRequest httpRequest) {
return sendAsync(httpRequest, null);
} | @Test
public void sendAsync_whenGetRequest_returnsExpectedHttpResponse()
throws IOException, ExecutionException, InterruptedException {
String responseBody = "test response";
mockWebServer.enqueue(
new MockResponse()
.setResponseCode(HttpStatus.OK.code())
.setHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
.setBody(responseBody));
mockWebServer.start();
String requestUrl = mockWebServer.url("/test/get").toString();
HttpResponse response = httpClient.sendAsync(get(requestUrl).withEmptyHeaders().build()).get();
assertThat(response)
.isEqualTo(
HttpResponse.builder()
.setStatus(HttpStatus.OK)
.setHeaders(
HttpHeaders.builder()
.addHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
// MockWebServer always adds this response header.
.addHeader(CONTENT_LENGTH, String.valueOf(responseBody.length()))
.build())
.setBodyBytes(ByteString.copyFrom(responseBody, UTF_8))
.setResponseUrl(HttpUrl.parse(requestUrl))
.build());
} |
@Override
public void distributeIssueChangeEvent(DefaultIssue issue, @Nullable String severity, @Nullable String type, @Nullable String transition,
BranchDto branch, String projectKey) {
Issue changedIssue = new Issue(issue.key(), branch.getKey());
Boolean resolved = isResolved(transition);
if (severity == null && type == null && resolved == null) {
return;
}
IssueChangedEvent event = new IssueChangedEvent(projectKey, new Issue[]{changedIssue},
resolved, severity, type);
persistEvent(event, branch.getProjectUuid());
} | @Test
public void distributeIssueChangeEvent_whenPullRequestIssues_shouldNotDistributeEvents() {
RuleDto rule = db.rules().insert();
ComponentDto project = db.components().insertPublicProject().getMainBranchComponent();
ComponentDto pullRequest = db.components().insertProjectBranch(project, b -> b.setKey("myBranch1")
.setBranchType(BranchType.PULL_REQUEST)
.setMergeBranchUuid(project.uuid()));
BranchDto branch1 = db.getDbClient().branchDao().selectByUuid(db.getSession(), pullRequest.uuid()).get();
ComponentDto file = db.components().insertComponent(newFileDto(pullRequest));
IssueDto issue1 = db.issues().insert(rule, pullRequest, file, i -> i.setSeverity(MAJOR.name()).setType(RuleType.BUG));
DefaultIssue defaultIssue1 = issue1.toDefaultIssue().setCurrentChangeWithoutAddChange(new FieldDiffs()
.setDiff("resolution", null, null)
.setDiff("severity", MAJOR.name(), CRITICAL.name())
.setDiff("type", RuleType.BUG.name(), CODE_SMELL.name()));
Set<DefaultIssue> issues = Set.of(defaultIssue1);
Map<String, ComponentDto> projectsByUuid = new HashMap<>();
projectsByUuid.put(project.branchUuid(), project);
Map<String, BranchDto> branchesByProjectUuid = new HashMap<>();
branchesByProjectUuid.put(project.branchUuid(), branch1);
underTest.distributeIssueChangeEvent(issues, projectsByUuid, branchesByProjectUuid);
Deque<PushEventDto> events = db.getDbClient().pushEventDao()
      .selectChunkByProjectUuids(db.getSession(), Set.of(project.uuid()), 1L, null, 20);
assertThat(events).isEmpty();
} |
public static void copy(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
target.set(entry.getKey(), entry.getValue());
}
} | @Test
public void copy() throws Exception {
Configuration srcConf = new Configuration(false);
Configuration targetConf = new Configuration(false);
srcConf.set("testParameter1", "valueFromSource");
srcConf.set("testParameter2", "valueFromSource");
targetConf.set("testParameter2", "valueFromTarget");
targetConf.set("testParameter3", "valueFromTarget");
ConfigurationUtils.copy(srcConf, targetConf);
assertEquals("valueFromSource", targetConf.get("testParameter1"));
assertEquals("valueFromSource", targetConf.get("testParameter2"));
assertEquals("valueFromTarget", targetConf.get("testParameter3"));
} |
@Override
public <T> void register(Class<T> remoteInterface, T object) {
register(remoteInterface, object, 1);
} | @Test
public void testReactive() {
RedissonReactiveClient r1 = createInstance().reactive();
r1.getRemoteService().register(RemoteInterface.class, new RemoteImpl());
RedissonClient r2 = createInstance();
RemoteInterfaceReactive ri = r2.getRemoteService().get(RemoteInterfaceReactive.class);
Mono<Void> f = ri.voidMethod("someName", 100L);
f.block();
Mono<Long> resFuture = ri.resultMethod(100L);
assertThat(resFuture.block()).isEqualTo(200);
r1.shutdown();
r2.shutdown();
} |
public ApplicationBuilder appendParameter(String key, String value) {
this.parameters = appendParameter(parameters, key, value);
return getThis();
} | @Test
void appendParameter() {
ApplicationBuilder builder = new ApplicationBuilder();
builder.appendParameter("default.num", "one").appendParameter("num", "ONE");
Map<String, String> parameters = builder.build().getParameters();
Assertions.assertTrue(parameters.containsKey("default.num"));
Assertions.assertEquals("ONE", parameters.get("num"));
} |
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"})
void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas,
MigrationDecisionCallback callback) {
assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: "
+ Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas);
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas));
logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas));
}
initState(oldReplicas);
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
// fix cyclic partition replica movements
if (fixCycle(oldReplicas, newReplicas)) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId,
Arrays.toString(newReplicas));
}
}
int currentIndex = 0;
while (currentIndex < oldReplicas.length) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex,
Arrays.toString(state));
}
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
if (newReplicas[currentIndex] == null) {
if (state[currentIndex] != null) {
// replica owner is removed and no one will own this replica
logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1);
state[currentIndex] = null;
}
currentIndex++;
continue;
}
if (state[currentIndex] == null) {
int i = getReplicaIndex(state, newReplicas[currentIndex]);
if (i == -1) {
// fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner
logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (i > currentIndex) {
// SHIFT UP replica from i to currentIndex, copy data from partition owner
logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId,
state[i], i, currentIndex);
callback.migrate(null, -1, -1, state[i], i, currentIndex);
state[currentIndex] = state[i];
state[i] = null;
continue;
}
throw new AssertionError("partitionId=" + partitionId
+ "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas)
+ ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas));
}
if (newReplicas[currentIndex].equals(state[currentIndex])) {
// no change, no action needed
currentIndex++;
continue;
}
if (getReplicaIndex(newReplicas, state[currentIndex]) == -1
&& getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
// MOVE partition replica from its old owner to new owner
logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
int newIndex = getReplicaIndex(newReplicas, state[currentIndex]);
assert newIndex > currentIndex : "partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: "
+ Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state)
+ ", FINAL: " + Arrays.toString(newReplicas);
if (state[newIndex] == null) {
// it is a SHIFT DOWN
logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId,
state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex);
state[newIndex] = state[currentIndex];
} else {
logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
}
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex);
}
assert Arrays.equals(state, newReplicas)
: "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas)
+ " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas);
} | @Test
public void test_SHIFT_UP_nonNullSource_isNoLongerReplica() throws UnknownHostException {
final PartitionReplica[] oldReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5702), uuids[1]),
null,
null,
null,
null,
null,
};
final PartitionReplica[] newReplicas = {
new PartitionReplica(new Address("localhost", 5702), uuids[1]),
null,
null,
null,
null,
null,
null,
};
migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, -1, new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, 0);
} |
@Override
public int compareTo(TtlBucket ttlBucket) {
long startTime1 = getTtlIntervalStartTimeMs();
long startTime2 = ttlBucket.getTtlIntervalStartTimeMs();
return Long.compare(startTime1, startTime2);
} | @Test
public void compareTo() {
TtlBucket firstBucket = new TtlBucket(0);
TtlBucket secondBucket = new TtlBucket(0);
TtlBucket thirdBucket = new TtlBucket(1);
TtlBucket fourthBucket = new TtlBucket(2);
Assert.assertEquals(0, firstBucket.compareTo(firstBucket));
Assert.assertEquals(0, firstBucket.compareTo(secondBucket));
Assert.assertEquals(0, secondBucket.compareTo(firstBucket));
Assert.assertEquals(-1, firstBucket.compareTo(thirdBucket));
Assert.assertEquals(1, fourthBucket.compareTo(firstBucket));
} |
@Override
public double[] extract(Tuple in) {
double[] out = new double[indexes.length];
for (int i = 0; i < indexes.length; i++) {
out[i] = (Double) in.getField(indexes[i]);
}
return out;
} | @Test
void testUserSpecifiedOrder() throws InstantiationException, IllegalAccessException {
Tuple currentTuple = (Tuple) CLASSES[Tuple.MAX_ARITY - 1].newInstance();
for (int i = 0; i < Tuple.MAX_ARITY; i++) {
currentTuple.setField(testDouble[i], i);
}
double[] expected = {
testDouble[5], testDouble[3], testDouble[6], testDouble[7], testDouble[0]
};
arrayEqualityCheck(expected, new FieldsFromTuple(5, 3, 6, 7, 0).extract(currentTuple));
double[] expected2 = {testDouble[0], testDouble[Tuple.MAX_ARITY - 1]};
arrayEqualityCheck(
expected2, new FieldsFromTuple(0, Tuple.MAX_ARITY - 1).extract(currentTuple));
double[] expected3 = {testDouble[Tuple.MAX_ARITY - 1], testDouble[0]};
arrayEqualityCheck(
expected3, new FieldsFromTuple(Tuple.MAX_ARITY - 1, 0).extract(currentTuple));
double[] expected4 = {
testDouble[13],
testDouble[4],
testDouble[5],
testDouble[4],
testDouble[2],
testDouble[8],
testDouble[6],
testDouble[2],
testDouble[8],
testDouble[3],
testDouble[5],
testDouble[2],
testDouble[16],
testDouble[4],
testDouble[3],
testDouble[2],
testDouble[6],
testDouble[4],
testDouble[7],
testDouble[4],
testDouble[2],
testDouble[8],
testDouble[7],
testDouble[2]
};
arrayEqualityCheck(
expected4,
new FieldsFromTuple(
13, 4, 5, 4, 2, 8, 6, 2, 8, 3, 5, 2, 16, 4, 3, 2, 6, 4, 7, 4, 2, 8,
7, 2)
.extract(currentTuple));
} |
@Override
public Set<Entry<Integer, R>> entrySet() {
assert baseDirInitialized();
return entrySet;
} | @Issue("JENKINS-18065")
@Test
public void entrySetSize() {
assertEquals(3, a.entrySet().size());
assertEquals(0, b.entrySet().size());
} |
@Override
public LogicalSchema getSchema() {
return outputSchema;
} | @Test
public void shouldBuildPullQueryOutputSchemaSelectValueAndWindowBounds() {
// Given:
when(keyFormat.isWindowed()).thenReturn(true);
when(source.getSchema()).thenReturn(INPUT_SCHEMA.withPseudoAndKeyColsInValue(true));
final UnqualifiedColumnReferenceExp windowstartRef =
new UnqualifiedColumnReferenceExp(SystemColumns.WINDOWSTART_NAME);
final UnqualifiedColumnReferenceExp windowendRef =
new UnqualifiedColumnReferenceExp(SystemColumns.WINDOWEND_NAME);
selects = ImmutableList.<SelectItem>builder()
.add(new SingleColumn(windowstartRef, Optional.of(SystemColumns.WINDOWSTART_NAME)))
.add((new SingleColumn(windowendRef, Optional.of(SystemColumns.WINDOWEND_NAME))))
.add((new SingleColumn(COL0_REF, Optional.of(COL0)))).build();
when(analysis.getSelectColumnNames()).thenReturn(
ImmutableSet.of(SystemColumns.WINDOWSTART_NAME, SystemColumns.WINDOWEND_NAME, COL0));
// When:
final QueryProjectNode projectNode = new QueryProjectNode(
NODE_ID,
source,
selects,
metaStore,
ksqlConfig,
analysis,
true,
plannerOptions,
false
);
// Then:
final LogicalSchema expected = LogicalSchema.builder()
.keyColumn(SystemColumns.WINDOWSTART_NAME, SqlTypes.BIGINT)
.keyColumn(SystemColumns.WINDOWEND_NAME, SqlTypes.BIGINT)
.valueColumn(COL0, SqlTypes.STRING)
.build();
assertThat(expected, is(projectNode.getSchema()));
} |
@Override
public void recordStateMachineStarted(StateMachineInstance machineInstance, ProcessContext context) {
if (machineInstance != null) {
//if parentId is not null, machineInstance is a SubStateMachine, do not start a new global transaction,
//use parent transaction instead.
String parentId = machineInstance.getParentId();
if (StringUtils.isEmpty(parentId)) {
beginTransaction(machineInstance, context);
}
try {
if (StringUtils.isEmpty(machineInstance.getId()) && seqGenerator != null) {
machineInstance.setId(seqGenerator.generate(DomainConstants.SEQ_ENTITY_STATE_MACHINE_INST));
}
// bind SAGA branch type
RootContext.bindBranchType(BranchType.SAGA);
// save to db
machineInstance.setSerializedStartParams(paramsSerializer.serialize(machineInstance.getStartParams()));
int effect = executeUpdate(stateLogStoreSqls.getRecordStateMachineStartedSql(dbType),
STATE_MACHINE_INSTANCE_TO_STATEMENT_FOR_INSERT, machineInstance);
if (effect < 1) {
throw new StoreException("StateMachineInstance record start error, Xid: " + machineInstance.getId(),
FrameworkErrorCode.OperationDenied);
}
} catch (StoreException e) {
LOGGER.error("Record statemachine start error: {}, StateMachine: {}, XID: {}, Reason: {}",
e.getErrcode(), machineInstance.getStateMachine().getName(), machineInstance.getId(), e.getMessage(), e);
this.clearUp(context);
throw e;
}
}
} | @Test
public void testRecordStateMachineStarted() {
DbAndReportTcStateLogStore dbAndReportTcStateLogStore = new DbAndReportTcStateLogStore();
StateMachineInstanceImpl stateMachineInstance = new StateMachineInstanceImpl();
ProcessContextImpl context = new ProcessContextImpl();
context.setVariable(DomainConstants.VAR_NAME_STATEMACHINE_CONFIG, new DbStateMachineConfig());
Assertions.assertThrows(NullPointerException.class,
() -> dbAndReportTcStateLogStore.recordStateMachineStarted(stateMachineInstance, context));
} |
@SuppressWarnings("PMD.UnusedAssignment")
public void advance(BoundedLocalCache<K, V> cache, long currentTimeNanos) {
long previousTimeNanos = nanos;
nanos = currentTimeNanos;
// If wrapping then temporarily shift the clock for a positive comparison. We assume that the
// advancements never exceed a total running time of Long.MAX_VALUE nanoseconds (292 years)
// so that an overflow only occurs due to using an arbitrary origin time (System.nanoTime()).
if ((previousTimeNanos < 0) && (currentTimeNanos > 0)) {
previousTimeNanos += Long.MAX_VALUE;
currentTimeNanos += Long.MAX_VALUE;
}
try {
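      // Walk the wheels from finest to coarsest granularity; if no ticks elapsed at one level,
      // none elapsed at any coarser level either, so the loop can stop early.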
for (int i = 0; i < SHIFT.length; i++) {
long previousTicks = (previousTimeNanos >>> SHIFT[i]);
long currentTicks = (currentTimeNanos >>> SHIFT[i]);
long delta = (currentTicks - previousTicks);
if (delta <= 0L) {
break;
}
expire(cache, i, previousTicks, delta);
}
} catch (Throwable t) {
nanos = previousTimeNanos;
throw t;
}
} | @Test(dataProvider = "clock")
public void advance(long clock) {
when(cache.evictEntry(captor.capture(), any(), anyLong())).thenReturn(true);
timerWheel.nanos = clock;
timerWheel.schedule(new Timer(timerWheel.nanos + SPANS[0]));
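    // Advancing far past the scheduled time must expire the timer and evict its entry exactly once.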
timerWheel.advance(cache, clock + 13 * SPANS[0]);
verify(cache).evictEntry(any(), any(), anyLong());
} |
@Override
public List<ApolloAuditLogDTO> queryLogsByOpName(String opName, Date startDate, Date endDate,
int page, int size) {
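    // Without a time range, fall back to a plain lookup by operator name.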
if (startDate == null && endDate == null) {
return ApolloAuditUtil.logListToDTOList(logService.findByOpName(opName, page, size));
}
return ApolloAuditUtil.logListToDTOList(
logService.findByOpNameAndTime(opName, startDate, endDate, page, size));
} | @Test
public void testQueryLogsByOpName() {
final String opName = "query-op-name";
final Date startDate = new Date();
final Date endDate = new Date();
{
List<ApolloAuditLog> logList = MockBeanFactory.mockAuditLogListByLength(size);
Mockito.when(logService.findByOpNameAndTime(Mockito.eq(opName),
Mockito.eq(startDate), Mockito.eq(endDate), Mockito.eq(page), Mockito.eq(size)))
.thenReturn(logList);
}
List<ApolloAuditLogDTO> dtoList = api.queryLogsByOpName(opName, startDate, endDate, page, size);
Mockito.verify(logService, Mockito.times(1))
.findByOpNameAndTime(Mockito.eq(opName),
Mockito.eq(startDate), Mockito.eq(endDate), Mockito.eq(page), Mockito.eq(size));
assertEquals(size, dtoList.size());
} |
@Override
public Long sendSingleMail(String mail, Long userId, Integer userType,
String templateCode, Map<String, Object> templateParams) {
        // Validate that the mail template is valid
MailTemplateDO template = validateMailTemplate(templateCode);
        // Validate that the mail account is valid
MailAccountDO account = validateMailAccount(template.getAccountId());
        // Validate that the mail address exists
mail = validateMail(mail);
validateTemplateParams(template, templateParams);
        // Create the send log. If the template is disabled, the mail is not sent and only the log is recorded
Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus());
String title = mailTemplateService.formatMailTemplateContent(template.getTitle(), templateParams);
String content = mailTemplateService.formatMailTemplateContent(template.getContent(), templateParams);
Long sendLogId = mailLogService.createMailLog(userId, userType, mail,
account, template, content, templateParams, isSend);
        // Send an MQ message to perform the mail delivery asynchronously
if (isSend) {
mailProducer.sendMailSendMessage(sendLogId, mail, account.getId(),
template.getNickname(), title, content);
}
return sendLogId;
} | @Test
public void testSendSingleMail_successWhenMailTemplateEnable() {
        // Prepare parameters
String mail = randomEmail();
Long userId = randomLongId();
Integer userType = randomEle(UserTypeEnum.values()).getValue();
String templateCode = RandomUtils.randomString();
Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
.put("op", "login").build();
        // Mock the MailTemplateService methods
MailTemplateDO template = randomPojo(MailTemplateDO.class, o -> {
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setContent("验证码为{code}, 操作为{op}");
o.setParams(Lists.newArrayList("code", "op"));
});
when(mailTemplateService.getMailTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
String title = RandomUtils.randomString();
when(mailTemplateService.formatMailTemplateContent(eq(template.getTitle()), eq(templateParams)))
.thenReturn(title);
String content = RandomUtils.randomString();
when(mailTemplateService.formatMailTemplateContent(eq(template.getContent()), eq(templateParams)))
.thenReturn(content);
        // Mock the MailAccountService methods
MailAccountDO account = randomPojo(MailAccountDO.class);
when(mailAccountService.getMailAccountFromCache(eq(template.getAccountId()))).thenReturn(account);
        // Mock the MailLogService methods
Long mailLogId = randomLongId();
when(mailLogService.createMailLog(eq(userId), eq(userType), eq(mail),
eq(account), eq(template), eq(content), eq(templateParams), eq(true))).thenReturn(mailLogId);
        // Invoke
Long resultMailLogId = mailSendService.sendSingleMail(mail, userId, userType, templateCode, templateParams);
        // Assert
assertEquals(mailLogId, resultMailLogId);
        // Verify the invocation
verify(mailProducer).sendMailSendMessage(eq(mailLogId), eq(mail),
eq(account.getId()), eq(template.getNickname()), eq(title), eq(content));
} |
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale,
DistanceConfig distanceConfig) {
ObjectNode json = JsonNodeFactory.instance.objectNode();
if (ghResponse.hasErrors())
throw new IllegalStateException(
"If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError");
PointList waypoints = ghResponse.getBest().getWaypoints();
final ArrayNode routesJson = json.putArray("routes");
List<ResponsePath> paths = ghResponse.getAll();
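        // Emit one entry in the "routes" array per alternative path.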
for (int i = 0; i < paths.size(); i++) {
ResponsePath path = paths.get(i);
ObjectNode pathJson = routesJson.addObject();
putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig);
}
final ArrayNode waypointsJson = json.putArray("waypoints");
for (int i = 0; i < waypoints.size(); i++) {
ObjectNode waypointJson = waypointsJson.addObject();
// TODO get names
waypointJson.put("name", "");
putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson);
}
json.put("code", "Ok");
// TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h"
json.put("uuid", UUID.randomUUID().toString().replaceAll("-", ""));
return json;
} | @Test
public void basicTest() {
GHResponse rsp = hopper.route(new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile));
ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);
JsonNode route = json.get("routes").get(0);
double routeDistance = route.get("distance").asDouble();
assertTrue(routeDistance > 9000, "distance wasn't correct:" + routeDistance);
assertTrue(routeDistance < 9500, "distance wasn't correct:" + routeDistance);
double routeDuration = route.get("duration").asDouble();
assertTrue(routeDuration > 500, "duration wasn't correct:" + routeDuration);
assertTrue(routeDuration < 600, "duration wasn't correct:" + routeDuration);
assertEquals("en", route.get("voiceLocale").asText());
JsonNode leg = route.get("legs").get(0);
assertEquals(routeDistance, leg.get("distance").asDouble(), .000001);
JsonNode steps = leg.get("steps");
JsonNode step = steps.get(0);
JsonNode maneuver = step.get("maneuver");
// Intersection coordinates should be equal to maneuver coordinates
assertEquals(maneuver.get("location").get(0).asDouble(),
step.get("intersections").get(0).get("location").get(0).asDouble(), .00001);
assertEquals("depart", maneuver.get("type").asText());
assertEquals("straight", maneuver.get("modifier").asText());
assertEquals("la Callisa", step.get("name").asText());
double instructionDistance = step.get("distance").asDouble();
assertTrue(instructionDistance < routeDistance);
JsonNode voiceInstructions = step.get("voiceInstructions");
assertEquals(1, voiceInstructions.size());
JsonNode voiceInstruction = voiceInstructions.get(0);
assertTrue(voiceInstruction.get("distanceAlongGeometry").asDouble() <= instructionDistance);
assertEquals("turn sharp left onto la Callisa, then keep left", voiceInstruction.get("announcement").asText());
JsonNode bannerInstructions = step.get("bannerInstructions");
assertEquals(1, bannerInstructions.size());
JsonNode bannerInstruction = bannerInstructions.get(0).get("primary");
assertEquals("la Callisa", bannerInstruction.get("text").asText());
assertEquals("turn", bannerInstruction.get("type").asText());
assertEquals("sharp left", bannerInstruction.get("modifier").asText());
JsonNode bannerInstructionComponent = bannerInstruction.get("components").get(0);
assertEquals("la Callisa", bannerInstructionComponent.get("text").asText());
// Get the second last step (and the last banner/voice instruction)
step = steps.get(steps.size() - 2);
voiceInstructions = step.get("voiceInstructions");
assertEquals(1, voiceInstructions.size());
voiceInstruction = voiceInstructions.get(0);
assertTrue(voiceInstruction.get("distanceAlongGeometry").asDouble() < instructionDistance);
bannerInstructions = step.get("bannerInstructions");
assertEquals(1, bannerInstructions.size());
bannerInstruction = bannerInstructions.get(0).get("primary");
assertEquals("Arrive at destination", bannerInstruction.get("text").asText());
JsonNode waypointsJson = json.get("waypoints");
assertEquals(2, waypointsJson.size());
JsonNode waypointLoc = waypointsJson.get(0).get("location");
assertEquals(1.536198, waypointLoc.get(0).asDouble(), .001);
} |
@Override
public RetrievableStateHandle<T> addAndLock(String key, T state)
throws PossibleInconsistentStateException, Exception {
checkNotNull(key, "Key in ConfigMap.");
checkNotNull(state, "State.");
final RetrievableStateHandle<T> storeHandle = storage.store(state);
final byte[] serializedStoreHandle =
serializeOrDiscard(new StateHandleWithDeleteMarker<>(storeHandle));
// initialize flag to serve the failure case
boolean discardState = true;
try {
// a successful operation will result in the state not being discarded
discardState =
!updateConfigMap(
cm -> {
try {
return addEntry(cm, key, serializedStoreHandle);
} catch (Exception e) {
throw new CompletionException(e);
}
})
.get();
return storeHandle;
} catch (Exception ex) {
final Optional<PossibleInconsistentStateException> possibleInconsistentStateException =
ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class);
if (possibleInconsistentStateException.isPresent()) {
// it's unclear whether the state handle metadata was written to the ConfigMap -
// hence, we don't discard the data
discardState = false;
throw possibleInconsistentStateException.get();
}
throw ExceptionUtils.findThrowable(ex, AlreadyExistException.class)
.orElseThrow(() -> ex);
} finally {
if (discardState) {
storeHandle.discardState();
}
}
} | @Test
void testAddWithPossiblyInconsistentStateHandling() throws Exception {
new Context() {
{
runTest(
() -> {
leaderCallbackGrantLeadership();
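                            // Stub kube client whose ConfigMap updates always fail with a
                            // possibly-inconsistent outcome.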
final FlinkKubeClient anotherFlinkKubeClient =
createFlinkKubeClientBuilder()
.setCheckAndUpdateConfigMapFunction(
(configMapName, function) ->
FutureUtils.completedExceptionally(
new PossibleInconsistentStateException()))
.build();
final KubernetesStateHandleStore<
TestingLongStateHandleHelper.LongStateHandle>
store =
new KubernetesStateHandleStore<>(
anotherFlinkKubeClient,
LEADER_CONFIGMAP_NAME,
longStateStorage,
filter,
LOCK_IDENTITY);
try {
store.addAndLock(key, state);
fail("PossibleInconsistentStateException should have been thrown.");
} catch (PossibleInconsistentStateException ex) {
// PossibleInconsistentStateException is expected
}
assertThat(TestingLongStateHandleHelper.getGlobalStorageSize())
.isEqualTo(1);
assertThat(TestingLongStateHandleHelper.getGlobalDiscardCount())
.isEqualTo(0);
});
}
};
} |
public HttpApiV2ProxyRequestContext getRequestContext() {
return requestContext;
} | @Test
void deserialize_fromJsonString_iamAuthorizer() {
try {
HttpApiV2ProxyRequest req = LambdaContainerHandler.getObjectMapper().readValue(IAM_AUTHORIZER,
HttpApiV2ProxyRequest.class);
assertNotNull(req.getRequestContext().getAuthorizer());
assertFalse(req.getRequestContext().getAuthorizer().isJwt());
assertFalse(req.getRequestContext().getAuthorizer().isLambda());
assertTrue(req.getRequestContext().getAuthorizer().isIam());
assertEquals("AKIAIOSFODNN7EXAMPLE",
req.getRequestContext().getAuthorizer().getIamAuthorizer().getAccessKey());
assertEquals("123456789012", req.getRequestContext().getAuthorizer().getIamAuthorizer().getAccountId());
assertEquals("AIDACKCEVSQ6C2EXAMPLE",
req.getRequestContext().getAuthorizer().getIamAuthorizer().getCallerId());
assertNull(req.getRequestContext().getAuthorizer().getIamAuthorizer().getCognitoIdentity());
assertEquals("AIDACKCEVSQORGEXAMPLE",
req.getRequestContext().getAuthorizer().getIamAuthorizer().getPrincipalOrgId());
assertEquals("arn:aws:iam::111122223333:user/example-user",
req.getRequestContext().getAuthorizer().getIamAuthorizer().getUserArn());
assertEquals("AIDACOSFODNN7EXAMPLE2",
req.getRequestContext().getAuthorizer().getIamAuthorizer().getUserId());
} catch (JsonProcessingException e) {
e.printStackTrace();
fail("Exception while parsing request" + e.getMessage());
}
} |
public void recordFetchDelay(long fetchDelay) {
this.fetchDelay = fetchDelay;
} | @Test
public void testFetchEventTimeLagTracking() {
sourceMetrics.recordFetchDelay(5L);
assertGauge(metricListener, CURRENT_FETCH_EVENT_TIME_LAG, 5L);
} |
public static Description getDescriptionForScenario(Optional<String> fullFileName, int index, String description) {
String testName = fullFileName.isPresent() ? getScesimFileName(fullFileName.get()) : AbstractScenarioRunner.class.getSimpleName();
return Description.createTestDescription(testName,
String.format("#%d: %s", index, description));
} | @Test
public void getDescriptionForScenario() {
final Scenario scenario = scenarioRunnerDTOLocal.getScenarioWithIndices().get(2).getScesimData();
Description retrieved = AbstractScenarioRunner.getDescriptionForScenario(Optional.empty(), 1, scenario.getDescription());
commonVerifyDescriptionForScenario(retrieved, 1, scenario.getDescription(), AbstractScenarioRunner.class.getSimpleName());
retrieved = AbstractScenarioRunner.getDescriptionForScenario(Optional.of("src/test/Test.scesim"), 1, scenario.getDescription());
commonVerifyDescriptionForScenario(retrieved, 1, scenario.getDescription(), "Test");
} |
public List<String> findConsumerIdList(final String topic, final String group) {
String brokerAddr = this.findBrokerAddrByTopic(topic);
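        // Broker address unknown for this topic: refresh the route info from the name server and retry the lookup.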
if (null == brokerAddr) {
this.updateTopicRouteInfoFromNameServer(topic);
brokerAddr = this.findBrokerAddrByTopic(topic);
}
if (null != brokerAddr) {
try {
return this.mQClientAPIImpl.getConsumerIdListByGroup(brokerAddr, group, clientConfig.getMqClientApiTimeout());
} catch (Exception e) {
log.warn("getConsumerIdListByGroup exception, " + brokerAddr + " " + group, e);
}
}
return null;
} | @Test
public void testFindConsumerIdList() {
topicRouteTable.put(topic, createTopicRouteData());
brokerAddrTable.put(defaultBroker, createBrokerAddrMap());
consumerTable.put(group, createMQConsumerInner());
List<String> actual = mqClientInstance.findConsumerIdList(topic, group);
assertNotNull(actual);
assertEquals(0, actual.size());
} |
@Override
public void onProjectsRekeyed(Set<RekeyedProject> rekeyedProjects) {
checkNotNull(rekeyedProjects, "rekeyedProjects can't be null");
if (rekeyedProjects.isEmpty()) {
return;
}
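        // Fan out to every listener; safelyCallListener isolates failures so one listener cannot break the others.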
Arrays.stream(listeners)
.forEach(safelyCallListener(listener -> listener.onProjectsRekeyed(rekeyedProjects)));
} | @Test
public void onProjectsRekeyed_has_no_effect_if_set_is_empty() {
underTestNoListeners.onProjectsRekeyed(Collections.emptySet());
underTestWithListeners.onProjectsRekeyed(Collections.emptySet());
verifyNoInteractions(listener1, listener2, listener3);
} |
@Udf(description = "Converts a string representation of a date in the given format"
+ " into a DATE value.")
public Date parseDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.text.SimpleDateFormat.") final String formatPattern) {
if (formattedDate == null || formatPattern == null) {
return null;
}
try {
final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
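      // A DATE carries no time-of-day, so a parse result not aligned to a day boundary
      // means the pattern matched time fields as well.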
if (time % MILLIS_IN_DAY != 0) {
throw new KsqlFunctionException("Date format contains time field.");
}
return new Date(time);
} catch (final ExecutionException | RuntimeException | ParseException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldBeThreadSafeAndWorkWithManyDifferentFormatters() {
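    // Run many parallel parses, each with a distinct pattern, to exercise the formatter cache under contention.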
IntStream.range(0, 10_000)
.parallel()
.forEach(idx -> {
try {
final String sourceDate = "2021-12-01X" + idx;
final String pattern = "yyyy-MM-dd'X" + idx + "'";
final Date result = udf.parseDate(sourceDate, pattern);
assertThat(result.getTime(), is(1638316800000L));
} catch (final Exception e) {
fail(e.getMessage());
}
});
} |
@Override
public String getSessionId() {
return sessionID;
} | @Test
public void testGetConfigRequestWithChunkedFraming() {
log.info("Starting get-config async");
assertNotNull("Incorrect sessionId", session3.getSessionId());
try {
assertTrue("NETCONF get-config running command failed. ",
GET_REPLY_PATTERN.matcher(session3.getConfig(RUNNING, SAMPLE_REQUEST)).matches());
assertTrue("NETCONF get-config candidate command failed. ",
GET_REPLY_PATTERN.matcher(session3.getConfig(CANDIDATE, SAMPLE_REQUEST)).matches());
} catch (NetconfException e) {
e.printStackTrace();
fail("NETCONF get-config test failed: " + e.getMessage());
}
log.info("Finishing get-config async");
} |