focal_method | test_case |
---|---|
@Nonnull
public static <T> Traverser<T> traverseIterable(@Nonnull Iterable<? extends T> iterable) {
return traverseIterator(iterable.iterator());
} | @Test(expected = NullPointerException.class)
public void when_traverseIterableWithNull_then_failure() {
Traverser<Integer> trav = traverseIterable(asList(1, null));
trav.next();
trav.next();
} |
@Override
public void gauge(final String key, final Object value) {
this.metrics.gauge(this.threadContext, this.getSymbol(key), Rubyfier.deep(this.threadContext.getRuntime(), value));
} | @Test
public void testGauge() {
doTestGauge("test");
} |
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testReadCommittedAbortMarkerWithNoData() {
buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(),
new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
long producerId = 1L;
abortTransaction(buffer, producerId, 5L);
appendTransactionalRecords(buffer, producerId, 6L,
new SimpleRecord("6".getBytes(), null),
new SimpleRecord("7".getBytes(), null),
new SimpleRecord("8".getBytes(), null));
commitTransaction(buffer, producerId, 9L);
buffer.flip();
// send the fetch
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
// prepare the response. the aborted transactions begin at offsets which are no longer in the log
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(producerId).setFirstOffset(0L));
client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer),
abortedTransactions, Errors.NONE, 100L, 100L, 0));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetchRecords();
assertTrue(allFetchedRecords.containsKey(tp0));
List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
assertEquals(3, fetchedRecords.size());
assertEquals(Arrays.asList(6L, 7L, 8L), collectRecordOffsets(fetchedRecords));
} |
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// it must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
} | @Test
public void testBatchPipelineFailsIfException() throws Exception {
Pipeline p = TestPipeline.create(options);
PCollection<Integer> pc = p.apply(Create.of(1, 2, 3));
PAssert.that(pc).containsInAnyOrder(1, 2, 3);
DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
when(mockJob.getState()).thenReturn(State.RUNNING);
when(mockJob.getProjectId()).thenReturn("test-project");
when(mockJob.getJobId()).thenReturn("test-job");
when(mockJob.waitUntilFinish(any(Duration.class), any(JobMessagesHandler.class)))
.thenAnswer(
invocation -> {
JobMessage message = new JobMessage();
message.setMessageText("FooException");
message.setTime(TimeUtil.toCloudTime(Instant.now()));
message.setMessageImportance("JOB_MESSAGE_ERROR");
((JobMessagesHandler) invocation.getArguments()[1]).process(Arrays.asList(message));
return State.CANCELLED;
});
DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);
when(mockClient.getJobMetrics(anyString()))
.thenReturn(generateMockMetricResponse(false /* success */, true /* tentative */));
TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
try {
runner.run(p, mockRunner);
} catch (AssertionError expected) {
assertThat(expected.getMessage(), containsString("FooException"));
verify(mockJob, never()).cancel();
return;
}
// Note that fail throws an AssertionError which is why it is placed out here
// instead of inside the try-catch block.
fail("AssertionError expected");
} |
@Override
public Session start(SessionContext context) {
throw new UnsupportedOperationException("Sessions are disabled.");
} | @Test(expected = UnsupportedOperationException.class)
public void testStart() {
mgr.start(null);
} |
public void expand(String key, long value, RangeHandler rangeHandler, EdgeHandler edgeHandler) {
if (value < lowerBound || value > upperBound) {
// Value outside bounds -> expand to nothing.
return;
}
int maxLevels = value > 0 ? maxPositiveLevels : maxNegativeLevels;
int sign = value > 0 ? 1 : -1;
// Append key to feature string builder
StringBuilder builder = new StringBuilder(128);
builder.append(key).append('=');
long levelSize = arity;
long edgeInterval = (value / arity) * arity;
edgeHandler.handleEdge(createEdgeFeatureHash(builder, edgeInterval), (int) Math.abs(value - edgeInterval));
for (int i = 0; i < maxLevels; ++i) {
long start = (value / levelSize) * levelSize;
if (Math.abs(start) + levelSize - 1 < 0) { // overflow
break;
}
rangeHandler.handleRange(createRangeFeatureHash(builder, start, start + sign * (levelSize - 1)));
levelSize *= arity;
if (levelSize <= 0 && levelSize != Long.MIN_VALUE) { //overflow
break;
}
}
} | @Test
void requireThatMinRangeIsExpandedWithArity8() {
PredicateRangeTermExpander expander = new PredicateRangeTermExpander(8);
expander.expand("key", -9223372036854775808L, range -> fail(),
(edge, value) -> {
assertEquals(PredicateHash.hash64("key=-9223372036854775808"), edge);
assertEquals(0, value);
});
} |
public static SqlToJavaTypeConverter sqlToJavaConverter() {
return SQL_TO_JAVA_CONVERTER;
} | @Test
public void shouldGetJavaTypesForAllSqlTypes() {
for (final Entry<SqlBaseType, Class<?>> entry : SQL_TO_JAVA.entrySet()) {
final SqlBaseType sqlType = entry.getKey();
final Class<?> javaType = entry.getValue();
final Class<?> result = SchemaConverters.sqlToJavaConverter().toJavaType(sqlType);
assertThat(result, equalTo(javaType));
}
} |
@Override
public boolean databaseExists(SnowflakeIdentifier database) {
Preconditions.checkArgument(
database.type() == SnowflakeIdentifier.Type.DATABASE,
"databaseExists requires a DATABASE identifier, got '%s'",
database);
final String finalQuery = "SHOW SCHEMAS IN DATABASE IDENTIFIER(?) LIMIT 1";
List<SnowflakeIdentifier> schemas;
try {
schemas =
connectionPool.run(
conn ->
queryHarness.query(
conn, finalQuery, SCHEMA_RESULT_SET_HANDLER, database.databaseName()));
} catch (SQLException e) {
if (DATABASE_NOT_FOUND_ERROR_CODES.contains(e.getErrorCode())) {
return false;
}
throw new UncheckedSQLException(e, "Failed to check if database '%s' exists", database);
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(
e, "Interrupted while checking if database '%s' exists", database);
}
return !schemas.isEmpty();
} | @SuppressWarnings("unchecked")
@Test
public void testDatabaseExists() throws SQLException {
when(mockResultSet.next()).thenReturn(true).thenReturn(false);
when(mockResultSet.getString("database_name")).thenReturn("DB_1");
when(mockResultSet.getString("name")).thenReturn("SCHEMA_1");
assertThat(snowflakeClient.databaseExists(SnowflakeIdentifier.ofDatabase("DB_1"))).isTrue();
verify(mockQueryHarness)
.query(
eq(mockConnection),
eq("SHOW SCHEMAS IN DATABASE IDENTIFIER(?) LIMIT 1"),
any(JdbcSnowflakeClient.ResultSetParser.class),
eq("DB_1"));
} |
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyUnexpectedItemFailure() {
expectFailureWhenTestingThat(asList(1, 2, 3)).containsExactly(1, 2);
assertFailureValue("unexpected (1)", "3");
} |
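A minimal sketch of the null-varargs corner case the focal containsExactly guards against: a null varargs array is normalized to a single null element. Truth's assertThat entry point is assumed here (it is not shown in the pair); only containsExactly comes from the code above.

import static com.google.common.truth.Truth.assertThat;
import static java.util.Arrays.asList;

class NullVarargsSketch {
  void nullVarargsMeansSingleNullElement() {
    // (varargs == null) makes the expected contents a list holding one null,
    // so an iterable with exactly one null element satisfies the assertion.
    assertThat(asList((Object) null)).containsExactly((Object[]) null);
  }
}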
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
} | @Test
void testGetCollection1() {
assertThrows(IndexOutOfBoundsException.class, () -> {
CollectionUtils.get(Collections.emptySet(), 0);
});
} |
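A short usage sketch of the dispatching get(...) above; the class name CollectionUtils is taken from the accompanying test, and only JDK types appear. It shows the List branch, the reflective Array.get fallback for a primitive array, and the Map branch that walks the entry set.

import java.util.Arrays;
import java.util.List;
import java.util.Map;

class GetDispatchSketch {
  static void demo() {
    List<String> letters = Arrays.asList("a", "b", "c");
    Object fromList = CollectionUtils.get(letters, 1);           // "b" via the List branch
    Object fromArray = CollectionUtils.get(new int[] {7, 8}, 0); // 7 via the reflective Array.get fallback
    Object fromMap = CollectionUtils.get(Map.of("k", "v"), 0);   // the single Map.Entry via the entry-set iterator
  }
}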
public Future<KafkaCluster> prepareKafkaCluster(
Kafka kafkaCr,
List<KafkaNodePool> nodePools,
Map<String, Storage> oldStorage,
Map<String, List<String>> currentPods,
KafkaVersionChange versionChange,
KafkaStatus kafkaStatus,
boolean tryToFixProblems) {
return createKafkaCluster(kafkaCr, nodePools, oldStorage, currentPods, versionChange)
.compose(kafka -> brokerRemovalCheck(kafkaCr, kafka))
.compose(kafka -> {
if (checkFailed() && tryToFixProblems) {
// We have a failure, and should try to fix issues
// Once we fix it, we call this method again, but this time with tryToFixProblems set to false
return revertScaleDown(kafka, kafkaCr, nodePools)
.compose(kafkaAndNodePools -> revertRoleChange(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs()))
.compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, currentPods, versionChange, kafkaStatus, false));
} else if (checkFailed()) {
// We have a failure, but we should not try to fix it
List<String> errors = new ArrayList<>();
if (scaleDownCheckFailed) {
errors.add("Cannot scale-down Kafka brokers " + kafka.removedNodes() + " because they have assigned partition-replicas.");
}
if (usedToBeBrokersCheckFailed) {
errors.add("Cannot remove the broker role from nodes " + kafka.usedToBeBrokerNodes() + " because they have assigned partition-replicas.");
}
return Future.failedFuture(new InvalidResourceException("Following errors were found when processing the Kafka custom resource: " + errors));
} else {
// If everything succeeded, we return the KafkaCluster object
// If any warning conditions exist from the reverted changes, we add them to the status
if (!warningConditions.isEmpty()) {
kafkaStatus.addConditions(warningConditions);
}
return Future.succeededFuture(kafka);
}
});
} | @Test
public void testExistingClusterWithKRaft(VertxTestContext context) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
KafkaStatus kafkaStatus = new KafkaStatus();
KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier);
Checkpoint async = context.checkpoint();
creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true)
.onComplete(context.succeeding(kc -> context.verify(() -> {
// Kafka cluster is created
assertThat(kc, is(notNullValue()));
assertThat(kc.nodes().size(), is(9));
assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(1000, 1001, 1002, 2000, 2001, 2002, 3000, 3001, 3002)));
assertThat(kc.removedNodes(), is(Set.of()));
// Check the status conditions
assertThat(kafkaStatus.getConditions(), is(nullValue()));
// No scale-down => scale-down check is not done
verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any());
async.flag();
})));
} |
@ScalarFunction(visibility = HIDDEN)
@SqlType("array(unknown)")
public static Block arrayConstructor()
{
BlockBuilder blockBuilder = new ArrayType(UNKNOWN).createBlockBuilder(null, 0);
return blockBuilder.build();
} | @Test
public void testArrayConstructor()
{
tryEvaluateWithAll("array[" + Joiner.on(", ").join(nCopies(254, "rand()")) + "]", new ArrayType(DOUBLE));
assertNotSupported(
"array[" + Joiner.on(", ").join(nCopies(255, "rand()")) + "]",
"Too many arguments for array constructor");
} |
@Override
public void writeTo(MysqlSerializer serializer) {
// the negotiated capability determines which optional fields are written below
MysqlCapability capability = serializer.getCapability();
serializer.writeInt1(PACKET_OK_INDICATOR);
serializer.writeVInt(affectedRows);
serializer.writeVInt(LAST_INSERT_ID);
if (capability.isProtocol41()) {
serializer.writeInt2(serverStatus);
serializer.writeInt2(warningRows);
} else if (capability.isTransactions()) {
serializer.writeInt2(serverStatus);
}
if (capability.isSessionTrack()) {
serializer.writeLenEncodedString(infoMessage);
// TODO(zhaochun): STATUS_FLAGS
// if ((STATUS_FLAGS & MysqlStatusFlag.SERVER_SESSION_STATE_CHANGED) != 0) {
// }
} else {
if (!Strings.isNullOrEmpty(infoMessage)) {
// NOTE: the datasheet specifies an EOF string here, but MySQL's code uses a length-encoded string
serializer.writeLenEncodedString(infoMessage);
}
}
} | @Test
public void testWrite() {
MysqlOkPacket packet = new MysqlOkPacket(new QueryState());
MysqlSerializer serializer = MysqlSerializer.newInstance(capability);
packet.writeTo(serializer);
ByteBuffer buffer = serializer.toByteBuffer();
// assert OK packet indicator 0x00
Assert.assertEquals(0x00, MysqlProto.readInt1(buffer));
// assert affect rows vint: 0
Assert.assertEquals(0x00, MysqlProto.readVInt(buffer));
// assert last insert id, vint: 0
Assert.assertEquals(0x00, MysqlProto.readVInt(buffer));
// assert status flags, int2: 0
Assert.assertEquals(0x00, MysqlProto.readInt2(buffer));
// assert warnings, int2: 0
Assert.assertEquals(0x00, MysqlProto.readInt2(buffer));
// assert info, eof string: "OK"
// Assert.assertEquals("OK", new String(MysqlProto.readEofString(buffer)));
Assert.assertEquals(0, buffer.remaining());
} |
public void log(String remoteAddress, ContainerRequest jerseyRequest, ContainerResponse jettyResponse) {
WebsocketEvent event = new WebsocketEvent(remoteAddress, jerseyRequest, jettyResponse);
if (getFilterChainDecision(event) == FilterReply.DENY) {
return;
}
aai.appendLoopOnAppenders(event);
} | @Test
void testLogLineWithHeaders() throws InterruptedException {
WebSocketSessionContext sessionContext = mock(WebSocketSessionContext.class);
ListAppender<WebsocketEvent> listAppender = new ListAppender<>();
WebsocketRequestLoggerFactory requestLoggerFactory = new WebsocketRequestLoggerFactory();
requestLoggerFactory.appenders = List.of(new ListAppenderFactory<>(listAppender));
WebsocketRequestLog requestLog = requestLoggerFactory.build("test-logger");
ContainerRequest request = new ContainerRequest(null, URI.create("/v1/test"), "GET",
new WebSocketSecurityContext(new ContextPrincipal(sessionContext)), new MapPropertiesDelegate(new HashMap<>()),
null);
request.header(HttpHeaders.USER_AGENT, "SmertZeSmert");
request.header("Referer", "https://moxie.org");
ContainerResponse response = new ContainerResponse(request, Response.ok("My response body").build());
requestLog.log("123.456.789.123", request, response);
listAppender.waitForListSize(1);
assertThat(listAppender.list.size()).isEqualTo(1);
String loggedLine = new String(listAppender.outputStream.toByteArray());
assertThat(loggedLine).matches(
"123\\.456\\.789\\.123 \\- \\- \\[[0-9]{2}\\/[a-zA-Z]{3}\\/[0-9]{4}:[0-9]{2}:[0-9]{2}:[0-9]{2} (\\-|\\+)[0-9]{4}\\] \"GET \\/v1\\/test WS\" 200 \\- \"https://moxie.org\" \"SmertZeSmert\"\n");
System.out.println(listAppender.list.get(0));
System.out.println(new String(listAppender.outputStream.toByteArray()));
} |
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
if (message == null) {
return null;
}
long messageFormat = 0;
Header header = null;
Properties properties = null;
Map<Symbol, Object> daMap = null;
Map<Symbol, Object> maMap = null;
Map<String,Object> apMap = null;
Map<Object, Object> footerMap = null;
Section body = convertBody(message);
if (message.isPersistent()) {
if (header == null) {
header = new Header();
}
header.setDurable(true);
}
byte priority = message.getPriority();
if (priority != Message.DEFAULT_PRIORITY) {
if (header == null) {
header = new Header();
}
header.setPriority(UnsignedByte.valueOf(priority));
}
String type = message.getType();
if (type != null) {
if (properties == null) {
properties = new Properties();
}
properties.setSubject(type);
}
MessageId messageId = message.getMessageId();
if (messageId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setMessageId(getOriginalMessageId(message));
}
ActiveMQDestination destination = message.getDestination();
if (destination != null) {
if (properties == null) {
properties = new Properties();
}
properties.setTo(destination.getQualifiedName());
if (maMap == null) {
maMap = new HashMap<>();
}
maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
}
ActiveMQDestination replyTo = message.getReplyTo();
if (replyTo != null) {
if (properties == null) {
properties = new Properties();
}
properties.setReplyTo(replyTo.getQualifiedName());
if (maMap == null) {
maMap = new HashMap<>();
}
maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
}
String correlationId = message.getCorrelationId();
if (correlationId != null) {
if (properties == null) {
properties = new Properties();
}
try {
properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
} catch (AmqpProtocolException e) {
properties.setCorrelationId(correlationId);
}
}
long expiration = message.getExpiration();
if (expiration != 0) {
long ttl = expiration - System.currentTimeMillis();
if (ttl < 0) {
ttl = 1;
}
if (header == null) {
header = new Header();
}
header.setTtl(new UnsignedInteger((int) ttl));
if (properties == null) {
properties = new Properties();
}
properties.setAbsoluteExpiryTime(new Date(expiration));
}
long timeStamp = message.getTimestamp();
if (timeStamp != 0) {
if (properties == null) {
properties = new Properties();
}
properties.setCreationTime(new Date(timeStamp));
}
// JMSX Message Properties
int deliveryCount = message.getRedeliveryCounter();
if (deliveryCount > 0) {
if (header == null) {
header = new Header();
}
header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
}
String userId = message.getUserID();
if (userId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
}
String groupId = message.getGroupID();
if (groupId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setGroupId(groupId);
}
int groupSequence = message.getGroupSequence();
if (groupSequence > 0) {
if (properties == null) {
properties = new Properties();
}
properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
}
final Map<String, Object> entries;
try {
entries = message.getProperties();
} catch (IOException e) {
throw JMSExceptionSupport.create(e);
}
for (Map.Entry<String, Object> entry : entries.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
if (key.startsWith(JMS_AMQP_PREFIX)) {
if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
// skip transformer appended properties
continue;
} else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
// skip transformer appended properties
continue;
} else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
continue;
} else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
if (header == null) {
header = new Header();
}
continue;
} else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
continue;
} else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (maMap == null) {
maMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
maMap.put(Symbol.valueOf(name), value);
continue;
} else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
if (header == null) {
header = new Header();
}
header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
continue;
} else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
continue;
} else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
continue;
} else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
continue;
} else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (daMap == null) {
daMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
daMap.put(Symbol.valueOf(name), value);
continue;
} else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (footerMap == null) {
footerMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
footerMap.put(Symbol.valueOf(name), value);
continue;
}
} else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
// strip off the scheduled message properties
continue;
}
// The property didn't map into any other slot so we store it in the
// Application Properties section of the message.
if (apMap == null) {
apMap = new HashMap<>();
}
apMap.put(key, value);
int messageType = message.getDataStructureType();
if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
// Type of command to recognize advisory message
Object data = message.getDataStructure();
if(data != null) {
apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
}
}
}
final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
encoder.setByteBuffer(buffer);
if (header != null) {
encoder.writeObject(header);
}
if (daMap != null) {
encoder.writeObject(new DeliveryAnnotations(daMap));
}
if (maMap != null) {
encoder.writeObject(new MessageAnnotations(maMap));
}
if (properties != null) {
encoder.writeObject(properties);
}
if (apMap != null) {
encoder.writeObject(new ApplicationProperties(apMap));
}
if (body != null) {
encoder.writeObject(body);
}
if (footerMap != null) {
encoder.writeObject(new Footer(footerMap));
}
return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
} | @Test
public void testConvertUncompressedBytesMessageToAmqpMessageWithDataBody() throws Exception {
byte[] expectedPayload = new byte[] { 8, 16, 24, 32 };
ActiveMQBytesMessage outbound = createBytesMessage();
outbound.writeBytes(expectedPayload);
outbound.storeContent();
outbound.onSend();
JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
EncodedMessage encoded = transformer.transform(outbound);
assertNotNull(encoded);
Message amqp = encoded.decode();
assertNotNull(amqp.getBody());
assertTrue(amqp.getBody() instanceof Data);
assertTrue(((Data) amqp.getBody()).getValue() instanceof Binary);
assertEquals(4, ((Data) amqp.getBody()).getValue().getLength());
Binary amqpData = ((Data) amqp.getBody()).getValue();
Binary inputData = new Binary(expectedPayload);
assertTrue(inputData.equals(amqpData));
} |
public CompletionStage<Void> migrate(MigrationSet set) {
InterProcessLock lock = new InterProcessSemaphoreMutex(client.unwrap(), ZKPaths.makePath(lockPath, set.id()));
CompletionStage<Void> lockStage = lockAsync(lock, lockMax.toMillis(), TimeUnit.MILLISECONDS, executor);
return lockStage.thenCompose(__ -> runMigrationInLock(lock, set));
} | @Test
public void testConcurrency2() throws Exception {
CuratorOp op1 = client.transactionOp().create().forPath("/test");
CuratorOp op2 = client.transactionOp().create().forPath("/test/bar", "first".getBytes());
Migration migration = () -> Arrays.asList(op1, op2);
MigrationSet migrationSet = MigrationSet.build("1", Collections.singletonList(migration));
CountDownLatch latch = new CountDownLatch(1);
filterLatch.set(latch);
CompletionStage<Void> first = manager.migrate(migrationSet);
assertTrue(timing.awaitLatch(filterIsSetLatch));
CompletionStage<Void> second = manager.migrate(migrationSet);
try {
second.toCompletableFuture().get(timing.forSleepingABit().milliseconds(), TimeUnit.MILLISECONDS);
fail("Should throw");
} catch (Throwable e) {
assertTrue(
Throwables.getRootCause(e) instanceof TimeoutException,
"Should throw TimeoutException, was: "
+ Throwables.getStackTraceAsString(Throwables.getRootCause(e)));
}
latch.countDown();
complete(first);
assertArrayEquals(client.unwrap().getData().forPath("/test/bar"), "first".getBytes());
complete(second);
assertEquals(manager.debugCount.get(), 1);
} |
public List<CounterRequest> getOrderedRequests() {
final List<CounterRequest> requestList = getRequests();
if (requestList.size() > 1) {
requestList.sort(COUNTER_REQUEST_COMPARATOR);
}
return requestList;
} | @Test
public void testGetOrderedRequests() {
counter.clear();
counter.addRequest("test a", 0, 0, 0, false, 1000);
counter.addRequest("test b", 1000, 500, 500, false, 1000); // supérieur
counter.addRequest("test c", 1000, 500, 500, false, 1000); // égal
counter.addRequest("test d", 100, 50, 50, false, 1000); // inférieur
final List<CounterRequest> requests = counter.getOrderedRequests();
assertEquals("requests size", 4, requests.size());
} |
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
} | @Test
public void testMergeAllowSystemChangesInternalMode() throws JsonProcessingException {
for (InternalParamMode mode : Collections.singletonList(InternalParamMode.RESERVED)) {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
String.format(
"{'tomerge': {'type': 'STRING','value': 'hello', 'internal_mode': '%s'}}",
mode.toString()));
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap("{'tomerge': {'type': 'STRING', 'value': 'goodbye'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, systemMergeContext);
}
} |
@DELETE
@Path("/{connector}")
@Operation(summary = "Delete the specified connector")
public void destroyConnector(final @PathParam("connector") String connector,
final @Context HttpHeaders headers,
final @Parameter(hidden = true) @QueryParam("forward") Boolean forward) throws Throwable {
FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
herder.deleteConnectorConfig(connector, cb);
requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", headers, null, new TypeReference<Herder.Created<ConnectorInfo>>() { }, forward);
} | @Test
public void testDeleteConnectorNotFound() {
final ArgumentCaptor<Callback<Herder.Created<ConnectorInfo>>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackException(cb, new NotFoundException("not found"))
.when(herder).deleteConnectorConfig(eq(CONNECTOR_NAME), cb.capture());
assertThrows(NotFoundException.class, () -> connectorsResource.destroyConnector(CONNECTOR_NAME, NULL_HEADERS, FORWARD));
} |
private void urlWithParamIfGet() {
if (Method.GET.equals(method) && false == this.isRest && this.redirectCount <= 0) {
UrlQuery query = this.url.getQuery();
if (null == query) {
query = new UrlQuery();
this.url.setQuery(query);
}
// Prefer parameters supplied via the body; fall back to the form if there is no body
if (null != this.body) {
query.parse(StrUtil.str(this.body.readBytes(), this.charset), this.charset);
} else {
query.addAll(this.form);
}
}
} | @Test
@Disabled
public void urlWithParamIfGetTest(){
final UrlBuilder urlBuilder = new UrlBuilder();
urlBuilder.setScheme("https").setHost("hutool.cn");
final HttpRequest httpRequest = new HttpRequest(urlBuilder);
httpRequest.setMethod(Method.GET).execute();
} |
@Override
public void configure(JettyWebSocketServletFactory factory) {
factory.setCreator(this);
factory.setMaxBinaryMessageSize(configuration.getMaxBinaryMessageSize());
factory.setMaxTextMessageSize(configuration.getMaxTextMessageSize());
} | @Test
void testConfigure() {
JettyWebSocketServletFactory servletFactory = mock(JettyWebSocketServletFactory.class);
when(environment.jersey()).thenReturn(jerseyEnvironment);
WebSocketResourceProviderFactory<Account> factory = new WebSocketResourceProviderFactory<>(environment,
Account.class,
mock(WebSocketConfiguration.class),
REMOTE_ADDRESS_PROPERTY_NAME);
factory.configure(servletFactory);
verify(servletFactory).setCreator(eq(factory));
} |
public static boolean equivalent(
Expression left, Expression right, Types.StructType struct, boolean caseSensitive) {
return Binder.bind(struct, Expressions.rewriteNot(left), caseSensitive)
.isEquivalentTo(Binder.bind(struct, Expressions.rewriteNot(right), caseSensitive));
} | @Test
public void testAndEquivalence() {
assertThat(
ExpressionUtil.equivalent(
Expressions.and(
Expressions.lessThan("id", 34), Expressions.greaterThanOrEqual("id", 20)),
Expressions.and(
Expressions.greaterThan("id", 19L), Expressions.lessThanOrEqual("id", 33L)),
STRUCT,
true))
.as("Should detect and equivalence in any order")
.isTrue();
} |
@Override
public Capabilities getCapabilitiesFromResponse(String responseBody) {
return com.thoughtworks.go.plugin.access.configrepo.v2.models.Capabilities.fromJSON(responseBody).toCapabilities();
} | @Test
public void shouldReturnAllFalseCapabilities() {
assertEquals(new Capabilities(true, true, false, false), handler.getCapabilitiesFromResponse("{\"supports_pipeline_export\":\"true\",\"supports_parse_content\":\"true\"}"));
} |
public String[] getFileTypeDisplayNames( Locale locale ) {
return new String[] { "Transformations", "XML" };
} | @Test
public void testGetFileTypeDisplayNames() throws Exception {
String[] names = transFileListener.getFileTypeDisplayNames( null );
assertNotNull( names );
assertEquals( 2, names.length );
assertEquals( "Transformations", names[0] );
assertEquals( "XML", names[1] );
} |
@Override
public ResourceModel processResourceModel(ResourceModel model, Configuration config) {
// Create new resource model.
final ResourceModel.Builder resourceModelBuilder = new ResourceModel.Builder(false);
for (final Resource resource : model.getResources()) {
for (Class<?> handlerClass : resource.getHandlerClasses()) {
final String packageName = handlerClass.getPackage().getName();
final Optional<String> packagePrefix = packagePrefixes.entrySet().stream()
.filter(entry -> packageName.startsWith(entry.getKey()))
.sorted((o1, o2) -> -o1.getKey().compareTo(o2.getKey()))
.map(Map.Entry::getValue)
.findFirst();
if (packagePrefix.isPresent()) {
final String prefixedPath = prefixPath(packagePrefix.get(), resource.getPath());
final Resource newResource = Resource.builder(resource)
.path(prefixedPath)
.build();
resourceModelBuilder.addResource(newResource);
} else {
resourceModelBuilder.addResource(resource);
}
}
}
return resourceModelBuilder.build();
} | @Test
public void processResourceModelDoesNotAddPrefixToResourceClassInOtherPackage() throws Exception {
final ImmutableMap<String, String> packagePrefixes = ImmutableMap.of("org.example", "/test/prefix");
when(configuration.isCloud()).thenReturn(false);
final PrefixAddingModelProcessor modelProcessor = new PrefixAddingModelProcessor(packagePrefixes);
final ResourceModel originalResourceModel = new ResourceModel.Builder(false)
.addResource(Resource.from(TestResource.class)).build();
final ResourceModel resourceModel = modelProcessor.processResourceModel(originalResourceModel, new ResourceConfig());
assertThat(resourceModel.getResources()).hasSize(1);
final Resource resource = resourceModel.getResources().get(0);
assertThat(resource.getPath()).isEqualTo("/foobar/{test}");
} |
@Override
public Set<Algorithm> getKeys(final Path file, final LoginCallback prompt) throws BackgroundException {
final Path container = containerService.getContainer(file);
final Set<Algorithm> keys = super.getKeys(container, prompt);
if(container.isRoot()) {
return keys;
}
try {
final AWSKMS client = this.client(container);
try {
final Map<String, String> aliases = new HashMap<>();
for(AliasListEntry entry : client.listAliases().getAliases()) {
aliases.put(entry.getTargetKeyId(), entry.getAliasName());
}
for(KeyListEntry entry : client.listKeys().getKeys()) {
keys.add(new AliasedAlgorithm(entry, aliases.get(entry.getKeyId())));
}
}
catch(AmazonClientException e) {
throw new AmazonServiceExceptionMappingService().map("Cannot read AWS KMS configuration", e);
}
finally {
client.shutdown();
}
}
catch(AccessDeniedException e) {
log.warn(String.format("Ignore failure reading keys from KMS. %s", e.getMessage()));
keys.add(SSE_KMS_DEFAULT);
}
return keys;
} | @Test
public void testGetKeys_eu_west_1() throws Exception {
final KMSEncryptionFeature kms = new KMSEncryptionFeature(session, new S3LocationFeature(session), new S3AccessControlListFeature(session), new DisabledX509TrustManager(), new DefaultX509KeyManager());
assertFalse(kms.getKeys(new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory)), new DisabledLoginCallback()).isEmpty());
} |
static String headerLine(CSVFormat csvFormat) {
return String.join(String.valueOf(csvFormat.getDelimiter()), csvFormat.getHeader());
} | @Test
public void givenNullString_parsesNullCells() {
CSVFormat csvFormat = csvFormat().withNullString("🐼");
PCollection<String> input =
pipeline.apply(Create.of(headerLine(csvFormat), "a,1,🐼", "b,🐼,2.2", "🐼,3,3.3"));
CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
CsvIOParseResult<List<String>> result = input.apply(underTest);
PAssert.that(result.getOutput())
.containsInAnyOrder(
Arrays.asList(
Arrays.asList("a", "1", null),
Arrays.asList("b", null, "2.2"),
Arrays.asList(null, "3", "3.3")));
PAssert.that(result.getErrors()).empty();
pipeline.run();
} |
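A brief usage sketch for the headerLine helper above, assuming it is statically imported; CSVFormat comes from Apache Commons CSV as in the pair, and withHeader/getDelimiter are the calls the focal method relies on.

import org.apache.commons.csv.CSVFormat;

class HeaderLineSketch {
  static void demo() {
    // withHeader sets the column names; headerLine joins them with the format's delimiter.
    CSVFormat format = CSVFormat.DEFAULT.withHeader("name", "count", "ratio");
    String line = headerLine(format); // "name,count,ratio" with the default ',' delimiter
  }
}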
@Override
public double logp(int k) {
if (k < 0 || k > n) {
return Double.NEGATIVE_INFINITY;
} else {
return lfactorial(n) - lfactorial(k)
- lfactorial(n - k) + k * log(p) + (n - k) * log(1.0 - p);
}
} | @Test
public void testLogP() {
System.out.println("logP");
BinomialDistribution instance = new BinomialDistribution(100, 0.3);
instance.rand();
assertEquals(Math.log(3.234477e-16), instance.logp(0), 1E-5);
assertEquals(Math.log(1.386204e-14), instance.logp(1), 1E-5);
assertEquals(Math.log(1.170418e-06), instance.logp(10), 1E-5);
assertEquals(Math.log(0.007575645), instance.logp(20), 1E-5);
assertEquals(Math.log(0.08678386), instance.logp(30), 1E-5);
assertEquals(Math.log(5.153775e-53), instance.logp(100), 1E-5);
} |
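The focal logp above evaluates log P(X = k) for X ~ Binomial(n, p) as log n! - log k! - log (n-k)! + k*log(p) + (n-k)*log(1-p). A self-contained sketch with a naive log-factorial (the real lfactorial helper is assumed to be equivalent):

class BinomialLogPmfSketch {
  // Naive log-factorial: sum of log(i) for i = 2..m.
  static double logFactorial(int m) {
    double s = 0.0;
    for (int i = 2; i <= m; i++) {
      s += Math.log(i);
    }
    return s;
  }

  // log P(X = k) for X ~ Binomial(n, p); mirrors the focal method's formula.
  static double logp(int n, int k, double p) {
    if (k < 0 || k > n) {
      return Double.NEGATIVE_INFINITY;
    }
    return logFactorial(n) - logFactorial(k) - logFactorial(n - k)
        + k * Math.log(p) + (n - k) * Math.log(1.0 - p);
  }
}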
public Map<String, Uuid> topicNameToIdView() {
return new TranslatedValueMapView<>(topicsByName, TopicImage::id);
} | @Test
public void testTopicNameToIdView() {
Map<String, Uuid> map = IMAGE1.topicNameToIdView();
assertTrue(map.containsKey("foo"));
assertEquals(FOO_UUID, map.get("foo"));
assertTrue(map.containsKey("bar"));
assertEquals(BAR_UUID, map.get("bar"));
assertFalse(map.containsKey("baz"));
assertNull(map.get("baz"));
HashSet<Uuid> uuids = new HashSet<>();
map.values().iterator().forEachRemaining(uuids::add);
HashSet<Uuid> expectedUuids = new HashSet<>(Arrays.asList(
Uuid.fromString("ThIaNwRnSM2Nt9Mx1v0RvA"),
Uuid.fromString("f62ptyETTjet8SL5ZeREiw")));
assertEquals(expectedUuids, uuids);
assertThrows(UnsupportedOperationException.class, () -> map.remove("foo"));
assertThrows(UnsupportedOperationException.class, () -> map.put("bar", FOO_UUID));
} |
private List<Class<?>> scanForClassesInPackage(String packageName, Predicate<Class<?>> classFilter) {
requireValidPackageName(packageName);
requireNonNull(classFilter, "classFilter must not be null");
List<URI> rootUris = getUrisForPackage(getClassLoader(), packageName);
return findClassesForUris(rootUris, packageName, classFilter);
} | @Test
void scanForClassesInPackage() {
List<Class<?>> classes = scanner.scanForClassesInPackage("io.cucumber.core.resource.test");
assertThat(classes, containsInAnyOrder(
ExampleClass.class,
ExampleInterface.class,
OtherClass.class));
} |
public static String cut(String s, String splitChar, int index) {
if (s == null || splitChar == null || index < 0) {
return null;
}
final String[] parts = s.split(Pattern.quote(splitChar));
if (parts.length <= index) {
return null;
}
return emptyToNull(parts[index]);
} | @Test
public void testCutChecksBounds() throws Exception {
String result = SplitAndIndexExtractor.cut("foobar", " ", 1);
assertNull(result);
} |
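A quick usage sketch for cut(...) above; the class name SplitAndIndexExtractor is taken from the test, and emptyToNull is the helper the focal method already calls.

class CutSketch {
  static void demo() {
    String second = SplitAndIndexExtractor.cut("foo bar", " ", 1);  // "bar"
    String missing = SplitAndIndexExtractor.cut("foobar", " ", 1);  // null: the split yields only one part
    String blank = SplitAndIndexExtractor.cut("foo  bar", " ", 1);  // null: the middle part is empty, so emptyToNull applies
  }
}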
public RuntimeOptionsBuilder parse(Class<?> clazz) {
RuntimeOptionsBuilder args = new RuntimeOptionsBuilder();
for (Class<?> classWithOptions = clazz; hasSuperClass(
classWithOptions); classWithOptions = classWithOptions.getSuperclass()) {
CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions);
if (options != null) {
addDryRun(options, args);
addMonochrome(options, args);
addTags(classWithOptions, options, args);
addPlugins(options, args);
addPublish(options, args);
addName(options, args);
addSnippets(options, args);
addGlue(options, args);
addFeatures(options, args);
addObjectFactory(options, args);
addUuidGenerator(options, args);
}
}
addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz);
addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz);
return args;
} | @Test
void create_without_options() {
RuntimeOptions runtimeOptions = parser()
.parse(WithoutOptions.class)
.build();
assertAll(
() -> assertThat(runtimeOptions.getObjectFactoryClass(), is(nullValue())),
() -> assertThat(runtimeOptions.getFeaturePaths(), contains(uri("classpath:/io/cucumber/core/options"))),
() -> assertThat(runtimeOptions.getGlue(), contains(uri("classpath:/io/cucumber/core/options"))));
Plugins plugins = new Plugins(new PluginFactory(), runtimeOptions);
plugins.setEventBusOnEventListenerPlugins(new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID));
assertAll(
() -> assertThat(plugins.getPlugins(), is(empty())));
} |
@Override
public Collection<Integer> getOutboundPorts(EndpointQualifier endpointQualifier) {
final AdvancedNetworkConfig advancedNetworkConfig = node.getConfig().getAdvancedNetworkConfig();
if (advancedNetworkConfig.isEnabled()) {
EndpointConfig endpointConfig = advancedNetworkConfig.getEndpointConfigs().get(endpointQualifier);
final Collection<Integer> outboundPorts = endpointConfig != null
? endpointConfig.getOutboundPorts() : Collections.emptyList();
final Collection<String> outboundPortDefinitions = endpointConfig != null
? endpointConfig.getOutboundPortDefinitions() : Collections.emptyList();
return AddressUtil.getOutboundPorts(outboundPorts, outboundPortDefinitions);
}
final NetworkConfig networkConfig = node.getConfig().getNetworkConfig();
final Collection<Integer> outboundPorts = networkConfig.getOutboundPorts();
final Collection<String> outboundPortDefinitions = networkConfig.getOutboundPortDefinitions();
return AddressUtil.getOutboundPorts(outboundPorts, outboundPortDefinitions);
} | @Test
public void testGetOutboundPorts_acceptsRange() {
networkConfig.addOutboundPortDefinition("29000-29001");
Collection<Integer> outboundPorts = serverContext.getOutboundPorts(MEMBER);
assertThat(outboundPorts).hasSize(2);
assertThat(outboundPorts).containsExactlyInAnyOrder(29000, 29001);
} |
@Override
public void execute(List<RegisteredMigrationStep> steps, MigrationStatusListener listener) {
Profiler globalProfiler = Profiler.create(LOGGER);
globalProfiler.startInfo(GLOBAL_START_MESSAGE, databaseMigrationState.getTotalMigrations());
boolean allStepsExecuted = false;
try {
for (RegisteredMigrationStep step : steps) {
this.execute(step);
listener.onMigrationStepCompleted();
}
allStepsExecuted = true;
} finally {
long dbMigrationDuration = 0L;
if (allStepsExecuted) {
dbMigrationDuration = globalProfiler.stopInfo(GLOBAL_END_MESSAGE,
databaseMigrationState.getCompletedMigrations(),
databaseMigrationState.getTotalMigrations(),
"success");
} else {
dbMigrationDuration = globalProfiler.stopError(GLOBAL_END_MESSAGE,
databaseMigrationState.getCompletedMigrations(),
databaseMigrationState.getTotalMigrations(),
"failure");
}
telemetryDbMigrationTotalTimeProvider.setDbMigrationTotalTime(dbMigrationDuration);
telemetryDbMigrationStepsProvider.setDbMigrationCompletedSteps(databaseMigrationState.getCompletedMigrations());
telemetryDbMigrationSuccessProvider.setDbMigrationSuccess(allStepsExecuted);
}
} | @Test
void execute_execute_the_instance_of_type_specified_in_step_in_stream_order() {
migrationContainer.add(MigrationStep1.class, MigrationStep2.class, MigrationStep3.class);
((SpringComponentContainer) migrationContainer).startComponents();
underTest.execute(asList(
registeredStepOf(1, MigrationStep2.class),
registeredStepOf(2, MigrationStep1.class),
registeredStepOf(3, MigrationStep3.class)),
migrationStatusListener);
assertThat(SingleCallCheckerMigrationStep.calledSteps)
.containsExactly(MigrationStep2.class, MigrationStep1.class, MigrationStep3.class);
assertThat(logTester.logs()).hasSize(8);
assertLogLevel(Level.INFO,
"Executing 5 DB migrations...",
"3/5 #1 '1-MigrationStep2'...",
"3/5 #1 '1-MigrationStep2': success | time=",
"3/5 #2 '2-MigrationStep1'...",
"3/5 #2 '2-MigrationStep1': success | time=",
"3/5 #3 '3-MigrationStep3'...",
"3/5 #3 '3-MigrationStep3': success | time=",
"Executed 2/5 DB migrations: success | time=");
assertThat(migrationContainer.getComponentByType(MigrationStep1.class).isCalled()).isTrue();
assertThat(migrationContainer.getComponentByType(MigrationStep2.class).isCalled()).isTrue();
assertThat(migrationContainer.getComponentByType(MigrationStep3.class).isCalled()).isTrue();
verify(migrationStatusListener, times(3)).onMigrationStepCompleted();
} |
protected void declareRuleFromAttribute(final Attribute attribute, final String parentPath,
final int attributeIndex,
final List<KiePMMLDroolsRule> rules,
final String statusToSet,
final String characteristicReasonCode,
final Number characteristicBaselineScore,
final boolean isLastCharacteristic) {
logger.trace("declareRuleFromAttribute {} {}", attribute, parentPath);
final Predicate predicate = attribute.getPredicate();
// This means the rule should not be created at all.
// Different semantics has to be implemented if the "False"/"True" predicates are declared inside
// an XOR compound predicate
if (predicate instanceof False) {
return;
}
String currentRule = String.format(PATH_PATTERN, parentPath, attributeIndex);
KiePMMLReasonCodeAndValue reasonCodeAndValue = getKiePMMLReasonCodeAndValue(attribute, characteristicReasonCode, characteristicBaselineScore);
PredicateASTFactoryData predicateASTFactoryData = new PredicateASTFactoryData(predicate, outputFields, rules, parentPath, currentRule, fieldTypeMap);
KiePMMLPredicateASTFactory.factory(predicateASTFactoryData).declareRuleFromPredicate(attribute.getPartialScore(), statusToSet, reasonCodeAndValue, isLastCharacteristic);
} | @Test
void declareRuleFromAttributeWithSimpleSetPredicate() {
Attribute attribute = getSimpleSetPredicateAttribute();
final String parentPath = "parent_path";
final int attributeIndex = 2;
final List<KiePMMLDroolsRule> rules = new ArrayList<>();
final String statusToSet = "status_to_set";
final String characteristicReasonCode = "REASON_CODE";
final double characteristicBaselineScore = 12;
final boolean isLastCharacteristic = false;
getKiePMMLScorecardModelCharacteristicASTFactory()
.declareRuleFromAttribute(attribute, parentPath, attributeIndex, rules, statusToSet, characteristicReasonCode, characteristicBaselineScore, isLastCharacteristic);
assertThat(rules).hasSize(1);
commonValidateRule(rules.get(0),
attribute,
statusToSet,
parentPath,
attributeIndex,
isLastCharacteristic,
null,
1,
null,
null,
null);
} |
public static ClusterResolver<EurekaEndpoint> fromURL(String regionName, URL serviceUrl) {
boolean isSecure = "https".equalsIgnoreCase(serviceUrl.getProtocol());
int defaultPort = isSecure ? 443 : 80;
int port = serviceUrl.getPort() == -1 ? defaultPort : serviceUrl.getPort();
return new StaticClusterResolver<EurekaEndpoint>(
regionName,
new DefaultEndpoint(serviceUrl.getHost(), port, isSecure, serviceUrl.getPath())
);
} | @Test
public void testClusterResolverFromURL() throws Exception {
verifyEqual(
StaticClusterResolver.fromURL("regionA", new URL("http://eureka.test:8080/eureka/v2/apps")),
new DefaultEndpoint("eureka.test", 8080, false, "/eureka/v2/apps")
);
verifyEqual(
StaticClusterResolver.fromURL("regionA", new URL("https://eureka.test:8081/eureka/v2/apps")),
new DefaultEndpoint("eureka.test", 8081, true, "/eureka/v2/apps")
);
verifyEqual(
StaticClusterResolver.fromURL("regionA", new URL("http://eureka.test/eureka/v2/apps")),
new DefaultEndpoint("eureka.test", 80, false, "/eureka/v2/apps")
);
verifyEqual(
StaticClusterResolver.fromURL("regionA", new URL("https://eureka.test/eureka/v2/apps")),
new DefaultEndpoint("eureka.test", 443, true, "/eureka/v2/apps")
);
} |
@VisibleForTesting
static Map<String, Long> getStepIdToRunIdForForeachAndSubworkflowFromPreviousRuns(
WorkflowInstance instance) {
Map<String, StepType> stepIdToStepTypeForForeachAndSubworkflows =
instance.getRuntimeWorkflow().getSteps().stream()
.filter(
step ->
step.getType().equals(StepType.FOREACH)
|| step.getType().equals(StepType.SUBWORKFLOW))
.collect(Collectors.toMap(Step::getId, Step::getType));
if (stepIdToStepTypeForForeachAndSubworkflows.isEmpty()) {
      // if there are no foreach or subworkflow steps in the workflow definition,
      // the result should be empty
return Collections.emptyMap();
}
// stepIdToRunId for subworkflow and foreach steps that
// are not part of current instance's runtimeDAG
return instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
.filter(
step ->
!instance.getRuntimeDag().containsKey(step.getKey())
&& stepIdToStepTypeForForeachAndSubworkflows.containsKey(step.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().getWorkflowRunId()));
} | @Test
public void testGetStepIdToRunIdForForeachAndSubworkflowFromPreviousRuns() throws IOException {
WorkflowInstance sampleInstance =
loadObject(
"fixtures/instances/sample-workflow-instance-created-foreach-subworkflow-1.json",
WorkflowInstance.class);
Map<String, Long> stepIdRunId =
RollupAggregationHelper.getStepIdToRunIdForForeachAndSubworkflowFromPreviousRuns(
sampleInstance);
// both subworkflow and foreach steps are included in the current run
assertEquals(0, stepIdRunId.size());
sampleInstance.getRuntimeDag().remove("job_subworkflow");
stepIdRunId =
RollupAggregationHelper.getStepIdToRunIdForForeachAndSubworkflowFromPreviousRuns(
sampleInstance);
// foreach step is included in the current run
// while subworkflow one is not
assertEquals(1, stepIdRunId.size());
assertEquals(3L, stepIdRunId.get("job_subworkflow").longValue());
sampleInstance.getRuntimeDag().remove("job_foreach");
stepIdRunId =
RollupAggregationHelper.getStepIdToRunIdForForeachAndSubworkflowFromPreviousRuns(
sampleInstance);
    // now neither step is part of the current run, so both are returned
assertEquals(2, stepIdRunId.size());
assertEquals(3L, stepIdRunId.get("job_subworkflow").longValue());
assertEquals(1L, stepIdRunId.get("job_foreach").longValue());
WorkflowInstance sampleInstanceNoForeachSubworkflow =
loadObject(
"fixtures/instances/sample-workflow-instance-created.json", WorkflowInstance.class);
stepIdRunId =
RollupAggregationHelper.getStepIdToRunIdForForeachAndSubworkflowFromPreviousRuns(
sampleInstanceNoForeachSubworkflow);
assertEquals(0, stepIdRunId.size());
} |
public SaslExtensions extensions() {
return saslExtensions;
} | @Test
public void testExtensions() throws Exception {
String message = "n,,\u0001propA=valueA1, valueA2\u0001auth=Bearer 567\u0001propB=valueB\u0001\u0001";
OAuthBearerClientInitialResponse response = new OAuthBearerClientInitialResponse(message.getBytes(StandardCharsets.UTF_8));
assertEquals("567", response.tokenValue());
assertEquals("", response.authorizationId());
assertEquals("valueA1, valueA2", response.extensions().map().get("propA"));
assertEquals("valueB", response.extensions().map().get("propB"));
} |
@Override
public ResultSet getAttributes(final String catalog, final String schemaPattern, final String typeNamePattern, final String attributeNamePattern) {
return null;
} | @Test
void assertGetAttributes() {
assertNull(metaData.getAttributes("", "", "", ""));
} |
@Override
public long getPeriodMillis() {
return STATIC;
} | @Test
public void testGetPeriodMillis() {
assertEquals(STATIC, plugin.getPeriodMillis());
} |
public IndexConfig setAttributes(List<String> attributes) {
checkNotNull(attributes, "Index attributes cannot be null.");
this.attributes = new ArrayList<>(attributes.size());
for (String attribute : attributes) {
addAttribute(attribute);
}
return this;
} | @Test(expected = IllegalArgumentException.class)
public void testAttributeEmpty() {
new IndexConfig().setAttributes(Collections.singletonList(""));
} |
public static String[] parseUri(String uri) {
return doParseUri(uri, false);
} | @Test
public void testParseNoSlashUri() {
String[] out1 = CamelURIParser.parseUri("direct:start");
assertEquals("direct", out1[0]);
assertEquals("start", out1[1]);
assertNull(out1[2]);
} |
private MessageRouter getMessageRouter() {
MessageRouter messageRouter;
MessageRoutingMode messageRouteMode = conf.getMessageRoutingMode();
switch (messageRouteMode) {
case CustomPartition:
messageRouter = Objects.requireNonNull(conf.getCustomMessageRouter());
break;
case SinglePartition:
messageRouter = new SinglePartitionMessageRouterImpl(
ThreadLocalRandom.current().nextInt(topicMetadata.numPartitions()), conf.getHashingScheme());
break;
case RoundRobinPartition:
default:
messageRouter = new RoundRobinPartitionMessageRouterImpl(
conf.getHashingScheme(),
ThreadLocalRandom.current().nextInt(topicMetadata.numPartitions()),
conf.isBatchingEnabled(),
TimeUnit.MICROSECONDS.toMillis(conf.batchingPartitionSwitchFrequencyIntervalMicros()));
}
return messageRouter;
} | @Test
public void testSinglePartitionMessageRouterImplInstance() throws NoSuchFieldException, IllegalAccessException {
ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData();
producerConfigurationData.setMessageRoutingMode(MessageRoutingMode.SinglePartition);
MessageRouter messageRouter = getMessageRouter(producerConfigurationData);
assertTrue(messageRouter instanceof SinglePartitionMessageRouterImpl);
} |
public synchronized boolean saveNamespace(long timeWindow, long txGap,
FSNamesystem source) throws IOException {
if (timeWindow > 0 || txGap > 0) {
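      // Skip the save when the latest image is both recent enough (within the time window)
      // and close enough (within txGap transactions) to the current namespace state.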
final FSImageStorageInspector inspector = storage.readAndInspectDirs(
EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
StartupOption.REGULAR);
FSImageFile image = inspector.getLatestImages().get(0);
File imageFile = image.getFile();
final long checkpointTxId = image.getCheckpointTxId();
final long checkpointAge = Time.now() - imageFile.lastModified();
if (checkpointAge <= timeWindow * 1000 &&
checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) {
return false;
}
}
saveNamespace(source, NameNodeFile.IMAGE, null);
return true;
} | @Test(timeout = 60000)
public void testSupportBlockGroup() throws Exception {
final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() +
testECPolicy.getNumParityUnits());
final int BLOCK_SIZE = 8 * 1024 * 1024;
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
DFSTestUtil.enableAllECPolicies(fs);
Path parentDir = new Path("/ec-10-4");
Path childDir = new Path(parentDir, "ec-3-2");
ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
// Create directories and files
fs.mkdirs(parentDir);
fs.mkdirs(childDir);
fs.setErasureCodingPolicy(parentDir, testECPolicy.getName());
fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
Path file_10_4 = new Path(parentDir, "striped_file_10_4");
Path file_3_2 = new Path(childDir, "striped_file_3_2");
// Write content to files
byte[] bytes = StripedFileTestUtil.generateBytes(BLOCK_SIZE);
DFSTestUtil.writeFile(fs, file_10_4, new String(bytes));
DFSTestUtil.writeFile(fs, file_3_2, new String(bytes));
// Save namespace and restart NameNode
fs.setSafeMode(SafeModeAction.ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.LEAVE);
cluster.restartNameNodes();
fs = cluster.getFileSystem();
assertTrue(fs.exists(file_10_4));
assertTrue(fs.exists(file_3_2));
// check the information of file_10_4
FSNamesystem fsn = cluster.getNamesystem();
INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
assertTrue(inode.isStriped());
assertEquals(testECPolicy.getId(), inode.getErasureCodingPolicyID());
BlockInfo[] blks = inode.getBlocks();
assertEquals(1, blks.length);
assertTrue(blks[0].isStriped());
assertEquals(testECPolicy.getId(),
fs.getErasureCodingPolicy(file_10_4).getId());
assertEquals(testECPolicy.getId(),
((BlockInfoStriped)blks[0]).getErasureCodingPolicy().getId());
assertEquals(testECPolicy.getNumDataUnits(),
((BlockInfoStriped) blks[0]).getDataBlockNum());
assertEquals(testECPolicy.getNumParityUnits(),
((BlockInfoStriped) blks[0]).getParityBlockNum());
byte[] content = DFSTestUtil.readFileAsBytes(fs, file_10_4);
assertArrayEquals(bytes, content);
// check the information of file_3_2
inode = fsn.dir.getINode(file_3_2.toString()).asFile();
assertTrue(inode.isStriped());
assertEquals(SystemErasureCodingPolicies.getByID(
SystemErasureCodingPolicies.RS_3_2_POLICY_ID).getId(),
inode.getErasureCodingPolicyID());
blks = inode.getBlocks();
assertEquals(1, blks.length);
assertTrue(blks[0].isStriped());
assertEquals(ec32Policy.getId(),
fs.getErasureCodingPolicy(file_3_2).getId());
assertEquals(ec32Policy.getNumDataUnits(),
((BlockInfoStriped) blks[0]).getDataBlockNum());
assertEquals(ec32Policy.getNumParityUnits(),
((BlockInfoStriped) blks[0]).getParityBlockNum());
content = DFSTestUtil.readFileAsBytes(fs, file_3_2);
assertArrayEquals(bytes, content);
// check the EC policy on parent Dir
ErasureCodingPolicy ecPolicy =
fsn.getErasureCodingPolicy(parentDir.toString());
assertNotNull(ecPolicy);
assertEquals(testECPolicy.getId(), ecPolicy.getId());
// check the EC policy on child Dir
ecPolicy = fsn.getErasureCodingPolicy(childDir.toString());
assertNotNull(ecPolicy);
assertEquals(ec32Policy.getId(), ecPolicy.getId());
// check the EC policy on root directory
ecPolicy = fsn.getErasureCodingPolicy("/");
assertNull(ecPolicy);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
} |
@Override
public List<ServicecombServiceInstance> getInstanceList(String serviceId) {
final List<MicroserviceInstance> microserviceInstances = getScInstances(serviceId);
final List<ServicecombServiceInstance> serviceInstances = new ArrayList<>();
for (final MicroserviceInstance microserviceInstance : microserviceInstances) {
if (!isValidInstance(microserviceInstance)) {
continue;
}
serviceInstances.add(new ServicecombServiceInstance(microserviceInstance));
}
return serviceInstances;
} | @Test
public void getInstanceList() {
final List<ServicecombServiceInstance> instanceList = scRegister.getInstanceList(serviceName);
Assert.assertEquals(instanceList.size(), this.instanceList.size());
} |
public static String[] nullToEmpty(String[] array) {
return edit(array, t -> null == t ? StrUtil.EMPTY : t);
} | @Test
public void nullToEmptyTest() {
String[] a = {"a", "b", "", null, " ", "c"};
String[] resultA = {"a", "b", "", "", " ", "c"};
assertArrayEquals(ArrayUtil.nullToEmpty(a), resultA);
} |
public Result fetchArtifacts(String[] uris) {
checkArgument(uris != null && uris.length > 0, "At least one URI is required.");
ArtifactUtils.createMissingParents(baseDir);
List<File> artifacts =
Arrays.stream(uris)
.map(FunctionUtils.uncheckedFunction(this::fetchArtifact))
.collect(Collectors.toList());
if (artifacts.size() > 1) {
return new Result(null, artifacts);
}
if (artifacts.size() == 1) {
return new Result(artifacts.get(0), null);
}
// Should not happen.
throw new IllegalStateException("Corrupt artifact fetching state.");
} | @Test
void testHttpFetch() throws Exception {
configuration.set(ArtifactFetchOptions.RAW_HTTP_ENABLED, true);
HttpServer httpServer = null;
try {
httpServer = startHttpServer();
File sourceFile = getFlinkClientsJar();
httpServer.createContext(
"/download/" + sourceFile.getName(), new DummyHttpDownloadHandler(sourceFile));
String uriStr =
String.format(
"http://127.0.0.1:%d/download/" + sourceFile.getName(),
httpServer.getAddress().getPort());
ArtifactFetchManager fetchMgr = new ArtifactFetchManager(configuration);
ArtifactFetchManager.Result res = fetchMgr.fetchArtifacts(new String[] {uriStr});
assertThat(res.getJobJar()).isNotNull();
assertThat(res.getArtifacts()).isNull();
assertFetchedFile(res.getJobJar(), sourceFile);
} finally {
if (httpServer != null) {
httpServer.stop(0);
}
}
} |
@Override
public List<SimpleColumn> toColumns(
final ParsedSchema schema,
final SerdeFeatures serdeFeatures,
final boolean isKey) {
SerdeUtils.throwOnUnsupportedFeatures(serdeFeatures, format.supportedFeatures());
Schema connectSchema = connectSrTranslator.toConnectSchema(schema);
if (serdeFeatures.enabled(SerdeFeature.UNWRAP_SINGLES)) {
connectSchema = SerdeUtils.wrapSingle(connectSchema, isKey);
}
if (connectSchema.type() != Type.STRUCT) {
if (isKey) {
throw new IllegalStateException("Key schemas are always unwrapped.");
}
throw new KsqlException("Schema returned from schema registry is anonymous type. "
+ "To use this schema with ksqlDB, set '" + CommonCreateConfigs.WRAP_SINGLE_VALUE
+ "=false' in the WITH clause properties.");
}
final Schema rowSchema = connectKsqlTranslator.toKsqlSchema(connectSchema);
return rowSchema.fields().stream()
.map(ConnectFormatSchemaTranslator::toColumn)
.collect(Collectors.toList());
} | @Test
public void shouldThrowOnUnwrappedSchemaIfUnwrapSingleIsFalse() {
// Given:
when(connectSchema.type()).thenReturn(Type.INT32);
when(format.supportedFeatures()).thenReturn(Collections.singleton(SerdeFeature.UNWRAP_SINGLES));
// When:
final Exception e = assertThrows(KsqlException.class,
() -> translator.toColumns(parsedSchema, SerdeFeatures.of(), false)
);
// Then:
assertThat(e.getMessage(), is("Schema returned from schema registry is anonymous type. "
+ "To use this schema with ksqlDB, set 'WRAP_SINGLE_VALUE=false' in the "
+ "WITH clause properties."));
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
}
LongColumnStatsDataInspector columnStatsData = longInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
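    // Case 1: every requested partition has stats (or at most one partition contributes),
    // so the per-partition stats can be merged directly without extrapolation.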
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
LongColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
LongColumnStatsMerger merger = new LongColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setLongStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsData newData = cso.getStatsData().getLongStats();
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
LongColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getLongStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getLongStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateMultiStatsWhenOnlySomeAvailable() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3", "part4");
ColumnStatisticsData data1 = new ColStatsBuilder<>(long.class).numNulls(1).numDVs(3)
.low(1L).high(3L).hll(1, 2, 3).kll(1, 2, 3).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(long.class).numNulls(3).numDVs(1)
.low(7L).high(7L).hll(7).kll(7).build();
ColumnStatisticsData data4 = new ColStatsBuilder<>(long.class).numNulls(2).numDVs(3)
.low(3L).high(5L).hll(3, 4, 5).kll(3, 4, 5).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)),
createStatsWithInfo(data4, TABLE, COL, partitions.get(3)));
LongColumnStatsAggregator aggregator = new LongColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// hll in case of missing stats is left as null, only numDVs is updated
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(long.class).numNulls(8).numDVs(4)
.low(1L).high(9L).kll(1, 2, 3, 7, 3, 4, 5).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
} |
public static boolean isNormalizedPathOutsideWorkingDir(String path) {
final String normalize = FilenameUtils.normalize(path);
final String prefix = FilenameUtils.getPrefix(normalize);
return (normalize != null && StringUtils.isBlank(prefix));
} | @Test
public void shouldReturnFalseIfGivenFolderIsAbsoluteUnderLinux() {
assertThat(FilenameUtil.isNormalizedPathOutsideWorkingDir("/tmp"), is(false));
} |
@Override
public Integer call() throws Exception {
super.call();
if (this.pluginsPath == null) {
            throw new CommandLine.ParameterException(this.spec.commandLine(), "Missing required option '--plugins' " +
                "or environment variable 'KESTRA_PLUGINS_PATH'"
            );
}
if (!pluginsPath.toFile().exists()) {
if (!pluginsPath.toFile().mkdir()) {
throw new RuntimeException("Cannot create directory: " + pluginsPath.toFile().getAbsolutePath());
}
}
if (repositories != null) {
Arrays.stream(repositories)
.forEach(throwConsumer(s -> {
URIBuilder uriBuilder = new URIBuilder(s);
RepositoryConfig.RepositoryConfigBuilder builder = RepositoryConfig.builder()
.id(IdUtils.create());
if (uriBuilder.getUserInfo() != null) {
int index = uriBuilder.getUserInfo().indexOf(":");
builder.basicAuth(new RepositoryConfig.BasicAuth(
uriBuilder.getUserInfo().substring(0, index),
uriBuilder.getUserInfo().substring(index + 1)
));
uriBuilder.setUserInfo(null);
}
builder.url(uriBuilder.build().toString());
pluginDownloader.addRepository(builder.build());
}));
}
List<URL> resolveUrl = pluginDownloader.resolve(dependencies);
stdOut("Resolved Plugin(s) with {0}", resolveUrl);
for (URL url: resolveUrl) {
Files.copy(
Paths.get(url.toURI()),
Paths.get(pluginsPath.toString(), FilenameUtils.getName(url.toString())),
StandardCopyOption.REPLACE_EXISTING
);
}
stdOut("Successfully installed plugins {0} into {1}", dependencies, pluginsPath);
return 0;
} | @Test
void latestVersion() throws IOException {
Path pluginsPath = Files.createTempDirectory(PluginInstallCommandTest.class.getSimpleName());
pluginsPath.toFile().deleteOnExit();
try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
String[] args = {"--plugins", pluginsPath.toAbsolutePath().toString(), "io.kestra.plugin:plugin-notifications:LATEST"};
PicocliRunner.call(PluginInstallCommand.class, ctx, args);
List<Path> files = Files.list(pluginsPath).toList();
assertThat(files.size(), is(1));
assertThat(files.getFirst().getFileName().toString(), startsWith("plugin-notifications"));
assertThat(files.getFirst().getFileName().toString(), not(containsString("LATEST")));
}
} |
@Override
public Set<DeviceId> getDevicesOf(NodeId nodeId) {
checkNotNull(nodeId, NODE_ID_NULL);
return store.getDevices(networkId, nodeId);
} | @Test
public void getDevicesOf() {
mastershipMgr1.setRole(NID_LOCAL, VDID1, MASTER);
mastershipMgr1.setRole(NID_LOCAL, VDID2, STANDBY);
assertEquals("should be one device:", 1, mastershipMgr1.getDevicesOf(NID_LOCAL).size());
//hand both devices to NID_LOCAL
mastershipMgr1.setRole(NID_LOCAL, VDID2, MASTER);
assertEquals("should be two devices:", 2, mastershipMgr1.getDevicesOf(NID_LOCAL).size());
} |
public Value parse(String json) {
return this.delegate.parse(json);
} | @Test
public void testOrdinaryFloat() throws Exception {
final JsonParser parser = new JsonParser();
final Value msgpackValue = parser.parse("12345.12");
assertTrue(msgpackValue.getValueType().isNumberType());
assertTrue(msgpackValue.getValueType().isFloatType());
assertFalse(msgpackValue.getValueType().isIntegerType());
assertFalse(msgpackValue.getValueType().isStringType());
assertEquals(12345.12, msgpackValue.asFloatValue().toDouble(), 0.000000001);
// Not sure this |toString| is to be tested...
assertEquals("12345.12", msgpackValue.asFloatValue().toString());
} |
@Override
public ExecuteContext after(ExecuteContext context) throws Exception {
if (isHasMethodLoadSpringFactories()) {
            // Inject via loadSpringFactories only in the high version: the high version caches the result,
            // so it is more efficient and the injection only needs to happen once
if (IS_INJECTED.compareAndSet(false, true)) {
injectConfigurations(context.getResult());
}
} else {
final Object rawFactoryType = context.getArguments()[0];
if (rawFactoryType instanceof Class) {
final Class<?> factoryType = (Class<?>) rawFactoryType;
injectConfigurationsWithLowVersion(context.getResult(), factoryType.getName());
}
}
return context;
} | @Test
public void doAfterLowVersion() throws Exception {
// low version test
final SpringFactoriesInterceptor lowVersionInterceptor = new SpringFactoriesInterceptor();
hasMethodLoadSpringFactoriesFiled.set(lowVersionInterceptor, Boolean.FALSE);
ExecuteContext executeContext = ExecuteContext.forMemberMethod(this, String.class.getMethod("trim"),
new Object[]{EnableAutoConfiguration.class
}, null, null);
final List<String> lowResult = new ArrayList<>();
executeContext.changeResult(lowResult);
executeContext = lowVersionInterceptor.after(executeContext);
executeContext.changeArgs(new Object[]{BootstrapConfiguration.class});
executeContext = lowVersionInterceptor.after(executeContext);
final List<String> injectResult = (List<String>) executeContext.getResult();
Assert.assertTrue(injectResult.contains(RESPONSE_CLASS) && injectResult.contains(RETRY_RESPONSE_CLASS));
} |
public static <
EventTypeT,
EventKeyTypeT,
ResultTypeT,
StateTypeT extends MutableState<EventTypeT, ResultTypeT>>
OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create(
OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) {
return new AutoValue_OrderedEventProcessor<>(handler);
} | @Test
public void testHandlingOfCheckedExceptions() throws CannotProvideCoderException {
Event[] events = {
Event.create(0, "id-1", "a"),
Event.create(1, "id-1", "b"),
Event.create(2, "id-1", StringBuilderState.BAD_VALUE),
Event.create(3, "id-1", "c"),
};
Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>();
expectedStatuses.add(
KV.of("id-1", OrderedProcessingStatus.create(1L, 1, 3L, 3L, events.length, 2, 0, false)));
Collection<KV<String, String>> expectedOutput = new ArrayList<>();
expectedOutput.add(KV.of("id-1", "a"));
expectedOutput.add(KV.of("id-1", "ab"));
Collection<KV<String, KV<Long, UnprocessedEvent<String>>>> failedEvents = new ArrayList<>();
failedEvents.add(
KV.of(
"id-1",
KV.of(
2L,
UnprocessedEvent.create(StringBuilderState.BAD_VALUE, Reason.exception_thrown))));
testProcessing(
events,
expectedStatuses,
expectedOutput,
failedEvents,
EMISSION_FREQUENCY_ON_EVERY_ELEMENT,
INITIAL_SEQUENCE_OF_0,
LARGE_MAX_RESULTS_PER_OUTPUT,
DONT_PRODUCE_STATUS_ON_EVERY_EVENT);
} |
@Override
public void removeGroup(String groupName) {
get(removeGroupAsync(groupName));
} | @Test
public void testRemoveGroup() {
Assertions.assertThrows(RedisException.class, () -> {
RStream<String, String> stream = redisson.getStream("test");
stream.add(StreamAddArgs.entry("0", "0"));
stream.createGroup(StreamCreateGroupArgs.name("testGroup"));
StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));
stream.removeGroup("testGroup");
stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
});
} |
public static boolean shutdownExecutorForcefully(ExecutorService executor, Duration timeout) {
return shutdownExecutorForcefully(executor, timeout, true);
} | @Test
void testShutdownExecutorForcefullyInterruptable() {
MockExecutorService executor = new MockExecutorService(5);
executor.interruptAfterNumForcefulShutdown(1);
assertThat(
ComponentClosingUtils.shutdownExecutorForcefully(
executor, Duration.ofDays(1), true))
.isFalse();
assertThat(executor.forcefullyShutdownCount).isOne();
} |
public Uuid defaultDir(int brokerId) {
BrokerRegistration registration = registration(brokerId);
if (registration == null) {
// throwing an exception here can break the expected error from an
// Admin call, so instead, we return UNASSIGNED, and let the fact
// that the broker is not registered be handled elsewhere.
return DirectoryId.UNASSIGNED;
}
List<Uuid> directories = registration.directories();
if (directories.isEmpty()) {
return DirectoryId.MIGRATING;
}
if (directories.size() == 1) {
return directories.get(0);
}
return DirectoryId.UNASSIGNED;
} | @Test
public void testDefaultDir() {
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setFeatureControlManager(new FeatureControlManager.Builder().build()).
setBrokerUncleanShutdownHandler((brokerId, records) -> { }).
build();
clusterControl.activate();
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(1).setLogDirs(Collections.emptyList());
brokerRecord.endPoints().add(new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).setName("PLAINTEXT").setHost("127.0.0.1"));
clusterControl.replay(brokerRecord, 100L);
registerNewBrokerWithDirs(clusterControl, 2, Collections.singletonList(Uuid.fromString("singleOnlineDirectoryA")));
registerNewBrokerWithDirs(clusterControl, 3, asList(Uuid.fromString("s4fRmyNFSH6J0vI8AVA5ew"), Uuid.fromString("UbtxBcqYSnKUEMcnTyZFWw")));
assertEquals(DirectoryId.MIGRATING, clusterControl.defaultDir(1));
assertEquals(Uuid.fromString("singleOnlineDirectoryA"), clusterControl.defaultDir(2));
assertEquals(DirectoryId.UNASSIGNED, clusterControl.defaultDir(3));
assertEquals(DirectoryId.UNASSIGNED, clusterControl.defaultDir(4));
} |
public static Builder newBuilder() {
return new Builder();
} | @Test
void testBuilderThrowExceptionIfjarFileAndEntryPointClassNameAreBothNull() {
assertThatThrownBy(() -> PackagedProgram.newBuilder().build())
.isInstanceOf(IllegalArgumentException.class);
} |
public void write(ImageWriter writer, ImageWriterOptions options) {
if (options.metadataVersion().isScramSupported()) {
for (Entry<ScramMechanism, Map<String, ScramCredentialData>> mechanismEntry : mechanisms.entrySet()) {
for (Entry<String, ScramCredentialData> userEntry : mechanismEntry.getValue().entrySet()) {
writer.write(0, userEntry.getValue().toRecord(userEntry.getKey(), mechanismEntry.getKey()));
}
}
} else {
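            // The metadata version does not support SCRAM: if any credentials exist,
            // describe them and report the image as lossy via handleLoss.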
boolean isEmpty = true;
StringBuilder scramImageString = new StringBuilder("ScramImage({");
for (Entry<ScramMechanism, Map<String, ScramCredentialData>> mechanismEntry : mechanisms.entrySet()) {
if (!mechanismEntry.getValue().isEmpty()) {
scramImageString.append(mechanismEntry.getKey()).append(":");
List<String> users = new ArrayList<>(mechanismEntry.getValue().keySet());
scramImageString.append(String.join(", ", users));
scramImageString.append("},{");
isEmpty = false;
}
}
if (!isEmpty) {
scramImageString.append("})");
options.handleLoss(scramImageString.toString());
}
}
} | @Test
public void testEmptyWithInvalidIBP() {
ImageWriterOptions imageWriterOptions = new ImageWriterOptions.Builder().
setMetadataVersion(MetadataVersion.IBP_3_4_IV0).build();
RecordListWriter writer = new RecordListWriter();
ScramImage.EMPTY.write(writer, imageWriterOptions);
} |
@Override
public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception {
this.config = config;
this.sourceContext = sourceContext;
this.intermediateTopicName = SourceConfigUtils.computeBatchSourceIntermediateTopicName(sourceContext.getTenant(),
sourceContext.getNamespace(), sourceContext.getSourceName()).toString();
this.discoveryThread = Executors.newSingleThreadExecutor(
new DefaultThreadFactory(
String.format("%s-batch-source-discovery",
FunctionCommon.getFullyQualifiedName(
sourceContext.getTenant(), sourceContext.getNamespace(), sourceContext.getSourceName()))));
this.getBatchSourceConfigs(config);
this.initializeBatchSource();
this.start();
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp =
"BatchSource does not implement the correct interface")
public void testPushWithoutRightSource() throws Exception {
pushConfig.put(BatchSourceConfig.BATCHSOURCE_CLASSNAME_KEY, TestDiscoveryTriggerer.class.getName());
batchSourceExecutor.open(pushConfig, context);
} |
@Override
public Object evaluate(final ProcessingDTO processingDTO) {
String input = (String) getFromPossibleSources(name, processingDTO)
.orElse(null);
if (input == null) {
return mapMissingTo;
}
return input.equals(value) ? 1.0 : 0.0;
} | @Test
void evaluateSameValue() {
String fieldName = "fieldName";
String fieldValue = "fieldValue";
Number mapMissingTo = null;
KiePMMLNormDiscrete kiePMMLNormContinuous = getKiePMMLNormDiscrete(fieldName, fieldValue, mapMissingTo);
ProcessingDTO processingDTO = getProcessingDTO(Collections.singletonList(new KiePMMLNameValue(fieldName,
fieldValue)));
Object retrieved = kiePMMLNormContinuous.evaluate(processingDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).isEqualTo(1.0);
} |
public static void populateGetCreatedKiePMMLMiningFieldsMethod(final ClassOrInterfaceDeclaration modelTemplate,
final List<org.dmg.pmml.MiningField> miningFields,
final List<org.dmg.pmml.Field<?>> fields) {
final MethodDeclaration methodDeclaration =
modelTemplate.getMethodsByName(GET_CREATED_KIEPMMLMININGFIELDS).get(0);
commonPopulateGetCreatedKiePMMLMiningFieldsMethod(methodDeclaration, miningFields, fields);
} | @Test
void populateGetCreatedKiePMMLMiningFieldsMethod() throws IOException {
final CompilationDTO compilationDTO = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
pmmlModel,
model,
new PMMLCompilationContextMock(), SOURCE_BASE);
org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.populateGetCreatedKiePMMLMiningFieldsMethod(classOrInterfaceDeclaration,
compilationDTO.getMiningSchema().getMiningFields(), compilationDTO.getFields());
final MethodDeclaration retrieved =
classOrInterfaceDeclaration.getMethodsByName(GET_CREATED_KIEPMMLMININGFIELDS).get(0);
String text = getFileContent(TEST_12_SOURCE);
BlockStmt expected = JavaParserUtils.parseBlock(text);
assertThat(JavaParserUtils.equalsNode(expected, retrieved.getBody().get())).isTrue();
} |
@Override
public String getCommand() {
return "help";
} | @Test
public void getCommand() throws Exception {
Assert.assertEquals(new HelpTelnetHandler().getCommand(), "help");
} |
public static List<Validation> computeFlagsFromCSVString(String csvString,
Log log) {
List<Validation> flags = new ArrayList<>();
boolean resetFlag = false;
for (String p : csvString.split(",")) {
try {
flags.add(Validation.valueOf(p));
} catch (IllegalArgumentException e) {
log.info("validateDMN configured with flag: '" + p + "' determines this Mojo will not be executed (reset all flags).");
resetFlag = true;
}
}
if (resetFlag) {
flags.clear();
}
return flags;
} | @Test
public void testFlagsOK() {
List<DMNValidator.Validation> result = DMNValidationHelper.computeFlagsFromCSVString("VALIDATE_SCHEMA,VALIDATE_MODEL", log);
assertThat(result).isNotNull()
.hasSize(2)
.contains(DMNValidator.Validation.VALIDATE_SCHEMA, DMNValidator.Validation.VALIDATE_MODEL);
} |
@Override
public void importData(JsonReader reader) throws IOException {
logger.info("Reading configuration for 1.0");
// this *HAS* to start as an object
reader.beginObject();
while (reader.hasNext()) {
JsonToken tok = reader.peek();
switch (tok) {
case NAME:
String name = reader.nextName();
// find out which member it is
if (name.equals(CLIENTS)) {
readClients(reader);
} else if (name.equals(GRANTS)) {
readGrants(reader);
} else if (name.equals(WHITELISTEDSITES)) {
readWhitelistedSites(reader);
} else if (name.equals(BLACKLISTEDSITES)) {
readBlacklistedSites(reader);
} else if (name.equals(AUTHENTICATIONHOLDERS)) {
readAuthenticationHolders(reader);
} else if (name.equals(ACCESSTOKENS)) {
readAccessTokens(reader);
} else if (name.equals(REFRESHTOKENS)) {
readRefreshTokens(reader);
} else if (name.equals(SYSTEMSCOPES)) {
readSystemScopes(reader);
} else {
					boolean handledByExtension = false;
					for (MITREidDataServiceExtension extension : extensions) {
						if (extension.supportsVersion(THIS_VERSION)) {
							extension.importExtensionData(name, reader);
							handledByExtension = true;
							break;
						}
					}
					if (!handledByExtension) {
						// unknown token, skip it
						reader.skipValue();
					}
}
break;
case END_OBJECT:
// the object ended, we're done here
reader.endObject();
continue;
default:
logger.debug("Found unexpected entry");
reader.skipValue();
					continue;
			}
}
fixObjectReferences();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.fixExtensionObjectReferences(maps);
break;
}
}
maps.clearAll();
} | @Test
public void testImportSystemScopes() throws IOException {
SystemScope scope1 = new SystemScope();
scope1.setId(1L);
scope1.setValue("scope1");
scope1.setDescription("Scope 1");
scope1.setRestricted(true);
scope1.setDefaultScope(false);
scope1.setIcon("glass");
SystemScope scope2 = new SystemScope();
scope2.setId(2L);
scope2.setValue("scope2");
scope2.setDescription("Scope 2");
scope2.setRestricted(false);
scope2.setDefaultScope(false);
scope2.setIcon("ball");
SystemScope scope3 = new SystemScope();
scope3.setId(3L);
scope3.setValue("scope3");
scope3.setDescription("Scope 3");
scope3.setRestricted(false);
scope3.setDefaultScope(true);
scope3.setIcon("road");
String configJson = "{" +
"\"" + MITREidDataService.CLIENTS + "\": [], " +
"\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
"\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
"\"" + MITREidDataService.GRANTS + "\": [], " +
"\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
"\"" + MITREidDataService.SYSTEMSCOPES + "\": [" +
"{\"id\":1,\"description\":\"Scope 1\",\"icon\":\"glass\",\"value\":\"scope1\",\"allowDynReg\":false,\"defaultScope\":false}," +
"{\"id\":2,\"description\":\"Scope 2\",\"icon\":\"ball\",\"value\":\"scope2\",\"allowDynReg\":true,\"defaultScope\":false}," +
"{\"id\":3,\"description\":\"Scope 3\",\"icon\":\"road\",\"value\":\"scope3\",\"allowDynReg\":true,\"defaultScope\":true}" +
" ]" +
"}";
System.err.println(configJson);
JsonReader reader = new JsonReader(new StringReader(configJson));
dataService.importData(reader);
verify(sysScopeRepository, times(3)).save(capturedScope.capture());
List<SystemScope> savedScopes = capturedScope.getAllValues();
assertThat(savedScopes.size(), is(3));
assertThat(savedScopes.get(0).getValue(), equalTo(scope1.getValue()));
assertThat(savedScopes.get(0).getDescription(), equalTo(scope1.getDescription()));
assertThat(savedScopes.get(0).getIcon(), equalTo(scope1.getIcon()));
assertThat(savedScopes.get(0).isDefaultScope(), equalTo(scope1.isDefaultScope()));
assertThat(savedScopes.get(0).isRestricted(), equalTo(scope1.isRestricted()));
assertThat(savedScopes.get(1).getValue(), equalTo(scope2.getValue()));
assertThat(savedScopes.get(1).getDescription(), equalTo(scope2.getDescription()));
assertThat(savedScopes.get(1).getIcon(), equalTo(scope2.getIcon()));
assertThat(savedScopes.get(1).isDefaultScope(), equalTo(scope2.isDefaultScope()));
assertThat(savedScopes.get(1).isRestricted(), equalTo(scope2.isRestricted()));
assertThat(savedScopes.get(2).getValue(), equalTo(scope3.getValue()));
assertThat(savedScopes.get(2).getDescription(), equalTo(scope3.getDescription()));
assertThat(savedScopes.get(2).getIcon(), equalTo(scope3.getIcon()));
assertThat(savedScopes.get(2).isDefaultScope(), equalTo(scope3.isDefaultScope()));
assertThat(savedScopes.get(2).isRestricted(), equalTo(scope3.isRestricted()));
} |
@Override
protected void encode(ChannelHandlerContext ctx, Object msg, List out) throws Exception {
if (!(msg instanceof List)) {
ByteBuf byteBuf = Unpooled.buffer();
((LispMessage) msg).writeTo(byteBuf);
out.add(new DatagramPacket(byteBuf, ((LispMessage) msg).getSender()));
return;
}
List<LispMessage> msgList = (List<LispMessage>) msg;
for (LispMessage message : msgList) {
if (message != null) {
ByteBuf byteBuf = Unpooled.buffer();
message.writeTo(byteBuf);
out.add(new DatagramPacket(byteBuf, message.getSender()));
}
}
} | @Test
public void testEncode() throws Exception {
LispMessageEncoder encoder = new LispMessageEncoder();
MockLispMessage request = new MockLispMessage(LispType.LISP_MAP_REQUEST);
MockLispMessage reply = new MockLispMessage(LispType.LISP_MAP_REPLY);
MockLispMessage register = new MockLispMessage(LispType.LISP_MAP_REGISTER);
MockLispMessage notify = new MockLispMessage(LispType.LISP_MAP_NOTIFY);
ByteBuf buff = Unpooled.buffer();
List<DatagramPacket> list = Lists.newArrayList();
List<MockLispMessage> messages = ImmutableList.of(request, reply, register, notify);
encoder.encode(null, messages, list);
list.forEach(p -> {
byte[] tmp = new byte[p.content().writerIndex()];
p.content().readBytes(tmp);
buff.writeBytes(tmp);
});
assertThat(buff, notNullValue());
StringBuilder expBuilder = new StringBuilder();
expBuilder.append("LISP message [LISP_MAP_REQUEST] ");
expBuilder.append("LISP message [LISP_MAP_REPLY] ");
expBuilder.append("LISP message [LISP_MAP_REGISTER] ");
expBuilder.append("LISP message [LISP_MAP_NOTIFY] ");
String expected = expBuilder.toString();
String returned = new String(buff.array(),
StandardCharsets.UTF_8).substring(0, expected.length());
assertThat(returned, is(expected));
} |
@Override
public String build() {
if(null == this.paramValues){
this.paramValues = new ArrayList<>();
} else {
this.paramValues.clear();
}
return build(this.paramValues);
} | @Test
public void buildTest(){
Condition c1 = new Condition("user", null);
Condition c2 = new Condition("name", "!= null");
c2.setLinkOperator(LogicalOperator.OR);
Condition c3 = new Condition("group", "like %aaa");
final ConditionBuilder builder = ConditionBuilder.of(c1, c2, c3);
final String sql = builder.build();
assertEquals("user IS NULL OR name IS NOT NULL AND group LIKE ?", sql);
assertEquals(1, builder.getParamValues().size());
assertEquals("%aaa", builder.getParamValues().get(0));
} |
public static BlockingQueue<Runnable> buildQueue(int size) {
return buildQueue(size, false);
} | @Test
public void buildQueue1() throws Exception {
BlockingQueue<Runnable> queue = ThreadPoolUtils.buildQueue(0, true);
Assert.assertEquals(queue.getClass(), SynchronousQueue.class);
queue = ThreadPoolUtils.buildQueue(-1, true);
Assert.assertEquals(queue.getClass(), PriorityBlockingQueue.class);
queue = ThreadPoolUtils.buildQueue(100, true);
Assert.assertEquals(queue.getClass(), PriorityBlockingQueue.class);
queue = ThreadPoolUtils.buildQueue(-1, false);
Assert.assertEquals(queue.getClass(), LinkedBlockingQueue.class);
queue = ThreadPoolUtils.buildQueue(100, false);
Assert.assertEquals(queue.getClass(), LinkedBlockingQueue.class);
} |
@Override
public synchronized <T extends EventListener<?>> boolean removeListener(final EventType eventType, final T eventListener) {
if (eventType == null || eventListener == null) {
return false;
}
final EventListener<?>[] listeners = this.listenerMap.get(eventType);
if (listeners == null) {
return false;
}
final int length = listeners.length;
if (length == 1) {
if (listeners[0].equals(eventListener)) {
this.listenerMap.remove(eventType);
return true;
}
return false;
}
boolean found = false;
int index = -1;
for (int i = 0; i < length; i++) {
if (listeners[i].equals(eventListener)) {
found = true;
index = i;
break;
}
}
if (!found) {
return false;
}
final EventListener<?>[] array = new EventListener<?>[length - 1];
System.arraycopy(listeners, 0, array, 0, index);
System.arraycopy(listeners, index + 1, array, index, length - index - 1);
this.listenerMap.put(eventType, array);
return true;
} | @Test
void testRemoveListenerWithNullParameters() {
assertFalse(this.instance.removeListener(null, null));
assertFalse(this.instance.removeListener(EventType.HANDSHAKE, null));
assertFalse(this.instance.removeListener(null, ignored -> {
}));
} |
public ShardingSphereDatabase getDatabase(final String name) {
ShardingSpherePreconditions.checkNotEmpty(name, NoDatabaseSelectedException::new);
ShardingSphereMetaData metaData = getMetaDataContexts().getMetaData();
ShardingSpherePreconditions.checkState(metaData.containsDatabase(name), () -> new UnknownDatabaseException(name));
return metaData.getDatabase(name);
} | @Test
void assertDropDatabase() {
when(metaDataContexts.getMetaData().getDatabase("foo_db").getName()).thenReturn("foo_db");
when(metaDataContexts.getMetaData().containsDatabase("foo_db")).thenReturn(true);
contextManager.getMetaDataContextManager().getSchemaMetaDataManager().dropDatabase("foo_db");
verify(metaDataContexts.getMetaData()).dropDatabase("foo_db");
} |
public CreateTableCommand createTableCommand(
final KsqlStructuredDataOutputNode outputNode,
final Optional<RefinementInfo> emitStrategy
) {
Optional<WindowInfo> windowInfo =
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();
if (windowInfo.isPresent() && emitStrategy.isPresent()) {
final WindowInfo info = windowInfo.get();
windowInfo = Optional.of(WindowInfo.of(
info.getType(),
info.getSize(),
Optional.of(emitStrategy.get().getOutputRefinement())
));
}
return new CreateTableCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
windowInfo,
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
public void shouldThrowInCreateTableOrReplaceSource() {
// Given:
final CreateTable ddlStatement = new CreateTable(TABLE_NAME,
TableElements.of(
tableElement("COL1", new Type(BIGINT), PRIMARY_KEY_CONSTRAINT),
tableElement("COL2", new Type(SqlTypes.STRING))),
true, false, withProperties, true);
// When:
final Exception e = assertThrows(
KsqlException.class, () -> createSourceFactory
.createTableCommand(ddlStatement, ksqlConfig));
// Then:
assertThat(e.getMessage(),
containsString(
"Cannot add table 'table_bob': CREATE OR REPLACE is not supported on "
+ "source tables."));
} |
public Builder toBuilder() {
Builder result = new Builder();
result.flags = flags;
result.traceIdHigh = traceIdHigh;
result.traceId = traceId;
return result;
} | @Test void canSetSampledNull() {
base = base.toBuilder().sampled(true).build();
TraceIdContext objects = base.toBuilder().sampled(null).build();
assertThat(objects.debug())
.isFalse();
assertThat(objects.sampled())
.isNull();
} |
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
} | @Test
public void testUpgradeWithIVVersions(VertxTestContext context) {
String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION;
String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION + "-IV0";
String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION + "-IV0";
String kafkaVersion = VERSIONS.defaultVersion().version();
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion),
mockNewCluster(
null,
mockSps(oldKafkaVersion),
mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion)));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.interBrokerProtocolVersion(), nullValue());
assertThat(c.logMessageFormatVersion(), nullValue());
async.flag();
})));
} |
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Timer timer) {
return beginningOrEndOffset(partitions, ListOffsetsRequest.EARLIEST_TIMESTAMP, timer);
} | @Test
public void testBeginningOffsets() {
buildFetcher();
assignFromUser(singleton(tp0));
client.prepareResponse(listOffsetResponse(tp0, Errors.NONE, ListOffsetsRequest.EARLIEST_TIMESTAMP, 2L));
assertEquals(singletonMap(tp0, 2L), offsetFetcher.beginningOffsets(singleton(tp0), time.timer(5000L)));
} |
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
} | @Test
public void testIntegerNotEq() {
boolean shouldRead =
new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MIN_VALUE - 25)).eval(FILE);
assertThat(shouldRead).as("Should match: no values == 5").isTrue();
shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MIN_VALUE - 1)).eval(FILE);
    assertThat(shouldRead).as("Should match: no values == 29").isTrue();
shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MIN_VALUE)).eval(FILE);
assertThat(shouldRead).as("Should not match: some value may be == 30").isFalse();
shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MAX_VALUE - 4)).eval(FILE);
assertThat(shouldRead).as("Should not match: some value may be == 75").isFalse();
shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MAX_VALUE)).eval(FILE);
assertThat(shouldRead).as("Should not match: some value may be == 79").isFalse();
shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MAX_VALUE + 1)).eval(FILE);
assertThat(shouldRead).as("Should match: no values == 80").isTrue();
shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", INT_MAX_VALUE + 6)).eval(FILE);
assertThat(shouldRead).as("Should read: no values == 85").isTrue();
} |
@DELETE
@Path("/{connector}/offsets")
@Operation(summary = "Reset the offsets for the specified connector")
public Response resetConnectorOffsets(final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
final @Context HttpHeaders headers, final @PathParam("connector") String connector) throws Throwable {
FutureCallback<Message> cb = new FutureCallback<>();
herder.resetConnectorOffsets(connector, cb);
Message msg = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/offsets", "DELETE", headers, null,
new TypeReference<Message>() { }, new IdentityTranslator<>(), forward);
return Response.ok().entity(msg).build();
} | @Test
public void testResetOffsets() throws Throwable {
final ArgumentCaptor<Callback<Message>> cb = ArgumentCaptor.forClass(Callback.class);
Message msg = new Message("The offsets for this connector have been reset successfully");
doAnswer(invocation -> {
cb.getValue().onCompletion(null, msg);
return null;
}).when(herder).resetConnectorOffsets(eq(CONNECTOR_NAME), cb.capture());
Response response = connectorsResource.resetConnectorOffsets(null, NULL_HEADERS, CONNECTOR_NAME);
assertEquals(200, response.getStatus());
assertEquals(msg, response.getEntity());
} |
public static <NodeT, EdgeT> List<List<NodeT>> allPathsFromRootsToLeaves(
Network<NodeT, EdgeT> network) {
ArrayDeque<List<NodeT>> paths = new ArrayDeque<>();
// Populate the list with all roots
for (NodeT node : network.nodes()) {
if (network.inDegree(node) == 0) {
paths.add(ImmutableList.of(node));
}
}
List<List<NodeT>> distinctPathsFromRootsToLeaves = new ArrayList<>();
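    // Depth-first expansion: pop a partial path, and either record it (when its last node is a leaf) or extend it along every outgoing edge.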
while (!paths.isEmpty()) {
List<NodeT> path = paths.removeFirst();
NodeT lastNode = path.get(path.size() - 1);
if (network.outDegree(lastNode) == 0) {
distinctPathsFromRootsToLeaves.add(new ArrayList<>(path));
} else {
for (EdgeT edge : network.outEdges(lastNode)) {
paths.addFirst(
ImmutableList.<NodeT>builder()
.addAll(path)
.add(network.incidentNodes(edge).target())
.build());
}
}
}
return distinctPathsFromRootsToLeaves;
} | @Test
public void testAllPathsFromRootsToLeaves() {
// Expected paths:
// D
// A, B, C, F
// A, B, E, G
// A, B, E, G (again)
// A, B, E, H
// I, J, E, G
// I, J, E, G (again)
// I, J, E, H
// I, E, G
// I, E, G (again)
// I, E, H
// I, K, L
// M, N, L
// M, N, L (again)
// O
List<List<String>> expectedPaths =
ImmutableList.of(
ImmutableList.of("D"),
ImmutableList.of("A", "B", "C", "F"),
ImmutableList.of("A", "B", "E", "G"),
ImmutableList.of("A", "B", "E", "G"),
ImmutableList.of("A", "B", "E", "H"),
ImmutableList.of("I", "J", "E", "G"),
ImmutableList.of("I", "J", "E", "G"),
ImmutableList.of("I", "J", "E", "H"),
ImmutableList.of("I", "E", "G"),
ImmutableList.of("I", "E", "G"),
ImmutableList.of("I", "E", "H"),
ImmutableList.of("I", "K", "L"),
ImmutableList.of("M", "N", "L"),
ImmutableList.of("M", "N", "L"),
ImmutableList.of("O"));
MutableNetwork<String, String> network = createNetwork();
List<List<String>> actualPaths = Networks.allPathsFromRootsToLeaves(network);
assertThat(actualPaths, containsInAnyOrder(expectedPaths.toArray()));
assertEquals(actualPaths.size(), expectedPaths.size());
} |
@Override
public boolean add(String e) {
return get(addAsync(e));
} | @Test
public void testAddListener() {
testWithParams(redisson -> {
RLexSortedSet al = redisson.getLexSortedSet("test");
CountDownLatch latch = new CountDownLatch(1);
al.addListener(new ScoredSortedSetAddListener() {
@Override
public void onAdd(String name) {
latch.countDown();
}
});
al.add("abc");
try {
assertThat(latch.await(1, TimeUnit.SECONDS)).isTrue();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}, NOTIFY_KEYSPACE_EVENTS, "Ez");
} |
@Override
public BeamSqlTable buildBeamSqlTable(Table tableDefinition) {
ObjectNode tableProperties = tableDefinition.getProperties();
try {
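      // Deserialize the TBLPROPERTIES JSON into a configuration Row matching the provider's configuration schema.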
RowJson.RowJsonDeserializer deserializer =
RowJson.RowJsonDeserializer.forSchema(getSchemaIOProvider().configurationSchema())
.withNullBehavior(RowJson.RowJsonDeserializer.NullBehavior.ACCEPT_MISSING_OR_NULL);
Row configurationRow =
newObjectMapperWith(deserializer).readValue(tableProperties.toString(), Row.class);
SchemaIO schemaIO =
getSchemaIOProvider()
.from(tableDefinition.getLocation(), configurationRow, tableDefinition.getSchema());
return new SchemaIOTableWrapper(schemaIO);
} catch (InvalidConfigurationException | InvalidSchemaException e) {
throw new InvalidTableException(e.getMessage());
} catch (JsonProcessingException e) {
throw new AssertionError("Failed to re-parse TBLPROPERTIES JSON " + tableProperties);
}
} | @Test
public void testBuildIOReader_withProjectionPushdown() {
TestSchemaIOTableProviderWrapper provider = new TestSchemaIOTableProviderWrapper();
BeamSqlTable beamSqlTable = provider.buildBeamSqlTable(testTable);
PCollection<Row> result =
beamSqlTable.buildIOReader(
pipeline.begin(),
new DefaultTableFilter(ImmutableList.of()),
ImmutableList.of("f_long"));
Schema outputSchema = Schema.builder().addInt64Field("f_long").build();
PAssert.that(result)
.containsInAnyOrder(
Row.withSchema(outputSchema).addValues(0L).build(),
Row.withSchema(outputSchema).addValues(1L).build());
pipeline.run();
} |
public String marshal() {
StringBuilder result = new StringBuilder();
for (JobDataNodeEntry each : entries) {
result.append(each.marshal()).append('|');
}
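        // Trim the trailing '|' separator appended after the last entry.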
if (!entries.isEmpty()) {
result.setLength(result.length() - 1);
}
return result.toString();
} | @Test
void assertMarshal() {
String actual = new JobDataNodeLine(Arrays.asList(
new JobDataNodeEntry("t_order", Arrays.asList(new DataNode("ds_0.t_order_0"), new DataNode("ds_0.t_order_1"))),
new JobDataNodeEntry("t_order_item", Arrays.asList(new DataNode("ds_0.t_order_item_0"), new DataNode("ds_0.t_order_item_1"))))).marshal();
String expected = "t_order:ds_0.t_order_0,ds_0.t_order_1|t_order_item:ds_0.t_order_item_0,ds_0.t_order_item_1";
assertThat(actual, is(expected));
} |
@VisibleForTesting
static @Nullable TimeUnit unitSuggestedByName(String name) {
// Tuple types, especially Pair, trip us up. Skip APIs that might be from them.
// This check is somewhat redundant with the "second" check below.
// TODO(cpovirk): Skip APIs only if they're from a type that also declares a first/getFirst()?
if (name.equals("second") || name.equals("getSecond")) {
return null;
}
// http://grepcode.com/file/repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.33/com/mysql/jdbc/TimeUtil.java#336
if (name.equals("secondsPart")) {
return NANOSECONDS;
}
// The name of a Google-internal method, but I see other methods with this name externally.
if (name.equals("msToS")) {
return SECONDS;
}
ImmutableList<String> words = fixUnitCamelCase(splitToLowercaseTerms(name));
// People use variable names like "firstTimestamp" and "secondTimestamp."
// This check is somewhat redundant with the "second" check above.
if (words.get(0).equals("second")) {
return null;
}
/*
* Sometimes people write a method like "fromNanos()." Whatever unit that might return, it's
* very unlikely to be nanos, so we give up.
*/
if (hasNameOfFromUnits(words)) {
return null;
}
/*
* Sometimes people write "final int TWO_SECONDS = 2 * 1000." Whatever unit that might be in,
* it's very unlikely to be seconds, so we give up.
*
* TODO(cpovirk): We could probably guess the unit correctly most of the time if we wanted.
*/
if (isNamedForNumberOfUnits(words)) {
return null;
}
Set<TimeUnit> units = timeUnits(words);
/*
* TODO(cpovirk): If the name has multiple units, like "millisToNanos," attempt to determine
* which is the output. We can look not only at the method name but also at its parameter: If
* the parameter is named "millis," then the output is presumably nanos.
*/
return units.size() == 1 ? getOnlyElement(units) : null;
} | @Test
public void testUnitSuggestedByName() {
assertSeconds("sleepSec", "deadlineSeconds", "secondsTimeout", "msToS");
assertUnknown(
"second",
"getSecond",
"SECOND",
"secondDeadline",
"twoSeconds",
"THIRTY_SECONDS",
"fromSeconds",
"x",
"millisMicros");
assertMillis(
"millis",
"MILLIS",
"someMillis",
"millisecs",
"timeoutMilli",
"valueInMills",
"mSec",
"deadlineMSec",
"milliSecond",
"milliSeconds",
"FOO_MILLI_SECONDS",
"dateMS",
"dateMs",
"dateMsec",
"msRemaining");
assertMicros("micro", "us");
assertNanos("nano", "secondsPart");
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testBatchesDrainedWithOldProducerIdShouldSucceedOnSubsequentRetry() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
int maxRetries = 10;
Metrics m = new Metrics();
SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries,
senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions);
Future<RecordMetadata> outOfOrderResponse = appendToAccumulator(tp0);
Future<RecordMetadata> successfulResponse = appendToAccumulator(tp1);
sender.runOnce(); // connect.
sender.runOnce(); // send.
assertEquals(1, client.inFlightRequestCount());
Map<TopicPartition, OffsetAndError> responses = new LinkedHashMap<>();
responses.put(tp1, new OffsetAndError(-1, Errors.NOT_LEADER_OR_FOLLOWER));
responses.put(tp0, new OffsetAndError(-1, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER));
client.respond(produceResponse(responses));
sender.runOnce();
assertFalse(outOfOrderResponse.isDone());
sender.runOnce(); // bump epoch send request to tp1 with the old producerId
assertEquals(1, transactionManager.producerIdAndEpoch().epoch);
assertFalse(successfulResponse.isDone());
// The response comes back with a retriable error.
client.respond(produceResponse(tp1, 0, Errors.NOT_LEADER_OR_FOLLOWER, -1));
sender.runOnce();
        // The retriable error means the request will be retried, so the future is not yet complete.
assertFalse(successfulResponse.isDone());
sender.runOnce(); // retry one more time
client.respond(produceResponse(tp1, 0, Errors.NONE, -1));
sender.runOnce();
assertTrue(successfulResponse.isDone());
// epoch of partition is bumped and sequence is reset when the next batch is sent
assertEquals(1, transactionManager.sequenceNumber(tp1));
} |
public String validate(final String xml) {
final Source source = new SAXSource(reader, new InputSource(IOUtils.toInputStream(xml, Charset.defaultCharset())));
return validate(source);
} | @Test
public void testInvalidXMLWithClientResolver() throws Exception {
String payload = IOUtils.toString(ClassLoader.getSystemResourceAsStream("xml/article-3.xml"),
Charset.defaultCharset());
logger.info("Validating payload: {}", payload);
// validate
String result = getProcessor("sch/schematron-3.sch", new ClientUriResolver()).validate(payload);
logger.info("Schematron Report: {}", result);
assertEquals("A title should be at least two characters", Utils.evaluate("//svrl:failed-assert/svrl:text", result));
assertEquals(0, Integer.valueOf(Utils.evaluate("count(//svrl:successful-report)", result)).intValue());
} |
@Override
public Set<String> getExtensionClassNames(String pluginId) {
if (currentPluginId.equals(pluginId)) {
return original.getExtensionClassNames(pluginId);
} else {
throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute getExtensionClassNames for foreign pluginId!");
}
} | @Test
public void getExtensionClassNames() {
pluginManager.loadPlugins();
assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.getExtensionClassNames(OTHER_PLUGIN_ID));
assertEquals(1, wrappedPluginManager.getExtensionClassNames(THIS_PLUGIN_ID).size());
} |
public void scanResponseTable() {
final List<ResponseFuture> rfList = new LinkedList<>();
Iterator<Entry<Integer, ResponseFuture>> it = this.responseTable.entrySet().iterator();
while (it.hasNext()) {
Entry<Integer, ResponseFuture> next = it.next();
ResponseFuture rep = next.getValue();
if ((rep.getBeginTimestamp() + rep.getTimeoutMillis() + 1000) <= System.currentTimeMillis()) {
rep.release();
it.remove();
rfList.add(rep);
log.warn("remove timeout request, " + rep);
}
}
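        // Execute the callbacks of the expired futures after they have been removed from the response table.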
for (ResponseFuture rf : rfList) {
try {
executeInvokeCallback(rf);
} catch (Throwable e) {
log.warn("scanResponseTable, operationComplete Exception", e);
}
}
} | @Test
public void testScanResponseTable() {
int dummyId = 1;
// mock timeout
ResponseFuture responseFuture = new ResponseFuture(null, dummyId, -1000, new InvokeCallback() {
@Override
public void operationComplete(ResponseFuture responseFuture) {
}
@Override
public void operationSucceed(RemotingCommand response) {
}
@Override
public void operationFail(Throwable throwable) {
}
}, null);
remotingAbstract.responseTable.putIfAbsent(dummyId, responseFuture);
remotingAbstract.scanResponseTable();
assertNull(remotingAbstract.responseTable.get(dummyId));
} |
public static List<MainModel.MainOptionModel> parseConfigurationSource(String fileName) throws IOException {
return parseConfigurationSource(new File(fileName));
} | @Test
public void testMyParser() throws Exception {
String fileName = "src/test/java/org/apache/camel/maven/packaging/MyConfiguration.java";
List<MainModel.MainOptionModel> list = PrepareCamelMainMojo.parseConfigurationSource(fileName);
assertNotNull(list);
assertEquals(39, list.size());
assertEquals("name", list.get(0).getName());
assertEquals("java.lang.String", list.get(0).getJavaType());
assertNull(list.get(0).getDefaultValue());
assertEquals("Sets the name of the CamelContext.", list.get(0).getDescription());
assertEquals("shutdownTimeout", list.get(4).getName());
assertEquals("int", list.get(4).getJavaType());
assertEquals(300, list.get(4).getDefaultValue());
assertEquals("Timeout in seconds to graceful shutdown Camel.", list.get(4).getDescription());
assertEquals("tracing", list.get(25).getName());
assertEquals("boolean", list.get(25).getJavaType());
assertTrue(list.get(25).isDeprecated());
} |
public static String htmlToPlain(final String text) {
if (text == null) {
return null;
}
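        // Reuse the per-thread cached result when the same HTML input is converted again.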
CachedStringTransformationResult threadLocalCachedHtmlToPlain = cachedHtmlToPlain.get();
if(threadLocalCachedHtmlToPlain.input.equals(text))
return threadLocalCachedHtmlToPlain.output;
String output = HtmlUtils.htmlToPlain(text, /* strictHTMLOnly= */true, /* removeNewLines= */true);
cachedHtmlToPlain.set(new CachedStringTransformationResult(text, output));
return output;
} | @Test
public void testHtmlToPlain_shouldRetainTrailingNonBreakingSpaces() {
String input = "<html>\n" +
" <head>\n" +
" \n" +
" </head>\n" +
" <body>\n" +
" <p>\n" +
" Zero\n" +
" </p>\n" +
" <p>\n" +
" One \n" +
" </p>\n" +
" <p>\n" +
" Two \n" +
" </p>\n" +
" <p>\n" +
" Three \n" +
" </p>\n" +
" <p>\n" +
" EOF\n" +
" </p>\n" +
" </body>\n" +
"</html>\n";
String expected = "Zero\n" +
"One \n" +
"Two \n" +
"Three \n" +
"EOF";
String actual = HtmlUtils.htmlToPlain(input, true, true);
assertEquals(expected, actual);
} |
public VerificationStateEntry maybeCreateVerificationStateEntry(long producerId, int sequence, short epoch) {
VerificationStateEntry entry = verificationStates.computeIfAbsent(producerId, pid ->
new VerificationStateEntry(time.milliseconds(), sequence, epoch)
);
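        // Keep the lowest sequence seen for the newest epoch; updates carrying an older epoch are ignored.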
entry.maybeUpdateLowestSequenceAndEpoch(sequence, epoch);
return entry;
} | @Test
public void testSequenceAndEpochInVerificationEntry() {
VerificationStateEntry originalEntry = stateManager.maybeCreateVerificationStateEntry(producerId, 1, epoch);
VerificationGuard originalEntryVerificationGuard = originalEntry.verificationGuard();
verifyEntry(originalEntryVerificationGuard, originalEntry, 1, epoch);
// If we see a lower sequence, update to the lower one.
VerificationStateEntry updatedEntry = stateManager.maybeCreateVerificationStateEntry(producerId, 0, epoch);
verifyEntry(originalEntryVerificationGuard, updatedEntry, 0, epoch);
// If we see a new epoch that is higher, update the sequence.
VerificationStateEntry updatedEntryNewEpoch = stateManager.maybeCreateVerificationStateEntry(producerId, 2, (short) 1);
verifyEntry(originalEntryVerificationGuard, updatedEntryNewEpoch, 2, (short) 1);
// Ignore a lower epoch.
VerificationStateEntry updatedEntryOldEpoch = stateManager.maybeCreateVerificationStateEntry(producerId, 0, epoch);
verifyEntry(originalEntryVerificationGuard, updatedEntryOldEpoch, 2, (short) 1);
} |
@Override
public void filter(final T inMesg) {
try (TaskCloseable ignored = PerfMark.traceTask(this, s -> s.getClass().getSimpleName() + ".filter")) {
addPerfMarkTags(inMesg);
runFilters(inMesg, initRunningFilterIndex(inMesg));
}
} | @Test
void testInboundFilterChain() {
final SimpleInboundFilter inbound1 = spy(new SimpleInboundFilter(true));
final SimpleInboundFilter inbound2 = spy(new SimpleInboundFilter(false));
final ZuulFilter[] filters = new ZuulFilter[] {inbound1, inbound2};
final FilterUsageNotifier notifier = mock(FilterUsageNotifier.class);
final Registry registry = mock(Registry.class);
final ZuulFilterChainRunner runner = new ZuulFilterChainRunner(filters, notifier, registry);
runner.filter(request);
verify(inbound1, times(1)).applyAsync(eq(request));
verify(inbound2, never()).applyAsync(eq(request));
verify(notifier).notify(eq(inbound1), eq(ExecutionStatus.SUCCESS));
verify(notifier).notify(eq(inbound2), eq(ExecutionStatus.SKIPPED));
verifyNoMoreInteractions(notifier);
} |
@Override
public <R> HoodieData<HoodieRecord<R>> tagLocation(
HoodieData<HoodieRecord<R>> records, HoodieEngineContext context,
HoodieTable hoodieTable) {
return HoodieJavaRDD.of(HoodieJavaRDD.getJavaRDD(records)
.mapPartitionsWithIndex(locationTagFunction(hoodieTable.getMetaClient()), true));
} | @Test
public void testTagLocationAndPartitionPathUpdateDisabled() throws Exception {
final String newCommitTime = "001";
final String oldPartitionPath = "1970/01/01";
final int numRecords = 10;
List<HoodieRecord> newRecords = dataGen.generateInserts(newCommitTime, numRecords);
        List<HoodieRecord> oldRecords = new LinkedList<>();
        for (HoodieRecord newRecord : newRecords) {
HoodieKey key = new HoodieKey(newRecord.getRecordKey(), oldPartitionPath);
HoodieRecord hoodieRecord = new HoodieAvroRecord(key, (HoodieRecordPayload) newRecord.getData());
oldRecords.add(hoodieRecord);
}
JavaRDD<HoodieRecord> newWriteRecords = jsc().parallelize(newRecords, 1);
JavaRDD<HoodieRecord> oldWriteRecords = jsc().parallelize(oldRecords, 1);
HoodieWriteConfig config = getConfigBuilder(100, false, false).build();
SparkRDDWriteClient writeClient = getHoodieWriteClient(config);
writeClient.startCommitWithTime(newCommitTime);
JavaRDD<WriteStatus> writeStatues = writeClient.upsert(oldWriteRecords, newCommitTime);
writeClient.commit(newCommitTime, writeStatues);
assertNoWriteErrors(writeStatues.collect());
metaClient = HoodieTableMetaClient.reload(metaClient);
HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config);
List<HoodieRecord> notAllowPathChangeRecords = tagLocation(index, newWriteRecords, hoodieTable).collect();
assertEquals(numRecords, notAllowPathChangeRecords.stream().count());
String newCommitTime1 = "002";
writeClient.startCommitWithTime(newCommitTime1);
JavaRDD<WriteStatus> writeStatues1 = writeClient.upsert(newWriteRecords, newCommitTime1);
writeClient.commit(newCommitTime1, writeStatues1);
assertNoWriteErrors(writeStatues1.collect());
assertEquals(numRecords, writeStatues1.map(writeStatus -> writeStatus.getTotalRecords()).reduce(Long::sum));
assertEquals(0, writeStatues1.filter(writeStatus -> !writeStatus.getPartitionPath().equals(oldPartitionPath)).count());
} |
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
} | @Test
public void resolutionTooHigh() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/resourcepack_resolution.txt")),
CrashReportAnalyzer.Rule.RESOLUTION_TOO_HIGH);
} |
public static void addEstimateNumKeysMetric(final StreamsMetricsImpl streamsMetrics,
final RocksDBMetricContext metricContext,
final Gauge<BigInteger> valueProvider) {
addMutableMetric(
streamsMetrics,
metricContext,
valueProvider,
ESTIMATED_NUMBER_OF_KEYS,
ESTIMATED_NUMBER_OF_KEYS_DESCRIPTION
);
} | @Test
public void shouldAddEstimateNumKeysMetric() {
final String name = "estimate-num-keys";
final String description =
"Estimated number of keys in the active and unflushed immutable memtables and storage";
runAndVerifyMutableMetric(
name,
description,
() -> RocksDBMetrics.addEstimateNumKeysMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
);
} |
static void enableStatisticManagementOnNodes(HazelcastClientInstanceImpl client, String cacheName,
boolean statOrMan, boolean enabled) {
Collection<Member> members = client.getClientClusterService().getMemberList();
Collection<Future> futures = new ArrayList<>();
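        // Send the statistics/management toggle to every cluster member.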
for (Member member : members) {
try {
UUID uuid = member.getUuid();
ClientMessage request = CacheManagementConfigCodec.encodeRequest(cacheName, statOrMan, enabled, uuid);
ClientInvocation clientInvocation = new ClientInvocation(client, request, cacheName, uuid);
Future<ClientMessage> future = clientInvocation.invoke();
futures.add(future);
} catch (Exception e) {
sneakyThrow(e);
}
}
// make sure all configs are created
FutureUtil.waitWithDeadline(futures, CacheProxyUtil.AWAIT_COMPLETION_TIMEOUT_SECONDS, TimeUnit.SECONDS);
} | @Test
public void testEnableStatisticManagementOnNodes() {
enableStatisticManagementOnNodes(client, CACHE_NAME, false, false);
} |
public static boolean getBoolean(final Map<String, Object> configs, final String key, final boolean defaultValue) {
final Object value = configs.getOrDefault(key, defaultValue);
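        // Accept either a Boolean or a String value; anything else falls back to the default.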
if (value instanceof Boolean) {
return (boolean) value;
} else if (value instanceof String) {
return Boolean.parseBoolean((String) value);
} else {
log.error("Invalid value (" + value + ") on configuration '" + key + "'. The default value '" + defaultValue + "' will be used instead. Please specify a true/false value.");
return defaultValue;
}
} | @Test
public void testGetBoolean() {
String key = "test.key";
boolean defaultValue = true;
Map<String, Object> config = new HashMap<>();
config.put("some.other.key", false);
assertEquals(defaultValue, ConfigUtils.getBoolean(config, key, defaultValue));
config = new HashMap<>();
config.put(key, false);
assertFalse(ConfigUtils.getBoolean(config, key, defaultValue));
config = new HashMap<>();
config.put(key, "false");
assertFalse(ConfigUtils.getBoolean(config, key, defaultValue));
config = new HashMap<>();
config.put(key, "not-a-boolean");
assertFalse(ConfigUtils.getBoolean(config, key, defaultValue));
config = new HashMap<>();
config.put(key, 5);
assertEquals(defaultValue, ConfigUtils.getBoolean(config, key, defaultValue));
} |
@Override
@WithSpan
public QueryResult doRun(SearchJob job, Query query, OSGeneratedQueryContext queryContext) {
if (query.searchTypes().isEmpty()) {
return QueryResult.builder()
.query(query)
.searchTypes(Collections.emptyMap())
.errors(new HashSet<>(queryContext.errors()))
.build();
}
LOG.debug("Running query {} for job {}", query.id(), job.getId());
final HashMap<String, SearchType.Result> resultsMap = Maps.newHashMap();
final Set<String> affectedIndices = indexLookup.indexNamesForStreamsInTimeRange(query.usedStreamIds(), query.timerange());
final Map<String, SearchSourceBuilder> searchTypeQueries = queryContext.searchTypeQueries();
final List<String> searchTypeIds = new ArrayList<>(searchTypeQueries.keySet());
final List<SearchRequest> searches = searchTypeIds
.stream()
.map(searchTypeId -> {
final Set<String> affectedIndicesForSearchType = query.searchTypes().stream()
.filter(s -> s.id().equalsIgnoreCase(searchTypeId)).findFirst()
.flatMap(searchType -> {
if (searchType.effectiveStreams().isEmpty()
&& query.globalOverride().flatMap(GlobalOverride::timerange).isEmpty()
&& searchType.timerange().isEmpty()) {
return Optional.empty();
}
return Optional.of(indexLookup.indexNamesForStreamsInTimeRange(query.effectiveStreams(searchType), query.effectiveTimeRange(searchType)));
})
.orElse(affectedIndices);
Set<String> indices = affectedIndicesForSearchType.isEmpty() ? Collections.singleton("") : affectedIndicesForSearchType;
final SearchRequest searchRequest = new SearchRequest()
.source(searchTypeQueries.get(searchTypeId))
.indices(indices.toArray(new String[0]))
.indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN);
if (!SearchJob.NO_CANCELLATION.equals(job.getCancelAfterSeconds())) {
searchRequest.setCancelAfterTimeInterval(new TimeValue(job.getCancelAfterSeconds(), TimeUnit.SECONDS));
}
return searchRequest;
})
.toList();
final PlainActionFuture<MultiSearchResponse> mSearchFuture = client.cancellableMsearch(searches);
job.setSearchEngineTaskFuture(mSearchFuture);
final List<MultiSearchResponse.Item> results = getResults(mSearchFuture, searches.size());
for (SearchType searchType : query.searchTypes()) {
final String searchTypeId = searchType.id();
final Provider<OSSearchTypeHandler<? extends SearchType>> handlerProvider = openSearchSearchTypeHandlers.get(searchType.type());
if (handlerProvider == null) {
LOG.error("Unknown search type '{}', cannot convert query result.", searchType.type());
// no need to add another error here, as the query generation code will have added the error about the missing handler already
continue;
}
if (isSearchTypeWithError(queryContext, searchTypeId)) {
LOG.error("Failed search type '{}', cannot convert query result, skipping.", searchType.type());
// no need to add another error here, as the query generation code will have added the error about the missing handler already
continue;
}
// we create a new instance because some search type handlers might need to track information between generating the query and
// processing its result, such as aggregations, which depend on the name and type
final OSSearchTypeHandler<? extends SearchType> handler = handlerProvider.get();
final int searchTypeIndex = searchTypeIds.indexOf(searchTypeId);
final MultiSearchResponse.Item multiSearchResponse = results.get(searchTypeIndex);
if (multiSearchResponse.isFailure()) {
ElasticsearchException e = new ElasticsearchException("Search type returned error: ", multiSearchResponse.getFailure());
queryContext.addError(SearchTypeErrorParser.parse(query, searchTypeId, e));
} else if (checkForFailedShards(multiSearchResponse).isPresent()) {
ElasticsearchException e = checkForFailedShards(multiSearchResponse).get();
queryContext.addError(SearchTypeErrorParser.parse(query, searchTypeId, e));
} else {
try {
final SearchType.Result searchTypeResult = handler.extractResult(job, query, searchType, multiSearchResponse.getResponse(), queryContext);
if (searchTypeResult != null) {
resultsMap.put(searchTypeId, searchTypeResult);
}
} catch (Exception e) {
LOG.warn("Unable to extract results: ", e);
queryContext.addError(new SearchTypeError(query, searchTypeId, e));
}
}
}
LOG.debug("Query {} ran for job {}", query.id(), job.getId());
return QueryResult.builder()
.query(query)
.searchTypes(resultsMap)
.errors(new HashSet<>(queryContext.errors()))
.build();
} | @Test
public void executesSearchForEmptySearchTypes() {
final Query query = Query.builder()
.id("query1")
.query(ElasticsearchQueryString.of(""))
.timerange(RelativeRange.create(300))
.build();
final Search search = Search.builder().queries(ImmutableSet.of(query)).build();
final SearchJob job = new SearchJob("deadbeef", search, "admin", "test-node-id");
final OSGeneratedQueryContext queryContext = mock(OSGeneratedQueryContext.class);
final QueryResult queryResult = backend.doRun(job, query, queryContext);
assertThat(queryResult).isNotNull();
assertThat(queryResult.searchTypes()).isEmpty();
assertThat(queryResult.executionStats()).isNotNull();
assertThat(queryResult.errors()).isEmpty();
} |
@Override
public void stop() {
if (!isStarted()) return;
try {
runner.stop();
super.stop();
}
catch (IOException ex) {
addError("server shutdown error: " + ex, ex);
}
} | @Test
public void testStopWhenNotStarted() throws Exception {
appender.stop();
assertEquals(0, runner.getStartCount());
} |
public void handleSnapshot(MetadataImage image, KRaftMigrationOperationConsumer operationConsumer) {
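        // Write each section of the KRaft metadata image (topics, configs, quotas, producer IDs, ACLs, delegation tokens) out to ZooKeeper.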
handleTopicsSnapshot(image.topics(), operationConsumer);
handleConfigsSnapshot(image.configs(), operationConsumer);
handleClientQuotasSnapshot(image.clientQuotas(), image.scram(), operationConsumer);
handleProducerIdSnapshot(image.producerIds(), operationConsumer);
handleAclsSnapshot(image.acls(), operationConsumer);
handleDelegationTokenSnapshot(image.delegationTokens(), operationConsumer);
} | @Test
public void testReconcileSnapshotEmptyZk() {
        // These test clients don't return any data from their iterator methods, so this simulates an empty ZK
CapturingTopicMigrationClient topicClient = new CapturingTopicMigrationClient();
CapturingConfigMigrationClient configClient = new CapturingConfigMigrationClient();
CapturingAclMigrationClient aclClient = new CapturingAclMigrationClient();
CapturingMigrationClient migrationClient = CapturingMigrationClient.newBuilder()
.setBrokersInZk(0)
.setTopicMigrationClient(topicClient)
.setConfigMigrationClient(configClient)
.setAclMigrationClient(aclClient)
.build();
KRaftMigrationZkWriter writer = new KRaftMigrationZkWriter(migrationClient, __ -> { });
MetadataImage image = new MetadataImage(
MetadataProvenance.EMPTY,
FeaturesImage.EMPTY, // Features are not used in ZK mode, so we don't migrate or dual-write them
ClusterImage.EMPTY, // Broker registrations are not dual-written
TopicsImageTest.IMAGE1,
ConfigurationsImageTest.IMAGE1,
ClientQuotasImage.EMPTY, // TODO KAFKA-15017
ProducerIdsImageTest.IMAGE1,
AclsImageTest.IMAGE1,
ScramImage.EMPTY, // TODO KAFKA-15017
DelegationTokenImage.EMPTY
);
Map<String, Integer> opCounts = new HashMap<>();
KRaftMigrationOperationConsumer consumer = KRaftMigrationDriver.countingOperationConsumer(opCounts,
(logMsg, operation) -> operation.apply(ZkMigrationLeadershipState.EMPTY));
writer.handleSnapshot(image, consumer);
assertEquals(2, opCounts.remove("CreateTopic"));
assertEquals(2, opCounts.remove("UpdateBrokerConfig"));
assertEquals(1, opCounts.remove("UpdateProducerId"));
assertEquals(4, opCounts.remove("UpdateAcl"));
assertEquals(0, opCounts.size());
assertEquals(2, topicClient.createdTopics.size());
assertTrue(topicClient.createdTopics.contains("foo"));
assertTrue(topicClient.createdTopics.contains("bar"));
assertEquals("bar", configClient.writtenConfigs.get(new ConfigResource(ConfigResource.Type.BROKER, "0")).get("foo"));
assertEquals("quux", configClient.writtenConfigs.get(new ConfigResource(ConfigResource.Type.BROKER, "0")).get("baz"));
assertEquals("foobaz", configClient.writtenConfigs.get(new ConfigResource(ConfigResource.Type.BROKER, "1")).get("foobar"));
assertEquals(4, aclClient.updatedResources.size());
} |
public static RestSettingBuilder delete(final RestIdMatcher idMatcher) {
return single(HttpMethod.DELETE, checkNotNull(idMatcher, "ID Matcher should not be null"));
} | @Test
public void should_delete_with_matcher() throws Exception {
server.resource("targets",
delete("1").request(eq(header(HttpHeaders.IF_MATCH), "moco")).response(status(200))
);
running(server, () -> {
HttpResponse httpResponse = helper.deleteForResponseWithHeaders(remoteUrl("/targets/1"),
ImmutableMultimap.of(HttpHeaders.IF_MATCH, "moco"));
assertThat(httpResponse.getCode(), is(200));
});
} |
@Override
public String toString() {
return "compositeResponse: " + compositeResponse;
} | @Test
public void shouldDeserializeSuccessfulJsonResponse() throws IOException {
final String json = IOUtils.toString(
this.getClass().getResourceAsStream(
"/org/apache/camel/component/salesforce/api/dto/composite_response_example_success.json"),
StandardCharsets.UTF_8);
final ObjectMapper mapper = JsonUtils.createObjectMapper();
final SObjectCompositeResponse response = mapper.readerFor(SObjectCompositeResponse.class).readValue(json);
assertSuccessfulResponse(response);
} |