focal_method | test_case |
---|---|
String delete() {
return delete;
} | @Test
public void testDeleteIsEscaped() {
Queries queries = new Queries(mappingEscape, idColumnEscape, columnMetadataEscape);
String result = queries.delete();
assertEquals("DELETE FROM \"my\"\"mapping\" WHERE \"i\"\"d\" = ?", result);
} |
@VisibleForTesting
static boolean validate(TableConfig tableConfig, String taskType) {
String tableNameWithType = tableConfig.getTableName();
if (REFRESH.equalsIgnoreCase(IngestionConfigUtils.getBatchSegmentIngestionType(tableConfig))) {
LOGGER.warn("Skip generating task: {} for non-APPEND table: {}, REFRESH table is not supported", taskType,
tableNameWithType);
return false;
}
if (tableConfig.getTableType() == TableType.REALTIME) {
if (tableConfig.isUpsertEnabled()) {
LOGGER.warn("Skip generating task: {} for table: {}, table with upsert enabled is not supported", taskType,
tableNameWithType);
return false;
}
if (tableConfig.isDedupEnabled()) {
LOGGER.warn("Skip generating task: {} for table: {}, table with dedup enabled is not supported", taskType,
tableNameWithType);
return false;
}
}
return true;
} | @Test
public void testValidateIfMergeRollupCanBeEnabledOrNot() {
TableConfig tableConfig =
new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
.build();
assertTrue(MergeRollupTaskGenerator.validate(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE));
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setBatchIngestionConfig(new BatchIngestionConfig(Collections.emptyList(), "REFRESH", "daily"));
tableConfig =
new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
.setIngestionConfig(ingestionConfig).build();
assertFalse(MergeRollupTaskGenerator.validate(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE));
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
.build();
assertTrue(MergeRollupTaskGenerator.validate(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE));
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
.setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL)).build();
assertFalse(MergeRollupTaskGenerator.validate(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE));
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
.setDedupConfig(new DedupConfig(true, HashFunction.MD5)).build();
assertFalse(MergeRollupTaskGenerator.validate(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE));
} |
public static LocalRetryableExecution executeLocallyWithRetry(NodeEngine nodeEngine, Operation operation) {
if (operation.getOperationResponseHandler() != null) {
throw new IllegalArgumentException("Operation must not have a response handler set");
}
if (!operation.returnsResponse()) {
throw new IllegalArgumentException("Operation must return a response");
}
if (operation.validatesTarget()) {
throw new IllegalArgumentException("Operation must not validate the target");
}
final LocalRetryableExecution execution = new LocalRetryableExecution(nodeEngine, operation);
execution.run();
return execution;
} | @Test
public void executeLocallyRetriesWhenPartitionIsMigrating() throws InterruptedException {
final HazelcastInstance instance = createHazelcastInstance(smallInstanceConfig());
final NodeEngineImpl nodeEngineImpl = getNodeEngineImpl(instance);
final InternalPartitionService partitionService = nodeEngineImpl.getPartitionService();
final int randomPartitionId = (int) (Math.random() * partitionService.getPartitionCount());
final InternalPartitionImpl partition = (InternalPartitionImpl) partitionService.getPartition(randomPartitionId);
partition.setMigrating();
final String operationResponse = "operationResponse";
final Operation operation = new LocalOperation(operationResponse)
.setPartitionId(randomPartitionId);
final LocalRetryableExecution execution = executeLocallyWithRetry(nodeEngineImpl, operation);
spawn((Runnable) () -> {
try {
TimeUnit.SECONDS.sleep(10);
} catch (InterruptedException e) {
}
partition.resetMigrating();
});
assertTrue(execution.awaitCompletion(1, TimeUnit.MINUTES));
assertEquals(operationResponse, execution.getResponse());
} |
public OffsetRange[] getNextOffsetRanges(Option<String> lastCheckpointStr, long sourceLimit, HoodieIngestionMetrics metrics) {
// Come up with final set of OffsetRanges to read (account for new partitions, limit number of events)
long maxEventsToReadFromKafka = getLongWithAltKeys(props, KafkaSourceConfig.MAX_EVENTS_FROM_KAFKA_SOURCE);
long numEvents;
if (sourceLimit == Long.MAX_VALUE) {
numEvents = maxEventsToReadFromKafka;
LOG.info("SourceLimit not configured, set numEvents to default value : {}", maxEventsToReadFromKafka);
} else {
numEvents = sourceLimit;
}
long minPartitions = getLongWithAltKeys(props, KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS);
LOG.info("getNextOffsetRanges set config {} to {}", KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), minPartitions);
return getNextOffsetRanges(lastCheckpointStr, numEvents, minPartitions, metrics);
} | @Test
public void testGetNextOffsetRangesFromLatest() {
HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator();
testUtils.createTopic(testTopicName, 1);
testUtils.sendMessages(testTopicName, Helpers.jsonifyRecords(dataGenerator.generateInserts("000", 1000)));
KafkaOffsetGen kafkaOffsetGen = new KafkaOffsetGen(getConsumerConfigs("latest", KAFKA_CHECKPOINT_TYPE_STRING));
OffsetRange[] nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.empty(), 500, metrics);
assertEquals(1, nextOffsetRanges.length);
assertEquals(1000, nextOffsetRanges[0].fromOffset());
assertEquals(1000, nextOffsetRanges[0].untilOffset());
} |
public Map<String, Parameter> generateMergedWorkflowParams(
WorkflowInstance instance, RunRequest request) {
Workflow workflow = instance.getRuntimeWorkflow();
Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
Map<String, ParamDefinition> defaultWorkflowParams =
defaultParamManager.getDefaultWorkflowParams();
// merge workflow params for start
if (request.isFreshRun()) {
// merge default workflow params
ParamsMergeHelper.mergeParams(
allParamDefs,
defaultWorkflowParams,
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, request));
// merge defined workflow params
if (workflow.getParams() != null) {
ParamsMergeHelper.mergeParams(
allParamDefs,
workflow.getParams(),
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, request));
}
}
// merge workflow params from previous instance for restart
if (!request.isFreshRun() && instance.getParams() != null) {
Map<String, ParamDefinition> previousParamDefs =
instance.getParams().entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toDefinition()));
// remove reserved params, which should be injected again by the system.
for (String paramName : Constants.RESERVED_PARAM_NAMES) {
previousParamDefs.remove(paramName);
}
ParamsMergeHelper.mergeParams(
allParamDefs,
previousParamDefs,
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM, false));
}
// merge run params
if (request.getRunParams() != null) {
ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
ParamsMergeHelper.mergeParams(
allParamDefs,
request.getRunParams(),
ParamsMergeHelper.MergeContext.workflowCreate(source, request));
}
// merge user provided restart run params
getUserRestartParam(request)
.ifPresent(
userRestartParams -> {
ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
ParamsMergeHelper.mergeParams(
allParamDefs,
userRestartParams,
ParamsMergeHelper.MergeContext.workflowCreate(source, request));
});
// cleanup any placeholder params and convert to params
return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
} | @Test
public void testWorkflowParamSanity() {
Map<String, ParamDefinition> params = new LinkedHashMap<>();
RunRequest request =
RunRequest.builder()
.initiator(new ManualInitiator())
.currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
.runParams(params)
.build();
Map<String, Parameter> workflowParams =
paramsManager.generateMergedWorkflowParams(workflowInstance, request);
Assert.assertFalse(workflowParams.isEmpty());
} |
@Deprecated
@Override
public ProducerBuilder<T> maxPendingMessagesAcrossPartitions(int maxPendingMessagesAcrossPartitions) {
conf.setMaxPendingMessagesAcrossPartitions(maxPendingMessagesAcrossPartitions);
return this;
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testProducerBuilderImplWhenMaxPendingMessagesAcrossPartitionsPropertyIsInvalid() {
producerBuilderImpl.maxPendingMessagesAcrossPartitions(-1);
} |
@Override
public Long createSensitiveWord(SensitiveWordSaveVO createReqVO) {
// Validate uniqueness
validateSensitiveWordNameUnique(null, createReqVO.getName());
// Insert
SensitiveWordDO sensitiveWord = BeanUtils.toBean(createReqVO, SensitiveWordDO.class);
sensitiveWordMapper.insert(sensitiveWord);
// Refresh the local cache
initLocalCache();
return sensitiveWord.getId();
} | @Test
public void testCreateSensitiveWord_success() {
// Prepare parameters
SensitiveWordSaveVO reqVO = randomPojo(SensitiveWordSaveVO.class)
.setId(null); // prevent the id from being pre-assigned
// Invoke
Long sensitiveWordId = sensitiveWordService.createSensitiveWord(reqVO);
// Assert
assertNotNull(sensitiveWordId);
// Verify that the persisted record's fields are correct
SensitiveWordDO sensitiveWord = sensitiveWordMapper.selectById(sensitiveWordId);
assertPojoEquals(reqVO, sensitiveWord, "id");
} |
public static Comparator<Descriptor> getPrimitiveComparator(
boolean compressInt, boolean compressLong) {
if (!compressInt && !compressLong) {
return PRIMITIVE_COMPARATOR;
}
return (d1, d2) -> {
Class<?> t1 = TypeUtils.unwrap(d1.getRawType());
Class<?> t2 = TypeUtils.unwrap(d2.getRawType());
boolean t1Compress = isCompressedType(t1, compressInt, compressLong);
boolean t2Compress = isCompressedType(t2, compressInt, compressLong);
if ((t1Compress && t2Compress) || (!t1Compress && !t2Compress)) {
int c = getSizeOfPrimitiveType(t2) - getSizeOfPrimitiveType(t1);
if (c == 0) {
c = DescriptorGrouper.COMPARATOR_BY_TYPE_AND_NAME.compare(d1, d2);
}
return c;
}
if (t1Compress) {
return 1;
}
// t2 compress
return -1;
};
} | @Test
public void testPrimitiveCompressedComparator() {
List<Descriptor> descriptors = new ArrayList<>();
int index = 0;
for (Class<?> aClass : Primitives.allPrimitiveTypes()) {
descriptors.add(new Descriptor(TypeRef.of(aClass), "f" + index++, -1, "TestClass"));
}
Collections.shuffle(descriptors, new Random(7));
descriptors.sort(DescriptorGrouper.getPrimitiveComparator(true, true));
List<? extends Class<?>> classes =
descriptors.stream().map(Descriptor::getRawType).collect(Collectors.toList());
List<Class<?>> expected =
Arrays.asList(
double.class,
float.class,
char.class,
short.class,
boolean.class,
byte.class,
void.class,
long.class,
int.class);
assertEquals(classes, expected);
} |
public Node chooseRandomWithStorageType(final String scope,
final Collection<Node> excludedNodes, StorageType type) {
netlock.readLock().lock();
try {
if (scope.startsWith("~")) {
return chooseRandomWithStorageType(
NodeBase.ROOT, scope.substring(1), excludedNodes, type);
} else {
return chooseRandomWithStorageType(
scope, null, excludedNodes, type);
}
} finally {
netlock.readLock().unlock();
}
} | @Test
public void testNonExistingNode() throws Exception {
Node n;
n = CLUSTER.chooseRandomWithStorageType(
"/l100", null, null, StorageType.DISK);
assertNull(n);
n = CLUSTER.chooseRandomWithStorageType(
"/l100/d100", null, null, StorageType.DISK);
assertNull(n);
n = CLUSTER.chooseRandomWithStorageType(
"/l100/d100/r100", null, null, StorageType.DISK);
assertNull(n);
} |
@Override
public Properties info(RedisClusterNode node) {
Map<String, String> info = execute(node, RedisCommands.INFO_ALL);
Properties result = new Properties();
for (Entry<String, String> entry : info.entrySet()) {
result.setProperty(entry.getKey(), entry.getValue());
}
return result;
} | @Test
public void testInfo() {
RedisClusterNode master = getFirstMaster();
Properties info = connection.info(master);
assertThat(info.size()).isGreaterThan(10);
} |
public static boolean isDecimal(final Schema schema) {
return schema.type() == Type.BYTES
&& Decimal.LOGICAL_NAME.equals(schema.name());
} | @Test
public void shouldNotCheckSchemaForNonDecimals() {
// Given:
final Schema notDecimal = Schema.OPTIONAL_STRING_SCHEMA;
// Then:
assertThat("String should not be decimal schema", !DecimalUtil.isDecimal(notDecimal));
} |
@VisibleForTesting
DictTypeDO validateDictTypeExists(Long id) {
if (id == null) {
return null;
}
DictTypeDO dictType = dictTypeMapper.selectById(id);
if (dictType == null) {
throw exception(DICT_TYPE_NOT_EXISTS);
}
return dictType;
} | @Test
public void testValidateDictDataExists_notExists() {
assertServiceException(() -> dictTypeService.validateDictTypeExists(randomLongId()), DICT_TYPE_NOT_EXISTS);
} |
@VisibleForTesting
protected RMAdminRequestInterceptor createRequestInterceptorChain() {
Configuration conf = getConfig();
return RouterServerUtil.createRequestInterceptorChain(conf,
YarnConfiguration.ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE,
YarnConfiguration.DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS,
RMAdminRequestInterceptor.class);
} | @Test
public void testRequestInterceptorChainCreation() throws Exception {
RMAdminRequestInterceptor root =
super.getRouterRMAdminService().createRequestInterceptorChain();
int index = 0;
while (root != null) {
// The current pipeline is:
// PassThroughRMAdminRequestInterceptor - index = 0
// PassThroughRMAdminRequestInterceptor - index = 1
// PassThroughRMAdminRequestInterceptor - index = 2
// MockClientRequestInterceptor - index = 3
switch (index) {
case 0: // Fall to the next case
case 1: // Fall to the next case
case 2:
// If index is equal to 0,1 or 2 we fall in this check
Assert.assertEquals(
PassThroughRMAdminRequestInterceptor.class.getName(),
root.getClass().getName());
break;
case 3:
Assert.assertEquals(MockRMAdminRequestInterceptor.class.getName(),
root.getClass().getName());
break;
default:
Assert.fail();
}
root = root.getNextInterceptor();
index++;
}
Assert.assertEquals("The number of interceptors in chain does not match", 4,
index);
} |
@Override
public boolean hasNext() {
while (innerIter == null || !innerIter.hasNext()) {
if (!outerIter.hasNext()) {
return false;
}
StateDescriptor<?, ?> descriptor = outerIter.next();
Stream<K> stream = backend.getKeys(descriptor.getName(), VoidNamespace.INSTANCE);
innerIter = stream.iterator();
try {
registry.registerCloseable(stream::close);
} catch (IOException e) {
throw new RuntimeException("Failed to read keys from configured StateBackend", e);
}
}
return true;
} | @Test
public void testIteratorPullsSingleKeyFromAllDescriptors() throws AssertionError {
CountingKeysKeyedStateBackend keyedStateBackend =
createCountingKeysKeyedStateBackend(100_000_000);
MultiStateKeyIterator<Integer> testedIterator =
new MultiStateKeyIterator<>(descriptors, keyedStateBackend);
testedIterator.hasNext();
Assert.assertEquals(
"Unexpected number of keys enumerated",
1,
keyedStateBackend.numberOfKeysEnumerated);
} |
void activate(long newNextWriteOffset) {
if (active()) {
throw new RuntimeException("Can't activate already active OffsetControlManager.");
}
if (newNextWriteOffset < 0) {
throw new RuntimeException("Invalid negative newNextWriteOffset " +
newNextWriteOffset + ".");
}
// Before switching to active, create an in-memory snapshot at the last committed
// offset. This is required because the active controller assumes that there is always
// an in-memory snapshot at the last committed offset.
snapshotRegistry.idempotentCreateSnapshot(lastStableOffset);
this.nextWriteOffset = newNextWriteOffset;
metrics.setActive(true);
} | @Test
public void testActivateFailsIfNewNextWriteOffsetIsNegative() {
OffsetControlManager offsetControl = new OffsetControlManager.Builder().build();
assertEquals("Invalid negative newNextWriteOffset -2.",
assertThrows(RuntimeException.class,
() -> offsetControl.activate(-2)).
getMessage());
} |
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
// In PostgreSQL, change stream records are returned as JSONB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
} | @Test
public void testMappingInsertJsonRowToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"transactionId",
false,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(
new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
ModType.INSERT,
ValueCaptureType.OLD_AND_NEW_VALUES,
10L,
2L,
"transactionTag",
true,
null);
final String jsonString = recordToJson(dataChangeRecord, false, false);
assertNotNull(jsonString);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
@Override
public Result apply(PathData item, int depth) throws IOException {
String name = getPath(item).getName();
if (!caseSensitive) {
name = StringUtils.toLowerCase(name);
}
if (globPattern.matches(name)) {
return Result.PASS;
} else {
return Result.FAIL;
}
} | @Test
public void applyGlobMixedCase() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/NaMe", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
} |
static KiePMMLDefineFunction getKiePMMLDefineFunction(final DefineFunction defineFunction) {
final List<KiePMMLParameterField> kiePMMLParameterFields =
getKiePMMLParameterFields(defineFunction.getParameterFields());
DATA_TYPE dataType = defineFunction.getDataType() != null ?
DATA_TYPE.byName(defineFunction.getDataType().value()) : null;
OP_TYPE opType = defineFunction.getOpType() != null ? OP_TYPE.byName(defineFunction.getOpType().value()) : null;
return new KiePMMLDefineFunction(defineFunction.getName(),
getKiePMMLExtensions(defineFunction.getExtensions()),
dataType,
opType,
kiePMMLParameterFields,
getKiePMMLExpression(defineFunction.getExpression()));
} | @Test
void getKiePMMLDefineFunction() {
final String functionName = "functionName";
final DefineFunction toConvert = getDefineFunction(functionName);
KiePMMLDefineFunction retrieved = KiePMMLDefineFunctionInstanceFactory.getKiePMMLDefineFunction(toConvert);
commonVerifyKiePMMLDefineFunction(retrieved, toConvert);
} |
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
throws IOException {
FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
List<HasMetadata> accompanyingResources = new ArrayList<>();
final List<KubernetesStepDecorator> stepDecorators =
new ArrayList<>(
Arrays.asList(
new InitJobManagerDecorator(kubernetesJobManagerParameters),
new EnvSecretsDecorator(kubernetesJobManagerParameters),
new MountSecretsDecorator(kubernetesJobManagerParameters),
new CmdJobManagerDecorator(kubernetesJobManagerParameters),
new InternalServiceDecorator(kubernetesJobManagerParameters),
new ExternalServiceDecorator(kubernetesJobManagerParameters)));
Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
}
if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
}
stepDecorators.addAll(
Arrays.asList(
new FlinkConfMountDecorator(kubernetesJobManagerParameters),
new PodTemplateMountDecorator(kubernetesJobManagerParameters)));
for (KubernetesStepDecorator stepDecorator : stepDecorators) {
flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
}
final Deployment deployment =
createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);
return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
} | @Test
void testKerberosConfConfigMap() throws IOException {
kubernetesJobManagerSpecification =
KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
flinkPod, kubernetesJobManagerParameters);
final ConfigMap resultConfigMap =
(ConfigMap)
this.kubernetesJobManagerSpecification.getAccompanyingResources().stream()
.filter(
x ->
x instanceof ConfigMap
&& x.getMetadata()
.getName()
.equals(
KerberosMountDecorator
.getKerberosKrb5confConfigMapName(
CLUSTER_ID)))
.collect(Collectors.toList())
.get(0);
assertThat(resultConfigMap.getApiVersion()).isEqualTo(Constants.API_VERSION);
assertThat(resultConfigMap.getMetadata().getName())
.isEqualTo(KerberosMountDecorator.getKerberosKrb5confConfigMapName(CLUSTER_ID));
final Map<String, String> resultDatas = resultConfigMap.getData();
assertThat(resultDatas).hasSize(1);
assertThat(resultDatas.get(KRB5_CONF_FILE)).isEqualTo("some conf");
} |
public static Optional<Expression> convert(
org.apache.flink.table.expressions.Expression flinkExpression) {
if (!(flinkExpression instanceof CallExpression)) {
return Optional.empty();
}
CallExpression call = (CallExpression) flinkExpression;
Operation op = FILTERS.get(call.getFunctionDefinition());
if (op != null) {
switch (op) {
case IS_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::isNull);
case NOT_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::notNull);
case LT:
return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
case LT_EQ:
return convertFieldAndLiteral(
Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
case GT:
return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
case GT_EQ:
return convertFieldAndLiteral(
Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
case EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.isNaN(ref);
} else {
return Expressions.equal(ref, lit);
}
},
call);
case NOT_EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.notNaN(ref);
} else {
return Expressions.notEqual(ref, lit);
}
},
call);
case NOT:
return onlyChildAs(call, CallExpression.class)
.flatMap(FlinkFilters::convert)
.map(Expressions::not);
case AND:
return convertLogicExpression(Expressions::and, call);
case OR:
return convertLogicExpression(Expressions::or, call);
case STARTS_WITH:
return convertLike(call);
}
}
return Optional.empty();
} | @Test
public void testNot() {
Expression expr =
resolve(
ApiExpressionUtils.unresolvedCall(
BuiltInFunctionDefinitions.NOT,
Expressions.$("field1").isEqual(Expressions.lit(1))));
Optional<org.apache.iceberg.expressions.Expression> actual = FlinkFilters.convert(expr);
assertThat(actual).isPresent();
Not not = (Not) actual.get();
Not expected =
(Not)
org.apache.iceberg.expressions.Expressions.not(
org.apache.iceberg.expressions.Expressions.equal("field1", 1));
assertThat(not.op()).as("Predicate operation should match").isEqualTo(expected.op());
assertPredicatesMatch(expected.child(), not.child());
} |
public static String removeLeadingSlashes(String path) {
return SLASH_PREFIX_PATTERN.matcher(path).replaceFirst("");
} | @Test
public void removeLeadingSlashes_whenSingleLeadingSlash_removesLeadingSlashes() {
assertThat(removeLeadingSlashes("/a/b/c/")).isEqualTo("a/b/c/");
} |
@Override
public void updateProgress(TReportExecStatusParams params) {
writeLock();
try {
super.updateProgress(params);
if (!loadingStatus.getLoadStatistic().getLoadFinish()) {
if (this.loadType == TLoadJobType.INSERT_QUERY) {
if (loadingStatus.getLoadStatistic().totalFileSizeB != 0) {
// progress of file scan
progress = (int) ((double) loadingStatus.getLoadStatistic().sourceScanBytes() /
loadingStatus.getLoadStatistic().totalFileSize() * 100);
} else {
// progress of table scan. Slightly smaller than actual
progress = (int) ((double) loadingStatus.getLoadStatistic().totalSourceLoadRows()
/ (estimateScanRow + 1) * 100);
}
} else {
progress = (int) ((double) loadingStatus.getLoadStatistic().totalSinkLoadRows()
/ (estimateScanRow + 1) * 100);
}
if (progress >= 100) {
progress = 99;
}
}
} finally {
writeUnlock();
}
} | @Test
public void testUpdateProgress(@Mocked GlobalStateMgr globalStateMgr,
@Injectable Database database,
@Injectable Table table) throws MetaNotFoundException {
new Expectations() {
{
globalStateMgr.getDb(anyLong);
result = database;
database.getTable(anyLong);
result = table;
table.getName();
result = "some_table";
}
};
{
InsertLoadJob loadJob = new InsertLoadJob("label", 1L,
1L, 1000, "", "", null);
TUniqueId loadId = new TUniqueId(1, 2);
TUniqueId fragmentId = new TUniqueId(3, 4);
Set<TUniqueId> fragmentIds = new HashSet<>();
fragmentIds.add(fragmentId);
List<Long> backendIds = new ArrayList<>();
backendIds.add(10001L);
loadJob.initLoadProgress(loadId, fragmentIds, backendIds);
loadJob.setEstimateScanRow(100);
loadJob.setLoadFileInfo(0, 0);
TReportExecStatusParams params = new TReportExecStatusParams();
params.setSource_load_rows(40);
params.setQuery_id(loadId);
params.setFragment_instance_id(fragmentId);
loadJob.updateProgress(params);
Assert.assertEquals(39, loadJob.getProgress());
Assert.assertTrue(loadJob.getTabletCommitInfos().isEmpty());
Assert.assertTrue(loadJob.getTabletFailInfos().isEmpty());
}
{
InsertLoadJob loadJob = new InsertLoadJob("label", 1L,
1L, 1000, "", "", null);
TUniqueId loadId = new TUniqueId(1, 2);
TUniqueId fragmentId = new TUniqueId(3, 4);
Set<TUniqueId> fragmentIds = new HashSet<>();
fragmentIds.add(fragmentId);
List<Long> backendIds = new ArrayList<>();
backendIds.add(10001L);
loadJob.initLoadProgress(loadId, fragmentIds, backendIds);
loadJob.setEstimateScanRow(0);
loadJob.setLoadFileInfo(10, 100);
TReportExecStatusParams params = new TReportExecStatusParams();
params.setQuery_id(loadId);
params.setFragment_instance_id(fragmentId);
params.setSource_scan_bytes(80);
loadJob.updateProgress(params);
Assert.assertEquals(80, loadJob.getProgress());
Assert.assertTrue(loadJob.getTabletCommitInfos().isEmpty());
Assert.assertTrue(loadJob.getTabletFailInfos().isEmpty());
}
} |
public Flow injectDefaults(Flow flow, Execution execution) {
try {
return this.injectDefaults(flow);
} catch (Exception e) {
RunContextLogger
.logEntries(
Execution.loggingEventFromException(e),
LogEntry.of(execution)
)
.forEach(logQueue::emitAsync);
return flow;
}
} | @Test
void alias() {
DefaultTester task = DefaultTester.builder()
.id("test")
.type(DefaultTester.class.getName())
.set(666)
.build();
Flow flow = Flow.builder()
.tasks(Collections.singletonList(task))
.pluginDefaults(List.of(
new PluginDefault("io.kestra.core.services.DefaultTesterAlias", false, ImmutableMap.of(
"value", 1
))
))
.build();
Flow injected = pluginDefaultService.injectDefaults(flow);
assertThat(((DefaultTester) injected.getTasks().getFirst()).getValue(), is(1));
} |
@Converter(fallback = true)
public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) {
if (NodeInfo.class.isAssignableFrom(value.getClass())) {
// use a fallback type converter so we can convert the embedded body if the value is NodeInfo
NodeInfo ni = (NodeInfo) value;
// first try to find a Converter for Node
TypeConverter tc = registry.lookup(type, Node.class);
if (tc != null) {
Node node = NodeOverNodeInfo.wrap(ni);
return tc.convertTo(type, exchange, node);
}
// if this does not exist we can also try NodeList (there are some type converters for that) as
// the default Xerces Node implementation also implements NodeList.
tc = registry.lookup(type, NodeList.class);
if (tc != null) {
List<NodeInfo> nil = new LinkedList<>();
nil.add(ni);
return tc.convertTo(type, exchange, toDOMNodeList(nil));
}
} else if (List.class.isAssignableFrom(value.getClass())) {
TypeConverter tc = registry.lookup(type, NodeList.class);
if (tc != null) {
List<NodeInfo> lion = new LinkedList<>();
for (Object o : (List<?>) value) {
if (o instanceof NodeInfo) {
lion.add((NodeInfo) o);
}
}
if (!lion.isEmpty()) {
NodeList nl = toDOMNodeList(lion);
return tc.convertTo(type, exchange, nl);
}
}
} else if (NodeOverNodeInfo.class.isAssignableFrom(value.getClass())) {
// NodeOverNode info is a read-only Node implementation from Saxon. In contrast to the JDK
// com.sun.org.apache.xerces.internal.dom.NodeImpl class it does not implement NodeList, but
// many Camel type converters are based on that interface. Therefore we convert to NodeList and
// try type conversion in the fallback type converter.
TypeConverter tc = registry.lookup(type, NodeList.class);
if (tc != null) {
List<Node> domNodeList = new LinkedList<>();
domNodeList.add((NodeOverNodeInfo) value);
return tc.convertTo(type, exchange, new DOMNodeList(domNodeList));
}
}
return null;
} | @Test
public void convertSubNodeToDocument() throws XPathExpressionException {
evaluator.setNamespaceContext(NS_CONTEXT);
Object nodeObj = evaluator.evaluate("/ns1:a/ns1:b", doc, XPathConstants.NODE);
assertNotNull(nodeObj);
Document document = context.getTypeConverter().convertTo(Document.class, exchange, nodeObj);
assertNotNull(document);
String string = context.getTypeConverter().convertTo(String.class, exchange, document);
assertEquals(CONTENT_B, string);
} |
@Override
public void validateJoinRequest(JoinMessage joinMessage) {
// check that the joining member's major.minor version is the same as the current cluster version's major.minor numbers
MemberVersion memberVersion = joinMessage.getMemberVersion();
Version clusterVersion = node.getClusterService().getClusterVersion();
if (!memberVersion.asVersion().equals(clusterVersion)) {
String msg = "Joining node's version " + memberVersion + " is not compatible with cluster version " + clusterVersion;
if (clusterVersion.getMajor() != memberVersion.getMajor()) {
msg += " (Rolling Member Upgrades are only supported for the same major version)";
}
if (clusterVersion.getMinor() > memberVersion.getMinor()) {
msg += " (Rolling Member Upgrades are only supported for the next minor version)";
}
if (!BuildInfoProvider.getBuildInfo().isEnterprise()) {
msg += " (Rolling Member Upgrades are only supported in Hazelcast Enterprise)";
}
throw new VersionMismatchException(msg);
}
} | @Test
public void test_joinRequestAllowed_whenSameVersion() {
JoinRequest joinRequest = new JoinRequest(Packet.VERSION, buildNumber, nodeVersion, joinAddress, newUnsecureUUID(),
false, null, null, null, null, null);
nodeExtension.validateJoinRequest(joinRequest);
} |
public abstract long observeWm(int queueIndex, long wmValue); | @Test
public void when_i1_active_i2_idle_then_wmForwardedImmediately() {
assertEquals(Long.MIN_VALUE, wc.observeWm(0, 100));
assertEquals(100, wc.observeWm(1, IDLE_MESSAGE.timestamp()));
} |
@Timed
@Path("/{destination}")
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ManagedAsync
@Operation(
summary = "Send a message",
description = """
Deliver a message to a single recipient. May be authenticated or unauthenticated; if unauthenticated,
an unidentified-access key or group-send endorsement token must be provided, unless the message is a story.
""")
@ApiResponse(responseCode="200", description="Message was successfully sent", useReturnTypeSchema=true)
@ApiResponse(
responseCode="401",
description="The message is not a story and the authorization, unauthorized access key, or group send endorsement token is missing or incorrect")
@ApiResponse(
responseCode="404",
description="The message is not a story and some the recipient service ID does not correspond to a registered Signal user")
@ApiResponse(
responseCode = "409", description = "Incorrect set of devices supplied for recipient",
content = @Content(schema = @Schema(implementation = AccountMismatchedDevices[].class)))
@ApiResponse(
responseCode = "410", description = "Mismatched registration ids supplied for some recipient devices",
content = @Content(schema = @Schema(implementation = AccountStaleDevices[].class)))
public Response sendMessage(@ReadOnly @Auth Optional<AuthenticatedDevice> source,
@Parameter(description="The recipient's unidentified access key")
@HeaderParam(HeaderUtils.UNIDENTIFIED_ACCESS_KEY) Optional<Anonymous> accessKey,
@Parameter(description="A group send endorsement token covering the recipient. Must not be combined with `Unidentified-Access-Key` or set on a story message.")
@HeaderParam(HeaderUtils.GROUP_SEND_TOKEN)
@Nullable GroupSendTokenHeader groupSendToken,
@HeaderParam(HttpHeaders.USER_AGENT) String userAgent,
@Parameter(description="If true, deliver the message only to recipients that are online when it is sent")
@PathParam("destination") ServiceIdentifier destinationIdentifier,
@Parameter(description="If true, the message is a story; access tokens are not checked and sending to nonexistent recipients is permitted")
@QueryParam("story") boolean isStory,
@Parameter(description="The encrypted message payloads for each recipient device")
@NotNull @Valid IncomingMessageList messages,
@Context ContainerRequestContext context) throws RateLimitExceededException {
final Sample sample = Timer.start();
try {
if (source.isEmpty() && accessKey.isEmpty() && groupSendToken == null && !isStory) {
throw new WebApplicationException(Response.Status.UNAUTHORIZED);
}
if (groupSendToken != null) {
if (!source.isEmpty() || !accessKey.isEmpty()) {
throw new BadRequestException("Group send endorsement tokens should not be combined with other authentication");
} else if (isStory) {
throw new BadRequestException("Group send endorsement tokens should not be sent for story messages");
}
}
final String senderType;
if (source.isPresent()) {
if (source.get().getAccount().isIdentifiedBy(destinationIdentifier)) {
senderType = SENDER_TYPE_SELF;
} else {
senderType = SENDER_TYPE_IDENTIFIED;
}
} else {
senderType = SENDER_TYPE_UNIDENTIFIED;
}
boolean isSyncMessage = source.isPresent() && source.get().getAccount().isIdentifiedBy(destinationIdentifier);
if (isSyncMessage && destinationIdentifier.identityType() == IdentityType.PNI) {
throw new WebApplicationException(Status.FORBIDDEN);
}
Optional<Account> destination;
if (!isSyncMessage) {
destination = accountsManager.getByServiceIdentifier(destinationIdentifier);
} else {
destination = source.map(AuthenticatedDevice::getAccount);
}
final Optional<Response> spamCheck = spamChecker.checkForSpam(
context, source.map(AuthenticatedDevice::getAccount), destination);
if (spamCheck.isPresent()) {
return spamCheck.get();
}
final Optional<byte[]> spamReportToken = switch (senderType) {
case SENDER_TYPE_IDENTIFIED ->
reportSpamTokenProvider.makeReportSpamToken(context, source.get(), destination);
default -> Optional.empty();
};
int totalContentLength = 0;
for (final IncomingMessage message : messages.messages()) {
int contentLength = 0;
if (StringUtils.isNotEmpty(message.content())) {
contentLength += message.content().length();
}
validateContentLength(contentLength, false, userAgent);
validateEnvelopeType(message.type(), userAgent);
totalContentLength += contentLength;
}
try {
rateLimiters.getInboundMessageBytes().validate(destinationIdentifier.uuid(), totalContentLength);
} catch (final RateLimitExceededException e) {
if (dynamicConfigurationManager.getConfiguration().getInboundMessageByteLimitConfiguration().enforceInboundLimit()) {
messageByteLimitEstimator.add(destinationIdentifier.uuid().toString());
throw e;
}
}
try {
if (isStory) {
// Stories will be checked by the client; we bypass access checks here for stories.
} else if (groupSendToken != null) {
checkGroupSendToken(List.of(destinationIdentifier.toLibsignal()), groupSendToken);
if (destination.isEmpty()) {
throw new NotFoundException();
}
} else {
OptionalAccess.verify(source.map(AuthenticatedDevice::getAccount), accessKey, destination,
destinationIdentifier);
}
boolean needsSync = !isSyncMessage && source.isPresent() && source.get().getAccount().getDevices().size() > 1;
// We return 200 when stories are sent to a non-existent account. Since story sends bypass OptionalAccess.verify
// we leak information about whether a destination UUID exists if we return any other code (e.g. 404) from
// these requests.
if (isStory && destination.isEmpty()) {
return Response.ok(new SendMessageResponse(needsSync)).build();
}
// if destination is empty we would either throw an exception in OptionalAccess.verify when isStory is false
// or else return a 200 response when isStory is true.
assert destination.isPresent();
if (source.isPresent() && !isSyncMessage) {
checkMessageRateLimit(source.get(), destination.get(), userAgent);
}
if (isStory) {
rateLimiters.getStoriesLimiter().validate(destination.get().getUuid());
}
final Set<Byte> excludedDeviceIds;
if (isSyncMessage) {
excludedDeviceIds = Set.of(source.get().getAuthenticatedDevice().getId());
} else {
excludedDeviceIds = Collections.emptySet();
}
DestinationDeviceValidator.validateCompleteDeviceList(destination.get(),
messages.messages().stream().map(IncomingMessage::destinationDeviceId).collect(Collectors.toSet()),
excludedDeviceIds);
DestinationDeviceValidator.validateRegistrationIds(destination.get(),
messages.messages(),
IncomingMessage::destinationDeviceId,
IncomingMessage::destinationRegistrationId,
destination.get().getPhoneNumberIdentifier().equals(destinationIdentifier.uuid()));
final String authType;
if (SENDER_TYPE_IDENTIFIED.equals(senderType)) {
authType = AUTH_TYPE_IDENTIFIED;
} else if (isStory) {
authType = AUTH_TYPE_STORY;
} else if (groupSendToken != null) {
authType = AUTH_TYPE_GROUP_SEND_TOKEN;
} else {
authType = AUTH_TYPE_ACCESS_KEY;
}
final List<Tag> tags = List.of(UserAgentTagUtil.getPlatformTag(userAgent),
Tag.of(ENDPOINT_TYPE_TAG_NAME, ENDPOINT_TYPE_SINGLE),
Tag.of(EPHEMERAL_TAG_NAME, String.valueOf(messages.online())),
Tag.of(SENDER_TYPE_TAG_NAME, senderType),
Tag.of(AUTH_TYPE_TAG_NAME, authType),
Tag.of(IDENTITY_TYPE_TAG_NAME, destinationIdentifier.identityType().name()));
for (IncomingMessage incomingMessage : messages.messages()) {
Optional<Device> destinationDevice = destination.get().getDevice(incomingMessage.destinationDeviceId());
if (destinationDevice.isPresent()) {
Metrics.counter(SENT_MESSAGE_COUNTER_NAME, tags).increment();
sendIndividualMessage(
source,
destination.get(),
destinationDevice.get(),
destinationIdentifier,
messages.timestamp(),
messages.online(),
isStory,
messages.urgent(),
incomingMessage,
userAgent,
spamReportToken);
}
}
return Response.ok(new SendMessageResponse(needsSync)).build();
} catch (MismatchedDevicesException e) {
throw new WebApplicationException(Response.status(409)
.type(MediaType.APPLICATION_JSON_TYPE)
.entity(new MismatchedDevices(e.getMissingDevices(),
e.getExtraDevices()))
.build());
} catch (StaleDevicesException e) {
throw new WebApplicationException(Response.status(410)
.type(MediaType.APPLICATION_JSON)
.entity(new StaleDevices(e.getStaleDevices()))
.build());
}
} finally {
sample.stop(SEND_MESSAGE_LATENCY_TIMER);
}
} | @Test
void testMultiDeviceNotUrgent() throws Exception {
try (final Response response =
resources.getJerseyTest()
.target(String.format("/v1/messages/%s", MULTI_DEVICE_UUID))
.request()
.header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
.put(Entity.entity(SystemMapper.jsonMapper().readValue(jsonFixture("fixtures/current_message_multi_device_not_urgent.json"),
IncomingMessageList.class),
MediaType.APPLICATION_JSON_TYPE))) {
assertThat("Good Response Code", response.getStatus(), is(equalTo(200)));
final ArgumentCaptor<Envelope> envelopeCaptor = ArgumentCaptor.forClass(Envelope.class);
verify(messageSender, times(3))
.sendMessage(any(Account.class), any(Device.class), envelopeCaptor.capture(), eq(false));
envelopeCaptor.getAllValues().forEach(envelope -> assertFalse(envelope.getUrgent()));
}
} |
@Override
public CompletableFuture<SubscriptionData> querySubscriptionByConsumer(String address,
QuerySubscriptionByConsumerRequestHeader requestHeader, long timeoutMillis) {
CompletableFuture<SubscriptionData> future = new CompletableFuture<>();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_SUBSCRIPTION_BY_CONSUMER, requestHeader);
remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
if (response.getCode() == ResponseCode.SUCCESS) {
QuerySubscriptionResponseBody subscriptionResponseBody =
QuerySubscriptionResponseBody.decode(response.getBody(), QuerySubscriptionResponseBody.class);
future.complete(subscriptionResponseBody.getSubscriptionData());
} else {
log.warn("querySubscriptionByConsumer getResponseCommand failed, {} {}", response.getCode(), response.getRemark());
future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
}
});
return future;
} | @Test
public void assertQuerySubscriptionByConsumerWithError() {
setResponseError();
QuerySubscriptionByConsumerRequestHeader requestHeader = mock(QuerySubscriptionByConsumerRequestHeader.class);
CompletableFuture<SubscriptionData> actual = mqClientAdminImpl.querySubscriptionByConsumer(defaultBrokerAddr, requestHeader, defaultTimeout);
Throwable thrown = assertThrows(ExecutionException.class, actual::get);
assertTrue(thrown.getCause() instanceof MQClientException);
MQClientException mqException = (MQClientException) thrown.getCause();
assertEquals(ResponseCode.SYSTEM_ERROR, mqException.getResponseCode());
assertTrue(mqException.getMessage().contains("CODE: 1 DESC: null"));
} |
static boolean isFormFile(final String pathName) {
return pathName.endsWith("frm");
} | @Test
public void testIsFormFile() {
assertThat(KieModuleMetaDataImpl.isFormFile("abc.frm")).isTrue();
assertThat(KieModuleMetaDataImpl.isFormFile("abc.form")).isFalse();
} |
public String getServiceName() {
return serviceName;
} | @Test
public void getServiceName() {
final ServiceStats serviceStats = new ServiceStats(serviceName);
Assert.assertEquals(serviceStats.getServiceName(), serviceName);
} |
public static String substVars(String val, PropertyContainer pc1) {
return substVars(val, pc1, null);
} | @Test
public void testLiteral() {
String noSubst = "hello world";
String result = OptionHelper.substVars(noSubst, context);
assertEquals(noSubst, result);
} |
@JsonIgnore
public WorkflowInstance.Status getRunStatus() {
if (runtimeOverview == null || runtimeOverview.getRunStatus() == null) {
return status;
}
return runtimeOverview.getRunStatus();
} | @Test
public void testRunStatus() throws Exception {
WorkflowInstance instance =
loadObject(
"fixtures/instances/sample-workflow-instance-succeeded.json", WorkflowInstance.class);
assertEquals(WorkflowInstance.Status.SUCCEEDED, instance.getStatus());
assertEquals(WorkflowInstance.Status.SUCCEEDED, instance.getRunStatus());
instance.setStatus(WorkflowInstance.Status.FAILED);
instance.getRuntimeOverview().setRunStatus(WorkflowInstance.Status.SUCCEEDED);
assertEquals(WorkflowInstance.Status.FAILED, instance.getStatus());
assertEquals(WorkflowInstance.Status.SUCCEEDED, instance.getRunStatus());
assertEquals(12345, instance.getInternalId().longValue());
} |
@Override
void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception {
int len = setInput(headerBlock);
int numBytes;
do {
numBytes = decompress(alloc, frame);
} while (numBytes > 0);
// z_stream has an internal 64-bit hold buffer
// it is always capable of consuming the entire input
if (decompressor.getRemaining() != 0) {
// we reached the end of the deflate stream
throw INVALID_HEADER_BLOCK;
}
headerBlock.skipBytes(len);
} | @Test
public void testHeaderBlockInvalidDeflateBlock() throws Exception {
final ByteBuf headerBlock = Unpooled.buffer(11);
headerBlock.writeBytes(zlibHeader);
headerBlock.writeByte(0); // Non-compressed block
headerBlock.writeByte(0x00); // little-endian length (0)
headerBlock.writeByte(0x00); // little-endian length (0)
headerBlock.writeByte(0x00); // invalid one's complement
headerBlock.writeByte(0x00); // invalid one's complement
assertThrows(SpdyProtocolException.class, new Executable() {
@Override
public void execute() throws Throwable {
decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame);
}
});
headerBlock.release();
} |
@Override
public <OUT> ProcessConfigurableAndGlobalStream<OUT> process(
OneInputStreamProcessFunction<T, OUT> processFunction) {
validateStates(
processFunction.usesStates(),
new HashSet<>(
Collections.singletonList(StateDeclaration.RedistributionMode.IDENTICAL)));
TypeInformation<OUT> outType =
StreamUtils.getOutputTypeForOneInputProcessFunction(processFunction, getType());
ProcessOperator<T, OUT> operator = new ProcessOperator<>(processFunction);
return StreamUtils.wrapWithConfigureHandle(transform("Global Process", outType, operator));
} | @Test
void testStateErrorWithTwoOutputStream() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
GlobalStreamImpl<Integer> stream =
new GlobalStreamImpl<>(env, new TestingTransformation<>("t1", Types.INT, 1));
assertThatThrownBy(
() ->
stream.process(
new NoOpTwoOutputStreamProcessFunction(
new HashSet<>(
Collections.singletonList(
modeIdenticalStateDeclaration)))))
.isInstanceOf(IllegalRedistributionModeException.class);
} |
public String toJSON() {
return JSON.toJSON(this);
} | @Test
public void testWriteUnionInfo() throws Exception {
StructType st = new StructType(new LinkedList<ThriftField>(), null);
assertEquals(
("{\n"
+ " \"id\" : \"STRUCT\",\n"
+ " \"children\" : [ ],\n"
+ " \"structOrUnionType\" : \"STRUCT\",\n"
+ " \"logicalTypeAnnotation\" : null\n"
+ "}")
.replace("\n", System.lineSeparator()),
st.toJSON());
st = new StructType(new LinkedList<ThriftField>(), StructOrUnionType.UNION);
assertEquals(
("{\n"
+ " \"id\" : \"STRUCT\",\n"
+ " \"children\" : [ ],\n"
+ " \"structOrUnionType\" : \"UNION\",\n"
+ " \"logicalTypeAnnotation\" : null\n"
+ "}")
.replace("\n", System.lineSeparator()),
st.toJSON());
st = new StructType(new LinkedList<ThriftField>(), StructOrUnionType.STRUCT);
assertEquals(
("{\n"
+ " \"id\" : \"STRUCT\",\n"
+ " \"children\" : [ ],\n"
+ " \"structOrUnionType\" : \"STRUCT\",\n"
+ " \"logicalTypeAnnotation\" : null\n"
+ "}")
.replace("\n", System.lineSeparator()),
st.toJSON());
} |
@Override
public ExecuteContext doBefore(ExecuteContext context) {
final Object url = context.getArguments()[0];
final Map<String, String> parameters = getParameters(url);
final String application = parameters.get(CommonConst.DUBBO_APPLICATION);
final String interfaceName = parameters.get(CommonConst.DUBBO_INTERFACE);
if (Objects.nonNull(application) && Objects.nonNull(interfaceName)) {
DubboApplicationCache.INSTANCE.cache(interfaceName, application);
}
return context;
} | @Test
public void doBefore() throws Exception {
String interfaceName = "io.sermant.test";
String application = "test";
String protocol = "dubbo";
String host = "localhost";
int port = 8080;
final ClusterInterceptor clusterInterceptor = new ClusterInterceptor();
final HashMap<String, String> params = new HashMap<>();
params.put("interface", interfaceName);
params.put("application", application);
final URL url = new URL(protocol, host, port, params);
clusterInterceptor.doBefore(buildContext(new Object[]{url}));
Assert.assertEquals(DubboApplicationCache.INSTANCE.getApplicationCache().get(interfaceName), application);
DubboApplicationCache.INSTANCE.getApplicationCache().clear();
final com.alibaba.dubbo.common.URL aliUrl = new com.alibaba.dubbo.common.URL(protocol, host, port, params);
clusterInterceptor.doBefore(buildContext(new Object[]{aliUrl}));
Assert.assertEquals(DubboApplicationCache.INSTANCE.getApplicationCache().get(interfaceName), application);
DubboApplicationCache.INSTANCE.getApplicationCache().clear();
} |
@Override
public void writeFloat(final float v) throws IOException {
ensureAvailable(FLOAT_SIZE_IN_BYTES);
MEM.putFloat(buffer, ARRAY_BYTE_BASE_OFFSET + pos, v);
pos += FLOAT_SIZE_IN_BYTES;
} | @Test
public void testWriteFloatV() throws Exception {
float expected = 1.1f;
out.writeFloat(expected);
int val = Bits.readInt(out.buffer, 0, ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN);
float actual = Float.intBitsToFloat(val);
assertEquals(expected, actual, 0);
} |
@GetMapping("/apps/search/by-appid-or-name")
public PageDTO<App> search(@RequestParam(value = "query", required = false) String query, Pageable pageable) {
if (StringUtils.isEmpty(query)) {
return appService.findAll(pageable);
}
//search app
PageDTO<App> appPageDTO = appService.searchByAppIdOrAppName(query, pageable);
if (appPageDTO.hasContent()) {
return appPageDTO;
}
if (!portalConfig.supportSearchByItem()) {
return new PageDTO<>(Lists.newLinkedList(), pageable, 0);
}
//search item
return searchByItem(query, pageable);
} | @Test
public void testSearchApp() {
String query = "timeout";
PageRequest request = PageRequest.of(0, 20);
PageDTO<App> apps = genPageApp(10, request, 100);
when(appService.searchByAppIdOrAppName(query, request)).thenReturn(apps);
searchController.search(query, request);
verify(appService, times(0)).findAll(request);
verify(appService, times(1)).searchByAppIdOrAppName(query, request);
} |
public RegistryBuilder extraKeys(String extraKeys) {
this.extraKeys = extraKeys;
return getThis();
} | @Test
void extraKeys() {
RegistryBuilder builder = new RegistryBuilder();
builder.extraKeys("extraKeys");
Assertions.assertEquals("extraKeys", builder.build().getExtraKeys());
} |
@SafeVarargs
static <T> List<T> ordered(Collection<T> items, Class<? extends T>... order) {
List<T> ordered = new ArrayList<>(items);
ordered.sort(Comparator.comparingInt(item -> {
int best = order.length;
for (int i = 0; i < order.length; i++) {
if ( order[i].isInstance(item)
&& ( best == order.length
|| order[best].isAssignableFrom(order[i]))) best = i;
}
return best;
}));
return ordered;
} | @Test
void testSorting() {
class A { @Override public String toString() { return getClass().getSimpleName(); } }
class B extends A { }
class C extends B { }
class D extends B { }
A a = new A(), b = new B(), c = new C(), d = new D(), e = new D() { @Override public String toString() { return "E"; } };
List<A> s = List.of(a, b, c, d, e);
assertEquals(List.of(a, b, c, d, e), ordered(s, A.class, B.class, C.class, D.class));
assertEquals(List.of(d, e, c, b, a), ordered(s, D.class, C.class, B.class, A.class));
assertEquals(List.of(e, c, a, b, d), ordered(s, e.getClass(), C.class, A.class));
assertEquals(List.of(d, e, b, c, a), ordered(s, D.class, B.class));
} |
public static FuryBuilder builder() {
return new FuryBuilder();
} | @Test(dataProvider = "crossLanguageReferenceTrackingConfig")
public void primitivesTest(boolean referenceTracking, Language language) {
Fury fury1 =
Fury.builder()
.withLanguage(language)
.withRefTracking(referenceTracking)
.requireClassRegistration(false)
.build();
Fury fury2 =
Fury.builder()
.withLanguage(language)
.withRefTracking(referenceTracking)
.requireClassRegistration(false)
.build();
assertEquals(true, serDe(fury1, fury2, true));
assertEquals(Byte.MAX_VALUE, serDe(fury1, fury2, Byte.MAX_VALUE));
assertEquals(Short.MAX_VALUE, serDe(fury1, fury2, Short.MAX_VALUE));
assertEquals(Integer.MAX_VALUE, serDe(fury1, fury2, Integer.MAX_VALUE));
assertEquals(Long.MAX_VALUE, serDe(fury1, fury2, Long.MAX_VALUE));
assertEquals(Float.MAX_VALUE, serDe(fury1, fury2, Float.MAX_VALUE));
assertEquals(Double.MAX_VALUE, serDe(fury1, fury2, Double.MAX_VALUE));
} |
@Override
public Member getMember() {
throw new UnsupportedOperationException();
} | @Test(expected = UnsupportedOperationException.class)
public void testGetMember() {
batchIMapEvent.getMember();
} |
public synchronized void write(Mutation tableRecord) throws IllegalStateException {
write(ImmutableList.of(tableRecord));
} | @Test
public void testWriteSingleRecordShouldWorkWhenSpannerWriteSucceeds()
throws ExecutionException, InterruptedException {
// arrange
prepareTable();
when(spanner.getDatabaseClient(any()).write(any())).thenReturn(Timestamp.now());
Mutation testMutation =
Mutation.newInsertOrUpdateBuilder("SingerId")
.set("SingerId")
.to(1)
.set("FirstName")
.to("Marc")
.set("LastName")
.to("Richards")
.build();
// act
testManager.write(testMutation);
// assert
verify(spanner.getDatabaseClient(any())).write(writeMutationCaptor.capture());
Iterable<Mutation> actualWriteMutation = writeMutationCaptor.getValue();
assertThat(actualWriteMutation).containsExactlyElementsIn(ImmutableList.of(testMutation));
} |
public static String format(Date date) {
return formatter().format0(checkNotNull(date, "date"));
} | @Test
public void testFormat() {
assertEquals("Sun, 06 Nov 1994 08:49:37 GMT", format(DATE));
} |
@Override
public YamlTableDataConsistencyCheckResult swapToYamlConfiguration(final TableDataConsistencyCheckResult data) {
YamlTableDataConsistencyCheckResult result = new YamlTableDataConsistencyCheckResult();
if (data.isIgnored()) {
result.setIgnoredType(data.getIgnoredType().name());
return result;
}
result.setMatched(data.isMatched());
return result;
} | @Test
void assertSwapToYamlConfigurationWithTableDataConsistencyCheckResultMatched() {
TableDataConsistencyCheckResult data = new TableDataConsistencyCheckResult(true);
YamlTableDataConsistencyCheckResult result = yamlTableDataConsistencyCheckResultSwapper.swapToYamlConfiguration(data);
assertNull(result.getIgnoredType());
assertTrue(result.isMatched());
} |
@Override
public DataTableType dataTableType() {
return dataTableType;
} | @Test
void can_define_table_entry_transformer_with_empty_pattern() throws NoSuchMethodException {
Method method = JavaDataTableTypeDefinitionTest.class.getMethod("converts_table_entry_to_string", Map.class);
JavaDataTableTypeDefinition definition = new JavaDataTableTypeDefinition(method, lookup,
new String[] { "[empty]" });
assertThat(definition.dataTableType().transform(emptyTable.cells()),
is(singletonList("converts_table_entry_to_string={a=, =d}")));
} |
public static Caffeine<Object, Object> newBuilder() {
return new Caffeine<>();
} | @Test
public void nullParameters() {
var npeTester = new NullPointerTester();
npeTester.testAllPublicInstanceMethods(Caffeine.newBuilder());
} |
@Override
public ShenyuContext decorator(final ShenyuContext shenyuContext, final MetaData metaData) {
shenyuContext.setModule(metaData.getAppName());
shenyuContext.setMethod(metaData.getServiceName());
shenyuContext.setContextPath(metaData.getContextPath());
shenyuContext.setRpcType(RpcTypeEnum.GRPC.getName());
return shenyuContext;
} | @Test
public void testDecorator() {
MetaData metaData = new MetaData();
metaData.setAppName("grpc");
metaData.setServiceName("echo");
metaData.setRpcType(PluginEnum.GRPC.getName());
metaData.setContextPath("/grpc");
final ShenyuContext shenyuContext = grpcShenyuContextDecorator.decorator(new ShenyuContext(), metaData);
assertNotNull(shenyuContext);
assertNotNull(shenyuContext.getContextPath());
assertNotNull(shenyuContext.getMethod());
assertNotNull(shenyuContext.getModule());
} |
public static ExecutableStage forGrpcPortRead(
QueryablePipeline pipeline,
PipelineNode.PCollectionNode inputPCollection,
Set<PipelineNode.PTransformNode> initialNodes) {
checkArgument(
!initialNodes.isEmpty(),
"%s must contain at least one %s.",
GreedyStageFuser.class.getSimpleName(),
PipelineNode.PTransformNode.class.getSimpleName());
// Choose the environment from an arbitrary node. The initial nodes must not be empty for this
// subgraph to make any sense; there has to be at least one processor node
// (otherwise the stage is gRPC Read -> gRPC Write, which doesn't do anything).
Environment environment = getStageEnvironment(pipeline, initialNodes);
ImmutableSet.Builder<PipelineNode.PTransformNode> fusedTransforms = ImmutableSet.builder();
fusedTransforms.addAll(initialNodes);
Set<SideInputReference> sideInputs = new LinkedHashSet<>();
Set<UserStateReference> userStates = new LinkedHashSet<>();
Set<TimerReference> timers = new LinkedHashSet<>();
Set<PipelineNode.PCollectionNode> fusedCollections = new LinkedHashSet<>();
Set<PipelineNode.PCollectionNode> materializedPCollections = new LinkedHashSet<>();
Queue<PipelineNode.PCollectionNode> fusionCandidates = new ArrayDeque<>();
for (PipelineNode.PTransformNode initialConsumer : initialNodes) {
fusionCandidates.addAll(pipeline.getOutputPCollections(initialConsumer));
sideInputs.addAll(pipeline.getSideInputs(initialConsumer));
userStates.addAll(pipeline.getUserStates(initialConsumer));
timers.addAll(pipeline.getTimers(initialConsumer));
}
while (!fusionCandidates.isEmpty()) {
PipelineNode.PCollectionNode candidate = fusionCandidates.poll();
if (fusedCollections.contains(candidate) || materializedPCollections.contains(candidate)) {
// This should generally mean we get to a Flatten via multiple paths through the graph and
// we've already determined what to do with the output.
LOG.debug(
"Skipping fusion candidate {} because it is {} in this {}",
candidate,
fusedCollections.contains(candidate) ? "fused" : "materialized",
ExecutableStage.class.getSimpleName());
continue;
}
PCollectionFusibility fusibility =
canFuse(pipeline, candidate, environment, fusedCollections);
switch (fusibility) {
case MATERIALIZE:
materializedPCollections.add(candidate);
break;
case FUSE:
// All of the consumers of the candidate PCollection can be fused into this stage. Do so.
fusedCollections.add(candidate);
fusedTransforms.addAll(pipeline.getPerElementConsumers(candidate));
for (PipelineNode.PTransformNode consumer : pipeline.getPerElementConsumers(candidate)) {
// The outputs of every transform fused into this stage must be either materialized or
// themselves fused away, so add them to the set of candidates.
fusionCandidates.addAll(pipeline.getOutputPCollections(consumer));
sideInputs.addAll(pipeline.getSideInputs(consumer));
}
break;
default:
throw new IllegalStateException(
String.format(
"Unknown type of %s %s",
PCollectionFusibility.class.getSimpleName(), fusibility));
}
}
return ImmutableExecutableStage.ofFullComponents(
pipeline.getComponents(),
environment,
inputPCollection,
sideInputs,
userStates,
timers,
fusedTransforms.build(),
materializedPCollections,
ExecutableStage.DEFAULT_WIRE_CODER_SETTINGS);
} | @Test
public void materializesWithDifferentEnvConsumer() {
// (impulse.out) -> parDo -> parDo.out -> window -> window.out
// Fuses into
// (impulse.out) -> parDo -> (parDo.out)
// (parDo.out) -> window -> window.out
Environment env = Environments.createDockerEnvironment("common");
PTransform parDoTransform =
PTransform.newBuilder()
.putInputs("input", "impulse.out")
.putOutputs("out", "parDo.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
ParDoPayload.newBuilder()
.setDoFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("common")
.build();
PCollection parDoOutput = PCollection.newBuilder().setUniqueName("parDo.out").build();
QueryablePipeline p =
QueryablePipeline.forPrimitivesIn(
partialComponents
.toBuilder()
.putTransforms("parDo", parDoTransform)
.putPcollections("parDo.out", parDoOutput)
.putTransforms(
"window",
PTransform.newBuilder()
.putInputs("input", "parDo.out")
.putOutputs("output", "window.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.ASSIGN_WINDOWS_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("rare")
.build())
.putPcollections(
"window.out", PCollection.newBuilder().setUniqueName("window.out").build())
.putEnvironments("rare", Environments.createDockerEnvironment("rare"))
.putEnvironments("common", env)
.build());
ExecutableStage subgraph =
GreedyStageFuser.forGrpcPortRead(
p, impulseOutputNode, p.getPerElementConsumers(impulseOutputNode));
assertThat(
subgraph.getOutputPCollections(),
contains(PipelineNode.pCollection("parDo.out", parDoOutput)));
assertThat(subgraph.getInputPCollection(), equalTo(impulseOutputNode));
assertThat(subgraph.getEnvironment(), equalTo(env));
assertThat(
subgraph.getTransforms(), contains(PipelineNode.pTransform("parDo", parDoTransform)));
} |
public UserTokenDto setTokenHash(String tokenHash) {
this.tokenHash = checkTokenHash(tokenHash);
return this;
} | @Test
void fail_if_token_hash_is_longer_than_255_characters() {
assertThatThrownBy(() -> new UserTokenDto().setTokenHash(randomAlphabetic(256)))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Token hash length (256) is longer than the maximum authorized (255)");
} |
@VisibleForTesting
public RequestInterceptorChainWrapper getInterceptorChain()
throws IOException {
String user = UserGroupInformation.getCurrentUser().getUserName();
RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
if (chain != null && chain.getRootInterceptor() != null) {
return chain;
}
return initializePipeline(user);
} | @Test
public void testRMAdminPipelineConcurrent() throws InterruptedException {
final String user = "test1";
/*
* ClientTestThread is a thread to simulate a client request to get a
* RMAdminRequestInterceptor for the user.
*/
class ClientTestThread extends Thread {
private RMAdminRequestInterceptor interceptor;
@Override public void run() {
try {
interceptor = pipeline();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
}
private RMAdminRequestInterceptor pipeline()
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
new PrivilegedExceptionAction<RMAdminRequestInterceptor>() {
@Override
public RMAdminRequestInterceptor run() throws Exception {
RequestInterceptorChainWrapper wrapper =
getRouterRMAdminService().getInterceptorChain();
RMAdminRequestInterceptor interceptor =
wrapper.getRootInterceptor();
Assert.assertNotNull(interceptor);
LOG.info("init rm admin interceptor success for user" + user);
return interceptor;
}
});
}
}
/*
* We start the first thread. It should not finish initing a chainWrapper
* before the other thread starts. In this way, the second thread can
* init at the same time of the first one. In the end, we validate that
* the 2 threads get the same chainWrapper without going into error.
*/
ClientTestThread client1 = new ClientTestThread();
ClientTestThread client2 = new ClientTestThread();
client1.start();
client2.start();
client1.join();
client2.join();
Assert.assertNotNull(client1.interceptor);
Assert.assertNotNull(client2.interceptor);
Assert.assertTrue(client1.interceptor == client2.interceptor);
} |
public static List<Uuid> toList(Uuid[] array) {
if (array == null) return null;
List<Uuid> list = new ArrayList<>(array.length);
list.addAll(Arrays.asList(array));
return list;
} | @Test
void testToList() {
assertNull(Uuid.toList(null));
assertEquals(
Arrays.asList(
Uuid.ZERO_UUID, Uuid.fromString("UXyU9i5ARn6W00ON2taeWA")
),
Uuid.toList(new Uuid[]{
Uuid.ZERO_UUID, Uuid.fromString("UXyU9i5ARn6W00ON2taeWA")
})
);
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void deleteWebhook() {
BaseResponse response = bot.execute(new DeleteWebhook().dropPendingUpdates(true));
assertTrue(response.isOk());
} |
@Override
public void abort() {
// Signal the read loop to abort on the next record and async abort any iterators.
// TODO: Also interrupt the execution thread.
for (Operation op : operations) {
Preconditions.checkState(op instanceof ReadOperation || op instanceof ReceivingOperation);
if (op instanceof ReadOperation) {
((ReadOperation) op).abortReadLoop();
}
}
} | @Test
public void testAbort() throws Exception {
// Operation must be an instance of ReadOperation or ReceivingOperation per preconditions
// in MapTaskExecutor.
ReadOperation o1 = Mockito.mock(ReadOperation.class);
ReadOperation o2 = Mockito.mock(ReadOperation.class);
ExecutionStateTracker stateTracker = Mockito.spy(ExecutionStateTracker.newForTest());
MapTaskExecutor executor =
new MapTaskExecutor(Arrays.<Operation>asList(o1, o2), counterSet, stateTracker);
Mockito.doAnswer(
invocation -> {
executor.abort();
return null;
})
.when(o1)
.finish();
executor.execute();
Mockito.verify(stateTracker).activate();
Mockito.verify(o1, atLeastOnce()).abortReadLoop();
Mockito.verify(o2, atLeastOnce()).abortReadLoop();
Mockito.verify(stateTracker).deactivate();
} |
static void createUploadDir(
final Path uploadDir, final Logger log, final boolean initialCreation)
throws IOException {
if (!Files.exists(uploadDir)) {
if (initialCreation) {
log.info("Upload directory {} does not exist. ", uploadDir);
} else {
log.warn(
"Upload directory {} has been deleted externally. "
+ "Previously uploaded files are no longer available.",
uploadDir);
}
checkAndCreateUploadDir(uploadDir, log);
}
} | @Tag("org.apache.flink.testutils.junit.FailsInGHAContainerWithRootUser")
@Test
void testCreateUploadDirFails(@TempDir File file) throws Exception {
        assertThat(file.setWritable(false)).isTrue();
final Path testUploadDir = file.toPath().resolve("testUploadDir");
assertThat(Files.exists(testUploadDir)).isFalse();
assertThatThrownBy(
() ->
RestServerEndpoint.createUploadDir(
testUploadDir, NOPLogger.NOP_LOGGER, true))
.isInstanceOf(IOException.class);
} |
@Override
public void transform(Message message, DataType fromType, DataType toType) {
AvroSchema schema = message.getExchange().getProperty(SchemaHelper.CONTENT_SCHEMA, AvroSchema.class);
if (schema == null) {
throw new CamelExecutionException("Missing proper avro schema for data type processing", message.getExchange());
}
try {
byte[] marshalled;
String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null);
if (contentClass != null) {
Class<?> contentType
= message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass);
marshalled = Avro.mapper().writer().forType(contentType).with(schema)
.writeValueAsBytes(message.getBody());
} else {
marshalled = Avro.mapper().writer().forType(JsonNode.class).with(schema)
.writeValueAsBytes(getBodyAsJsonNode(message, schema));
}
message.setBody(marshalled);
message.setHeader(Exchange.CONTENT_TYPE, MimeType.AVRO_BINARY.type());
message.setHeader(SchemaHelper.CONTENT_SCHEMA, schema.getAvroSchema().getFullName());
} catch (InvalidPayloadException | IOException | ClassNotFoundException e) {
throw new CamelExecutionException("Failed to apply Avro binary data type on exchange", message.getExchange(), e);
}
} | @Test
void shouldHandleJsonNode() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
AvroSchema avroSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, avroSchema);
exchange.getMessage().setBody(Json.mapper().readerFor(JsonNode.class).readValue("""
{ "name": "Goofy", "age": 25 }
"""));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
JSONAssert.assertEquals("""
{"name":"Goofy","age":25}
""", Json.mapper().writeValueAsString(
Avro.mapper().reader().with(avroSchema).readTree(exchange.getMessage().getBody(byte[].class))), true);
} |
public static void debug(final Logger logger, final String format, final Supplier<Object> supplier) {
if (logger.isDebugEnabled()) {
logger.debug(format, supplier.get());
}
} | @Test
public void testAtLeastOnceDebugWithFormat() {
when(logger.isDebugEnabled()).thenReturn(true);
LogUtils.debug(logger, "testDebug: {}", supplier);
verify(supplier, atLeastOnce()).get();
} |
public void setHeader(String name, String value) {
parent.headers().put(name, value);
} | @Test
void testSetHeader() {
URI uri = URI.create("http://example.com/test");
HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
HttpResponse httpResp = newResponse(httpReq, 200);
DiscFilterResponse response = new DiscFilterResponse(httpResp);
response.setHeader("name", "value");
assertEquals(response.getHeader("name"), "value");
} |
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
} | @Test
public void should_support_deprecated_props_with_multi_values() {
Settings settings = new MapSettings(definitions);
settings.setProperty("new_multi_values", new String[]{" A ", " B "});
assertThat(settings.getStringArray("new_multi_values")).isEqualTo(new String[]{"A", "B"});
assertThat(settings.getStringArray("old_multi_values")).isEqualTo(new String[]{"A", "B"});
} |
@Override
public TransformResultMetadata getResultMetadata() {
return _resultMetadata;
} | @Test
public void testIntersectIndices() {
ExpressionContext expression = RequestContextUtils.getExpression(
String.format("intersect_indices(%s, %s)", INT_MONO_INCREASING_MV_1, INT_MONO_INCREASING_MV_2));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.INT);
assertFalse(transformFunction.getResultMetadata().isSingleValue());
int[][] expectedValues = new int[NUM_ROWS][];
for (int i = 0; i < NUM_ROWS; i++) {
int len = _intMonoIncreasingMV1Values[i].length;
int[] expectedValue = new int[len - 1];
for (int j = 0; j < expectedValue.length; j++) {
expectedValue[j] = j + 1;
}
expectedValues[i] = expectedValue;
}
testTransformFunctionMV(transformFunction, expectedValues);
} |
public final void doesNotContainEntry(@Nullable Object key, @Nullable Object value) {
checkNoNeedToDisplayBothValues("entrySet()")
.that(checkNotNull(actual).entrySet())
.doesNotContain(immutableEntry(key, value));
} | @Test
public void doesNotContainEntryFailure() {
ImmutableMap<String, String> actual = ImmutableMap.of("kurt", "kluever");
expectFailureWhenTestingThat(actual).doesNotContainEntry("kurt", "kluever");
assertFailureKeys("value of", "expected not to contain", "but was");
assertFailureValue("value of", "map.entrySet()");
assertFailureValue("expected not to contain", "kurt=kluever");
assertFailureValue("but was", "[kurt=kluever]");
} |
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf =
new CommandFormat(1, Integer.MAX_VALUE, OPTION_FOLLOW_LINK,
OPTION_FOLLOW_ARG_LINK, null);
cf.parse(args);
if (cf.getOpt(OPTION_FOLLOW_LINK)) {
getOptions().setFollowLink(true);
} else if (cf.getOpt(OPTION_FOLLOW_ARG_LINK)) {
getOptions().setFollowArgLink(true);
}
// search for first non-path argument (ie starts with a "-") and capture and
// remove the remaining arguments as expressions
LinkedList<String> expressionArgs = new LinkedList<String>();
Iterator<String> it = args.iterator();
boolean isPath = true;
while (it.hasNext()) {
String arg = it.next();
if (isPath) {
if (arg.startsWith("-")) {
isPath = false;
}
}
if (!isPath) {
expressionArgs.add(arg);
it.remove();
}
}
if (args.isEmpty()) {
args.add(Path.CUR_DIR);
}
Expression expression = parseExpression(expressionArgs);
if (!expression.isAction()) {
Expression and = getExpression(And.class);
Deque<Expression> children = new LinkedList<Expression>();
children.add(getExpression(Print.class));
children.add(expression);
and.addChildren(children);
expression = and;
}
setRootExpression(expression);
} | @Test
public void processOptionsUnknown() throws IOException {
Find find = new Find();
find.setConf(conf);
String args = "path -unknown";
try {
find.processOptions(getArgs(args));
fail("Unknown expression not caught");
} catch (IOException e) {
}
} |
@Override
void toHtml() throws IOException {
writeHtmlHeader();
htmlCoreReport.toHtml();
writeHtmlFooter();
} | @Test
public void testJob() throws IOException, SchedulerException {
// job quartz
initJobGlobalListener();
getJobCounter().clear();
//Grab the Scheduler instance from the Factory
final Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
try {
// and start it off
scheduler.start();
final Random random = new Random();
//Define a Trigger that will fire "later"
final JobDetail job2 = JobBuilder.newJob(JobTestImpl.class)
.withIdentity("job" + random.nextInt()).build();
final Trigger trigger2 = TriggerBuilder.newTrigger()
.withIdentity("trigger" + random.nextInt())
.startAt(new Date(System.currentTimeMillis() + 60000))
.withSchedule(SimpleScheduleBuilder.simpleSchedule().withIntervalInHours(48)
.repeatForever())
.build();
scheduler.scheduleJob(job2, trigger2);
scheduler.pauseJob(job2.getKey());
final JobDetail job3 = JobBuilder.newJob(JobTestImpl.class)
.withIdentity("job" + random.nextInt()).build();
// cron trigger that will never fire
final Trigger trigger3 = TriggerBuilder.newTrigger()
.withIdentity("crontrigger" + random.nextInt())
.withSchedule(CronScheduleBuilder.cronSchedule("0 0 0 * * ? 2030")).build();
scheduler.scheduleJob(job3, trigger3);
            // JavaInformations must be re-instantiated to pick up the jobs
            // (but "No job" in the counter)
final List<JavaInformations> javaInformationsList2 = Collections
.singletonList(new JavaInformations(null, true));
final HtmlReport htmlReport = new HtmlReport(collector, null, javaInformationsList2,
Period.TOUT, writer);
htmlReport.toHtml(null, null);
assertNotEmptyAndClear(writer);
            // launch 10 jobs to be reasonably sure that at least one of them fails
            // (randomly, about 2 out of 10 fail)
for (int i = 0; i < 10; i++) {
//Define a Trigger that will fire "now"
final JobDetail job = JobBuilder.newJob(JobTestImpl.class)
.withIdentity("job" + random.nextInt()).withDescription("description")
.build();
final Trigger trigger = TriggerBuilder.newTrigger()
.withIdentity("trigger" + random.nextInt()).startNow()
                        // so that the jobs stay running after the first execution
.withSchedule(SimpleScheduleBuilder.simpleSchedule()
.withIntervalInMinutes(1).repeatForever())
.build();
//Schedule the job with the trigger
scheduler.scheduleJob(job, trigger);
}
            // JobTestImpl sleeps for at most 2s, so wait for the jobs before counting them
try {
Thread.sleep(3000);
} catch (final InterruptedException e) {
throw new IllegalStateException(e);
}
            // JavaInformations must be re-instantiated to pick up the jobs
setProperty(Parameter.SYSTEM_ACTIONS_ENABLED, Boolean.FALSE.toString());
final List<JavaInformations> javaInformationsList3 = Collections
.singletonList(new JavaInformations(null, true));
final HtmlReport htmlReport3 = new HtmlReport(collector, null, javaInformationsList3,
Period.TOUT, writer);
htmlReport3.toHtml(null, null);
assertNotEmptyAndClear(writer);
} finally {
scheduler.shutdown();
getJobCounter().clear();
destroyJobGlobalListener();
}
} |
@Override
public int hashCode() {
return System.identityHashCode(this);
} | @Test
public void identity() {
assertEquals(a, a);
assertNotEquals(a, b);
assertNotEquals(a.hashCode(), b.hashCode());
} |
public StateStore getStore() {
return store;
} | @Test
public void shouldGetTimestampedStore() {
givenWrapperWithTimestampedStore();
assertThat(wrapper.getStore(), equalTo(timestampedStore));
} |
public static DistCpOptions parse(String[] args)
throws IllegalArgumentException {
CommandLineParser parser = new CustomParser();
CommandLine command;
try {
command = parser.parse(cliOptions, args, true);
} catch (ParseException e) {
throw new IllegalArgumentException("Unable to parse arguments. " +
Arrays.toString(args), e);
}
DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);
builder
.withAtomicCommit(
command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
.withSyncFolder(
command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
.withDeleteMissing(
command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
.withIgnoreFailures(
command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
.withOverwrite(
command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
.withAppend(
command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
.withSkipCRC(
command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
.withBlocking(
!command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
.withVerboseLog(
command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
.withDirectWrite(
command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
.withUseIterator(
command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
.withUpdateRoot(
command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));
if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
String[] snapshots = getVals(command,
DistCpOptionSwitch.DIFF.getSwitch());
checkSnapshotsArgs(snapshots);
builder.withUseDiff(snapshots[0], snapshots[1]);
}
if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
String[] snapshots = getVals(command,
DistCpOptionSwitch.RDIFF.getSwitch());
checkSnapshotsArgs(snapshots);
builder.withUseRdiff(snapshots[0], snapshots[1]);
}
if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
builder.withFiltersFile(
getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
}
if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
builder.withLogPath(
new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
}
if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
final String workPath = getVal(command,
DistCpOptionSwitch.WORK_PATH.getSwitch());
if (workPath != null && !workPath.isEmpty()) {
builder.withAtomicWorkPath(new Path(workPath));
}
}
if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
builder.withTrackMissing(
new Path(getVal(
command,
DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
}
if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
try {
final Float mapBandwidth = Float.parseFloat(
getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
builder.withMapBandwidth(mapBandwidth);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Bandwidth specified is invalid: " +
getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
}
}
if (command.hasOption(
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
try {
final Integer numThreads = Integer.parseInt(getVal(command,
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
builder.withNumListstatusThreads(numThreads);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Number of liststatus threads is invalid: " + getVal(command,
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
}
}
if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
try {
final Integer maps = Integer.parseInt(
getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
builder.maxMaps(maps);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Number of maps is invalid: " +
getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
}
}
if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
builder.withCopyStrategy(
getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
}
if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
builder.preserve(
getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
}
if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
" option. Ignoring.");
}
if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
" option. Ignoring.");
}
if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
final String chunkSizeStr = getVal(command,
DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch().trim());
try {
int csize = Integer.parseInt(chunkSizeStr);
csize = csize > 0 ? csize : 0;
LOG.info("Set distcp blocksPerChunk to " + csize);
builder.withBlocksPerChunk(csize);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("blocksPerChunk is invalid: "
+ chunkSizeStr, e);
}
}
if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
final String copyBufferSizeStr = getVal(command,
DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch().trim());
try {
int copyBufferSize = Integer.parseInt(copyBufferSizeStr);
builder.withCopyBufferSize(copyBufferSize);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("copyBufferSize is invalid: "
+ copyBufferSizeStr, e);
}
}
return builder.build();
} | @Test
public void testCopyStrategy() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-strategy",
"dynamic",
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
assertThat(options.getCopyStrategy()).isEqualTo("dynamic");
options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
assertThat(options.getCopyStrategy()).isEqualTo(DistCpConstants.UNIFORMSIZE);
} |
public Counter counter(String name) {
return getOrAdd(name, MetricBuilder.COUNTERS);
} | @Test
public void accessingACustomCounterRegistersAndReusesTheCounter() {
final MetricRegistry.MetricSupplier<Counter> supplier = () -> counter;
final Counter counter1 = registry.counter("thing", supplier);
final Counter counter2 = registry.counter("thing", supplier);
assertThat(counter1)
.isSameAs(counter2);
verify(listener).onCounterAdded("thing", counter1);
} |
BrokerResponse getQueueResponse(String queueName) throws IOException {
String queryUrl = getQueueEndpoint(messageVpn, queueName);
HttpResponse response = executeGet(new GenericUrl(baseUrl + queryUrl));
return BrokerResponse.fromHttpResponse(response);
} | @Test
public void testExecuteWithUnauthorized() throws IOException {
// Making it a final array, so that we can reference it from within the MockHttpTransport
// instance
final int[] requestCounter = {0};
MockHttpTransport transport =
new MockHttpTransport() {
@Override
public LowLevelHttpRequest buildRequest(String method, String url) {
return new MockLowLevelHttpRequest() {
@Override
public LowLevelHttpResponse execute() throws IOException {
MockLowLevelHttpResponse response = new MockLowLevelHttpResponse();
if (requestCounter[0] == 0) {
// The first request has to include Basic Auth header
assertTrue(this.getHeaders().containsKey("authorization"));
List<String> authorizationHeaders = this.getHeaders().get("authorization");
assertEquals(1, authorizationHeaders.size());
assertTrue(authorizationHeaders.get(0).contains("Basic"));
assertFalse(this.getHeaders().containsKey("cookie"));
// Set the response to include Session cookies
response
.setHeaderNames(ImmutableList.of("Set-Cookie", "Set-Cookie"))
.setHeaderValues(
ImmutableList.of(
"ProxySession=JddSdJaGo6FYYmQk6nt8jXxFtq6n3FCFR14ebzRGQ5w;"
+ " HttpOnly; SameSite=Strict;"
+ " Path=/proxy; Max-Age=2592000",
"Session=JddSdJaGo6FYYmQk6nt8jXxFtq6n3FCFR14ebzRGQ5w;"
+ " HttpOnly; SameSite=Strict;"
+ " Path=/SEMP; Max-Age=2592000"));
response.setStatusCode(200);
} else if (requestCounter[0] == 1) {
// The second request does not include Basic Auth header
assertFalse(this.getHeaders().containsKey("authorization"));
// It must include a cookie header
assertTrue(this.getHeaders().containsKey("cookie"));
boolean hasSessionCookie =
this.getHeaders().get("cookie").stream()
.filter(
c ->
c.contains(
"Session=JddSdJaGo6FYYmQk6nt8jXxFtq6n3FCFR14ebzRGQ5w"))
.count()
== 1;
assertTrue(hasSessionCookie);
// Let's assume the Session expired - we return the 401
// unauthorized
response.setStatusCode(401);
} else {
// The second request has to be retried with a Basic Auth header
// this time
assertTrue(this.getHeaders().containsKey("authorization"));
List<String> authorizationHeaders = this.getHeaders().get("authorization");
assertEquals(1, authorizationHeaders.size());
assertTrue(authorizationHeaders.get(0).contains("Basic"));
assertFalse(this.getHeaders().containsKey("cookie"));
response.setStatusCode(200);
}
response.setContentType(Json.MEDIA_TYPE);
requestCounter[0]++;
return response;
}
};
}
};
HttpRequestFactory requestFactory = transport.createRequestFactory();
SempBasicAuthClientExecutor client =
new SempBasicAuthClientExecutor(
"http://host", "username", "password", "vpnName", requestFactory);
// The first, initial request
client.getQueueResponse("queue");
// The second request, which will try to authenticate with a cookie, and then with Basic
// Auth when it receives a 401 unauthorized
client.getQueueResponse("queue");
// There should be 3 requests executed:
// the first one is the initial one with Basic Auth,
// the second one uses the session cookie, but we simulate it being expired,
// so there should be a third request with Basic Auth to create a new session.
assertEquals(3, requestCounter[0]);
} |
@Override
public WindowStore<K, V> build() {
if (storeSupplier.retainDuplicates() && enableCaching) {
log.warn("Disabling caching for {} since store was configured to retain duplicates", storeSupplier.name());
enableCaching = false;
}
return new MeteredWindowStore<>(
maybeWrapCaching(maybeWrapLogging(storeSupplier.get())),
storeSupplier.windowSize(),
storeSupplier.metricsScope(),
time,
keySerde,
valueSerde);
} | @Test
public void shouldHaveChangeLoggingStoreByDefault() {
setUp();
final WindowStore<String, String> store = builder.build();
final StateStore next = ((WrappedStateStore) store).wrapped();
assertThat(next, instanceOf(ChangeLoggingWindowBytesStore.class));
} |
@Override
public void clear() {
items.clear();
} | @Test
public void testClear() throws Exception {
//Test set emptying
assertTrue("The set should be initialized empty.", set.isEmpty());
set.clear();
assertTrue("Clear should have no effect on an empty set.", set.isEmpty());
fillSet(10, set);
assertFalse("The set should no longer be empty.", set.isEmpty());
set.clear();
assertTrue("The set should be empty after clear.", set.isEmpty());
} |
@Override
public String getFileId(final DriveItem.Metadata metadata) {
final ItemReference parent = metadata.getParentReference();
if(metadata.getRemoteItem() != null) {
final DriveItem.Metadata remoteMetadata = metadata.getRemoteItem();
final ItemReference remoteParent = remoteMetadata.getParentReference();
if(parent == null) {
return String.join(String.valueOf(Path.DELIMITER),
remoteParent.getDriveId(), remoteMetadata.getId());
}
else {
return String.join(String.valueOf(Path.DELIMITER),
parent.getDriveId(), metadata.getId(),
remoteParent.getDriveId(), remoteMetadata.getId());
}
}
else {
return String.join(String.valueOf(Path.DELIMITER), parent.getDriveId(), metadata.getId());
}
} | @Test
public void testRealBusinessFileIdResponseSharedWithMe() throws Exception {
final DriveItem.Metadata metadata;
try (final InputStream test = getClass().getResourceAsStream("/RealBusinessFileIdResponseSharedWithMe.json")) {
final InputStreamReader reader = new InputStreamReader(test);
metadata = DriveItem.parseJson(session.getClient(), (JsonObject) Json.parse(reader));
}
assertEquals("Id/A", session.getFileId(metadata));
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void getChatMembersCount() {
GetChatMembersCountResponse response = bot.execute(new GetChatMembersCount(chatId));
assertEquals(2, response.count());
} |
public String getNamespace() {
return namespace;
} | @Test
void getNamespace() {
ConfigurationChangeEvent event = new ConfigurationChangeEvent();
event.setNamespace("namespace");
Assertions.assertEquals("namespace", event.getNamespace());
} |
@Override
public Map<String, StepTransition> translate(WorkflowInstance workflowInstance) {
WorkflowInstance instance = objectMapper.convertValue(workflowInstance, WorkflowInstance.class);
if (instance.getRunConfig() != null) {
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_INCOMPLETE
|| instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
Map<String, StepInstance.Status> statusMap =
instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
.collect(
Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()));
if (!statusMap.isEmpty()) {
instance
.getRunConfig()
.setStartStepIds(
statusMap.entrySet().stream()
.filter(
entry ->
!entry.getValue().isComplete()
&& (entry.getValue().isTerminal()
|| entry.getValue() == StepInstance.Status.NOT_CREATED))
.map(Map.Entry::getKey)
.collect(Collectors.toList()));
}
// handle the special case of restarting from a completed step
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
String restartStepId =
RunRequest.getCurrentNode(instance.getRunConfig().getRestartConfig()).getStepId();
if (!instance.getRunConfig().getStartStepIds().contains(restartStepId)) {
instance.getRunConfig().getStartStepIds().add(restartStepId);
}
}
} else {
if (workflowInstance.getRunConfig().getStartStepIds() != null) {
instance
.getRunConfig()
.setStartStepIds(new ArrayList<>(workflowInstance.getRunConfig().getStartStepIds()));
}
if (workflowInstance.getRunConfig().getEndStepIds() != null) {
instance
.getRunConfig()
.setEndStepIds(new ArrayList<>(workflowInstance.getRunConfig().getEndStepIds()));
}
}
}
List<String> startStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getStartStepIds() != null
? instance.getRunConfig().getStartStepIds()
: null;
List<String> endStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getEndStepIds() != null
? instance.getRunConfig().getEndStepIds()
: null;
return WorkflowGraph.computeDag(instance.getRuntimeWorkflow(), startStepIds, endStepIds);
} | @Test
public void testTranslateForRestartFromBeginning() {
instance.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_BEGINNING);
Map<String, StepTransition> dag = translator.translate(instance);
Assert.assertEquals(
new HashSet<>(Arrays.asList("job1", "job.2", "job3", "job4")), dag.keySet());
} |
@Override public SpanCustomizer name(String name) {
return tracer.currentSpanCustomizer().name(name);
} | @Test void name() {
span.start();
try (SpanInScope scope = tracing.tracer().withSpanInScope(span)) {
spanCustomizer.name("newname");
}
span.flush();
assertThat(spans).extracting(MutableSpan::name)
.containsExactly("newname");
} |
@Override
public MetadataNode child(String name) {
String value = image.data().get(name);
if (value == null) return null;
return new MetadataNode() {
@Override
public boolean isDirectory() {
return false;
}
@Override
public void print(MetadataNodePrinter printer) {
if (printer.redactionCriteria().
shouldRedactConfig(image.resource().type(), name)) {
printer.output("[redacted]");
} else {
printer.output(value);
}
}
};
} | @Test
public void testUnknownChild() {
assertNull(NODE.child("also.non.secret"));
} |
@Override
public String toString() {
return "Service{" + "name='" + name + '\'' + ", protectThreshold=" + protectThreshold + ", appName='" + appName
+ '\'' + ", groupName='" + groupName + '\'' + ", metadata=" + metadata + '}';
} | @Test
void testToString() {
Service service = new Service("service");
service.setGroupName("group");
service.setAppName("app");
service.setProtectThreshold(1.0f);
service.setMetadata(Collections.singletonMap("a", "b"));
assertEquals("Service{name='service', protectThreshold=1.0, appName='app', groupName='group', metadata={a=b}}",
service.toString());
} |
protected String host(Reconciliation reconciliation, String podName) {
return DnsNameGenerator.podDnsName(reconciliation.namespace(), KafkaResources.zookeeperHeadlessServiceName(reconciliation.name()), podName);
} | @Test
public void testGetHostReturnsCorrectHostForGivenPod() {
assertThat(new ZookeeperLeaderFinder(vertx, this::backoff).host(new Reconciliation("test", "Kafka", "myproject", "my-cluster"), KafkaResources.zookeeperPodName("my-cluster", 3)),
is("my-cluster-zookeeper-3.my-cluster-zookeeper-nodes.myproject.svc.cluster.local"));
} |
@Override
public void onDeserializationFailure(
final String source,
final String changelog,
final byte[] data
) {
// NOTE: this only happens for values, we should never auto-register key schemas
final String sourceSubject = KsqlConstants.getSRSubject(source, false);
final String changelogSubject = KsqlConstants.getSRSubject(changelog, false);
// all schema registry events start with a magic byte 0x0 and then four bytes
// indicating the schema id - we extract that schema id from the data that failed
// to deserialize and then register it into the changelog subject
final int id = ByteBuffer.wrap(data, 1, Integer.BYTES).getInt();
final SchemaRegisterEvent event = new SchemaRegisterEvent(id, sourceSubject, changelogSubject);
try {
if (!failedAttempts.contains(event)) {
LOG.info("Trying to fetch & register schema id {} under subject {}", id, changelogSubject);
final ParsedSchema schema = srClient.getSchemaBySubjectAndId(sourceSubject, id);
srClient.register(changelogSubject, schema);
}
} catch (Exception e) {
LOG.warn("Failed during deserialization callback for topic {}. "
+ "Will not try again to register id {} under subject {}.",
source,
id,
changelogSubject,
e
);
failedAttempts.add(event);
}
} | @Test
public void shouldNotRegisterFailedIdTwice() throws IOException, RestClientException {
// Given:
when(srClient.getSchemaBySubjectAndId(KsqlConstants.getSRSubject(SOURCE, false), ID)).thenReturn(schema);
when(srClient.register(KsqlConstants.getSRSubject(CHANGELOG, false), schema)).thenThrow(new KsqlException(""));
final RegisterSchemaCallback call = new RegisterSchemaCallback(srClient);
// When:
call.onDeserializationFailure(SOURCE, CHANGELOG, SOME_DATA);
call.onDeserializationFailure(SOURCE, CHANGELOG, SOME_DATA);
// Then:
verify(srClient, times(1)).getSchemaBySubjectAndId(KsqlConstants.getSRSubject(SOURCE, false), ID);
verify(srClient).register(KsqlConstants.getSRSubject(CHANGELOG, false), schema);
} |
public static <T> Patch<T> diff(List<T> original, List<T> revised, DiffAlgorithmListener progress) {
return DiffUtils.diff(original, revised, DEFAULT_DIFF.create(), progress);
} | @Test
public void testDiff_Change() {
final List<String> changeTest_from = Arrays.asList("aaa", "bbb", "ccc");
final List<String> changeTest_to = Arrays.asList("aaa", "zzz", "ccc");
final Patch<String> patch = DiffUtils.diff(changeTest_from, changeTest_to);
assertNotNull(patch);
assertEquals(1, patch.getDeltas().size());
final AbstractDelta<String> delta = patch.getDeltas().get(0);
assertTrue(delta instanceof ChangeDelta);
assertEquals(new Chunk<>(1, Arrays.asList("bbb")), delta.getSource());
assertEquals(new Chunk<>(1, Arrays.asList("zzz")), delta.getTarget());
} |
public Blade listen() {
return listen(BladeConst.DEFAULT_SERVER_PORT);
} | @Test
public void testListen() throws Exception {
Blade blade = Blade.create();
blade.listen(9001).start().await();
try {
int code = Unirest.get("http://127.0.0.1:9001").asString().getStatus();
assertEquals(404, code);
} finally {
blade.stop();
try {
new Socket("127.0.0.1", 9001);
Assert.fail("Server is still running");
} catch (ConnectException e) {
}
}
} |
public OpenTelemetry getOpenTelemetry() {
return openTelemetrySdkReference.get();
} | @Test
public void testMetricCardinalityIsSet() {
var prometheusExporterPort = 9464;
@Cleanup
var ots = OpenTelemetryService.builder()
.builderCustomizer(getBuilderCustomizer(null,
Map.of(OpenTelemetryService.OTEL_SDK_DISABLED_KEY, "false",
"otel.metrics.exporter", "prometheus",
"otel.exporter.prometheus.port", Integer.toString(prometheusExporterPort))))
.clusterName("openTelemetryServiceCardinalityTestCluster")
.build();
var meter = ots.getOpenTelemetry().getMeter("openTelemetryMetricCardinalityTest");
var counter = meter.counterBuilder("dummyCounter").build();
for (int i = 0; i < OpenTelemetryService.MAX_CARDINALITY_LIMIT + 100; i++) {
counter.add(1, Attributes.of(AttributeKey.stringKey("attribute"), "value" + i));
}
Awaitility.waitAtMost(30, TimeUnit.SECONDS).ignoreExceptions().until(() -> {
var client = new PrometheusMetricsClient("localhost", prometheusExporterPort);
var allMetrics = client.getMetrics();
var actualMetrics = allMetrics.findByNameAndLabels("dummyCounter_total");
var overflowMetric = allMetrics.findByNameAndLabels("dummyCounter_total", "otel_metric_overflow", "true");
return actualMetrics.size() == OpenTelemetryService.MAX_CARDINALITY_LIMIT + 1 && overflowMetric.size() == 1;
});
} |
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
List<ImportValidationFeedback> feedback = new ArrayList<>();
if ( !isEnabled() || !( subject instanceof TransMeta ) ) {
return feedback;
}
TransMeta transMeta = (TransMeta) subject;
String description = transMeta.getDescription();
if ( null != description && minLength <= description.length() ) {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.APPROVAL, "A description is present" ) );
} else {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.ERROR, "A description is not present or is too short." ) );
}
return feedback;
} | @Test
public void testVerifyRule_NotTransMetaParameter_EnabledRule() {
TransformationHasDescriptionImportRule importRule = getImportRule( 10, true );
List<ImportValidationFeedback> feedbackList = importRule.verifyRule( "" );
assertNotNull( feedbackList );
assertTrue( feedbackList.isEmpty() );
} |
public Meter meter() {
return meter;
} | @Test
public void testConstruction() {
final Meter m1 = new TestMeter();
final MeterOperation op = new MeterOperation(m1, MeterOperation.Type.ADD);
assertThat(op.meter(), is(m1));
} |
public static Find find(String regex) {
return find(regex, 0);
} | @Test
@Category(NeedsRunner.class)
public void testFind() {
PCollection<String> output =
p.apply(Create.of("aj", "xj", "yj", "zj")).apply(Regex.find("[xyz]"));
PAssert.that(output).containsInAnyOrder("x", "y", "z");
p.run();
} |
@Override
public InputFile toInputFile() {
return new OSSInputFile(client(), uri(), aliyunProperties(), metrics());
} | @Test
public void testToInputFile() throws IOException {
int dataSize = 1024 * 10;
byte[] data = randomData(dataSize);
OutputFile out =
new OSSOutputFile(ossClient, randomURI(), aliyunProperties, MetricsContext.nullMetrics());
try (OutputStream os = out.create();
InputStream is = new ByteArrayInputStream(data)) {
ByteStreams.copy(is, os);
}
InputFile in = out.toInputFile();
assertThat(in).as("Should be an instance of OSSInputFile").isInstanceOf(OSSInputFile.class);
assertThat(in.exists()).as("OSS file should exist").isTrue();
assertThat(in.location()).as("Should have expected location").isEqualTo(out.location());
assertThat(in.getLength()).as("Should have expected length").isEqualTo(dataSize);
byte[] actual = new byte[dataSize];
try (InputStream as = in.newStream()) {
ByteStreams.readFully(as, actual);
}
assertThat(actual).as("Should have expected content").isEqualTo(data);
} |
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
if (!BuiltInQPChangeNotification.TYPE.equals(notification.getType())) {
return null;
}
BuiltInQPChangeNotificationBuilder profilesNotification = parse(notification);
StringBuilder message = new StringBuilder("The following built-in profiles have been updated:\n\n");
profilesNotification.getProfiles().stream()
.sorted(Comparator.comparing(Profile::getLanguageName).thenComparing(Profile::getProfileName))
.forEach(profile -> {
message.append("\"")
.append(profile.getProfileName())
.append("\" - ")
.append(profile.getLanguageName())
.append(": ")
.append(server.getPublicRootUrl()).append("/profiles/changelog?language=")
.append(profile.getLanguageKey())
.append("&name=")
.append(encode(profile.getProfileName()))
.append("&since=")
.append(formatDate(new Date(profile.getStartDate())))
.append("&to=")
.append(formatDate(new Date(profile.getEndDate())))
.append("\n");
int newRules = profile.getNewRules();
if (newRules > 0) {
message.append(" ").append(newRules).append(" new rule")
.append(plural(newRules))
.append('\n');
}
int updatedRules = profile.getUpdatedRules();
if (updatedRules > 0) {
message.append(" ").append(updatedRules).append(" rule")
.append(updatedRules > 1 ? "s have been updated" : " has been updated")
.append("\n");
}
int removedRules = profile.getRemovedRules();
if (removedRules > 0) {
message.append(" ").append(removedRules).append(" rule")
.append(plural(removedRules))
.append(" removed\n");
}
message.append("\n");
});
message.append("This is a good time to review your quality profiles and update them to benefit from the latest evolutions: ");
message.append(server.getPublicRootUrl()).append("/profiles");
// And finally return the email that will be sent
return new EmailMessage()
.setMessageId(BuiltInQPChangeNotification.TYPE)
.setSubject("Built-in quality profiles have been updated")
.setPlainTextMessage(message.toString());
} | @Test
public void notification_contains_count_of_new_rules() {
String profileName = newProfileName();
String languageKey = newLanguageKey();
String languageName = newLanguageName();
BuiltInQPChangeNotificationBuilder notification = new BuiltInQPChangeNotificationBuilder()
.addProfile(Profile.newBuilder()
.setProfileName(profileName)
.setLanguageKey(languageKey)
.setLanguageName(languageName)
.setNewRules(2)
.build());
EmailMessage emailMessage = underTest.format(notification.build());
assertMessage(emailMessage, "\n 2 new rules\n");
} |
@Override
public void execute(final CommandLine commandLine, final Options options,
RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
String topic = commandLine.getOptionValue('t').trim();
String type = commandLine.getOptionValue('m').trim();
if ("get".equals(type)) {
defaultMQAdminExt.start();
String orderConf =
defaultMQAdminExt.getKVConfig(NamesrvUtil.NAMESPACE_ORDER_TOPIC_CONFIG, topic);
System.out.printf("get orderConf success. topic=[%s], orderConf=[%s] ", topic, orderConf);
return;
} else if ("put".equals(type)) {
defaultMQAdminExt.start();
String orderConf = "";
if (commandLine.hasOption('v')) {
orderConf = commandLine.getOptionValue('v').trim();
}
if (UtilAll.isBlank(orderConf)) {
throw new Exception("please set orderConf with option -v.");
}
defaultMQAdminExt.createOrUpdateOrderConf(topic, orderConf, true);
System.out.printf("update orderConf success. topic=[%s], orderConf=[%s]", topic,
orderConf.toString());
return;
} else if ("delete".equals(type)) {
defaultMQAdminExt.start();
defaultMQAdminExt.deleteKvConfig(NamesrvUtil.NAMESPACE_ORDER_TOPIC_CONFIG, topic);
System.out.printf("delete orderConf success. topic=[%s]", topic);
return;
}
ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
} | @Test
public void testExecute() {
UpdateOrderConfCommand cmd = new UpdateOrderConfCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-t unit-test", "-v default-broker:8", "-m post"};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
assertThat(commandLine.getOptionValue('v').trim()).isEqualTo("default-broker:8");
assertThat(commandLine.getOptionValue('m').trim()).isEqualTo("post");
} |
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
final Statement statement,
final KsqlExecutionContext ksqlEngine,
final KsqlConfig ksqlConfig,
final Map<String, Object> overrides
) {
if (!isPushV2Enabled(ksqlConfig, overrides)) {
return false;
}
if (! (statement instanceof Query)) {
return false;
}
final Query query = (Query) statement;
final SourceFinder sourceFinder = new SourceFinder();
sourceFinder.process(query.getFrom(), null);
// It will be present if it's not a join, which we don't handle
if (!sourceFinder.getSourceName().isPresent()) {
return false;
}
// Find all of the writers to this particular source.
final SourceName sourceName = sourceFinder.getSourceName().get();
final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
// See if the config or override have set the stream to be "latest"
final boolean isLatest = isLatest(ksqlConfig, overrides);
// Cannot be a pull query, i.e. must be a push
return !query.isPullQuery()
// Group by is not supported
&& !query.getGroupBy().isPresent()
// Windowing is not supported
&& !query.getWindow().isPresent()
// Having clause is not supported
&& !query.getHaving().isPresent()
// Partition by is not supported
&& !query.getPartitionBy().isPresent()
// There must be an EMIT CHANGES clause
&& (query.getRefinement().isPresent()
&& query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
// Must be reading from "latest"
&& isLatest
// We only handle a single sink source at the moment from a CTAS/CSAS
&& upstreamQueries.size() == 1
// ROWPARTITION and ROWOFFSET are not currently supported in SPQs
&& !containsDisallowedColumns(query);
} | @Test
public void isScalablePushQuery_false_configNotLatest() {
try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
// When:
expectIsSPQ(ColumnName.of("foo"), columnExtractor);
when(ksqlConfig.getKsqlStreamConfigProp(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG))
.thenReturn(Optional.of("earliest"));
// Then:
assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig,
ImmutableMap.of()),
equalTo(false));
}
} |
public int month() {
return getField(DateField.MONTH);
} | @Test
public void monthTest() {
//noinspection ConstantConditions
int month = DateUtil.parse("2017-07-01").month();
assertEquals(6, month);
} |
public static Thread daemonThread(Runnable r, Class<?> context, String description) {
return daemonThread(r, "hollow", context, description);
} | @Test
public void described_customPlatform() {
Thread thread = daemonThread(() -> {}, "solid", getClass(), "howdy");
assertEquals("solid | ThreadsTest | howdy", thread.getName());
assertTrue(thread.isDaemon());
} |
SourceRecord convertRecord(ConsumerRecord<byte[], byte[]> record) {
String targetTopic = formatRemoteTopic(record.topic());
Headers headers = convertHeaders(record);
return new SourceRecord(
MirrorUtils.wrapPartition(new TopicPartition(record.topic(), record.partition()), sourceClusterAlias),
MirrorUtils.wrapOffset(record.offset()),
targetTopic, record.partition(),
Schema.OPTIONAL_BYTES_SCHEMA, record.key(),
Schema.BYTES_SCHEMA, record.value(),
record.timestamp(), headers);
} | @Test
public void testSerde() {
byte[] key = new byte[]{'a', 'b', 'c', 'd', 'e'};
byte[] value = new byte[]{'f', 'g', 'h', 'i', 'j', 'k'};
Headers headers = new RecordHeaders();
headers.add("header1", new byte[]{'l', 'm', 'n', 'o'});
headers.add("header2", new byte[]{'p', 'q', 'r', 's', 't'});
ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>("topic1", 2, 3L, 4L,
TimestampType.CREATE_TIME, 5, 6, key, value, headers, Optional.empty());
MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(null, null, "cluster7",
new DefaultReplicationPolicy(), null);
SourceRecord sourceRecord = mirrorSourceTask.convertRecord(consumerRecord);
assertEquals("cluster7.topic1", sourceRecord.topic(),
"Failure on cluster7.topic1 consumerRecord serde");
assertEquals(2, sourceRecord.kafkaPartition().intValue(),
"sourceRecord kafka partition is incorrect");
assertEquals(new TopicPartition("topic1", 2), MirrorUtils.unwrapPartition(sourceRecord.sourcePartition()),
"topic1 unwrapped from sourcePartition is incorrect");
assertEquals(3L, MirrorUtils.unwrapOffset(sourceRecord.sourceOffset()).longValue(),
"sourceRecord's sourceOffset is incorrect");
assertEquals(4L, sourceRecord.timestamp().longValue(),
"sourceRecord's timestamp is incorrect");
assertEquals(key, sourceRecord.key(), "sourceRecord's key is incorrect");
assertEquals(value, sourceRecord.value(), "sourceRecord's value is incorrect");
assertEquals(headers.lastHeader("header1").value(), sourceRecord.headers().lastWithName("header1").value(),
"sourceRecord's header1 is incorrect");
assertEquals(headers.lastHeader("header2").value(), sourceRecord.headers().lastWithName("header2").value(),
"sourceRecord's header2 is incorrect");
} |
public String toServiceString() {
return buildString(true, false, true, true);
} | @Test
void testToServiceString() {
URL url = URL.valueOf(
"zookeeper://10.20.130.230:4444/org.apache.dubbo.metadata.report.MetadataReport?version=1.0.0&application=vic&group=aaa");
assertEquals(
"zookeeper://10.20.130.230:4444/aaa/org.apache.dubbo.metadata.report.MetadataReport:1.0.0",
url.toServiceString());
} |
@Override
public int compareTo(LeanHit o) {
int res = (sortData != null)
? compareData(sortData, o.sortData)
: Double.compare(o.relevance, relevance);
return (res != 0) ? res : compareData(gid, o.gid);
} | @Test
void testOrderingByRelevance() {
assertEquals(0, new LeanHit(gidA, 0, 0, 1).compareTo(new LeanHit(gidA, 0, 0, 1)));
verifyTransitiveOrdering(new LeanHit(gidA, 0, 0, 1),
new LeanHit(gidA, 0, 0, 0),
new LeanHit(gidA, 0, 0, -1));
} |
public static String serialize(Object obj) throws JsonProcessingException {
return MAPPER.writeValueAsString(obj);
} | @Test
void serializeHistogram() throws JsonProcessingException {
DHistogram h =
new DHistogram(new TestHistogram(), METRIC, HOST, tags, () -> MOCKED_SYSTEM_MILLIS);
DSeries series = new DSeries();
h.addTo(series);
assertSerialization(
DatadogHttpClient.serialize(series),
new MetricAssertion(MetricType.gauge, true, "4.0", DHistogram.SUFFIX_AVG),
new MetricAssertion(MetricType.gauge, true, "1", DHistogram.SUFFIX_COUNT),
new MetricAssertion(MetricType.gauge, true, "0.5", DHistogram.SUFFIX_MEDIAN),
new MetricAssertion(
MetricType.gauge, true, "0.95", DHistogram.SUFFIX_95_PERCENTILE),
new MetricAssertion(MetricType.gauge, true, "7", DHistogram.SUFFIX_MIN),
new MetricAssertion(MetricType.gauge, true, "6", DHistogram.SUFFIX_MAX));
} |
public ParsedQuery parse(final String query) throws ParseException {
final TokenCollectingQueryParser parser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
parser.setSplitOnWhitespace(true);
parser.setAllowLeadingWildcard(allowLeadingWildcard);
final Query parsed = parser.parse(query);
final ParsedQuery.Builder builder = ParsedQuery.builder().query(query);
builder.tokensBuilder().addAll(parser.getTokens());
final TermCollectingQueryVisitor visitor = new TermCollectingQueryVisitor(ANALYZER, parser.getTokenLookup());
parsed.visit(visitor);
builder.termsBuilder().addAll(visitor.getParsedTerms());
return builder.build();
} | @Test
void testDateRange() throws ParseException {
Assertions.assertThat(parser.parse("otherDate:[now-3d TO now-4d]").terms())
.hasSize(2)
.extracting(ParsedTerm::value)
.contains("now-3d", "now-4d");
Assertions.assertThat(parser.parse("otherDate:[20020101 TO 20030101]").terms())
.hasSize(2)
.extracting(ParsedTerm::value)
.contains("20020101", "20030101");
Assertions.assertThat(parser.parse("otherDate:[now-5d TO now-4d]").terms())
.extracting(ParsedTerm::value)
.contains("now-5d", "now-4d");
Assertions.assertThat(parser.parse("otherDate:[now TO now+60d]").terms())
.extracting(ParsedTerm::value)
.contains("now", "now+60d");
} |
@Override
public OUT extract(Tuple in) {
return in.getField(fieldId);
} | @Test
void testSingleFieldExtraction() throws InstantiationException, IllegalAccessException {
// extract single fields
for (int i = 0; i < Tuple.MAX_ARITY; i++) {
Tuple current = (Tuple) CLASSES[i].newInstance();
for (int j = 0; j < i; j++) {
current.setField(testStrings[j], j);
}
for (int j = 0; j < i; j++) {
assertThat(new FieldFromTuple<String>(j).extract(current))
.isEqualTo(testStrings[j]);
}
}
} |
@Override
protected TableRecords getUndoRows() {
return sqlUndoLog.getBeforeImage();
} | @Test
public void getUndoRows() {
Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getBeforeImage());
} |
@VisibleForTesting
static EntryFilterDefinition getEntryFilterDefinition(NarClassLoader ncl) throws IOException {
String configStr;
try {
configStr = ncl.getServiceDefinition(ENTRY_FILTER_DEFINITION_FILE + ".yaml");
} catch (NoSuchFileException e) {
configStr = ncl.getServiceDefinition(ENTRY_FILTER_DEFINITION_FILE + ".yml");
}
return ObjectMapperFactory.getYamlMapper().reader().readValue(
configStr, EntryFilterDefinition.class
);
} | @Test
public void testReadYamlFile() throws IOException {
try (NarClassLoader cl = mock(NarClassLoader.class)) {
when(cl.getServiceDefinition(ENTRY_FILTER_DEFINITION_FILE + ".yaml"))
.thenThrow(new NoSuchFileException(""));
try {
EntryFilterProvider.getEntryFilterDefinition(cl);
Assert.fail();
} catch (Exception e) {
Assert.assertFalse(e instanceof NoSuchFileException);
}
}
try (NarClassLoader cl = mock(NarClassLoader.class)) {
when(cl.getServiceDefinition(ENTRY_FILTER_DEFINITION_FILE + ".yml"))
.thenThrow(new NoSuchFileException(""));
try {
EntryFilterProvider.getEntryFilterDefinition(cl);
Assert.fail();
} catch (Exception e) {
Assert.assertFalse(e instanceof NoSuchFileException);
}
}
try (NarClassLoader cl = mock(NarClassLoader.class)) {
when(cl.getServiceDefinition(ENTRY_FILTER_DEFINITION_FILE + ".yaml"))
.thenThrow(new NoSuchFileException(""));
when(cl.getServiceDefinition(ENTRY_FILTER_DEFINITION_FILE + ".yml"))
.thenThrow(new NoSuchFileException(""));
try {
EntryFilterProvider.getEntryFilterDefinition(cl);
Assert.fail();
} catch (Exception e) {
Assert.assertTrue(e instanceof NoSuchFileException);
}
}
} |
Mono<ImmutableMap<String, String>> resolve(List<SchemaReference> refs) {
return resolveReferences(refs, new Resolving(ImmutableMap.of(), ImmutableSet.of()))
.map(Resolving::resolved);
} | @Test
void resolvesRefsUsingSrClient() {
mockSrCall("sub1", 1,
new SchemaSubject()
.schema("schema1"));
mockSrCall("sub2", 1,
new SchemaSubject()
.schema("schema2")
.references(
List.of(
new SchemaReference().name("ref2_1").subject("sub2_1").version(2),
new SchemaReference().name("ref2_2").subject("sub1").version(1))));
mockSrCall("sub2_1", 2,
new SchemaSubject()
.schema("schema2_1")
.references(
List.of(
new SchemaReference().name("ref2_1_1").subject("sub2_1_1").version(3),
new SchemaReference().name("ref1").subject("should_not_be_called").version(1)
))
);
mockSrCall("sub2_1_1", 3,
new SchemaSubject()
.schema("schema2_1_1"));
var resolvedRefsMono = schemaReferencesResolver.resolve(
List.of(
new SchemaReference().name("ref1").subject("sub1").version(1),
new SchemaReference().name("ref2").subject("sub2").version(1)));
StepVerifier.create(resolvedRefsMono)
.assertNext(refs ->
assertThat(refs)
.containsExactlyEntriesOf(
// checking map should be ordered
ImmutableMap.<String, String>builder()
.put("ref1", "schema1")
.put("ref2_1_1", "schema2_1_1")
.put("ref2_1", "schema2_1")
.put("ref2_2", "schema1")
.put("ref2", "schema2")
.build()))
.verifyComplete();
} |