focal_method | test_case
---|---
@Override
public void useSmsCode(SmsCodeUseReqDTO reqDTO) {
// Check that the verification code is valid
SmsCodeDO lastSmsCode = validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene());
// Mark the verification code as used
smsCodeMapper.updateById(SmsCodeDO.builder().id(lastSmsCode.getId())
.used(true).usedTime(LocalDateTime.now()).usedIp(reqDTO.getUsedIp()).build());
} | @Test
public void testUseSmsCode_success() {
// Prepare parameters
SmsCodeUseReqDTO reqDTO = randomPojo(SmsCodeUseReqDTO.class, o -> {
o.setMobile("15601691300");
o.setScene(randomEle(SmsSceneEnum.values()).getScene());
});
// Mock data
SqlConstants.init(DbType.MYSQL);
smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> {
o.setMobile(reqDTO.getMobile()).setScene(reqDTO.getScene())
.setCode(reqDTO.getCode()).setUsed(false);
}));
// Invoke
smsCodeService.useSmsCode(reqDTO);
// Assert
SmsCodeDO smsCodeDO = smsCodeMapper.selectOne(null);
assertTrue(smsCodeDO.getUsed());
assertNotNull(smsCodeDO.getUsedTime());
assertEquals(reqDTO.getUsedIp(), smsCodeDO.getUsedIp());
} |
static ArgumentParser argParser() {
ArgumentParser parser = ArgumentParsers
.newArgumentParser("producer-performance")
.defaultHelp(true)
.description("This tool is used to verify the producer performance. To enable transactions, " +
"you can specify a transaction id or set a transaction duration using --transaction-duration-ms. " +
"There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, " +
"set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>.");
MutuallyExclusiveGroup payloadOptions = parser
.addMutuallyExclusiveGroup()
.required(true)
.description("either --record-size or --payload-file must be specified but not both.");
parser.addArgument("--topic")
.action(store())
.required(true)
.type(String.class)
.metavar("TOPIC")
.help("produce messages to this topic");
parser.addArgument("--num-records")
.action(store())
.required(true)
.type(Long.class)
.metavar("NUM-RECORDS")
.dest("numRecords")
.help("number of messages to produce");
payloadOptions.addArgument("--record-size")
.action(store())
.required(false)
.type(Integer.class)
.metavar("RECORD-SIZE")
.dest("recordSize")
.help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file " +
"or --payload-monotonic.");
payloadOptions.addArgument("--payload-file")
.action(store())
.required(false)
.type(String.class)
.metavar("PAYLOAD-FILE")
.dest("payloadFile")
.help("file to read the message payloads from. This works only for UTF-8 encoded text files. " +
"Payloads will be read from this file and a payload will be randomly selected when sending messages. " +
"Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic.");
payloadOptions.addArgument("--payload-monotonic")
.action(storeTrue())
.type(Boolean.class)
.metavar("PAYLOAD-MONOTONIC")
.dest("payloadMonotonic")
.help("payload is monotonically increasing integer. Note that you must provide exactly one of --record-size " +
"or --payload-file or --payload-monotonic.");
parser.addArgument("--payload-delimiter")
.action(store())
.required(false)
.type(String.class)
.metavar("PAYLOAD-DELIMITER")
.dest("payloadDelimiter")
.setDefault("\\n")
.help("provides delimiter to be used when --payload-file is provided. " +
"Defaults to new line. " +
"Note that this parameter will be ignored if --payload-file is not provided.");
parser.addArgument("--throughput")
.action(store())
.required(true)
.type(Double.class)
.metavar("THROUGHPUT")
.help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.");
parser.addArgument("--producer-props")
.nargs("+")
.required(false)
.metavar("PROP-NAME=PROP-VALUE")
.type(String.class)
.dest("producerConfig")
.help("kafka producer related configuration properties like bootstrap.servers,client.id etc. " +
"These configs take precedence over those passed via --producer.config.");
parser.addArgument("--producer.config")
.action(store())
.required(false)
.type(String.class)
.metavar("CONFIG-FILE")
.dest("producerConfigFile")
.help("producer config properties file.");
parser.addArgument("--print-metrics")
.action(storeTrue())
.type(Boolean.class)
.metavar("PRINT-METRICS")
.dest("printMetrics")
.help("print out metrics at the end of the test.");
parser.addArgument("--transactional-id")
.action(store())
.required(false)
.type(String.class)
.metavar("TRANSACTIONAL-ID")
.dest("transactionalId")
.help("The transactional id to use. This config takes precedence over the transactional.id " +
"specified via --producer.config or --producer-props. Note that if the transactional id " +
"is not specified while --transaction-duration-ms is provided, the default value for the " +
"transactional id will be performance-producer- followed by a random uuid.");
parser.addArgument("--transaction-duration-ms")
.action(store())
.required(false)
.type(Long.class)
.metavar("TRANSACTION-DURATION")
.dest("transactionDurationMs")
.help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. " +
"The value should be greater than 0. If the transactional id is specified via --producer-props, " +
"--producer.config, or --transactional-id but --transaction-duration-ms is not specified, " +
"the default value will be 3000.");
return parser;
} | @Test
public void testFractionalThroughput() {
String[] args = new String[] {
"--topic", "Hello-Kafka",
"--num-records", "5",
"--throughput", "1.25",
"--record-size", "100",
"--producer-props", "bootstrap.servers=localhost:9000"};
ArgumentParser parser = ProducerPerformance.argParser();
assertDoesNotThrow(() -> parser.parseArgs(args));
} |
public Page getRegion(int positionOffset, int length)
{
if (positionOffset < 0 || length < 0 || positionOffset + length > positionCount) {
throw new IndexOutOfBoundsException(format("Invalid position %s and length %s in page with %s positions", positionOffset, length, positionCount));
}
// Avoid creating new objects when region is same as original page
if (positionOffset == 0 && length == positionCount) {
return this;
}
// Create a new page view with the specified region
int channelCount = getChannelCount();
Block[] slicedBlocks = new Block[channelCount];
for (int i = 0; i < channelCount; i++) {
slicedBlocks[i] = blocks[i].getRegion(positionOffset, length);
}
return wrapBlocksWithoutCopy(length, slicedBlocks);
} | @Test(expectedExceptions = IndexOutOfBoundsException.class, expectedExceptionsMessageRegExp = "Invalid position 1 and length 1 in page with 0 positions")
public void testGetRegionExceptions()
{
new Page(0).getRegion(1, 1);
} |
@Override
@MethodNotAvailable
public void removeAll() {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testRemoveAllWithKeys() {
adapter.removeAll(singleton(42));
} |
public static AclOperation getDeniedOperation(final String errorMessage) {
final Matcher matcher = DENIED_OPERATION_STRING_PATTERN.matcher(errorMessage);
if (matcher.matches()) {
return AclOperation.fromString(matcher.group(1));
} else {
return AclOperation.UNKNOWN;
}
} | @Test
public void shouldReturnUnknownDeniedOperationFromNoValidAuthorizationMessage() {
// When:
final AclOperation operation = SchemaRegistryUtil.getDeniedOperation(
"INVALID is denied operation Write on Subject: t2-value; error code: 40301");
// Then:
assertThat(operation, is(AclOperation.UNKNOWN));
} |
@Override
public void close() throws IOException {
if(close.get()) {
log.warn(String.format("Skip double close of stream %s", this));
return;
}
try {
if(buffer.size() > 0) {
proxy.write(buffer.toByteArray());
}
// Re-use buffer
buffer.reset();
super.close();
}
finally {
close.set(true);
}
} | @Test
public void testCopy2() throws Exception {
final ByteArrayOutputStream proxy = new ByteArrayOutputStream(40500);
final MemorySegementingOutputStream out = new MemorySegementingOutputStream(proxy, 32768);
final byte[] content = RandomUtils.nextBytes(40500);
out.write(content, 0, 32768);
out.write(content, 32768, 7732);
out.close();
assertArrayEquals(content, proxy.toByteArray());
} |
@Udf(description = "Splits a string into an array of substrings based on a delimiter.")
public List<String> split(
@UdfParameter(
description = "The string to be split. If NULL, then function returns NULL.")
final String string,
@UdfParameter(
description = "The delimiter to split a string by. If NULL, then function returns NULL.")
final String delimiter) {
if (string == null || delimiter == null) {
return null;
}
// Java's split() accepts regular expressions as delimiters, but this UDF split()
// accepts only literal strings. It therefore uses Guava's Splitter, which does not
// interpret regex patterns, avoiding confusion when splitting on regex special
// characters such as '.' and '|'.
try {
// Guava Splitter does not accept empty delimiters. Use the Java split() method instead.
if (delimiter.isEmpty()) {
return Arrays.asList(EMPTY_DELIMITER.split(string));
} else {
return Splitter.on(delimiter).splitToList(string);
}
} catch (final Exception e) {
throw new KsqlFunctionException(
String.format("Invalid delimiter '%s' in the split() function.", delimiter), e);
}
} | @Test
public void shouldSplitBytesByGivenMultipleBytesDelimiter() {
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'a', '-', '-', 'b'}),
ByteBuffer.wrap(new byte[]{'-', '-'})),
contains(
ByteBuffer.wrap(new byte[]{'a'}),
ByteBuffer.wrap(new byte[]{'b'})));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'$', '-', 'a', '-', '-', 'b'}),
ByteBuffer.wrap(new byte[]{'$', '-'})),
contains(
EMPTY_BYTES,
ByteBuffer.wrap(new byte[]{'a', '-', '-', 'b'})));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'a', '-', '-', 'b', '$', '-'}),
ByteBuffer.wrap(new byte[]{'$', '-'})),
contains(
ByteBuffer.wrap(new byte[]{'a', '-', '-', 'b'}),
EMPTY_BYTES));
} |
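
A brief sketch (not from the source) of why the literal-delimiter choice in the comment above matters: with Java's regex-based split(), '.' matches any character, while Guava's Splitter treats the delimiter literally.

// Hypothetical illustration; assumes Guava's Splitter and java.util.Arrays are imported.
Splitter.on(".").splitToList("a.b.c");  // -> [a, b, c] (delimiter taken literally)
Arrays.asList("a.b.c".split("."));      // -> [] ('.' is a regex matching every character)
Arrays.asList("a.b.c".split("\\."));    // -> [a, b, c] (regex form requires escaping)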
@Override
@Deprecated
@SuppressWarnings("unchecked")
public <T extends Number> Counter<T> counter(String name, Class<T> type, Unit unit) {
if (Integer.class.equals(type)) {
return (Counter<T>) new DefaultCounter(unit).asIntCounter();
}
if (Long.class.equals(type)) {
return (Counter<T>) new DefaultCounter(unit).asLongCounter();
}
throw new IllegalArgumentException(
String.format("Counter for type %s is not supported", type.getName()));
} | @Test
public void intCounterNullCheck() {
assertThatThrownBy(() -> new DefaultMetricsContext().counter("name", Integer.class, null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid count unit: null");
} |
public synchronized Topology addSink(final String name,
final String topic,
final String... parentNames) {
internalTopologyBuilder.addSink(name, topic, null, null, null, parentNames);
return this;
} | @Test
public void shouldNotAllowNullNameWhenAddingSink() {
assertThrows(NullPointerException.class, () -> topology.addSink(null, "topic"));
} |
public void stop() {
registry.removeListener(listener);
listener.unregisterAll();
} | @Test
public void cleansUpAfterItselfWhenStopped() throws Exception {
reporter.stop();
try {
getAttributes("gauges", "gauge", "Value", "Number");
failBecauseExceptionWasNotThrown(InstanceNotFoundException.class);
} catch (InstanceNotFoundException e) {
// expected: the reporter unregistered its MBeans when stopped
}
} |
@Override
public int remainingCapacity() {
int sum = 0;
for (BlockingQueue<E> q : this.queues) {
sum += q.remainingCapacity();
}
return sum;
} | @Test
public void testInitialRemainingCapacity() {
assertEquals(10, fcq.remainingCapacity());
} |
@Override
public Optional<DiscreteResource> lookup(DiscreteResourceId id) {
return Optional.empty();
} | @Test
public void testLookup() {
assertThat(sut.lookup(Resources.discrete(DeviceId.deviceId("a")).id()), is(Optional.empty()));
} |
public static <IN1, IN2, OUT> TwoInputTransformation<IN1, IN2, OUT> getTwoInputTransformation(
String operatorName,
AbstractDataStream<IN1> inputStream1,
AbstractDataStream<IN2> inputStream2,
TypeInformation<OUT> outTypeInformation,
TwoInputStreamOperator<IN1, IN2, OUT> operator) {
TwoInputTransformation<IN1, IN2, OUT> transform =
new TwoInputTransformation<>(
inputStream1.getTransformation(),
inputStream2.getTransformation(),
operatorName,
SimpleOperatorFactory.of(operator),
outTypeInformation,
// inputStream1 & 2 share the same env.
inputStream1.getEnvironment().getParallelism(),
false);
TypeInformation<?> keyType = null;
if (inputStream1 instanceof KeyedPartitionStreamImpl) {
KeyedPartitionStreamImpl<?, IN1> keyedInput1 =
(KeyedPartitionStreamImpl<?, IN1>) inputStream1;
keyType = keyedInput1.getKeyType();
transform.setStateKeySelectors(keyedInput1.getKeySelector(), null);
transform.setStateKeyType(keyType);
}
if (inputStream2 instanceof KeyedPartitionStreamImpl) {
KeyedPartitionStreamImpl<?, IN2> keyedInput2 =
(KeyedPartitionStreamImpl<?, IN2>) inputStream2;
TypeInformation<?> keyType2 = keyedInput2.getKeyType();
if (keyType != null && !(keyType.canEqual(keyType2) && keyType.equals(keyType2))) {
throw new UnsupportedOperationException(
"Key types if input KeyedStreams "
+ "don't match: "
+ keyType
+ " and "
+ keyType2
+ ".");
}
transform.setStateKeySelectors(
transform.getStateKeySelector1(), keyedInput2.getKeySelector());
// we might be overwriting the one that's already set, but it's the same
transform.setStateKeyType(keyType2);
}
return transform;
} | @Test
void testGetTwoInputTransformation() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
TwoInputNonBroadcastProcessOperator<Integer, Long, Long> operator =
new TwoInputNonBroadcastProcessOperator<>(
new StreamTestUtils.NoOpTwoInputNonBroadcastStreamProcessFunction());
TwoInputTransformation<Integer, Long, Long> transformation =
StreamUtils.getTwoInputTransformation(
"op",
new NonKeyedPartitionStreamImpl<>(
env, new TestingTransformation<>("t1", Types.INT, 1)),
new NonKeyedPartitionStreamImpl<>(
env, new TestingTransformation<>("t2", Types.LONG, 1)),
Types.LONG,
operator);
assertThat(transformation.getOperator()).isEqualTo(operator);
assertThat(transformation.getOutputType()).isEqualTo(Types.LONG);
assertThat(transformation.getStateKeySelector1()).isNull();
assertThat(transformation.getStateKeySelector2()).isNull();
} |
@Override
public void execute(Context context) {
try (StreamWriter<ProjectDump.Plugin> writer = dumpWriter.newStreamWriter(DumpElement.PLUGINS)) {
Collection<PluginInfo> plugins = pluginRepository.getPluginInfos();
for (PluginInfo plugin : plugins) {
ProjectDump.Plugin.Builder builder = ProjectDump.Plugin.newBuilder();
writer.write(convert(plugin, builder));
}
LoggerFactory.getLogger(getClass()).debug("{} plugins exported", plugins.size());
}
} | @Test
public void test_nullable_exported_fields() {
when(pluginRepository.getPluginInfos()).thenReturn(singletonList(
new PluginInfo("java")));
underTest.execute(new TestComputationStepContext());
ProjectDump.Plugin exportedPlugin = dumpWriter.getWrittenMessagesOf(DumpElement.PLUGINS).get(0);
assertThat(exportedPlugin.getKey()).isEqualTo("java");
// if name is not set, then value is the same as key
assertThat(exportedPlugin.getName()).isEqualTo("java");
assertThat(exportedPlugin.getVersion()).isEmpty();
} |
@Override
public Long createDiyPage(DiyPageCreateReqVO createReqVO) {
// Validate that the name is unique
validateNameUnique(null, createReqVO.getTemplateId(), createReqVO.getName());
// Insert
DiyPageDO diyPage = DiyPageConvert.INSTANCE.convert(createReqVO);
diyPage.setProperty("{}");
diyPageMapper.insert(diyPage);
return diyPage.getId();
} | @Test
public void testCreateDiyPage_success() {
// Prepare parameters
DiyPageCreateReqVO reqVO = randomPojo(DiyPageCreateReqVO.class);
// Invoke
Long diyPageId = diyPageService.createDiyPage(reqVO);
// Assert
assertNotNull(diyPageId);
// Verify that the persisted record's attributes are correct
DiyPageDO diyPage = diyPageMapper.selectById(diyPageId);
assertPojoEquals(reqVO, diyPage);
} |
@Udf
public Integer abs(@UdfParameter final Integer val) {
return (val == null) ? null : Math.abs(val);
} | @Test
public void shouldHandlePositive() {
assertThat(udf.abs(1), is(1));
assertThat(udf.abs(1L), is(1L));
assertThat(udf.abs(1.0), is(1.0));
assertThat(udf.abs(new BigDecimal(1)), is(new BigDecimal(1).abs()));
} |
public static Date parseDate(final String str) {
try {
return new Date(TimeUnit.DAYS.toMillis(
LocalDate.parse(PartialStringToTimestampParser.completeDate(str))
.toEpochDay()));
} catch (DateTimeParseException e) {
throw new KsqlException("Failed to parse date '" + str
+ "': " + e.getMessage()
+ DATE_HELP_MESSAGE,
e
);
}
} | @Test
public void shouldParseDate() {
assertThat(SqlTimeTypes.parseDate("1990"), is(new Date(631152000000L)));
assertThat(SqlTimeTypes.parseDate("1990-01"), is(new Date(631152000000L)));
assertThat(SqlTimeTypes.parseDate("1990-01-01"), is(new Date(631152000000L)));
} |
@Override
public void deleteCategory(Long id) {
// Validate that the category exists
validateProductCategoryExists(id);
// Validate that there are no child categories
if (productCategoryMapper.selectCountByParentId(id) > 0) {
throw exception(CATEGORY_EXISTS_CHILDREN);
}
// Validate that the category is not bound to any SPU
Long spuCount = productSpuService.getSpuCountByCategoryId(id);
if (spuCount > 0) {
throw exception(CATEGORY_HAVE_BIND_SPU);
}
// Delete
productCategoryMapper.deleteById(id);
} | @Test
public void testDeleteCategory_success() {
// Mock data
ProductCategoryDO dbCategory = randomPojo(ProductCategoryDO.class);
productCategoryMapper.insert(dbCategory);// @Sql: insert an existing record first
// Prepare parameters
Long id = dbCategory.getId();
// Invoke
productCategoryService.deleteCategory(id);
// Verify the record no longer exists
assertNull(productCategoryMapper.selectById(id));
} |
@Override
public int configInfoTagCount() {
ConfigInfoTagMapper configInfoTagMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_TAG);
String sql = configInfoTagMapper.count(null);
Integer result = jt.queryForObject(sql, Integer.class);
if (result == null) {
throw new IllegalArgumentException("configInfoTagCount error");
}
return result;
} | @Test
void testConfigInfoTagCount() {
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
//mock count
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenReturn(308);
//execute & verify
int count = externalConfigInfoTagPersistService.configInfoTagCount();
assertEquals(308, count);
//mock count is null
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenReturn(null);
//execute & verify
try {
externalConfigInfoTagPersistService.configInfoTagCount();
assertTrue(false);
} catch (Exception e) {
assertEquals("configInfoTagCount error", e.getMessage());
}
} |
private synchronized boolean validateClientAcknowledgement(long h) {
if (h < 0) {
throw new IllegalArgumentException("Argument 'h' cannot be negative, but was: " + h);
}
if (h > MASK) {
throw new IllegalArgumentException("Argument 'h' cannot be larger than 2^32 -1, but was: " + h);
}
final long oldH = clientProcessedStanzas.get();
final Long lastUnackedX = unacknowledgedServerStanzas.isEmpty() ? null : unacknowledgedServerStanzas.getLast().x;
return validateClientAcknowledgement(h, oldH, lastUnackedX);
} | @Test
public void testValidateClientAcknowledgement_rollover_edgecase_unsent() throws Exception
{
// Setup test fixture.
final long MAX = new BigInteger( "2" ).pow( 32 ).longValue() - 1;
final long h = MAX;
final long oldH = MAX - 1;
final Long lastUnackedX = null;
// Execute system under test.
final boolean result = StreamManager.validateClientAcknowledgement(h, oldH, lastUnackedX);
// Verify results.
assertFalse(result);
} |
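
A minimal side note (assumption: MASK equals 2^32 - 1, matching the BigInteger math in the test above): the XEP-0198 'h' counter is an unsigned 32-bit value, so it wraps around rather than exceeding MASK.

// Hypothetical sketch of the rollover the validator must handle:
long MASK = (1L << 32) - 1;             // 4294967295, the largest valid h
long afterRollover = (MASK + 1) & MASK; // -> 0: the ack following h = MASK reports h = 0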
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
ListOffsetsOptions options) {
AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new ListOffsetsResult(future.all());
} | @Test
public void testListOffsetsLatestTierSpecSpecMinVersion() throws Exception {
Node node = new Node(0, "localhost", 8120);
List<Node> nodes = Collections.singletonList(node);
List<PartitionInfo> pInfos = new ArrayList<>();
pInfos.add(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node}));
final Cluster cluster = new Cluster(
"mockClusterId",
nodes,
pInfos,
Collections.emptySet(),
Collections.emptySet(),
node);
final TopicPartition tp0 = new TopicPartition("foo", 0);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster,
AdminClientConfig.RETRIES_CONFIG, "2")) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE));
env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.latestTiered()));
TestUtils.waitForCondition(() -> env.kafkaClient().requests().stream().anyMatch(request ->
request.requestBuilder().apiKey().messageType == ApiMessageType.LIST_OFFSETS && request.requestBuilder().oldestAllowedVersion() == 9
), "no listOffsets request has the expected oldestAllowedVersion");
}
} |
public List<String> mergePartitions(
MergingStrategy mergingStrategy,
List<String> sourcePartitions,
List<String> derivedPartitions) {
if (!derivedPartitions.isEmpty()
&& !sourcePartitions.isEmpty()
&& mergingStrategy != MergingStrategy.EXCLUDING) {
throw new ValidationException(
"The base table already has partitions defined. You might want to specify "
+ "EXCLUDING PARTITIONS.");
}
if (!derivedPartitions.isEmpty()) {
return derivedPartitions;
}
return sourcePartitions;
} | @Test
void mergePartitionsFromBaseTable() {
List<String> sourcePartitions = Arrays.asList("col1", "col2");
List<String> mergePartitions =
util.mergePartitions(
getDefaultMergingStrategies().get(FeatureOption.PARTITIONS),
sourcePartitions,
Collections.emptyList());
assertThat(mergePartitions).isEqualTo(sourcePartitions);
} |
@UdafFactory(description = "Compute sample standard deviation of column with type Double.",
aggregateSchema = "STRUCT<SUM double, COUNT bigint, M2 double>")
public static TableUdaf<Double, Struct, Double> stdDevDouble() {
return getStdDevImplementation(
0.0,
STRUCT_DOUBLE,
(agg, newValue) -> newValue + agg.getFloat64(SUM),
(agg, newValue) -> newValue * (agg.getInt64(COUNT) + 1) - (agg.getFloat64(SUM) + newValue),
(agg1, agg2) ->
agg1.getFloat64(SUM) / agg1.getInt64(COUNT)
- agg2.getFloat64(SUM) / agg2.getInt64(COUNT),
(agg1, agg2) -> agg1.getFloat64(SUM) + agg2.getFloat64(SUM),
(agg, valueToRemove) -> agg.getFloat64(SUM) - valueToRemove);
} | @Test
public void shouldCalculateStdDevDoubles() {
final TableUdaf<Double, Struct, Double> udaf = stdDevDouble();
Struct agg = udaf.initialize();
final Double[] values = new Double[] {10.2, 13.4, 14.5, 17.8};
for (final Double thisValue : values) {
agg = udaf.aggregate(thisValue, agg);
}
assertThat(agg.getInt64(COUNT), equalTo(4L));
assertThat(agg.getFloat64(SUM), equalTo(55.900000000000006));
assertThat(agg.getFloat64(M2), equalTo(29.48749999999999));
final double standardDev = udaf.map(agg);
assertThat(standardDev, equalTo(9.829166666666664));
} |
protected String messageToString(Message message) {
switch (message.getMessageType()) {
case SYSTEM:
return message.getContent();
case USER:
return humanPrompt + message.getContent();
case ASSISTANT:
return assistantPrompt + message.getContent();
case TOOL:
throw new IllegalArgumentException(TOOL_EXECUTION_NOT_SUPPORTED_FOR_WAI_MODELS);
}
throw new IllegalArgumentException("Unknown message type: " + message.getMessageType());
} | @Test
public void testSystemMessageType() {
Message systemMessage = new SystemMessage("System message");
String expected = "System message";
Assert.assertEquals(expected, converter.messageToString(systemMessage));
} |
@Override
public Deserializer getDeserializer(String type) throws HessianProtocolException {
// If the type is in the filter list, it is a built-in JDK class; delegate directly to the parent class
if (StringUtils.isEmpty(type) || ClassFilter.filterExcludeClass(type)) {
return super.getDeserializer(type);
}
// If it is an array type and its name is in the filter list, it is a JDK class; delegate directly to the parent class
if (type.charAt(0) == ARRAY_PREFIX && ClassFilter.arrayFilter(type)) {
return super.getDeserializer(type);
}
// Check whether a deserializer is already cached
Deserializer deserializer = DESERIALIZER_MAP.get(type);
if (deserializer != null) {
return deserializer;
}
// Custom Throwables use JavaDeserializer, deserializing into Throwable rather than GenericObject
deserializer = getDeserializerForCustomThrowable(type);
if (deserializer != null) {
DESERIALIZER_MAP.putIfAbsent(type, deserializer);
return deserializer;
}
// Create a new deserializer: use GenericClassDeserializer for java.lang.Class, otherwise GenericDeserializer
if (ClassFilter.CLASS_NAME.equals(type)) {
deserializer = GenericClassDeserializer.getInstance();
} else {
deserializer = new GenericDeserializer(type);
}
DESERIALIZER_MAP.putIfAbsent(type, deserializer);
return deserializer;
} | @Test
public void getDeserializer() throws Exception {
Assert.assertEquals(GenericClassDeserializer.class,
factory.getDeserializer(Class.class.getCanonicalName()).getClass());
Assert.assertEquals(GenericDeserializer.class,
factory.getDeserializer(GenericObject.class.getCanonicalName()).getClass());
} |
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer,
final Merger<? super K, V> sessionMerger) {
return aggregate(initializer, sessionMerger, Materialized.with(null, null));
} | @Test
public void shouldNotHaveNullNamed2OnAggregate() {
assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT, sessionMerger, (Named) null));
} |
public void addIndexes(int maxIndex, int[] dictionaryIndexes, int indexCount)
{
if (indexCount == 0 && indexRetainedBytes > 0) {
// Ignore empty segment, since there are other segments present.
return;
}
checkState(maxIndex >= lastMaxIndex, "LastMax is greater than the current max");
lastMaxIndex = maxIndex;
if (maxIndex <= Byte.MAX_VALUE) {
byte[] byteIndexes = new byte[indexCount];
for (int i = 0; i < indexCount; i++) {
byteIndexes[i] = (byte) dictionaryIndexes[i];
}
appendByteIndexes(byteIndexes);
}
else if (maxIndex <= Short.MAX_VALUE) {
short[] shortIndexes = new short[indexCount];
for (int i = 0; i < indexCount; i++) {
shortIndexes[i] = (short) dictionaryIndexes[i];
}
appendShortIndexes(shortIndexes);
}
else {
int[] intIndexes = Arrays.copyOf(dictionaryIndexes, indexCount);
appendIntegerIndexes(intIndexes);
}
} | @Test
public void testEmptyDictionary()
{
DictionaryRowGroupBuilder rowGroupBuilder = new DictionaryRowGroupBuilder();
rowGroupBuilder.addIndexes(-1, new int[0], 0);
byte[] byteIndexes = getByteIndexes(rowGroupBuilder);
assertEquals(0, byteIndexes.length);
} |
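
To make the width selection concrete, an illustrative sequence (index values assumed, not from the source): indexes are narrowed to the smallest primitive type that can hold the running maximum.

// Hypothetical calls tracing the three storage widths:
DictionaryRowGroupBuilder builder = new DictionaryRowGroupBuilder();
builder.addIndexes(100, new int[]{3, 99, 100}, 3);  // maxIndex <= Byte.MAX_VALUE  -> stored as byte[]
builder.addIndexes(1000, new int[]{500, 1000}, 2);  // maxIndex <= Short.MAX_VALUE -> stored as short[]
builder.addIndexes(100000, new int[]{100000}, 1);   // larger                      -> stored as int[]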
@Udf
public <T> List<T> concat(
@UdfParameter(description = "First array of values") final List<T> left,
@UdfParameter(description = "Second array of values") final List<T> right) {
if (left == null && right == null) {
return null;
}
final int leftSize = left != null ? left.size() : 0;
final int rightSize = right != null ? right.size() : 0;
final List<T> result = new ArrayList<>(leftSize + rightSize);
if (left != null) {
result.addAll(left);
}
if (right != null) {
result.addAll(right);
}
return result;
} | @Test
public void shouldConcatArraysBothContainingNulls() {
final List<String> input1 = Arrays.asList(null, "foo", "bar");
final List<String> input2 = Arrays.asList("foo", null);
final List<String> result = udf.concat(input1, input2);
assertThat(result, is(Arrays.asList(null, "foo", "bar", "foo", null)));
} |
public static Map<String, AdvertisedListener> validateAndAnalysisAdvertisedListener(ServiceConfiguration config) {
if (StringUtils.isBlank(config.getAdvertisedListeners())) {
return Collections.emptyMap();
}
Optional<String> firstListenerName = Optional.empty();
Map<String, List<String>> listeners = new LinkedHashMap<>();
for (final String str : StringUtils.split(config.getAdvertisedListeners(), ",")) {
int index = str.indexOf(":");
if (index <= 0) {
throw new IllegalArgumentException("the configure entry `advertisedListeners` is invalid. because "
+ str + " do not contain listener name");
}
String listenerName = StringUtils.trim(str.substring(0, index));
if (!firstListenerName.isPresent()) {
firstListenerName = Optional.of(listenerName);
}
String value = StringUtils.trim(str.substring(index + 1));
listeners.computeIfAbsent(listenerName, k -> new ArrayList<>(2));
listeners.get(listenerName).add(value);
}
if (StringUtils.isBlank(config.getInternalListenerName())) {
config.setInternalListenerName(firstListenerName.get());
}
if (!listeners.containsKey(config.getInternalListenerName())) {
throw new IllegalArgumentException("the `advertisedListeners` configure do not contain "
+ "`internalListenerName` entry");
}
final Map<String, AdvertisedListener> result = new LinkedHashMap<>();
final Map<String, Set<String>> reverseMappings = new LinkedHashMap<>();
for (final Map.Entry<String, List<String>> entry : listeners.entrySet()) {
if (entry.getValue().size() > 2) {
throw new IllegalArgumentException("there are redundant configure for listener `" + entry.getKey()
+ "`");
}
URI pulsarAddress = null, pulsarSslAddress = null, pulsarHttpAddress = null, pulsarHttpsAddress = null;
for (final String strUri : entry.getValue()) {
try {
URI uri = URI.create(strUri);
if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar")) {
if (pulsarAddress == null) {
pulsarAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
} else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar+ssl")) {
if (pulsarSslAddress == null) {
pulsarSslAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
} else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "http")) {
if (pulsarHttpAddress == null) {
pulsarHttpAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
} else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "https")) {
if (pulsarHttpsAddress == null) {
pulsarHttpsAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
}
String hostPort = String.format("%s:%d", uri.getHost(), uri.getPort());
Set<String> sets = reverseMappings.computeIfAbsent(hostPort, k -> new TreeSet<>());
sets.add(entry.getKey());
if (sets.size() > 1) {
throw new IllegalArgumentException("must not specify `" + hostPort
+ "` to different listener.");
}
} catch (Throwable cause) {
throw new IllegalArgumentException("the value " + strUri + " in the `advertisedListeners` "
+ "configure is invalid", cause);
}
}
result.put(entry.getKey(), AdvertisedListener.builder()
.brokerServiceUrl(pulsarAddress)
.brokerServiceUrlTls(pulsarSslAddress)
.brokerHttpUrl(pulsarHttpAddress)
.brokerHttpsUrl(pulsarHttpsAddress)
.build());
}
return result;
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testWithoutListenerNameInAdvertisedListeners() {
ServiceConfiguration config = new ServiceConfiguration();
config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660, internal:pulsar+ssl://127.0.0.1:6651");
config.setInternalListenerName("external");
MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config);
} |
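
For orientation, a small sketch (listener names and addresses assumed, not from the source) of a configuration the validator accepts: each listener name maps to at most one pulsar:// and one pulsar+ssl:// address, and no host:port pair repeats across listeners.

// Hypothetical valid multi-listener configuration:
ServiceConfiguration conf = new ServiceConfiguration();
conf.setAdvertisedListeners("internal:pulsar://192.168.1.11:6660,"
+ "internal:pulsar+ssl://192.168.1.11:6651,"
+ "external:pulsar://broker.example.com:6660");
conf.setInternalListenerName("internal");
Map<String, AdvertisedListener> result =
MultipleListenerValidator.validateAndAnalysisAdvertisedListener(conf);
// result.keySet() -> [internal, external]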
@Override
public String requestMessageForLatestRevisionsSince(SCMPropertyConfiguration scmConfiguration, Map<String, String> materialData, String flyweightFolder, SCMRevision previousRevision) {
Map<String, Object> configuredValues = new LinkedHashMap<>();
configuredValues.put("scm-configuration", jsonResultMessageHandler.configurationToMap(scmConfiguration));
configuredValues.put("scm-data", materialData);
configuredValues.put("flyweight-folder", flyweightFolder);
configuredValues.put("previous-revision", scmRevisionToMap(previousRevision));
return GSON.toJson(configuredValues);
} | @Test
public void shouldBuildRequestBodyForLatestRevisionsSinceRequest() throws Exception {
Date timestamp = new SimpleDateFormat(DATE_FORMAT).parse("2011-07-13T19:43:37.100Z");
Map<String, String> data = new LinkedHashMap<>();
data.put("dataKeyOne", "data-value-one");
data.put("dataKeyTwo", "data-value-two");
SCMRevision previouslyKnownRevision = new SCMRevision("abc.rpm", timestamp, "someuser", "comment", data, null);
String requestBody = messageHandler.requestMessageForLatestRevisionsSince(scmPropertyConfiguration, materialData, "flyweight", previouslyKnownRevision);
String expectedValue = "{\"scm-configuration\":{\"key-one\":{\"value\":\"value-one\"},\"key-two\":{\"value\":\"value-two\"}},\"scm-data\":{\"key-one\":\"value-one\"},\"flyweight-folder\":\"flyweight\"," +
"\"previous-revision\":{\"revision\":\"abc.rpm\",\"timestamp\":\"2011-07-13T19:43:37.100Z\",\"data\":{\"dataKeyOne\":\"data-value-one\",\"dataKeyTwo\":\"data-value-two\"}}}";
assertThat(requestBody, is(expectedValue));
} |
@Override
@NonNull
public String getId() {
return ID;
} | @Test
public void shouldNotProvideIdForMissingCredentials() throws Exception {
User user = login();
String scmPath = "/organizations/" + getOrgName() + "/scm/git/";
String repoPath = scmPath + "?repositoryUrl=" + HTTPS_GITHUB_PUBLIC;
Map resp = new RequestBuilder(baseUrl)
.status(200)
.jwtToken(getJwtToken(j.jenkins,user.getId(), user.getId()))
.crumb( crumb )
.get(repoPath)
.build(Map.class);
assertNull(resp.get("credentialId"));
} |
@Override
public boolean shouldCareAbout(Object entity) {
return securityConfigClasses.stream().anyMatch(aClass -> aClass.isAssignableFrom(entity.getClass()));
} | @Test
public void shouldCareAboutAdminsConfigChange() {
SecurityConfigChangeListener securityConfigChangeListener = new SecurityConfigChangeListener() {
@Override
public void onEntityConfigChange(Object entity) {
}
};
assertThat(securityConfigChangeListener.shouldCareAbout(new AdminsConfig()), is(true));
} |
public static UExpressionStatement create(UExpression expression) {
return new AutoValue_UExpressionStatement(expression);
} | @Test
public void equality() {
new EqualsTester()
.addEqualityGroup(UExpressionStatement.create(UFreeIdent.create("foo")))
.addEqualityGroup(
UExpressionStatement.create(
UBinary.create(Kind.PLUS, ULiteral.intLit(5), ULiteral.intLit(2))))
.testEquals();
} |
@Override
public TimeSlot apply(TimeSlot timeSlot, SegmentInMinutes segmentInMinutes) {
int segmentInMinutesDuration = segmentInMinutes.value();
Instant segmentStart = normalizeStart(timeSlot.from(), segmentInMinutesDuration);
Instant segmentEnd = normalizeEnd(timeSlot.to(), segmentInMinutesDuration);
TimeSlot normalized = new TimeSlot(segmentStart, segmentEnd);
TimeSlot minimalSegment = new TimeSlot(segmentStart, segmentStart.plus(segmentInMinutes.value(), ChronoUnit.MINUTES));
if (normalized.within(minimalSegment)) {
return minimalSegment;
}
return normalized;
} | @Test
void noNormalizationWhenSlotStartsAtSegmentStart() {
//given
Instant start = Instant.parse("2023-09-09T00:15:00Z");
Instant end = Instant.parse("2023-09-09T00:30:00Z");
TimeSlot timeSlot = new TimeSlot(start, end);
Instant start2 = Instant.parse("2023-09-09T00:30:00Z");
Instant end2 = Instant.parse("2023-09-09T00:45:00Z");
TimeSlot timeSlot2 = new TimeSlot(start2, end2);
SegmentInMinutes fifteenMinutes = SegmentInMinutes.of(15, FIFTEEN_MINUTES_SEGMENT_DURATION);
//when
TimeSlot normalized = SLOT_TO_NORMALIZED_SLOT.apply(timeSlot, fifteenMinutes);
TimeSlot normalized2 = SLOT_TO_NORMALIZED_SLOT.apply(timeSlot2, fifteenMinutes);
//then
assertEquals(Instant.parse("2023-09-09T00:15:00Z"), normalized.from());
assertEquals(Instant.parse("2023-09-09T00:30:00Z"), normalized.to());
assertEquals(Instant.parse("2023-09-09T00:30:00Z"), normalized2.from());
assertEquals(Instant.parse("2023-09-09T00:45:00Z"), normalized2.to());
} |
@Override
public Consumer createConsumer(Processor aProcessor) throws Exception {
// validate that the endpoint is configured properly
if (getMonitorType() != null) {
if (!isPlatformServer()) {
throw new IllegalArgumentException(ERR_PLATFORM_SERVER);
}
if (ObjectHelper.isEmpty(getObservedAttribute())) {
throw new IllegalArgumentException(ERR_OBSERVED_ATTRIBUTE);
}
if (getMonitorType().equals("string")) {
if (ObjectHelper.isEmpty(getStringToCompare())) {
throw new IllegalArgumentException(ERR_STRING_TO_COMPARE);
}
if (!isNotifyDiffer() && !isNotifyMatch()) {
throw new IllegalArgumentException(ERR_STRING_NOTIFY);
}
} else if (getMonitorType().equals("gauge")) {
if (!isNotifyHigh() && !isNotifyLow()) {
throw new IllegalArgumentException(ERR_GAUGE_NOTIFY);
}
if (getThresholdHigh() == null) {
throw new IllegalArgumentException(ERR_THRESHOLD_HIGH);
}
if (getThresholdLow() == null) {
throw new IllegalArgumentException(ERR_THRESHOLD_LOW);
}
}
JMXMonitorConsumer answer = new JMXMonitorConsumer(this, aProcessor);
configureConsumer(answer);
return answer;
} else {
// shouldn't need any other validation.
JMXConsumer answer = new JMXConsumer(this, aProcessor);
configureConsumer(answer);
return answer;
}
} | @Test
public void noNotifyHighOrNotifyLow() throws Exception {
JMXEndpoint ep = context.getEndpoint(
"jmx:platform?objectDomain=FooDomain&objectName=theObjectName&monitorType=gauge&observedAttribute=foo",
JMXEndpoint.class);
try {
ep.createConsumer(null);
fail("expected exception");
} catch (IllegalArgumentException e) {
assertEquals(JMXEndpoint.ERR_GAUGE_NOTIFY, e.getMessage());
}
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testReturnCommittedTransactions() {
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
int currentOffset = 0;
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
commitTransaction(buffer, 1L, currentOffset);
buffer.flip();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
// normal fetch
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(body -> {
FetchRequest request = (FetchRequest) body;
assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
return true;
}, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
assertTrue(fetchedRecords.containsKey(tp0));
assertEquals(fetchedRecords.get(tp0).size(), 2);
} |
@Override
public void onMetaDataChanged(final List<MetaData> changed, final DataEventTypeEnum eventType) {
if (CollectionUtils.isEmpty(changed)) {
return;
}
this.updateMetaDataCache();
this.afterMetaDataChanged(changed, eventType);
} | @Test
public void testOnMetaDataChanged() {
List<MetaData> empty = Lists.newArrayList();
DataEventTypeEnum eventType = mock(DataEventTypeEnum.class);
listener.onMetaDataChanged(empty, eventType);
assertFalse(listener.getCache().containsKey(ConfigGroupEnum.META_DATA.name()));
List<MetaData> metaDatas = Lists.newArrayList(mock(MetaData.class));
listener.onMetaDataChanged(metaDatas, eventType);
assertTrue(listener.getCache().containsKey(ConfigGroupEnum.META_DATA.name()));
} |
public CompletableFuture<NotifyClientTerminationResponse> notifyClientTermination(ProxyContext ctx,
NotifyClientTerminationRequest request) {
CompletableFuture<NotifyClientTerminationResponse> future = new CompletableFuture<>();
try {
String clientId = ctx.getClientID();
LanguageCode languageCode = LanguageCode.valueOf(ctx.getLanguage());
Settings clientSettings = grpcClientSettingsManager.removeAndGetClientSettings(ctx);
switch (clientSettings.getClientType()) {
case PRODUCER:
for (Resource topic : clientSettings.getPublishing().getTopicsList()) {
String topicName = topic.getName();
GrpcClientChannel channel = this.grpcChannelManager.removeChannel(clientId);
if (channel != null) {
ClientChannelInfo clientChannelInfo = new ClientChannelInfo(channel, clientId, languageCode, MQVersion.Version.V5_0_0.ordinal());
this.messagingProcessor.unRegisterProducer(ctx, topicName, clientChannelInfo);
}
}
break;
case PUSH_CONSUMER:
case SIMPLE_CONSUMER:
validateConsumerGroup(request.getGroup());
String consumerGroup = request.getGroup().getName();
GrpcClientChannel channel = this.grpcChannelManager.removeChannel(clientId);
if (channel != null) {
ClientChannelInfo clientChannelInfo = new ClientChannelInfo(channel, clientId, languageCode, MQVersion.Version.V5_0_0.ordinal());
this.messagingProcessor.unRegisterConsumer(ctx, consumerGroup, clientChannelInfo);
}
break;
default:
future.complete(NotifyClientTerminationResponse.newBuilder()
.setStatus(ResponseBuilder.getInstance().buildStatus(Code.UNRECOGNIZED_CLIENT_TYPE, clientSettings.getClientType().name()))
.build());
return future;
}
future.complete(NotifyClientTerminationResponse.newBuilder()
.setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name()))
.build());
} catch (Throwable t) {
future.completeExceptionally(t);
}
return future;
} | @Test
public void testConsumerNotifyClientTermination() throws Throwable {
ProxyContext context = createContext();
when(this.grpcClientSettingsManager.removeAndGetClientSettings(any())).thenReturn(Settings.newBuilder()
.setClientType(ClientType.PUSH_CONSUMER)
.build());
ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class);
doNothing().when(this.messagingProcessor).unRegisterConsumer(any(), anyString(), channelInfoArgumentCaptor.capture());
this.sendConsumerTelemetry(context);
this.sendConsumerHeartbeat(context);
NotifyClientTerminationResponse response = this.clientActivity.notifyClientTermination(
context,
NotifyClientTerminationRequest.newBuilder()
.setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build())
.build()
).get();
assertEquals(Code.OK, response.getStatus().getCode());
ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue();
assertClientChannelInfo(clientChannelInfo, CONSUMER_GROUP);
} |
public void setDisplayNameOrNull(String displayName) throws IOException {
setDisplayName(displayName);
} | @Test
public void testSetDisplayNameOrNull() throws Exception {
final String projectName = "projectName";
final String displayName = "displayName";
StubAbstractItem i = new StubAbstractItem();
i.doSetName(projectName);
assertNull(i.getDisplayNameOrNull());
i.setDisplayNameOrNull(displayName);
assertEquals(displayName, i.getDisplayNameOrNull());
assertEquals(displayName, i.getDisplayName());
} |
@Override
public PipelinedSubpartitionView createReadView(
BufferAvailabilityListener availabilityListener) {
synchronized (buffers) {
checkState(!isReleased);
checkState(
readView == null,
"Subpartition %s of is being (or already has been) consumed, "
+ "but pipelined subpartitions can only be consumed once.",
getSubPartitionIndex(),
parent.getPartitionId());
LOG.debug(
"{}: Creating read view for subpartition {} of partition {}.",
parent.getOwningTaskName(),
getSubPartitionIndex(),
parent.getPartitionId());
readView = new PipelinedSubpartitionView(this, availabilityListener);
}
return readView;
} | @TestTemplate
void testIllegalReadViewRequest() throws Exception {
final PipelinedSubpartition subpartition = createSubpartition();
// Successful request
assertThat(subpartition.createReadView(new NoOpBufferAvailablityListener())).isNotNull();
assertThatThrownBy(() -> subpartition.createReadView(new NoOpBufferAvailablityListener()))
.withFailMessage(
"Did not throw expected exception after duplicate notifyNonEmpty view request.")
.isInstanceOf(IllegalStateException.class);
} |
public Optional<ShardingTable> findShardingTableByActualTable(final String actualTableName) {
for (ShardingTable each : shardingTables.values()) {
if (each.isExisted(actualTableName)) {
return Optional.of(each);
}
}
return Optional.empty();
} | @Test
void assertFindTableRuleByActualTable() {
assertTrue(createMaximumShardingRule().findShardingTableByActualTable("table_0").isPresent());
} |
@CheckForNull
public View getView(final String name) {
ViewGroup group = Jenkins.get();
View view = null;
final StringTokenizer tok = new StringTokenizer(name, "/");
while (tok.hasMoreTokens()) {
String viewName = tok.nextToken();
view = group.getView(viewName);
if (view == null) {
group.checkPermission(View.READ);
throw new IllegalArgumentException(String.format(
"No view named %s inside view %s",
viewName, group.getDisplayName()
));
}
view.checkPermission(View.READ);
if (view instanceof ViewGroup) {
group = (ViewGroup) view;
} else if (tok.hasMoreTokens()) {
throw new IllegalStateException(view.getViewName() + " view can not contain views");
}
}
return view;
} | @Test public void reportViewSpaceNameRequestAsIAE() {
Jenkins jenkins = mock(Jenkins.class);
try (MockedStatic<Jenkins> mocked = mockStatic(Jenkins.class)) {
mockJenkins(mocked, jenkins);
final IllegalArgumentException e = assertThrows("No exception thrown. Expected IllegalArgumentException",
IllegalArgumentException.class, () -> assertNull(handler.getView(" ")));
assertEquals("No view named inside view Jenkins", e.getMessage());
verifyNoInteractions(setter);
}
} |
@Override
public Integer doCall() throws Exception {
List<Row> rows = new ArrayList<>();
JsonObject plugins = loadConfig().getMap("plugins");
plugins.forEach((key, value) -> {
JsonObject details = (JsonObject) value;
String name = details.getStringOrDefault("name", key);
String command = details.getStringOrDefault("command", name);
String dependency = details.getStringOrDefault("dependency",
"org.apache.camel:camel-jbang-plugin-%s".formatted(command));
String description
= details.getStringOrDefault("description", "Plugin %s called with command %s".formatted(name, command));
rows.add(new Row(name, command, dependency, description));
});
printRows(rows);
if (all) {
rows.clear();
for (PluginType camelPlugin : PluginType.values()) {
if (plugins.get(camelPlugin.getName()) == null) {
String dependency = "org.apache.camel:camel-jbang-plugin-%s".formatted(camelPlugin.getCommand());
rows.add(new Row(
camelPlugin.getName(), camelPlugin.getCommand(), dependency,
camelPlugin.getDescription()));
}
}
if (!rows.isEmpty()) {
printer().println();
printer().println("Supported plugins:");
printer().println();
printRows(rows);
}
}
return 0;
} | @Test
public void shouldGetDefaultPlugins() throws Exception {
PluginGet command = new PluginGet(new CamelJBangMain().withPrinter(printer));
command.all = true;
command.doCall();
List<String> output = printer.getLines();
Assertions.assertEquals(6, output.size());
Assertions.assertEquals("Supported plugins:", output.get(0));
Assertions.assertEquals("NAME COMMAND DEPENDENCY DESCRIPTION",
output.get(2));
Assertions.assertEquals(
"camel-k k org.apache.camel:camel-jbang-plugin-k %s"
.formatted(PluginType.CAMEL_K.getDescription()),
output.get(3));
} |
@PublicEvolving
public static CongestionControlRateLimitingStrategyBuilder builder() {
return new CongestionControlRateLimitingStrategyBuilder();
} | @Test
void testInvalidMaxInFlightMessages() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(
() ->
CongestionControlRateLimitingStrategy.builder()
.setMaxInFlightRequests(10)
.setInitialMaxInFlightMessages(0)
.setScalingStrategy(AIMDScalingStrategy.builder(10).build())
.build())
.withMessageContaining("initialMaxInFlightMessages must be a positive integer.");
} |
public Path getLocalPathForWrite(String pathStr,
Configuration conf) throws IOException {
return getLocalPathForWrite(pathStr, SIZE_UNKNOWN, conf);
} | @Test(timeout = 30000)
public void testGetLocalPathForWriteForInvalidPaths() throws Exception {
conf.set(CONTEXT, " ");
try {
dirAllocator.getLocalPathForWrite("/test", conf);
fail("not throwing the exception");
} catch (IOException e) {
assertEquals("Incorrect exception message",
"No space available in any of the local directories.", e.getMessage());
}
} |
public static void main(String[] args) throws IOException {
System.setProperty("hazelcast.tracking.server", "true");
HazelcastInstance hz = Hazelcast.newHazelcastInstance();
printMemberPort(hz);
} | @Test
public void testMain() throws Exception {
System.setProperty("print.port", child.getName());
HazelcastMemberStarter.main(new String[]{});
assertEquals(1, Hazelcast.getAllHazelcastInstances().size());
assertTrue(child.exists());
} |
@VisibleForTesting
ClientConfiguration createBkClientConfiguration(MetadataStoreExtended store, ServiceConfiguration conf) {
ClientConfiguration bkConf = new ClientConfiguration();
if (conf.getBookkeeperClientAuthenticationPlugin() != null
&& conf.getBookkeeperClientAuthenticationPlugin().trim().length() > 0) {
bkConf.setClientAuthProviderFactoryClass(conf.getBookkeeperClientAuthenticationPlugin());
bkConf.setProperty(conf.getBookkeeperClientAuthenticationParametersName(),
conf.getBookkeeperClientAuthenticationParameters());
}
if (conf.isBookkeeperTLSClientAuthentication()) {
bkConf.setTLSClientAuthentication(true);
bkConf.setTLSCertificatePath(conf.getBookkeeperTLSCertificateFilePath());
bkConf.setTLSKeyStore(conf.getBookkeeperTLSKeyFilePath());
bkConf.setTLSKeyStoreType(conf.getBookkeeperTLSKeyFileType());
bkConf.setTLSKeyStorePasswordPath(conf.getBookkeeperTLSKeyStorePasswordPath());
bkConf.setTLSProviderFactoryClass(conf.getBookkeeperTLSProviderFactoryClass());
bkConf.setTLSTrustStore(conf.getBookkeeperTLSTrustCertsFilePath());
bkConf.setTLSTrustStoreType(conf.getBookkeeperTLSTrustCertTypes());
bkConf.setTLSTrustStorePasswordPath(conf.getBookkeeperTLSTrustStorePasswordPath());
bkConf.setTLSCertFilesRefreshDurationSeconds(conf.getBookkeeperTlsCertFilesRefreshDurationSeconds());
}
bkConf.setBusyWaitEnabled(conf.isEnableBusyWait());
bkConf.setNumWorkerThreads(conf.getBookkeeperClientNumWorkerThreads());
bkConf.setThrottleValue(conf.getBookkeeperClientThrottleValue());
bkConf.setAddEntryTimeout((int) conf.getBookkeeperClientTimeoutInSeconds());
bkConf.setReadEntryTimeout((int) conf.getBookkeeperClientTimeoutInSeconds());
bkConf.setSpeculativeReadTimeout(conf.getBookkeeperClientSpeculativeReadTimeoutInMillis());
bkConf.setNumChannelsPerBookie(conf.getBookkeeperNumberOfChannelsPerBookie());
bkConf.setUseV2WireProtocol(conf.isBookkeeperUseV2WireProtocol());
bkConf.setEnableDigestTypeAutodetection(true);
bkConf.setStickyReadsEnabled(conf.isBookkeeperEnableStickyReads());
bkConf.setNettyMaxFrameSizeBytes(conf.getMaxMessageSize() + Commands.MESSAGE_SIZE_FRAME_PADDING);
bkConf.setDiskWeightBasedPlacementEnabled(conf.isBookkeeperDiskWeightBasedPlacementEnabled());
bkConf.setMetadataServiceUri(conf.getBookkeeperMetadataStoreUrl());
bkConf.setLimitStatsLogging(conf.isBookkeeperClientLimitStatsLogging());
if (!conf.isBookkeeperMetadataStoreSeparated()) {
// If we're connecting to the same metadata service, with same config, then
// let's share the MetadataStore instance
bkConf.setProperty(AbstractMetadataDriver.METADATA_STORE_INSTANCE, store);
}
if (conf.isBookkeeperClientHealthCheckEnabled()) {
bkConf.enableBookieHealthCheck();
bkConf.setBookieHealthCheckInterval((int) conf.getBookkeeperClientHealthCheckIntervalSeconds(),
TimeUnit.SECONDS);
bkConf.setBookieErrorThresholdPerInterval(conf.getBookkeeperClientHealthCheckErrorThresholdPerInterval());
bkConf.setBookieQuarantineTime((int) conf.getBookkeeperClientHealthCheckQuarantineTimeInSeconds(),
TimeUnit.SECONDS);
bkConf.setBookieQuarantineRatio(conf.getBookkeeperClientQuarantineRatio());
}
bkConf.setReorderReadSequenceEnabled(conf.isBookkeeperClientReorderReadSequenceEnabled());
bkConf.setExplictLacInterval(conf.getBookkeeperExplicitLacIntervalInMills());
bkConf.setGetBookieInfoIntervalSeconds(
conf.getBookkeeperClientGetBookieInfoIntervalSeconds(), TimeUnit.SECONDS);
bkConf.setGetBookieInfoRetryIntervalSeconds(
conf.getBookkeeperClientGetBookieInfoRetryIntervalSeconds(), TimeUnit.SECONDS);
bkConf.setNumIOThreads(conf.getBookkeeperClientNumIoThreads());
PropertiesUtils.filterAndMapProperties(conf.getProperties(), "bookkeeper_")
.forEach((key, value) -> {
log.info("Applying BookKeeper client configuration setting {}={}", key, value);
bkConf.setProperty(key, value);
});
return bkConf;
} | @Test
public void testSetMetadataServiceUriBookkeeperMetadataServiceUri() {
BookKeeperClientFactoryImpl factory = new BookKeeperClientFactoryImpl();
ServiceConfiguration conf = new ServiceConfiguration();
try {
{
String uri = "metadata-store:localhost:2181";
conf.setBookkeeperMetadataServiceUri(uri);
final String expectedUri = "metadata-store:localhost:2181";
assertEquals(factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf)
.getMetadataServiceUri(), expectedUri);
}
{
String uri = "metadata-store:localhost:2181/chroot/ledger";
conf.setBookkeeperMetadataServiceUri(uri);
final String expectedUri = "metadata-store:localhost:2181/chroot/ledger";
assertEquals(factory.createBkClientConfiguration(mock(MetadataStoreExtended.class), conf)
.getMetadataServiceUri(), expectedUri);
}
} catch (ConfigurationException e) {
e.printStackTrace();
fail("Get metadata service uri should be successful", e);
}
} |
public static String toAbsolute(String baseURL, String relativeURL) {
String relURL = relativeURL;
// Relative to protocol
if (relURL.startsWith("//")) {
return StringUtils.substringBefore(baseURL, "//") + "//"
+ StringUtils.substringAfter(relURL, "//");
}
// Relative to domain name
if (relURL.startsWith("/")) {
return getRoot(baseURL) + relURL;
}
// Relative to the full page URL minus ? or #
if (relURL.startsWith("?") || relURL.startsWith("#")) {
// this is a relative url and should have the full page base
return baseURL.replaceFirst("(.*?)([\\?\\#])(.*)", "$1") + relURL;
}
// Relative to last directory/segment
if (!relURL.contains("://")) {
String base = baseURL.replaceFirst("(.*?)([\\?\\#])(.*)", "$1");
if (StringUtils.countMatches(base, '/') > 2) {
base = base.replaceFirst("(.*/)(.*)", "$1");
}
if (base.endsWith("/")) {
// This is a URL relative to the last URL segment
relURL = base + relURL;
} else {
relURL = base + "/" + relURL;
}
}
// Not detected as relative, so return as is
return relURL;
} | @Test
public void testToAbsoluteRelativeToProtocol() {
s = "//www.relative.com/e/f.html";
t = "https://www.relative.com/e/f.html";
assertEquals(t, HttpURL.toAbsolute(absURL, s));
} |
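
To make the four branches concrete, an illustrative set of calls (base URL assumed; the domain-relative result assumes getRoot returns scheme plus authority):

// Hypothetical base URL and outputs traced from the branches above:
String base = "https://example.com/a/b/c.html?x=1";
HttpURL.toAbsolute(base, "//cdn.example.com/x.js"); // protocol-relative -> https://cdn.example.com/x.js
HttpURL.toAbsolute(base, "/root.html");             // domain-relative   -> https://example.com/root.html
HttpURL.toAbsolute(base, "?y=2");                   // query-relative    -> https://example.com/a/b/c.html?y=2
HttpURL.toAbsolute(base, "d.html");                 // segment-relative  -> https://example.com/a/b/d.html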
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
} | @Test
public void literal() throws ScanException {
String input = "abv";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
assertEquals(input, nodeToStringTransformer.transform());
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n, @ParameterName( "scale" ) BigDecimal scale) {
if ( n == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "n", "cannot be null"));
}
if ( scale == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "scale", "cannot be null"));
}
// Based on Table 76: Semantics of numeric functions, the scale is in range −6111 .. 6176
if (scale.compareTo(BigDecimal.valueOf(-6111)) < 0 || scale.compareTo(BigDecimal.valueOf(6176)) > 0) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "scale", "must be in range between -6111 to 6176."));
}
return FEELFnResult.ofResult( n.setScale( scale.intValue(), RoundingMode.HALF_EVEN ) );
} | @Test
void invokeLargerScale() {
FunctionTestUtil.assertResult(decimalFunction.invoke(BigDecimal.valueOf(10.123456789), BigDecimal.valueOf(6))
, BigDecimal.valueOf(10.123457));
} |
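
As a reminder of the rounding mode in play, a short sketch (standard java.math behavior, not from the source) of HALF_EVEN, which setScale uses above:

// HALF_EVEN ("banker's rounding"): ties round toward the even neighbor.
new BigDecimal("2.5").setScale(0, RoundingMode.HALF_EVEN);  // -> 2
new BigDecimal("3.5").setScale(0, RoundingMode.HALF_EVEN);  // -> 4
new BigDecimal("10.123456789").setScale(6, RoundingMode.HALF_EVEN); // -> 10.123457 (matches the test)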
public JSONObject set(String key, Object value) throws JSONException {
return set(key, value, null, false);
} | @Test
public void toBeanNullStrTest() {
final JSONObject json = JSONUtil.createObj(JSONConfig.create().setIgnoreError(true))//
.set("strValue", "null")//
.set("intValue", 123)//
// A child object given the string "null": skipped if errors are ignored, otherwise a conversion exception is thrown
.set("beanValue", "null")//
.set("list", JSONUtil.createArray().set("a").set("b"));
final TestBean bean = json.toBean(TestBean.class);
// When the JSON value is the string "null", it should be treated as a plain string
assertEquals("null", bean.getStrValue());
// When the JSON value is the string "null" and the bean field type does not match, injection should be skipped in ignoreError mode
assertNull(bean.getBeanValue());
} |
public Set<ReplicatedRecord> getRecords() {
return new HashSet<>(storageRef.get().values());
} | @Test
public void testGetRecords() {
assertTrue(recordStore.getRecords().isEmpty());
recordStore.put("key1", "value1");
recordStore.put("key2", "value2");
assertEquals(2, recordStore.getRecords().size());
} |
@Override
public boolean sendHeartbeatMessage(int leaderId) {
var leaderInstance = instanceMap.get(leaderId);
return leaderInstance.isAlive();
} | @Test
void testSendHeartbeatMessage() {
var instance1 = new RingInstance(null, 1, 1);
Map<Integer, Instance> instanceMap = Map.of(1, instance1);
var messageManager = new RingMessageManager(instanceMap);
assertTrue(messageManager.sendHeartbeatMessage(1));
} |
@Override
public ClusterHealth checkCluster() {
checkState(!nodeInformation.isStandalone(), "Clustering is not enabled");
checkState(sharedHealthState != null, "HealthState instance can't be null when clustering is enabled");
Set<NodeHealth> nodeHealths = sharedHealthState.readAll();
Health health = clusterHealthChecks.stream()
.map(clusterHealthCheck -> clusterHealthCheck.check(nodeHealths))
.reduce(Health.GREEN, HealthReducer::merge);
return new ClusterHealth(health, nodeHealths);
} | @Test
public void checkCluster_returns_causes_of_all_ClusterHealthChecks_whichever_their_status() {
when(nodeInformation.isStandalone()).thenReturn(false);
List<String[]> causesGroups = IntStream.range(0, 1 + random.nextInt(20))
.mapToObj(s -> IntStream.range(0, random.nextInt(3)).mapToObj(i -> randomAlphanumeric(3)).toArray(String[]::new))
.toList();
ClusterHealthCheck[] clusterHealthChecks = causesGroups.stream()
.map(HardcodedHealthClusterCheck::new)
.map(ClusterHealthCheck.class::cast)
.toArray(ClusterHealthCheck[]::new);
String[] expectedCauses = causesGroups.stream().flatMap(Arrays::stream).toArray(String[]::new);
HealthCheckerImpl underTest = new HealthCheckerImpl(nodeInformation, new NodeHealthCheck[0], clusterHealthChecks, sharedHealthState);
assertThat(underTest.checkCluster().getHealth().getCauses()).containsOnly(expectedCauses);
} |
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
RequestContext context,
OffsetCommitRequestData request
) throws ApiException {
Group group = validateOffsetCommit(context, request);
// In the old consumer group protocol, the offset commits maintain the session if
// the group is in Stable or PreparingRebalance state.
if (group.type() == Group.GroupType.CLASSIC) {
ClassicGroup classicGroup = (ClassicGroup) group;
if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
classicGroup,
classicGroup.member(request.memberId())
);
}
}
final OffsetCommitResponseData response = new OffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);
request.topics().forEach(topic -> {
final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
if (isMetadataInvalid(partition.committedMetadata())) {
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs,
expireTimestampMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
} | @Test
public void testSimpleGroupOffsetCommit() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> result = context.commitOffset(
new OffsetCommitRequestData()
.setGroupId("foo")
.setTopics(Collections.singletonList(
new OffsetCommitRequestData.OffsetCommitRequestTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100L)
))
))
);
assertEquals(
new OffsetCommitResponseData()
.setTopics(Collections.singletonList(
new OffsetCommitResponseData.OffsetCommitResponseTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new OffsetCommitResponseData.OffsetCommitResponsePartition()
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code())
))
)),
result.response()
);
assertEquals(
Collections.singletonList(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
"foo",
"bar",
0,
new OffsetAndMetadata(
100L,
OptionalInt.empty(),
"",
context.time.milliseconds(),
OptionalLong.empty()
),
MetadataImage.EMPTY.features().metadataVersion()
)),
result.records()
);
// A classic group should have been created.
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
"foo",
false
);
assertNotNull(group);
assertEquals("foo", group.groupId());
} |
@Override
public ParsedLine parse(final String line, final int cursor, final ParseContext context) {
final String trimmed = line.trim();
final int adjCursor = adjustCursor(line, trimmed, cursor);
return delegate.parse(trimmed, adjCursor, context);
} | @Test
public void shouldAdjustCursorIfInRightWhiteSpace() {
expect(delegate.parse(anyString(), eq(4), anyObject()))
.andReturn(parsedLine).anyTimes();
replay(delegate);
parser.parse(" line ", 6, UNSPECIFIED);
parser.parse(" line ", 7, UNSPECIFIED);
parser.parse(" line ", 8, UNSPECIFIED);
} |
public NewIssuesNotification newNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
verifyAssigneesByUuid(assigneesByUuid);
return new NewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
} | @Test
public void newNewIssuesNotification_DetailsSupplier_getUserNameByUuid_always_returns_empty_if_map_argument_is_empty() {
NewIssuesNotification underTest = this.underTest.newNewIssuesNotification(emptyMap());
DetailsSupplier detailsSupplier = readDetailsSupplier(underTest);
assertThat(detailsSupplier.getUserNameByUuid("foo")).isEmpty();
} |
public static NotificationDispatcherMetadata newMetadata() {
return METADATA;
} | @Test
public void reportFailures_notification_is_enable_at_global_level() {
NotificationDispatcherMetadata metadata = ReportAnalysisFailureNotificationHandler.newMetadata();
assertThat(metadata.getProperty(GLOBAL_NOTIFICATION)).isEqualTo("true");
} |
@Override
public void onWorkflowFinalized(Workflow workflow) {
WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput());
WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow);
String reason = workflow.getReasonForIncompletion();
LOG.info(
"Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]",
summary.getIdentity(),
workflow.getWorkflowId(),
workflow.getStatus(),
reason);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"onWorkflowFinalized",
MetricConstants.STATUS_TAG,
workflow.getStatus().name());
if (reason != null
&& workflow.getStatus() == Workflow.WorkflowStatus.FAILED
&& reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) {
LOG.info(
"Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId());
return; // special case doing nothing
}
WorkflowInstance.Status instanceStatus =
instanceDao.getWorkflowInstanceStatus(
summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
if (instanceStatus == null
|| (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) {
LOG.info(
"Workflow {} with execution_id [{}] does not exist or already "
+ "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId(),
instanceStatus,
workflow.getStatus());
return;
}
Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow);
// cancel internally failed tasks
realTaskMap.values().stream()
.filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal())
.forEach(task -> maestroTask.cancel(workflow, task, null));
WorkflowRuntimeOverview overview =
TaskHelper.computeOverview(
objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap);
try {
validateAndUpdateOverview(overview, summary);
switch (workflow.getStatus()) {
case TERMINATED: // stopped due to stop request
if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) {
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
} else {
update(workflow, WorkflowInstance.Status.STOPPED, summary, overview);
}
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here.
Optional<Task.Status> done =
TaskHelper.checkProgress(realTaskMap, summary, overview, true);
switch (done.orElse(Task.Status.IN_PROGRESS)) {
/**
* This is a special status to indicate that the workflow has succeeded. Check {@link
* TaskHelper#checkProgress} for more details.
*/
case FAILED_WITH_TERMINAL_ERROR:
WorkflowInstance.Status nextStatus =
AggregatedViewHelper.deriveAggregatedStatus(
instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
if (!nextStatus.isTerminal()) {
throw new MaestroInternalError(
"Invalid status: [%s], expecting a terminal one", nextStatus);
}
update(workflow, nextStatus, summary, overview);
break;
case FAILED:
case CANCELED: // due to step failure
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
// all other status are invalid
default:
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"invalidStatusOnWorkflowFinalized");
throw new MaestroInternalError(
"Invalid status [%s] onWorkflowFinalized", workflow.getStatus());
}
break;
}
} catch (MaestroInternalError | IllegalArgumentException e) {
// non-retryable error and still fail the instance
LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"nonRetryableErrorOnWorkflowFinalized");
update(
workflow,
WorkflowInstance.Status.FAILED,
summary,
overview,
Details.create(
e.getMessage(), "onWorkflowFinalized is failed with non-retryable error."));
}
} | @Test
public void testWorkflowFinalizedTerminatedForKilled() {
when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TERMINATED);
when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong()))
.thenReturn(WorkflowInstance.Status.IN_PROGRESS);
when(workflow.getReasonForIncompletion()).thenReturn("FAILED-test-reason");
statusListener.onWorkflowFinalized(workflow);
Assert.assertEquals(
1L,
metricRepo
.getCounter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
MaestroWorkflowStatusListener.class,
"type",
"onWorkflowFinalized",
"status",
"TERMINATED")
.count());
verify(instanceDao, times(1))
.updateWorkflowInstance(any(), any(), any(), eq(WorkflowInstance.Status.FAILED), anyLong());
verify(publisher, times(1)).publishOrThrow(any(), any());
} |
@Override
public void v(String tag, String message, Object... args) { } | @Test
public void verboseWithThrowableNotLogged() {
Throwable t = new Throwable("Test Throwable");
logger.v(t, tag, "Hello %s", "World");
assertNotLogged();
} |
@VisibleForTesting
public String validateMobile(String mobile) {
if (StrUtil.isEmpty(mobile)) {
throw exception(SMS_SEND_MOBILE_NOT_EXISTS);
}
return mobile;
} | @Test
public void testCheckMobile_notExists() {
// prepare the parameter
// mock methods
// invoke and assert the exception
assertServiceException(() -> smsSendService.validateMobile(null),
SMS_SEND_MOBILE_NOT_EXISTS);
} |
public static boolean isValidEnsName(String input) {
return isValidEnsName(input, Keys.ADDRESS_LENGTH_IN_HEX);
} | @Test
public void testIsEnsName() {
assertTrue(isValidEnsName("eth"));
assertTrue(isValidEnsName("web3.eth"));
assertTrue(isValidEnsName("0x19e03255f667bdfd50a32722df860b1eeaf4d635.eth"));
assertFalse(isValidEnsName("0x19e03255f667bdfd50a32722df860b1eeaf4d635"));
assertFalse(isValidEnsName("19e03255f667bdfd50a32722df860b1eeaf4d635"));
assertTrue(isValidEnsName(""));
assertTrue(isValidEnsName("."));
} |
@Override
public Object intercept(final Invocation invocation) throws Throwable {
Object[] args = invocation.getArgs();
MappedStatement ms = (MappedStatement) args[0];
Object parameter = args[1];
Executor executor = (Executor) invocation.getTarget();
for (Class<?> superClass = parameter.getClass(); superClass != Object.class; superClass = superClass.getSuperclass()) {
Arrays.stream(superClass.getDeclaredFields())
.filter(f -> matchParam(parameter, f))
.forEach(f -> ReflectUtils.setFieldValue(parameter, f.getName(), new Timestamp(System.currentTimeMillis())));
}
return executor.update(ms, parameter);
} | @Test
public void interceptTest() throws SQLException {
final PostgreSqlUpdateInterceptor postgreSqlUpdateInterceptor = new PostgreSqlUpdateInterceptor();
final Invocation invocation = mock(Invocation.class);
Object[] args = new Object[2];
args[0] = mock(MappedStatement.class);
args[1] = mock(RuleData.class);
final Executor executor = mock(Executor.class);
when(invocation.getTarget()).thenReturn(executor);
when(invocation.getArgs()).thenReturn(args);
when(executor.update(any(), any())).thenReturn(1);
Assertions.assertDoesNotThrow(() -> postgreSqlUpdateInterceptor.intercept(invocation));
} |
public void finishTransactionBatch(TransactionStateBatch stateBatch, Set<Long> errorReplicaIds) {
Database db = globalStateMgr.getDb(stateBatch.getDbId());
if (db == null) {
stateBatch.writeLock();
try {
writeLock();
try {
stateBatch.setTransactionStatus(TransactionStatus.ABORTED);
LOG.warn("db is dropped during transaction batch, abort transaction {}", stateBatch);
unprotectSetTransactionStateBatch(stateBatch, false);
} finally {
writeUnlock();
}
if (Config.lock_manager_enable_using_fine_granularity_lock) {
long start = System.currentTimeMillis();
editLog.logInsertTransactionStateBatch(stateBatch);
LOG.debug("insert txn state visible for txnIds batch {}, cost: {}ms",
stateBatch.getTxnIds(), System.currentTimeMillis() - start);
}
return;
} finally {
stateBatch.writeUnlock();
}
}
Locker locker = new Locker();
Set<Long> tableIds = Sets.newHashSet();
for (TransactionState transactionState : stateBatch.getTransactionStates()) {
tableIds.addAll(transactionState.getTableIdList());
}
locker.lockTablesWithIntensiveDbLock(db, new ArrayList<>(tableIds), LockType.WRITE);
try {
boolean txnOperated = false;
stateBatch.writeLock();
try {
writeLock();
try {
stateBatch.setTransactionVisibleInfo();
unprotectSetTransactionStateBatch(stateBatch, false);
txnOperated = true;
} finally {
writeUnlock();
stateBatch.afterVisible(TransactionStatus.VISIBLE, txnOperated);
}
if (Config.lock_manager_enable_using_fine_granularity_lock) {
long start = System.currentTimeMillis();
editLog.logInsertTransactionStateBatch(stateBatch);
LOG.debug("insert txn state visible for txnIds batch {}, cost: {}ms",
stateBatch.getTxnIds(), System.currentTimeMillis() - start);
}
updateCatalogAfterVisibleBatch(stateBatch, db);
} finally {
stateBatch.writeUnlock();
}
} finally {
locker.unLockTablesWithIntensiveDbLock(db, new ArrayList<>(tableIds), LockType.WRITE);
}
// post-finish processing for each transaction in the batch
for (TransactionState transactionState : stateBatch.getTransactionStates()) {
GlobalStateMgr.getCurrentState().getOperationListenerBus().onStreamJobTransactionFinish(transactionState);
GlobalStateMgr.getCurrentState().getLocalMetastore().handleMVRepair(transactionState);
}
LOG.info("finish transaction {} batch successfully", stateBatch);
} | @Test
public void testFinishTransactionBatch() throws UserException {
FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr);
DatabaseTransactionMgr masterDbTransMgr = masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1);
long txnId6 = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable6);
TransactionState transactionState6 = masterDbTransMgr.getTransactionState(txnId6);
long txnId7 = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable7);
TransactionState transactionState7 = masterDbTransMgr.getTransactionState(txnId7);
long txnId8 = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable8);
TransactionState transactionState8 = masterDbTransMgr.getTransactionState(txnId8);
List<TransactionState> states = new ArrayList<>();
states.add(transactionState6);
states.add(transactionState7);
states.add(transactionState8);
new MockUp<Table>() {
@Mock
public boolean isCloudNativeTableOrMaterializedView() {
return true;
}
};
TransactionStateBatch stateBatch = new TransactionStateBatch(states);
masterTransMgr.finishTransactionBatch(GlobalStateMgrTestUtil.testDbId1, stateBatch, null);
assertEquals(3, masterDbTransMgr.getRunningTxnNums());
assertEquals(4, masterDbTransMgr.getFinishedTxnNums());
assertEquals(TransactionStatus.VISIBLE, transactionState6.getTransactionStatus());
assertEquals(TransactionStatus.VISIBLE, transactionState7.getTransactionStatus());
assertEquals(TransactionStatus.VISIBLE, transactionState8.getTransactionStatus());
FakeGlobalStateMgr.setGlobalStateMgr(slaveGlobalStateMgr);
slaveTransMgr.replayUpsertTransactionStateBatch(stateBatch);
assertEquals(4, masterDbTransMgr.getFinishedTxnNums());
} |
@Override
public String execute(CommandContext commandContext, String[] args) {
return QosConstants.CLOSE;
} | @Test
void testExecute() throws Exception {
Quit quit = new Quit();
String output = quit.execute(Mockito.mock(CommandContext.class), null);
assertThat(output, equalTo(QosConstants.CLOSE));
} |
@Override
public PluginDescriptor find(Path pluginPath) {
Manifest manifest = readManifest(pluginPath);
return createPluginDescriptor(manifest);
} | @Test
public void testFindNotFound() {
PluginDescriptorFinder descriptorFinder = new ManifestPluginDescriptorFinder();
assertThrows(PluginRuntimeException.class, () -> descriptorFinder.find(pluginsPath.resolve("test-plugin-3")));
} |
public static Driver load(String className) throws DriverLoadException {
final ClassLoader loader = DriverLoader.class.getClassLoader();
return load(className, loader);
} | @Test(expected = DriverLoadException.class)
public void testLoad_String_String_badPath() throws Exception {
String className = "com.mysql.jdbc.Driver";
//we know this is in target/test-classes
//File testClassPath = (new File(this.getClass().getClassLoader().getResource("org.mortbay.jetty.jar").getPath())).getParentFile();
File testClassPath = BaseTest.getResourceAsFile(this, "org.mortbay.jetty.jar").getParentFile();
File driver = new File(testClassPath, "../../src/test/bad/mysql-connector-java-5.1.27-bin.jar");
DriverLoader.load(className, driver.getAbsolutePath());
} |
@Override
public CompletableFuture<JobManagerRunnerResult> getResultFuture() {
return resultFuture;
} | @Test
void testInitializationFailureSetsExceptionHistoryProperly()
throws ExecutionException, InterruptedException {
final CompletableFuture<JobMasterService> jobMasterServiceFuture =
new CompletableFuture<>();
DefaultJobMasterServiceProcess serviceProcess = createTestInstance(jobMasterServiceFuture);
final RuntimeException originalCause = new RuntimeException("Expected RuntimeException");
long beforeFailureTimestamp = System.currentTimeMillis();
jobMasterServiceFuture.completeExceptionally(originalCause);
long afterFailureTimestamp = System.currentTimeMillis();
final RootExceptionHistoryEntry entry =
Iterables.getOnlyElement(
serviceProcess
.getResultFuture()
.get()
.getExecutionGraphInfo()
.getExceptionHistory());
assertInitializationException(
entry.getException(),
originalCause,
entry.getTimestamp(),
beforeFailureTimestamp,
afterFailureTimestamp);
assertThat(entry.isGlobal()).isTrue();
} |
public synchronized long nextId() {
long timestamp = genTime();
if (timestamp < this.lastTimestamp) {
if (this.lastTimestamp - timestamp < timeOffset) {
// tolerate clock drift within the configured offset, to avoid failures caused by NTP time adjustments
timestamp = lastTimestamp;
} else {
// if the server clock has a problem (moved backwards beyond tolerance), raise an error
throw new IllegalStateException(StrUtil.format("Clock moved backwards. Refusing to generate id for {}ms", lastTimestamp - timestamp));
}
}
if (timestamp == this.lastTimestamp) {
final long sequence = (this.sequence + 1) & SEQUENCE_MASK;
if (sequence == 0) {
timestamp = tilNextMillis(lastTimestamp);
}
this.sequence = sequence;
} else {
// issue#I51EJY
if (randomSequenceLimit > 1) {
sequence = RandomUtil.randomLong(randomSequenceLimit);
} else {
sequence = 0L;
}
}
lastTimestamp = timestamp;
return ((timestamp - twepoch) << TIMESTAMP_LEFT_SHIFT)
| (dataCenterId << DATA_CENTER_ID_SHIFT)
| (workerId << WORKER_ID_SHIFT)
| sequence;
} | @Test
public void getSnowflakeLengthTest(){
for (int i = 0; i < 1000; i++) {
final long l = IdUtil.getSnowflake(0, 0).nextId();
assertEquals(19, StrUtil.toString(l).length());
}
} |
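The id composed by nextId packs four fields. A minimal decomposition sketch, assuming the conventional Snowflake widths of 12 sequence bits, 5 worker-id bits and 5 data-center bits (the shift constants are not shown above, so these widths are an assumption):
// Hypothetical layout; masks mirror the SEQUENCE_MASK-style constants.
long id = IdUtil.getSnowflake(0, 0).nextId();
long sequence = id & 0xFFF; // lowest 12 bits
long workerId = (id >> 12) & 0x1F; // next 5 bits
long dataCenterId = (id >> 17) & 0x1F; // next 5 bits
long millisSinceTwepoch = id >> 22; // remaining high bits, offset from twepoch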
public static <InputT> ValueByReduceByBuilder<InputT, InputT> of(PCollection<InputT> input) {
return named(null).of(input);
} | @Test
public void testWindow_applyIf() {
final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings());
final PCollection<Long> output =
ReduceWindow.of(dataset)
.reduceBy(e -> 1L)
.withSortedValues(String::compareTo)
.applyIf(
true,
b ->
b.windowBy(FixedWindows.of(org.joda.time.Duration.standardHours(1)))
.triggeredBy(DefaultTrigger.of())
.discardingFiredPanes())
.output();
final ReduceWindow rw = (ReduceWindow) TestUtils.getProducer(output);
assertTrue(rw.getWindow().isPresent());
@SuppressWarnings("unchecked")
final WindowDesc<?> windowDesc = WindowDesc.of((Window) rw.getWindow().get());
assertEquals(
FixedWindows.of(org.joda.time.Duration.standardHours(1)), windowDesc.getWindowFn());
assertEquals(DefaultTrigger.of(), windowDesc.getTrigger());
assertEquals(AccumulationMode.DISCARDING_FIRED_PANES, windowDesc.getAccumulationMode());
} |
@Deprecated
public static <T> T defaultIfEmpty(String str, Supplier<? extends T> handle, final T defaultValue) {
if (StrUtil.isNotEmpty(str)) {
return handle.get();
}
return defaultValue;
} | @Test
public void defaultIfEmptyTest() {
final String emptyValue = "";
final String dateStr = "2020-10-23 15:12:30";
Instant result1 = ObjectUtil.defaultIfEmpty(emptyValue,
(source) -> DateUtil.parse(source, DatePattern.NORM_DATETIME_PATTERN).toInstant(), Instant.now());
assertNotNull(result1);
Instant result2 = ObjectUtil.defaultIfEmpty(dateStr,
(source) -> DateUtil.parse(source, DatePattern.NORM_DATETIME_PATTERN).toInstant(), Instant.now());
assertNotNull(result2);
} |
public static synchronized Class<?> getClass(String name, Configuration conf
) throws IOException {
Class<?> writableClass = NAME_TO_CLASS.get(name);
if (writableClass != null)
return writableClass;
try {
return conf.getClassByName(name);
} catch (ClassNotFoundException e) {
IOException newE = new IOException("WritableName can't load class: " + name);
newE.initCause(e);
throw newE;
}
} | @Test
public void testBadName() throws Exception {
Configuration conf = new Configuration();
try {
WritableName.getClass("unknown_junk", conf);
assertTrue(false);
} catch(IOException e) {
assertTrue(e.getMessage().matches(".*unknown_junk.*"));
}
} |
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx, RemotingCommand request)
throws RemotingCommandException {
final long beginTimeMills = this.brokerController.getMessageStore().now();
request.addExtFieldIfNotExist(BORN_TIME, String.valueOf(System.currentTimeMillis()));
if (Objects.equals(request.getExtFields().get(BORN_TIME), "0")) {
request.addExtField(BORN_TIME, String.valueOf(System.currentTimeMillis()));
}
Channel channel = ctx.channel();
RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
final PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
final PopMessageRequestHeader requestHeader =
(PopMessageRequestHeader) request.decodeCommandCustomHeader(PopMessageRequestHeader.class, true);
StringBuilder startOffsetInfo = new StringBuilder(64);
StringBuilder msgOffsetInfo = new StringBuilder(64);
StringBuilder orderCountInfo = null;
if (requestHeader.isOrder()) {
orderCountInfo = new StringBuilder(64);
}
brokerController.getConsumerManager().compensateBasicConsumerInfo(requestHeader.getConsumerGroup(),
ConsumeType.CONSUME_POP, MessageModel.CLUSTERING);
response.setOpaque(request.getOpaque());
if (brokerController.getBrokerConfig().isEnablePopLog()) {
POP_LOGGER.info("receive PopMessage request command, {}", request);
}
if (requestHeader.isTimeoutTooMuch()) {
response.setCode(ResponseCode.POLLING_TIMEOUT);
response.setRemark(String.format("the broker[%s] pop message is timeout too much",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark(String.format("the broker[%s] pop message is forbidden",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (requestHeader.getMaxMsgNums() > 32) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(String.format("the broker[%s] pop message's num is greater than 32",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (!brokerController.getMessageStore().getMessageStoreConfig().isTimerWheelEnable()) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(String.format("the broker[%s] pop message is forbidden because timerWheelEnable is false",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
TopicConfig topicConfig =
this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
if (null == topicConfig) {
POP_LOGGER.error("The topic {} not exist, consumer: {} ", requestHeader.getTopic(),
RemotingHelper.parseChannelRemoteAddr(channel));
response.setCode(ResponseCode.TOPIC_NOT_EXIST);
response.setRemark(String.format("topic[%s] not exist, apply first please! %s", requestHeader.getTopic(),
FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL)));
return response;
}
if (!PermName.isReadable(topicConfig.getPerm())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark("the topic[" + requestHeader.getTopic() + "] peeking message is forbidden");
return response;
}
if (requestHeader.getQueueId() >= topicConfig.getReadQueueNums()) {
String errorInfo = String.format("queueId[%d] is illegal, topic:[%s] topicConfig.readQueueNums:[%d] " +
"consumer:[%s]",
requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(),
channel.remoteAddress());
POP_LOGGER.warn(errorInfo);
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(errorInfo);
return response;
}
SubscriptionGroupConfig subscriptionGroupConfig =
this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
if (null == subscriptionGroupConfig) {
response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
response.setRemark(String.format("subscription group [%s] does not exist, %s",
requestHeader.getConsumerGroup(), FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST)));
return response;
}
if (!subscriptionGroupConfig.isConsumeEnable()) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark("subscription group no permission, " + requestHeader.getConsumerGroup());
return response;
}
BrokerConfig brokerConfig = brokerController.getBrokerConfig();
SubscriptionData subscriptionData = null;
ExpressionMessageFilter messageFilter = null;
if (requestHeader.getExp() != null && !requestHeader.getExp().isEmpty()) {
try {
subscriptionData = FilterAPI.build(requestHeader.getTopic(), requestHeader.getExp(), requestHeader.getExpType());
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), subscriptionData);
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, SubscriptionData.SUB_ALL, requestHeader.getExpType());
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
retryTopic, retrySubscriptionData);
ConsumerFilterData consumerFilterData = null;
if (!ExpressionType.isTagType(subscriptionData.getExpressionType())) {
consumerFilterData = ConsumerFilterManager.build(
requestHeader.getTopic(), requestHeader.getConsumerGroup(), requestHeader.getExp(),
requestHeader.getExpType(), System.currentTimeMillis()
);
if (consumerFilterData == null) {
POP_LOGGER.warn("Parse the consumer's subscription[{}] failed, group: {}",
requestHeader.getExp(), requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
}
messageFilter = new ExpressionMessageFilter(subscriptionData, consumerFilterData,
brokerController.getConsumerFilterManager());
} catch (Exception e) {
POP_LOGGER.warn("Parse the consumer's subscription[{}] error, group: {}", requestHeader.getExp(),
requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
} else {
try {
subscriptionData = FilterAPI.build(requestHeader.getTopic(), "*", ExpressionType.TAG);
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), subscriptionData);
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, "*", ExpressionType.TAG);
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
retryTopic, retrySubscriptionData);
} catch (Exception e) {
POP_LOGGER.warn("Build default subscription error, group: {}", requestHeader.getConsumerGroup());
}
}
int randomQ = random.nextInt(100);
int reviveQid;
if (requestHeader.isOrder()) {
reviveQid = KeyBuilder.POP_ORDER_REVIVE_QUEUE;
} else {
reviveQid = (int) Math.abs(ckMessageNumber.getAndIncrement() % this.brokerController.getBrokerConfig().getReviveQueueNum());
}
GetMessageResult getMessageResult = new GetMessageResult(requestHeader.getMaxMsgNums());
ExpressionMessageFilter finalMessageFilter = messageFilter;
StringBuilder finalOrderCountInfo = orderCountInfo;
// Due to the design of the fields startOffsetInfo, msgOffsetInfo, and orderCountInfo,
// a single POP request could only invoke the popMsgFromQueue method once
// for either a normal topic or a retry topic's queue. Retry topics v1 and v2 are
// considered the same type because they share the same retry flag in previous fields.
// Therefore, needRetryV1 is designed as a subset of needRetry, and within a single request,
// only one type of retry topic is able to call popMsgFromQueue.
boolean needRetry = randomQ % 5 == 0;
boolean needRetryV1 = false;
if (brokerConfig.isEnableRetryTopicV2() && brokerConfig.isRetrieveMessageFromPopRetryTopicV1()) {
needRetryV1 = randomQ % 2 == 0;
}
long popTime = System.currentTimeMillis();
CompletableFuture<Long> getMessageFuture = CompletableFuture.completedFuture(0L);
if (needRetry && !requestHeader.isOrder()) {
if (needRetryV1) {
String retryTopic = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
}
}
if (requestHeader.getQueueId() < 0) {
// read all queues
getMessageFuture = popMsgFromTopic(topicConfig, false, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
int queueId = requestHeader.getQueueId();
getMessageFuture = getMessageFuture.thenCompose(restNum ->
popMsgFromQueue(topicConfig.getTopicName(), requestHeader.getAttemptId(), false,
getMessageResult, requestHeader, queueId, restNum, reviveQid, channel, popTime, finalMessageFilter,
startOffsetInfo, msgOffsetInfo, finalOrderCountInfo));
}
// if not full, fetch from the retry topic again
if (!needRetry && getMessageResult.getMessageMapedList().size() < requestHeader.getMaxMsgNums() && !requestHeader.isOrder()) {
if (needRetryV1) {
String retryTopicV1 = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
getMessageFuture = popMsgFromTopic(retryTopicV1, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
}
}
final RemotingCommand finalResponse = response;
SubscriptionData finalSubscriptionData = subscriptionData;
getMessageFuture.thenApply(restNum -> {
if (!getMessageResult.getMessageBufferList().isEmpty()) {
finalResponse.setCode(ResponseCode.SUCCESS);
getMessageResult.setStatus(GetMessageStatus.FOUND);
if (restNum > 0) {
// an all-queue pop cannot notify a specified-queue pop, and vice versa
popLongPollingService.notifyMessageArriving(
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
null, 0L, null, null);
}
} else {
PollingResult pollingResult = popLongPollingService.polling(
ctx, request, new PollingHeader(requestHeader), finalSubscriptionData, finalMessageFilter);
if (PollingResult.POLLING_SUC == pollingResult) {
if (restNum > 0) {
popLongPollingService.notifyMessageArriving(
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
null, 0L, null, null);
}
return null;
} else if (PollingResult.POLLING_FULL == pollingResult) {
finalResponse.setCode(ResponseCode.POLLING_FULL);
} else {
finalResponse.setCode(ResponseCode.POLLING_TIMEOUT);
}
getMessageResult.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE);
}
responseHeader.setInvisibleTime(requestHeader.getInvisibleTime());
responseHeader.setPopTime(popTime);
responseHeader.setReviveQid(reviveQid);
responseHeader.setRestNum(restNum);
responseHeader.setStartOffsetInfo(startOffsetInfo.toString());
responseHeader.setMsgOffsetInfo(msgOffsetInfo.toString());
if (requestHeader.isOrder() && finalOrderCountInfo != null) {
responseHeader.setOrderCountInfo(finalOrderCountInfo.toString());
}
finalResponse.setRemark(getMessageResult.getStatus().name());
switch (finalResponse.getCode()) {
case ResponseCode.SUCCESS:
if (this.brokerController.getBrokerConfig().isTransferMsgByHeap()) {
final byte[] r = this.readGetMessageResult(getMessageResult, requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId());
this.brokerController.getBrokerStatsManager().incGroupGetLatency(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId(),
(int) (this.brokerController.getMessageStore().now() - beginTimeMills));
finalResponse.setBody(r);
} else {
final GetMessageResult tmpGetMessageResult = getMessageResult;
try {
FileRegion fileRegion =
new ManyMessageTransfer(finalResponse.encodeHeader(getMessageResult.getBufferTotalSize()),
getMessageResult);
channel.writeAndFlush(fileRegion)
.addListener((ChannelFutureListener) future -> {
tmpGetMessageResult.release();
Attributes attributes = RemotingMetricsManager.newAttributesBuilder()
.put(LABEL_REQUEST_CODE, RemotingHelper.getRequestCodeDesc(request.getCode()))
.put(LABEL_RESPONSE_CODE, RemotingHelper.getResponseCodeDesc(finalResponse.getCode()))
.put(LABEL_RESULT, RemotingMetricsManager.getWriteAndFlushResult(future))
.build();
RemotingMetricsManager.rpcLatency.record(request.getProcessTimer().elapsed(TimeUnit.MILLISECONDS), attributes);
if (!future.isSuccess()) {
POP_LOGGER.error("Fail to transfer messages from page cache to {}",
channel.remoteAddress(), future.cause());
}
});
} catch (Throwable e) {
POP_LOGGER.error("Error occurred when transferring messages from page cache", e);
getMessageResult.release();
}
return null;
}
break;
default:
return finalResponse;
}
return finalResponse;
}).thenAccept(result -> NettyRemotingAbstract.writeResponse(channel, request, result));
return null;
} | @Test
public void testGetInitOffset_normalTopic() throws RemotingCommandException {
long maxOffset = 999L;
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
when(messageStore.getMaxOffsetInQueue(topic, 0)).thenReturn(maxOffset);
String newGroup = group + "-" + System.currentTimeMillis();
GetMessageResult getMessageResult = createGetMessageResult(0);
when(messageStore.getMessageAsync(eq(newGroup), anyString(), anyInt(), anyLong(), anyInt(), any()))
.thenReturn(CompletableFuture.completedFuture(getMessageResult));
long offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, topic, 0);
Assert.assertEquals(-1, offset);
RemotingCommand request = createPopMsgCommand(newGroup, topic, 0, ConsumeInitMode.MAX);
popMessageProcessor.processRequest(handlerContext, request);
offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, topic, 0);
Assert.assertEquals(maxOffset - 1, offset); // checkInMem returns false
when(messageStore.getMaxOffsetInQueue(topic, 0)).thenReturn(maxOffset * 2);
popMessageProcessor.processRequest(handlerContext, request);
offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, topic, 0);
Assert.assertEquals(maxOffset - 1, offset); // will not enter getInitOffset() again
messageStore.getMaxOffsetInQueue(topic, 0); // prevent UnnecessaryStubbingException
} |
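A short arithmetic note on the retry-first sampling in processRequest above (randomQ is uniform over [0, 100)): randomQ % 5 == 0 holds for 20 of the 100 values, so a non-ordered pop polls a retry topic first roughly 20% of the time; and when v1 retry retrieval is enabled, randomQ % 2 == 0 routes half of those draws to the v1 retry topic.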
public static CreateSourceAsProperties from(final Map<String, Literal> literals) {
try {
return new CreateSourceAsProperties(literals, false);
} catch (final ConfigException e) {
final String message = e.getMessage().replace(
"configuration",
"property"
);
throw new KsqlException(message, e);
}
} | @Test
public void shouldThrowIfValueFormatAndFormatProvided() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> CreateSourceAsProperties.from(
ImmutableMap.<String, Literal>builder()
.put(VALUE_FORMAT_PROPERTY, new StringLiteral("JSON"))
.put(FORMAT_PROPERTY, new StringLiteral("KAFKA"))
.build())
);
// Then:
assertThat(e.getMessage(), containsString("Cannot supply both 'VALUE_FORMAT' and 'FORMAT' properties, "
+ "as 'FORMAT' sets both key and value formats."));
assertThat(e.getMessage(), containsString("Either use just 'FORMAT', or use 'KEY_FORMAT' and 'VALUE_FORMAT'."));
} |
public FuryBuilder withMetaCompressor(MetaCompressor metaCompressor) {
this.metaCompressor = MetaCompressor.checkMetaCompressor(metaCompressor);
return this;
} | @Test
public void testWithMetaCompressor() {
MetaCompressor metaCompressor =
new FuryBuilder()
.withMetaCompressor(
new MetaCompressor() {
@Override
public byte[] compress(byte[] data, int offset, int size) {
return new byte[0];
}
@Override
public byte[] decompress(byte[] compressedData, int offset, int size) {
return new byte[0];
}
})
.metaCompressor;
Assert.assertEquals(metaCompressor.getClass().getSimpleName(), "TypeEqualMetaCompressor");
new FuryBuilder()
.withMetaCompressor(
new MetaCompressor() {
@Override
public byte[] compress(byte[] data, int offset, int size) {
return new byte[0];
}
@Override
public byte[] decompress(byte[] compressedData, int offset, int size) {
return new byte[0];
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
return o != null && getClass() == o.getClass();
}
@Override
public int hashCode() {
return getClass().hashCode();
}
});
} |
@Override
public void handle(HttpExchange httpExchange) {
try {
String requestUri = httpExchange.getRequestURI().toString();
requestUri = sanitizeRequestUri(requestUri);
final String toServe = requestUri.substring((contextPath + "/").length());
final URL resource = this.getClass().getClassLoader().getResource(rootDir + toServe);
if (resource != null) {
httpExchange.getResponseHeaders().add(ContentType._HEADER_NAME, ContentType.from(toServe));
httpExchange.getResponseHeaders().add("Access-Control-Allow-Origin", "*");
httpExchange.sendResponseHeaders(200, 0);
copyResourceToResponseBody(resource, httpExchange);
} else {
httpExchange.sendResponseHeaders(404, -1);
}
} catch (Exception shouldNotHappen) {
LOGGER.error("Error serving static files", shouldNotHappen);
}
} | @Test
void servesIndexHtmlIfNoFileRequested() throws IOException {
when(httpExchange.getRequestURI()).thenReturn(URI.create("/dashboard"));
staticFileHttpHandler.handle(httpExchange);
verify(httpExchange).sendResponseHeaders(200, 0);
} |
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
checkNotNull(expectedMultimap, "expectedMultimap");
checkNotNull(actual);
ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
ListMultimap<?, ?> extra = difference(actual, expectedMultimap);
// TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
// the subject but not enough times. Similarly for unexpected extra items.
if (!missing.isEmpty()) {
if (!extra.isEmpty()) {
boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
// Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
// grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
// show the actual and expected multimaps in the standard format).
String missingDisplay =
addTypeInfo
? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
: countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
String extraDisplay =
addTypeInfo
? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
: countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
failWithActual(
fact("missing", missingDisplay),
fact("unexpected", extraDisplay),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
} else {
failWithActual(
fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
} else if (!extra.isEmpty()) {
failWithActual(
fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
} | @Test
public void containsExactlyEntriesIn_homogeneousMultimap_failsWithSameToString()
throws Exception {
expectFailureWhenTestingThat(ImmutableMultimap.of(1, "a", 1, "b", 2, "c"))
.containsExactlyEntriesIn(ImmutableMultimap.of(1L, "a", 1L, "b", 2L, "c"));
assertFailureKeys("missing", "unexpected", "---", "expected", "but was");
assertFailureValue("missing", "[1=a, 1=b, 2=c] (Map.Entry<java.lang.Long, java.lang.String>)");
assertFailureValue(
"unexpected", "[1=a, 1=b, 2=c] (Map.Entry<java.lang.Integer, java.lang.String>)");
} |
@Override
public List<ValidationMessage> validate(ValidationContext context) {
return context.query().tokens().stream()
.filter(this::isInvalidOperator)
.map(token -> {
final String errorMessage = String.format(Locale.ROOT, "Query contains invalid operator \"%s\". All AND / OR / NOT operators have to be written uppercase", token.image());
return ValidationMessage.builder(ValidationStatus.WARNING, ValidationType.INVALID_OPERATOR)
.errorMessage(errorMessage)
.relatedProperty(token.image())
.position(QueryPosition.from(token))
.build();
}).collect(Collectors.toList());
} | @Test
void testRepeatedInvalidTokens() {
final ValidationContext context = TestValidationContext.create("not(foo:bar)")
.build();
final List<ValidationMessage> messages = sut.validate(context);
assertThat(messages.size()).isEqualTo(1);
assertThat(messages.stream().allMatch(v -> v.validationType() == ValidationType.INVALID_OPERATOR)).isTrue();
} |
public static <@NonNull E> CompletableSource resolveScopeFromLifecycle(
final LifecycleScopeProvider<E> provider) throws OutsideScopeException {
return resolveScopeFromLifecycle(provider, true);
} | @Test
public void resolveScopeFromLifecycle_complete() {
PublishSubject<Integer> lifecycle = PublishSubject.create();
TestObserver<?> o = testSource(resolveScopeFromLifecycle(lifecycle, 3));
lifecycle.onNext(0);
o.assertNoErrors().assertNotComplete();
lifecycle.onNext(1);
o.assertNoErrors().assertNotComplete();
lifecycle.onNext(0);
o.assertNoErrors().assertNotComplete();
lifecycle.onNext(2);
o.assertNoErrors().assertNotComplete();
// Now we complete
lifecycle.onComplete();
o.assertComplete();
} |
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
Map<String, Long> recentlyUnloadedBundles,
Map<String, Long> recentlyUnloadedBrokers) {
final var conf = context.brokerConfiguration();
decisionCache.clear();
stats.clear();
Map<String, BrokerLookupData> availableBrokers;
try {
availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
.get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
} catch (ExecutionException | InterruptedException | TimeoutException e) {
counter.update(Failure, Unknown);
log.warn("Failed to fetch available brokers. Stop unloading.", e);
return decisionCache;
}
try {
final var loadStore = context.brokerLoadDataStore();
stats.setLoadDataStore(loadStore);
boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
var skipReason = stats.update(
context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
if (skipReason.isPresent()) {
if (debugMode) {
log.warn(CANNOT_CONTINUE_UNLOAD_MSG
+ " Skipped the load stat update. Reason:{}.",
skipReason.get());
}
counter.update(Skip, skipReason.get());
return decisionCache;
}
counter.updateLoadData(stats.avg, stats.std);
if (debugMode) {
log.info("brokers' load stats:{}", stats);
}
// skip metrics
int numOfBrokersWithEmptyLoadData = 0;
int numOfBrokersWithFewBundles = 0;
final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
boolean transfer = conf.isLoadBalancerTransferEnabled();
if (stats.std() > targetStd
|| isUnderLoaded(context, stats.peekMinBroker(), stats)
|| isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
unloadConditionHitCount++;
} else {
unloadConditionHitCount = 0;
}
if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
}
counter.update(Skip, HitCount);
return decisionCache;
}
while (true) {
if (!stats.hasTransferableBrokers()) {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ " Exhausted target transfer brokers.");
}
break;
}
UnloadDecision.Reason reason;
if (stats.std() > targetStd) {
reason = Overloaded;
} else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
reason = Underloaded;
if (debugMode) {
log.info(String.format("broker:%s is underloaded:%s although "
+ "load std:%.2f <= targetStd:%.2f. "
+ "Continuing unload for this underloaded broker.",
stats.peekMinBroker(),
context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
stats.std(), targetStd));
}
} else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
reason = Overloaded;
if (debugMode) {
log.info(String.format("broker:%s is overloaded:%s although "
+ "load std:%.2f <= targetStd:%.2f. "
+ "Continuing unload for this overloaded broker.",
stats.peekMaxBroker(),
context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
stats.std(), targetStd));
}
} else {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ "The overall cluster load meets the target, std:{} <= targetStd:{}."
+ "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
}
break;
}
String maxBroker = stats.pollMaxBroker();
String minBroker = stats.peekMinBroker();
Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
if (maxBrokerLoadData.isEmpty()) {
log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " MaxBrokerLoadData is empty.", maxBroker));
numOfBrokersWithEmptyLoadData++;
continue;
}
if (minBrokerLoadData.isEmpty()) {
log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
numOfBrokersWithEmptyLoadData++;
continue;
}
double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
double offload = (maxLoad - minLoad) / 2;
BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
+ brokerLoadData.getMsgThroughputOut();
double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
+ minBrokerLoadData.get().getMsgThroughputOut();
double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
if (debugMode) {
log.info(String.format(
"Attempting to shed load from broker:%s%s, which has the max resource "
+ "usage:%.2f%%, targetStd:%.2f,"
+ " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
maxBroker, transfer ? " to broker:" + minBroker : "",
maxLoad * 100,
targetStd,
offload * 100,
offloadThroughput / KB
));
}
double trafficMarkedToOffload = 0;
double trafficMarkedToGain = 0;
Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " TopBundlesLoadData is empty.", maxBroker));
numOfBrokersWithEmptyLoadData++;
continue;
}
var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
if (maxBrokerTopBundlesLoadData.size() == 1) {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " Sole namespace bundle:%s is overloading the broker. ",
maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
continue;
}
Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
var minBrokerTopBundlesLoadDataIter =
minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
null;
if (maxBrokerTopBundlesLoadData.isEmpty()) {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " Broker overloaded despite having no bundles", maxBroker));
continue;
}
int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
for (var e : maxBrokerTopBundlesLoadData) {
String bundle = e.bundleName();
if (channel != null && !channel.isOwner(bundle, maxBroker)) {
if (debugMode) {
log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " MaxBroker:%s is not the owner.", bundle, maxBroker));
}
continue;
}
if (recentlyUnloadedBundles.containsKey(bundle)) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " Bundle has been recently unloaded at ts:%d.",
bundle, recentlyUnloadedBundles.get(bundle)));
}
continue;
}
if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " This unload can't meet "
+ "affinity(isolation) or anti-affinity group policies.", bundle));
}
continue;
}
if (remainingTopBundles <= 1) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
+ " less than or equal to 1.",
bundle, maxBroker));
}
break;
}
var bundleData = e.stats();
double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
boolean swap = false;
List<Unload> minToMaxUnloads = new ArrayList<>();
double minBrokerBundleSwapThroughput = 0.0;
if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
// see if we can swap bundles from min to max broker to balance better.
if (transfer && minBrokerTopBundlesLoadDataIter != null) {
var maxBrokerNewThroughput =
maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
- maxBrokerBundleThroughput;
var minBrokerNewThroughput =
minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
+ maxBrokerBundleThroughput;
while (minBrokerTopBundlesLoadDataIter.hasNext()) {
var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
if (!isTransferable(context, availableBrokers,
minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
continue;
}
var minBrokerBundleThroughput =
minBrokerBundleData.stats().msgThroughputIn
+ minBrokerBundleData.stats().msgThroughputOut;
var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
if (maxBrokerNewThroughputTmp < maxBrokerThroughput
&& minBrokerNewThroughputTmp < maxBrokerThroughput) {
minToMaxUnloads.add(new Unload(minBroker,
minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
minBrokerNewThroughput = minBrokerNewThroughputTmp;
minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
if (minBrokerNewThroughput <= maxBrokerNewThroughput
&& maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
swap = true;
break;
}
}
}
}
if (!swap) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
+ "greater than the target :%.2f KByte/s.",
bundle,
(trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
trafficMarkedToGain / KB,
(trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
offloadThroughput / KB));
}
break;
}
}
Unload unload;
if (transfer) {
if (swap) {
minToMaxUnloads.forEach(minToMaxUnload -> {
if (debugMode) {
log.info("Decided to gain bundle:{} from min broker:{}",
minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
}
var decision = new UnloadDecision();
decision.setUnload(minToMaxUnload);
decision.succeed(reason);
decisionCache.add(decision);
});
if (debugMode) {
log.info(String.format(
"Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
trafficMarkedToGain += minBrokerBundleSwapThroughput;
}
}
unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
} else {
unload = new Unload(maxBroker, bundle);
}
var decision = new UnloadDecision();
decision.setUnload(unload);
decision.succeed(reason);
decisionCache.add(decision);
trafficMarkedToOffload += maxBrokerBundleThroughput;
remainingTopBundles--;
if (debugMode) {
log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
+ " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
+ " Target:%.2f KByte/s.",
bundle, maxBrokerBundleThroughput / KB,
trafficMarkedToOffload / KB,
trafficMarkedToGain / KB,
(trafficMarkedToOffload - trafficMarkedToGain) / KB,
offloadThroughput / KB));
}
}
if (trafficMarkedToOffload > 0) {
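                // Scale the net offloaded throughput into the load metric: net throughput as a
                // fraction of the max broker's total throughput, times its load.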
var adjustedOffload =
(trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
stats.offload(maxLoad, minLoad, adjustedOffload);
if (debugMode) {
log.info(
String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
stats, maxLoad, minLoad, adjustedOffload));
}
} else {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " There is no bundle that can be unloaded in top bundles load data. "
+ "Consider splitting bundles owned by the broker "
+ "to make each bundle serve less traffic "
+ "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
+ " to report more bundles in the top bundles load data.", maxBroker));
}
} // while end
if (debugMode) {
log.info("decisionCache:{}", decisionCache);
}
if (decisionCache.isEmpty()) {
UnloadDecision.Reason reason;
if (numOfBrokersWithEmptyLoadData > 0) {
reason = NoLoadData;
} else if (numOfBrokersWithFewBundles > 0) {
reason = NoBundles;
} else {
reason = HitCount;
}
counter.update(Skip, reason);
} else {
unloadConditionHitCount = 0;
}
} catch (Throwable e) {
log.error("Failed to process unloading. ", e);
this.counter.update(Failure, Unknown);
}
return decisionCache;
} | @Test
public void testGetAvailableBrokersFailed() {
UnloadCounter counter = new UnloadCounter();
TransferShedder transferShedder = new TransferShedder(pulsar, counter, null,
isolationPoliciesHelper, antiAffinityGroupPolicyHelper);
var ctx = setupContext();
BrokerRegistry registry = ctx.brokerRegistry();
doReturn(FutureUtil.failedFuture(new TimeoutException())).when(registry).getAvailableBrokerLookupDataAsync();
transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
assertEquals(counter.getBreakdownCounters().get(Failure).get(Unknown).get(), 1);
assertEquals(counter.getLoadAvg(), 0.0);
assertEquals(counter.getLoadStd(), 0.0);
} |
@VisibleForTesting
void validateDictTypeUnique(Long id, String type) {
if (StrUtil.isEmpty(type)) {
return;
}
DictTypeDO dictType = dictTypeMapper.selectByType(type);
if (dictType == null) {
return;
}
        // If id is null (creating a new dict type), any existing record with this type is a duplicate
if (id == null) {
throw exception(DICT_TYPE_TYPE_DUPLICATE);
}
if (!dictType.getId().equals(id)) {
throw exception(DICT_TYPE_TYPE_DUPLICATE);
}
} | @Test
public void testValidateDictTypeUnique_valueDuplicateForCreate() {
        // Prepare parameters
String type = randomString();
        // Mock data
dictTypeMapper.insert(randomDictTypeDO(o -> o.setType(type)));
        // Invoke and verify the exception
assertServiceException(() -> dictTypeService.validateDictTypeUnique(null, type),
DICT_TYPE_TYPE_DUPLICATE);
} |
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("start position") BigDecimal start) {
return invoke( list, start, null );
} | @Test
void invokeStartZero() {
FunctionTestUtil.assertResultError(sublistFunction.invoke(Arrays.asList(1, 2), BigDecimal.ZERO),
InvalidParametersEvent.class);
} |
@Override
public PartitionQuickStats buildQuickStats(ConnectorSession session, SemiTransactionalHiveMetastore metastore,
SchemaTableName table, MetastoreContext metastoreContext, String partitionId, Iterator<HiveFileInfo> files)
{
requireNonNull(session);
requireNonNull(metastore);
requireNonNull(table);
requireNonNull(metastoreContext);
requireNonNull(partitionId);
requireNonNull(files);
if (!files.hasNext()) {
return PartitionQuickStats.EMPTY;
}
// TODO: Consider refactoring storage and/or table format to the interface when we implement an ORC/Iceberg quick stats builder
StorageFormat storageFormat;
if (UNPARTITIONED_ID.getPartitionName().equals(partitionId)) {
Table resolvedTable = metastore.getTable(metastoreContext, table.getSchemaName(), table.getTableName()).get();
storageFormat = resolvedTable.getStorage().getStorageFormat();
}
else {
Partition partition = metastore.getPartitionsByNames(metastoreContext, table.getSchemaName(), table.getTableName(),
ImmutableList.of(new PartitionNameWithVersion(partitionId, Optional.empty()))).get(partitionId).get();
storageFormat = partition.getStorage().getStorageFormat();
}
if (!PARQUET_SERDE_CLASS_NAMES.contains(storageFormat.getSerDe())) {
// Not a parquet table/partition
return PartitionQuickStats.EMPTY;
}
// We want to keep the number of files we use to build quick stats bounded, so that
// 1. We can control total file IO overhead in a measurable way
// 2. Planning time remains bounded
// Future work here is to sample the file list, read their stats only and extrapolate the overall stats (TODO)
List<CompletableFuture<ParquetMetadata>> footerFetchCompletableFutures = new ArrayList<>();
int filesCount = 0;
while (files.hasNext()) {
HiveFileInfo file = files.next();
filesCount++;
Path path = file.getPath();
long fileSize = file.getLength();
HiveFileContext hiveFileContext = new HiveFileContext(
true,
NO_CACHE_CONSTRAINTS,
Optional.empty(),
OptionalLong.of(fileSize),
OptionalLong.empty(),
OptionalLong.empty(),
file.getFileModifiedTime(),
false);
HdfsContext hdfsContext = new HdfsContext(session, table.getSchemaName(), table.getTableName());
Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
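            // Read and parse each file's footer asynchronously on the dedicated footer-fetch
            // executor; completion is bounded below by footerFetchTimeoutMillis.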
footerFetchCompletableFutures.add(supplyAsync(() -> {
Stopwatch footerFetchDuration = Stopwatch.createStarted();
try (FSDataInputStream inputStream = hdfsEnvironment.getFileSystem(hdfsContext, path).openFile(path, hiveFileContext);
ParquetDataSource parquetDataSource = buildHdfsParquetDataSource(inputStream, path, stats)) {
ParquetFileMetadata parquetFileMetadata = readFooter(parquetDataSource,
fileSize,
createDecryptor(configuration, path),
getReadNullMaskedParquetEncryptedValue(session));
footerByteSizeDistribution.add(parquetFileMetadata.getMetadataSize());
return parquetFileMetadata.getParquetMetadata();
}
catch (Exception e) {
log.error(e);
throw new RuntimeException(e);
}
finally {
this.footerFetchDuration.add(footerFetchDuration.elapsed(MILLISECONDS), MILLISECONDS);
}
}, footerFetchExecutor));
}
// Record a metric about how many files were seen
session.getRuntimeStats().addMetricValue(String.format("ParquetQuickStatsBuilder/FileCount/%s/%s", table.getTableName(), partitionId), RuntimeUnit.NONE, filesCount);
fileCountPerPartition.add(filesCount);
HashMap<ColumnPath, ColumnQuickStats<?>> rolledUpColStats = new HashMap<>();
try {
// Wait for footer reads to finish
CompletableFuture<Void> overallCompletableFuture = CompletableFuture.allOf(footerFetchCompletableFutures.toArray(new CompletableFuture[0]));
overallCompletableFuture.get(footerFetchTimeoutMillis, MILLISECONDS);
for (CompletableFuture<ParquetMetadata> future : footerFetchCompletableFutures) {
ParquetMetadata parquetMetadata = future.get();
processColumnMetadata(parquetMetadata, rolledUpColStats);
}
}
catch (InterruptedException | ExecutionException | TimeoutException e) {
log.error(e, "Failed to read/build stats from parquet footer");
throw new RuntimeException(e);
}
if (rolledUpColStats.isEmpty()) {
return PartitionQuickStats.EMPTY;
}
return new PartitionQuickStats(partitionId, rolledUpColStats.values(), filesCount);
} | @Test
public void testStatsFromNestedColumnsAreNotIncluded()
{
String resourceDir = TestParquetQuickStatsBuilder.class.getClassLoader().getResource("quick_stats").toString();
// Table definition :
// CREATE TABLE nested_parquet(
// id bigint,
// x row(a bigint, b varchar, c double, d row(d1 bigint, d2 double)),
// y array(row(a bigint, b varchar, c double, d row(d1 bigint, d2 double))))
// with (format = 'PARQUET')
// 3 rows were added to the table
ImmutableList<HiveFileInfo> hiveFileInfos = buildHiveFileInfos(resourceDir, "nested_table", 1);
PartitionQuickStats partitionQuickStats = parquetQuickStatsBuilder.buildQuickStats(SESSION, metastore, new SchemaTableName(TEST_SCHEMA, TEST_TABLE),
metastoreContext, UNPARTITIONED_ID.getPartitionName(), hiveFileInfos.iterator());
assertEquals(partitionQuickStats.getStats().size(), 1, "Expected stats for only non-nested column : 'id'");
ColumnQuickStats<?> idColumnQuickStats = partitionQuickStats.getStats().get(0);
assertEquals(idColumnQuickStats, createLongStats("id", 3L, 0L, 1L, 3L));
} |
public int weekOfYear() {
return getField(DateField.WEEK_OF_YEAR);
} | @Test
public void weekOfYearTest() {
DateTime date = DateUtil.parse("2016-12-27");
//noinspection ConstantConditions
assertEquals(2016, date.year());
        // A week that spans the new year always returns 1
assertEquals(1, date.weekOfYear());
} |
public List<Issue> validateMetadata(ExtensionVersion extVersion) {
return Observation.createNotStarted("ExtensionValidator#validateMetadata", observations).observe(() -> {
var issues = new ArrayList<Issue>();
checkVersion(extVersion.getVersion(), issues);
checkTargetPlatform(extVersion.getTargetPlatform(), issues);
checkCharacters(extVersion.getDisplayName(), "displayName", issues);
checkFieldSize(extVersion.getDisplayName(), DEFAULT_STRING_SIZE, "displayName", issues);
checkCharacters(extVersion.getDescription(), "description", issues);
checkFieldSize(extVersion.getDescription(), DESCRIPTION_SIZE, "description", issues);
checkCharacters(extVersion.getCategories(), "categories", issues);
checkFieldSize(extVersion.getCategories(), DEFAULT_STRING_SIZE, "categories", issues);
checkCharacters(extVersion.getTags(), "keywords", issues);
checkFieldSize(extVersion.getTags(), DEFAULT_STRING_SIZE, "keywords", issues);
checkCharacters(extVersion.getLicense(), "license", issues);
checkFieldSize(extVersion.getLicense(), DEFAULT_STRING_SIZE, "license", issues);
checkURL(extVersion.getHomepage(), "homepage", issues);
checkFieldSize(extVersion.getHomepage(), DEFAULT_STRING_SIZE, "homepage", issues);
checkURL(extVersion.getRepository(), "repository", issues);
checkFieldSize(extVersion.getRepository(), DEFAULT_STRING_SIZE, "repository", issues);
checkURL(extVersion.getBugs(), "bugs", issues);
checkFieldSize(extVersion.getBugs(), DEFAULT_STRING_SIZE, "bugs", issues);
checkInvalid(extVersion.getMarkdown(), s -> !MARKDOWN_VALUES.contains(s), "markdown", issues,
MARKDOWN_VALUES.toString());
checkCharacters(extVersion.getGalleryColor(), "galleryBanner.color", issues);
checkFieldSize(extVersion.getGalleryColor(), GALLERY_COLOR_SIZE, "galleryBanner.color", issues);
checkInvalid(extVersion.getGalleryTheme(), s -> !GALLERY_THEME_VALUES.contains(s), "galleryBanner.theme", issues,
GALLERY_THEME_VALUES.toString());
checkFieldSize(extVersion.getLocalizedLanguages(), DEFAULT_STRING_SIZE, "localizedLanguages", issues);
checkInvalid(extVersion.getQna(), s -> !QNA_VALUES.contains(s) && isInvalidURL(s), "qna", issues,
QNA_VALUES.toString() + " or a URL");
checkFieldSize(extVersion.getQna(), DEFAULT_STRING_SIZE, "qna", issues);
return issues;
});
} | @Test
public void testInvalidURL2() {
var extension = new ExtensionVersion();
extension.setTargetPlatform(TargetPlatform.NAME_UNIVERSAL);
extension.setVersion("1.0.0");
extension.setRepository("https://");
var issues = validator.validateMetadata(extension);
assertThat(issues).hasSize(1);
assertThat(issues.get(0))
.isEqualTo(new ExtensionValidator.Issue("Invalid URL in field 'repository': https://"));
} |
@Override
public CommandLineImpl parse(final List<String> originalArgs, final Logger logger) {
return CommandLineImpl.of(originalArgs, logger);
} | @Test
public void testRunHelp() throws Exception {
final CommandLineParserImpl parser = new CommandLineParserImpl();
final CommandLineImpl commandLine = parse(parser, "-h", "run");
assertEquals(Command.RUN, commandLine.getCommand());
assertEquals(
"Usage: embulk [common options] run [command options] <config.yml>" + NEWLINE
+ NEWLINE
+ "\"embulk run\" runs a bulk load transaction." + NEWLINE
+ NEWLINE
+ "Common options:" + NEWLINE
+ " -h, --help Print help" + NEWLINE
+ " -version, --version Show Embulk version" + NEWLINE
+ " -l, --log-level LEVEL Set log level (error, warn, info, debug, trace)" + NEWLINE
+ " --log-path PATH Output log messages to a file (default: -)" + NEWLINE
+ " -X KEY=VALUE Set Embulk system properties" + NEWLINE
+ " -R OPTION Command-line option for JRuby. (Only '--dev')" + NEWLINE
+ NEWLINE
+ "Plugin options:" + NEWLINE
+ " -L, --load PATH Add a local plugin path" + NEWLINE
+ " -I, --load-path PATH Add Ruby script directory path ($LOAD_PATH)" + NEWLINE
+ " -C, --classpath PATH Add $CLASSPATH for JRuby separated by '" + File.pathSeparator + "'" + NEWLINE
+ " -b, --bundle BUNDLE_DIR Path to a Gemfile directory" + NEWLINE
+ NEWLINE
+ "Other 'run' options:" + NEWLINE
+ " -r, --resume-state PATH Path to a file to write or read resume state" + NEWLINE
+ " -o, --output PATH (deprecated)" + NEWLINE
+ " -c, --config-diff PATH Path to a file of the next configuration diff" + NEWLINE
+ NEWLINE,
commandLine.getStdOut());
assertEquals("", commandLine.getStdErr());
} |
@Udf(schema = "ARRAY<STRUCT<K STRING, V INT>>")
public List<Struct> entriesInt(
@UdfParameter(description = "The map to create entries from") final Map<String, Integer> map,
@UdfParameter(description = "If true then the resulting entries are sorted by key")
final boolean sorted
) {
return entries(map, INT_STRUCT_SCHEMA, sorted);
} | @Test
public void shouldComputeIntEntries() {
final Map<String, Integer> map = createMap(i -> i);
shouldComputeEntries(map, () -> entriesUdf.entriesInt(map, false));
} |
@Override
public Authentication authenticate(Authentication authentication) throws AuthenticationException {
String username = (String) authentication.getPrincipal();
String password = (String) authentication.getCredentials();
if (isAdmin(username)) {
UserDetails userDetails = userDetailsService.loadUserByUsername(username);
if (PasswordEncoderUtil.matches(password, userDetails.getPassword())) {
return new UsernamePasswordAuthenticationToken(userDetails, password, userDetails.getAuthorities());
} else {
return null;
}
}
if (!caseSensitive) {
username = StringUtils.lowerCase(username);
}
try {
if (!ldapLogin(username, password)) {
return null;
}
} catch (Exception e) {
Loggers.AUTH.error("[LDAP-LOGIN] failed", e);
return null;
}
UserDetails userDetails;
try {
userDetails = userDetailsService.loadUserByUsername(AuthConstants.LDAP_PREFIX + username);
} catch (UsernameNotFoundException exception) {
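            // First successful LDAP login for this user: auto-provision a local user record
            // with the default encoded password.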
userDetailsService.createUser(AuthConstants.LDAP_PREFIX + username, AuthConstants.LDAP_DEFAULT_ENCODED_PASSWORD);
User user = new User();
user.setUsername(AuthConstants.LDAP_PREFIX + username);
user.setPassword(AuthConstants.LDAP_DEFAULT_ENCODED_PASSWORD);
userDetails = new NacosUserDetails(user);
}
return new UsernamePasswordAuthenticationToken(userDetails, password, userDetails.getAuthorities());
} | @Test
void testCloseCaseSensitive() {
when(ldapTemplate.authenticate("", "(" + filterPrefix + "=" + normalUserName + ")", defaultPassWord)).thenAnswer(
new Answer<Boolean>() {
@Override
public Boolean answer(InvocationOnMock invocation) throws Throwable {
                    Object[] args = invocation.getArguments();
                    String password = (String) args[2];
                    return defaultPassWord.equals(password);
}
});
User user = new User();
user.setUsername(LDAP_PREFIX + normalUserName);
user.setPassword(defaultPassWord);
when(userDetailsService.loadUserByUsername(LDAP_PREFIX + normalUserName)).thenReturn(new NacosUserDetails(user));
Authentication authentication = new UsernamePasswordAuthenticationToken(StringUtils.upperCase(normalUserName), defaultPassWord);
Authentication result = ldapAuthenticationProviderForCloseCaseSensitive.authenticate(authentication);
NacosUserDetails nacosUserDetails = (NacosUserDetails) result.getPrincipal();
assertEquals(nacosUserDetails.getUsername(), LDAP_PREFIX + normalUserName);
} |
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) {
return ConfigInstanceUtil.getNewInstance(clazz, configId, this);
} | @Test
public void test_map_of_map() {
Slime slime = new Slime();
Cursor map = slime.setObject().setObject("nestedmap").setObject("my-nested").setObject("inner");
map.setLong("one", 1);
map.setLong("two", 2);
MaptypesConfig config = new ConfigPayload(slime).toInstance(MaptypesConfig.class, "");
assertThat(config.nestedmap("my-nested").inner("one"), is(1));
assertThat(config.nestedmap("my-nested").inner("two"), is(2));
} |
@Override
public ProcResult fetchResult() throws AnalysisException {
final BaseProcResult result = new BaseProcResult();
result.setNames(CurrentQueryStatisticsProcDir.TITLE_NAMES.asList());
List<QueryStatisticsInfo> queryInfos = QueryStatisticsInfo.makeListFromMetricsAndMgrs();
List<QueryStatisticsInfo> otherQueryInfos = GlobalStateMgr.getCurrentState().getQueryStatisticsInfoFromOtherFEs();
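        // Merge the queries running on this FE with those reported by the other FEs.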
List<QueryStatisticsInfo> allInfos = Stream.concat(queryInfos.stream(), otherQueryInfos.stream())
.collect(Collectors.toList());
        List<List<String>> rowData = allInfos
                .stream()
                .map(QueryStatisticsInfo::formatToList)
                .collect(Collectors.toList());
        result.setRows(rowData);
return result;
} | @Test
public void testFetchResult() throws AnalysisException {
try (MockedStatic<QueryStatisticsInfo> queryStatisticsInfo = mockStatic(QueryStatisticsInfo.class)) {
queryStatisticsInfo.when(QueryStatisticsInfo::makeListFromMetricsAndMgrs)
.thenReturn(LOCAL_TEST_QUERIES);
new MockUp<NodeMgr>() {
@Mock
public List<QueryStatisticsInfo> getQueryStatisticsInfoFromOtherFEs() {
return REMOTE_TEST_QUERIES;
}
};
BaseProcResult result = (BaseProcResult) new CurrentGlobalQueryStatisticsProcDir().fetchResult();
Assert.assertEquals(LOCAL_TEST_QUERIES.size() + REMOTE_TEST_QUERIES.size(),
result.getRows().size());
List<List<String>> expectedQueryStatisticsInfo =
Stream.concat(LOCAL_TEST_QUERIES.stream(), REMOTE_TEST_QUERIES.stream())
.map(QueryStatisticsInfo::formatToList)
.collect(Collectors.toList());
assertThat(result.getRows()).containsExactlyInAnyOrderElementsOf(expectedQueryStatisticsInfo);
}
} |
public static <OutputT> OutputT getArgumentWithDefault(
@Nullable OutputT value, OutputT defaultValue) {
if (value == null) {
return defaultValue;
}
return value;
} | @Test
public void testGetArgumentWithDefault() {
assertEquals("value", SingleStoreUtil.getArgumentWithDefault("value", "default"));
} |
public boolean isBeforeOrAt(KinesisRecord other) {
if (shardIteratorType == AT_TIMESTAMP) {
return timestamp.compareTo(other.getApproximateArrivalTimestamp()) <= 0;
}
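        // Otherwise compare extended sequence numbers; a tie counts as "before or at"
        // only for AT_SEQUENCE_NUMBER.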
int result = extendedSequenceNumber().compareTo(other.getExtendedSequenceNumber());
if (result == 0) {
return shardIteratorType == AT_SEQUENCE_NUMBER;
}
return result < 0;
} | @Test
public void testComparisonWithTimestamp() {
DateTime referenceTimestamp = DateTime.now();
assertThat(
checkpoint(AT_TIMESTAMP, referenceTimestamp.toInstant())
.isBeforeOrAt(recordWith(referenceTimestamp.minusMillis(10).toInstant())))
.isFalse();
assertThat(
checkpoint(AT_TIMESTAMP, referenceTimestamp.toInstant())
.isBeforeOrAt(recordWith(referenceTimestamp.toInstant())))
.isTrue();
assertThat(
checkpoint(AT_TIMESTAMP, referenceTimestamp.toInstant())
.isBeforeOrAt(recordWith(referenceTimestamp.plusMillis(10).toInstant())))
.isTrue();
} |
public ColumnFamilyOptions getColumnOptions() {
// initial options from common profile
ColumnFamilyOptions opt = createBaseCommonColumnOptions();
handlesToClose.add(opt);
// load configurable options on top of pre-defined profile
setColumnFamilyOptionsFromConfigurableOptions(opt, handlesToClose);
// add user-defined options, if specified
if (optionsFactory != null) {
opt = optionsFactory.createColumnOptions(opt, handlesToClose);
}
// if sharedResources is non-null, use the block cache from it and
// set necessary options for performance consideration with memory control
if (sharedResources != null) {
final ForStSharedResources rocksResources = sharedResources.getResourceHandle();
final Cache blockCache = rocksResources.getCache();
TableFormatConfig tableFormatConfig = opt.tableFormatConfig();
BlockBasedTableConfig blockBasedTableConfig;
if (tableFormatConfig == null) {
blockBasedTableConfig = new BlockBasedTableConfig();
} else {
Preconditions.checkArgument(
tableFormatConfig instanceof BlockBasedTableConfig,
"We currently only support BlockBasedTableConfig When bounding total memory.");
blockBasedTableConfig = (BlockBasedTableConfig) tableFormatConfig;
}
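            // With partitioned index filters enabled, switch to two-level index search and
            // pin the top-level index/filter blocks in the cache.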
if (rocksResources.isUsingPartitionedIndexFilters()
&& overwriteFilterIfExist(blockBasedTableConfig)) {
blockBasedTableConfig.setIndexType(IndexType.kTwoLevelIndexSearch);
blockBasedTableConfig.setPartitionFilters(true);
blockBasedTableConfig.setPinTopLevelIndexAndFilter(true);
}
blockBasedTableConfig.setBlockCache(blockCache);
blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);
blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true);
opt.setTableFormatConfig(blockBasedTableConfig);
}
return opt;
} | @Test
public void testGetColumnFamilyOptionsWithPartitionedIndex() throws Exception {
LRUCache cache = new LRUCache(1024L);
WriteBufferManager wbm = new WriteBufferManager(1024L, cache);
ForStSharedResources sharedResources = new ForStSharedResources(cache, wbm, 1024L, true);
final ThrowingRunnable<Exception> disposer = sharedResources::close;
OpaqueMemoryResource<ForStSharedResources> opaqueResource =
new OpaqueMemoryResource<>(sharedResources, 1024L, disposer);
BloomFilter blockBasedFilter = new BloomFilter();
ForStOptionsFactory blockBasedBloomFilterOptionFactory =
new ForStOptionsFactory() {
@Override
public DBOptions createDBOptions(
DBOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
return currentOptions;
}
@Override
public ColumnFamilyOptions createColumnOptions(
ColumnFamilyOptions currentOptions,
Collection<AutoCloseable> handlesToClose) {
TableFormatConfig tableFormatConfig = currentOptions.tableFormatConfig();
BlockBasedTableConfig blockBasedTableConfig =
tableFormatConfig == null
? new BlockBasedTableConfig()
: (BlockBasedTableConfig) tableFormatConfig;
blockBasedTableConfig.setFilter(blockBasedFilter);
handlesToClose.add(blockBasedFilter);
currentOptions.setTableFormatConfig(blockBasedTableConfig);
return currentOptions;
}
};
try (ForStResourceContainer container =
new ForStResourceContainer(blockBasedBloomFilterOptionFactory, opaqueResource)) {
ColumnFamilyOptions columnOptions = container.getColumnOptions();
BlockBasedTableConfig actual =
(BlockBasedTableConfig) columnOptions.tableFormatConfig();
assertThat(actual.indexType(), is(IndexType.kTwoLevelIndexSearch));
assertThat(actual.partitionFilters(), is(true));
assertThat(actual.pinTopLevelIndexAndFilter(), is(true));
assertThat(actual.filterPolicy(), not(blockBasedFilter));
}
assertFalse("Block based filter is left unclosed.", blockBasedFilter.isOwningHandle());
} |
RegistryEndpointProvider<Optional<URL>> initializer() {
return new Initializer();
} | @Test
public void testInitializer_getHttpMethod() {
Assert.assertEquals("POST", testBlobPusher.initializer().getHttpMethod());
} |
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException {
byte[] bytes = pollEntryBytes(timeout);
if (bytes == null) {
return null;
}
return DLQEntry.deserialize(bytes);
} | @Test
public void testFlushAfterSegmentComplete() throws Exception {
Event event = new Event();
final int EVENTS_BEFORE_FLUSH = randomBetween(1, 32);
event.setField("T", generateMessageContent(PAD_FOR_BLOCK_SIZE_EVENT));
Timestamp timestamp = new Timestamp();
try (DeadLetterQueueWriter writeManager = DeadLetterQueueWriter
.newBuilder(dir, BLOCK_SIZE * EVENTS_BEFORE_FLUSH, defaultDlqSize, Duration.ofHours(1))
.build()) {
for (int i = 1; i < EVENTS_BEFORE_FLUSH; i++) {
DLQEntry entry = new DLQEntry(event, "", "", Integer.toString(i), timestamp);
writeManager.writeEntry(entry);
}
try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) {
for (int i = 1; i < EVENTS_BEFORE_FLUSH; i++) {
DLQEntry entry = readManager.pollEntry(100);
assertThat(entry, is(nullValue()));
}
}
writeManager.writeEntry(new DLQEntry(event, "", "", "flush event", timestamp));
try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) {
for (int i = 1; i < EVENTS_BEFORE_FLUSH; i++) {
DLQEntry entry = readManager.pollEntry(100);
assertThat(entry.getReason(), is(String.valueOf(i)));
}
}
}
} |
public static Object convertAvroFormat(
FieldType beamFieldType, Object avroValue, BigQueryUtils.ConversionOptions options) {
TypeName beamFieldTypeName = beamFieldType.getTypeName();
if (avroValue == null) {
if (beamFieldType.getNullable()) {
return null;
} else {
throw new IllegalArgumentException(String.format("Field %s not nullable", beamFieldType));
}
}
switch (beamFieldTypeName) {
case BYTE:
case INT16:
case INT32:
case INT64:
case FLOAT:
case DOUBLE:
case STRING:
case BYTES:
case BOOLEAN:
return convertAvroPrimitiveTypes(beamFieldTypeName, avroValue);
case DATETIME:
// Expecting value in microseconds.
switch (options.getTruncateTimestamps()) {
case TRUNCATE:
return truncateToMillis(avroValue);
case REJECT:
return safeToMillis(avroValue);
default:
throw new IllegalArgumentException(
String.format(
"Unknown timestamp truncation option: %s", options.getTruncateTimestamps()));
}
case DECIMAL:
return convertAvroNumeric(avroValue);
case ARRAY:
return convertAvroArray(beamFieldType, avroValue, options);
case LOGICAL_TYPE:
LogicalType<?, ?> logicalType = beamFieldType.getLogicalType();
assert logicalType != null;
String identifier = logicalType.getIdentifier();
if (SqlTypes.DATE.getIdentifier().equals(identifier)) {
return convertAvroDate(avroValue);
} else if (SqlTypes.TIME.getIdentifier().equals(identifier)) {
return convertAvroTime(avroValue);
} else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) {
return convertAvroDateTime(avroValue);
} else if (SQL_DATE_TIME_TYPES.contains(identifier)) {
switch (options.getTruncateTimestamps()) {
case TRUNCATE:
return truncateToMillis(avroValue);
case REJECT:
return safeToMillis(avroValue);
default:
throw new IllegalArgumentException(
String.format(
"Unknown timestamp truncation option: %s", options.getTruncateTimestamps()));
}
} else if (logicalType instanceof PassThroughLogicalType) {
return convertAvroFormat(logicalType.getBaseType(), avroValue, options);
} else {
throw new RuntimeException("Unknown logical type " + identifier);
}
case ROW:
Schema rowSchema = beamFieldType.getRowSchema();
if (rowSchema == null) {
throw new IllegalArgumentException("Nested ROW missing row schema");
}
GenericData.Record record = (GenericData.Record) avroValue;
return toBeamRow(record, rowSchema, options);
case MAP:
return convertAvroRecordToMap(beamFieldType, avroValue, options);
default:
throw new RuntimeException(
"Does not support converting unknown type value: " + beamFieldTypeName);
}
} | @Test
public void testSubMilliPrecisionRejected() {
assertThrows(
"precision",
IllegalArgumentException.class,
() -> BigQueryUtils.convertAvroFormat(FieldType.DATETIME, 1000000001L, REJECT_OPTIONS));
} |
public static Bech32Data decode(final String str) throws AddressFormatException {
boolean lower = false, upper = false;
if (str.length() < 8)
throw new AddressFormatException.InvalidDataLength("Input too short: " + str.length());
if (str.length() > 90)
throw new AddressFormatException.InvalidDataLength("Input too long: " + str.length());
for (int i = 0; i < str.length(); ++i) {
char c = str.charAt(i);
if (c < 33 || c > 126) throw new AddressFormatException.InvalidCharacter(c, i);
if (c >= 'a' && c <= 'z') {
if (upper)
throw new AddressFormatException.InvalidCharacter(c, i);
lower = true;
}
if (c >= 'A' && c <= 'Z') {
if (lower)
throw new AddressFormatException.InvalidCharacter(c, i);
upper = true;
}
}
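        // The separator is the last '1'; the human-readable part precedes it and the data part follows.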
final int pos = str.lastIndexOf('1');
if (pos < 1) throw new AddressFormatException.InvalidPrefix("Missing human-readable part");
final int dataPartLength = str.length() - 1 - pos;
if (dataPartLength < 6) throw new AddressFormatException.InvalidDataLength("Data part too short: " + dataPartLength);
byte[] values = new byte[dataPartLength];
for (int i = 0; i < dataPartLength; ++i) {
char c = str.charAt(i + pos + 1);
if (CHARSET_REV[c] == -1) throw new AddressFormatException.InvalidCharacter(c, i + pos + 1);
values[i] = CHARSET_REV[c];
}
String hrp = str.substring(0, pos).toLowerCase(Locale.ROOT);
Encoding encoding = verifyChecksum(hrp, values);
if (encoding == null) throw new AddressFormatException.InvalidChecksum();
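        // Drop the trailing 6-character checksum from the data part.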
return new Bech32Data(encoding, hrp, Arrays.copyOfRange(values, 0, values.length - 6));
} | @Test(expected = AddressFormatException.InvalidCharacter.class)
public void decode_invalidCharacter_notInAlphabet() {
Bech32.decode("A12OUEL5X");
} |
public String toHexString()
{
return Hex.getString(bytes);
} | @Test
void testGetHex()
{
String expected = "Test subject for testing getHex";
COSString test1 = new COSString(expected);
String hexForm = createHex(expected);
assertEquals(hexForm, test1.toHexString());
COSString escCS = new COSString(ESC_CHAR_STRING);
// Not sure whether the escaped characters should be escaped or not, presumably since
// writePDF() gives you the proper formatted text, getHex() should ONLY convert to hex.
assertEquals(createHex(ESC_CHAR_STRING), escCS.toHexString());
} |
@Override
protected void write(final MySQLPacketPayload payload) {
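        // The 0xfe status header identifies this packet as an auth switch request.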
payload.writeInt1(HEADER);
payload.writeStringNul(authPluginName);
payload.writeStringNul(new String(authPluginData.getAuthenticationPluginData()));
} | @Test
void assertWrite() {
when(authPluginData.getAuthenticationPluginData()).thenReturn(new byte[]{0x11, 0x22});
MySQLAuthSwitchRequestPacket authSwitchRequestPacket = new MySQLAuthSwitchRequestPacket("plugin", authPluginData);
authSwitchRequestPacket.write(payload);
verify(payload).writeInt1(0xfe);
verify(payload, times(2)).writeStringNul(anyString());
} |
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
} | @Test
public void shouldFormatSubscriptExpression() {
assertThat(ExpressionFormatter.formatExpression(new SubscriptExpression(
new StringLiteral("abc"),
new IntegerLiteral(3))),
equalTo("'abc'[3]"));
} |