focal_method | test_case |
---|---|
public static void main(String[] args) {
var sd = serviceDiscovery();
var service = sd.findAny();
var goodOrderSaga = service.execute(newSaga("good_order"));
var badOrderSaga = service.execute(newSaga("bad_order"));
LOGGER.info("orders: goodOrder is {}, badOrder is {}",
goodOrderSaga.getResult(), badOrderSaga.getResult());
} | @Test
void shouldExecuteWithoutException() {
assertDoesNotThrow(() -> SagaApplication.main(new String[]{}));
} |
public Class<?> compileGroovy(String sFilterCode) throws CompilationFailedException {
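// A new GroovyClassLoader per call, so each compiled filter class gets its own loader.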
GroovyClassLoader loader = new GroovyClassLoader();
return loader.parseClass(sFilterCode);
} | @Test
void testCompile() {
Class<?> filterClass = FilterVerifier.INSTANCE.compileGroovy(sGoodGroovyScriptFilter);
assertNotNull(filterClass);
filterClass = FilterVerifier.INSTANCE.compileGroovy(sNotZuulFilterGroovy);
assertNotNull(filterClass);
assertThrows(CompilationFailedException.class, () -> FilterVerifier.INSTANCE.compileGroovy(sCompileFailCode));
} |
@Override
public boolean setPonLink(String target) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return false;
}
String[] data = checkSetInput(target);
if (data == null) {
log.error("Failed to check input: {}", target);
return false;
}
boolean result = false;
try {
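// The request below produces an edit-config payload shaped like:
// <volt-ne ...><volt-ports><gpon-ponlink-ports><gpon-ponlink-port>
//   <ponlink-id>data[FIRST_PART]</ponlink-id>
//   <data[SECOND_PART]>data[THIRD_PART]</data[SECOND_PART]>
// </gpon-ponlink-port></gpon-ponlink-ports></volt-ports></volt-ne>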
StringBuilder request = new StringBuilder();
request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
request.append(ANGLE_RIGHT + NEW_LINE);
request.append(buildStartTag(VOLT_PORTS))
.append(buildStartTag(GPON_PONLINK_PORTS))
.append(buildStartTag(GPON_PONLINK_PORT))
.append(buildStartTag(PONLINK_ID, false))
.append(data[FIRST_PART])
.append(buildEndTag(PONLINK_ID))
.append(buildStartTag(data[SECOND_PART], false))
.append(data[THIRD_PART])
.append(buildEndTag(data[SECOND_PART]))
.append(buildEndTag(GPON_PONLINK_PORT))
.append(buildEndTag(GPON_PONLINK_PORTS))
.append(buildEndTag(VOLT_PORTS))
.append(VOLT_NE_CLOSE);
result = controller.getDevicesMap().get(ncDeviceId).getSession().
editConfig(RUNNING, null, request.toString());
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return result;
} | @Test
public void testValidSetPonLink() throws Exception {
String target;
boolean result;
for (int i = ZERO; i < VALID_SET_TCS.length; i++) {
target = VALID_SET_TCS[i];
currentKey = i;
result = voltConfig.setPonLink(target);
assertTrue("Incorrect response for VALID_SET_TCS", result);
}
} |
private Messages() {
super( BUNDLE_NAME );
} | @Test
public void testMessages() {
assertEquals( "Wrong message returned", "test message 1", Messages.getInstance().getString( "test.MESSAGE1" ) ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
assertEquals(
"Wrong message returned", "test message 2: A", Messages.getInstance().getString( "test.MESSAGE2", "A" ) ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
assertEquals(
"Wrong message returned", "test message 3: A B", Messages.getInstance().getString( "test.MESSAGE3", "A", "B" ) ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$
assertEquals(
"Wrong message returned", "test message 4: A B C", Messages.getInstance().getString( "test.MESSAGE4", "A", "B", "C" ) ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$ //$NON-NLS-6$
assertEquals(
"Wrong message returned", "test message 5: A B C D", Messages.getInstance().getString( "test.MESSAGE5", "A", "B", "C", "D" ) ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$ //$NON-NLS-6$ //$NON-NLS-7$
} |
public ErrorResponse buildErrorResponse(RestLiServiceException result)
{
// In some cases, people use 3XX to signal the client a redirection. Whether that should count as an
// error is a blurred boundary; to avoid changing the behavior of existing code,
// we exclude 3XX responses from error logging.
if (result.getStatus() != null && result.getStatus().getCode() < HttpStatus.S_300_MULTIPLE_CHOICES.getCode())
{
// It is invalid to send an error response with a success status code; these should be converted to 500 errors.
// Log an error message for now to detect and fix current use cases before we start converting to 500.
LOGGER.error("Incorrect use of success status code with error response", result);
}
if (result.getStatus() == HttpStatus.S_204_NO_CONTENT)
{
// HTTP Spec requires the response body to be empty for HTTP status 204.
return new ErrorResponse();
}
return buildErrorResponse(result, result.hasOverridingErrorResponseFormat() ? result.getOverridingFormat() : _errorResponseFormat);
} | @SuppressWarnings("deprecation")
@Test
public void testExceptionClass()
{
ErrorResponseBuilder builder = new ErrorResponseBuilder();
RestLiServiceException exception = new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST, "foobar", new IllegalStateException("foo"));
exception.setServiceErrorCode(123);
exception.setOverridingFormat(ErrorResponseFormat.MESSAGE_AND_SERVICECODE_AND_EXCEPTIONCLASS);
ErrorResponse errorResponse = builder.buildErrorResponse(exception);
Assert.assertFalse(errorResponse.hasErrorDetails());
Assert.assertTrue(errorResponse.hasExceptionClass());
Assert.assertTrue(errorResponse.hasStatus());
Assert.assertTrue(errorResponse.hasMessage());
Assert.assertTrue(errorResponse.hasServiceErrorCode());
Assert.assertFalse(errorResponse.hasStackTrace());
} |
@ConstantFunction(name = "bitxor", argTypes = {BIGINT, BIGINT}, returnType = BIGINT)
public static ConstantOperator bitxorBigint(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createBigint(first.getBigint() ^ second.getBigint());
} | @Test
public void bitxorBigint() {
assertEquals(0, ScalarOperatorFunctions.bitxorBigint(O_BI_100, O_BI_100).getBigint());
} |
@Override
public PageResult<TenantDO> getTenantPage(TenantPageReqVO pageReqVO) {
return tenantMapper.selectPage(pageReqVO);
} | @Test
public void testGetTenantPage() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> { // the record we expect to find
o.setName("芋道源码");
o.setContactName("芋艿");
o.setContactMobile("15601691300");
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setCreateTime(buildTime(2020, 12, 12));
});
tenantMapper.insert(dbTenant);
// test: name does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setName(randomString())));
// test: contactName does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setContactName(randomString())));
// test: contactMobile does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setContactMobile(randomString())));
// test: status does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
// test: createTime does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setCreateTime(buildTime(2021, 12, 12))));
// prepare parameters
TenantPageReqVO reqVO = new TenantPageReqVO();
reqVO.setName("芋道");
reqVO.setContactName("艿");
reqVO.setContactMobile("1560");
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
reqVO.setCreateTime(buildBetweenTime(2020, 12, 1, 2020, 12, 24));
// invoke
PageResult<TenantDO> pageResult = tenantService.getTenantPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbTenant, pageResult.getList().get(0));
} |
public Collection<Set<V>> computeMaxCliques() {
lazyRun();
return maximumCliques;
} | @Test
public void test6DisconnectedSubgraphsOfWholeGraph() {
List<List<String>> groups = new ArrayList<>();
for (int i = 0; i < 20; i++) {
int j = i;
int vertexCount = i + 10;
List<String> vertices = IntStream.range(0, vertexCount).mapToObj(v -> j + "_" + v).collect(toList());
groups.add(vertices);
}
Graph<String> graph = populateFullyConnectedGraph(groups.stream().flatMap(Collection::stream).collect(toList()));
List<Integer> groupIndices = new ArrayList<>();
while (groupIndices.size() < 6) {
int rackIndex = RandomPicker.getInt(groups.size());
if (!groupIndices.contains(rackIndex)) {
groupIndices.add(rackIndex);
}
}
groupIndices.sort(Comparator.comparingInt((ToIntFunction<Integer>) i -> groups.get(i).size()).reversed());
for (String v1 : groups.get(groupIndices.get(0))) {
for (String v2 : groups.get(groupIndices.get(5))) {
graph.disconnect(v1, v2);
}
}
for (String v1 : groups.get(groupIndices.get(1))) {
for (String v2 : groups.get(groupIndices.get(4))) {
graph.disconnect(v1, v2);
}
}
for (String v1 : groups.get(groupIndices.get(2))) {
for (String v2 : groups.get(groupIndices.get(3))) {
graph.disconnect(v1, v2);
}
}
Collection<Set<String>> maxCliques = new BronKerboschCliqueFinder<>(graph, 60, TimeUnit.SECONDS).computeMaxCliques();
assumeFalse(maxCliques.isEmpty());
Set<String> expectedClique = new HashSet<>();
for (int i = 0; i < groups.size(); i++) {
if (i != groupIndices.get(3) && i != groupIndices.get(4) && i != groupIndices.get(5)) {
expectedClique.addAll(groups.get(i));
}
}
assertEquals(1, maxCliques.size());
assertEquals(expectedClique, maxCliques.iterator().next());
} |
@Override
public Optional<ScmInfo> getScmInfo(Component component) {
requireNonNull(component, "Component cannot be null");
if (component.getType() != Component.Type.FILE) {
return Optional.empty();
}
return scmInfoCache.computeIfAbsent(component, this::getScmInfoForComponent);
} | @Test
public void return_empty_if_component_is_not_file() {
Component c = mock(Component.class);
when(c.getType()).thenReturn(Type.DIRECTORY);
assertThat(underTest.getScmInfo(c)).isEmpty();
} |
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
} | @Test
public void shouldFormatBetweenPredicate() {
final BetweenPredicate predicate = new BetweenPredicate(new StringLiteral("blah"), new LongLiteral(5), new LongLiteral(10));
assertThat(ExpressionFormatter.formatExpression(predicate), equalTo("('blah' BETWEEN 5 AND 10)"));
} |
@Override
public String pluginNamed() {
return PluginEnum.GRPC.getName();
} | @Test
public void testPluginNamed() {
assertEquals(grpcPluginDataHandler.pluginNamed(), PluginEnum.GRPC.getName());
} |
public Embed embed(byte[] bytes, ResourceType resourceType) {
if (embeds == null) {
embeds = new ArrayList<>();
}
Embed embed = saveToFileAndCreateEmbed(bytes, resourceType);
embeds.add(embed);
return embed;
} | @Test
void testEmbed() {
run(
"karate.embed('<h1>hello world</h1>', 'text/html')"
);
List<StepResult> results = sr.result.getStepResults();
assertEquals(1, results.size());
List<Embed> embeds = results.get(0).getEmbeds();
assertEquals(1, embeds.size());
assertEquals(embeds.get(0).getAsString(), "<h1>hello world</h1>");
assertEquals(embeds.get(0).getResourceType(), ResourceType.HTML);
} |
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
Http2HeadersSink sink = new Http2HeadersSink(
streamId, headers, maxHeaderListSize, validateHeaders);
// Check for dynamic table size updates, which must occur at the beginning:
// https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
decodeDynamicTableSizeUpdates(in);
decode(in, sink);
// Now that we've read all of our headers we can perform the validation steps. We must
// delay throwing until this point to prevent dynamic table corruption.
sink.finish();
} | @Test
public void testLiteralWithoutIndexingWithLargeValue() throws Http2Exception {
// Ignore header that exceeds max header size
final StringBuilder sb = new StringBuilder();
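// HPACK (RFC 7541): 0x00 starts a literal header field without indexing with a new name; 0x04 is the name length (4, not Huffman-coded).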
sb.append("0004");
sb.append(hex("name"));
sb.append("7F813F");
for (int i = 0; i < 8192; i++) {
sb.append("61"); // 'a'
}
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() throws Throwable {
decode(sb.toString());
}
});
} |
@Override
public BasicTypeDefine<MysqlType> reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.<MysqlType>builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case NULL:
builder.nativeType(MysqlType.NULL);
builder.columnType(MYSQL_NULL);
builder.dataType(MYSQL_NULL);
break;
case BOOLEAN:
builder.nativeType(MysqlType.BOOLEAN);
builder.columnType(String.format("%s(%s)", MYSQL_TINYINT, 1));
builder.dataType(MYSQL_TINYINT);
builder.length(1L);
break;
case TINYINT:
builder.nativeType(MysqlType.TINYINT);
builder.columnType(MYSQL_TINYINT);
builder.dataType(MYSQL_TINYINT);
break;
case SMALLINT:
builder.nativeType(MysqlType.SMALLINT);
builder.columnType(MYSQL_SMALLINT);
builder.dataType(MYSQL_SMALLINT);
break;
case INT:
builder.nativeType(MysqlType.INT);
builder.columnType(MYSQL_INT);
builder.dataType(MYSQL_INT);
break;
case BIGINT:
builder.nativeType(MysqlType.BIGINT);
builder.columnType(MYSQL_BIGINT);
builder.dataType(MYSQL_BIGINT);
break;
case FLOAT:
builder.nativeType(MysqlType.FLOAT);
builder.columnType(MYSQL_FLOAT);
builder.dataType(MYSQL_FLOAT);
break;
case DOUBLE:
builder.nativeType(MysqlType.DOUBLE);
builder.columnType(MYSQL_DOUBLE);
builder.dataType(MYSQL_DOUBLE);
break;
case DECIMAL:
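// Clamp decimal(precision, scale) into MySQL's supported range: precision into (0, MAX_PRECISION],
// scale into [0, MAX_SCALE], shrinking scale first when precision must be cut down.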
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.nativeType(MysqlType.DECIMAL);
builder.columnType(String.format("%s(%s,%s)", MYSQL_DECIMAL, precision, scale));
builder.dataType(MYSQL_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.nativeType(MysqlType.VARBINARY);
builder.columnType(
String.format("%s(%s)", MYSQL_VARBINARY, MAX_VARBINARY_LENGTH / 2));
builder.dataType(MYSQL_VARBINARY);
} else if (column.getColumnLength() < MAX_VARBINARY_LENGTH) {
builder.nativeType(MysqlType.VARBINARY);
builder.columnType(
String.format("%s(%s)", MYSQL_VARBINARY, column.getColumnLength()));
builder.dataType(MYSQL_VARBINARY);
} else if (column.getColumnLength() < POWER_2_24) {
builder.nativeType(MysqlType.MEDIUMBLOB);
builder.columnType(MYSQL_MEDIUMBLOB);
builder.dataType(MYSQL_MEDIUMBLOB);
} else {
builder.nativeType(MysqlType.LONGBLOB);
builder.columnType(MYSQL_LONGBLOB);
builder.dataType(MYSQL_LONGBLOB);
}
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.nativeType(MysqlType.LONGTEXT);
builder.columnType(MYSQL_LONGTEXT);
builder.dataType(MYSQL_LONGTEXT);
} else if (column.getColumnLength() < POWER_2_8) {
builder.nativeType(MysqlType.VARCHAR);
builder.columnType(
String.format("%s(%s)", MYSQL_VARCHAR, column.getColumnLength()));
builder.dataType(MYSQL_VARCHAR);
} else if (column.getColumnLength() < POWER_2_16) {
builder.nativeType(MysqlType.TEXT);
builder.columnType(MYSQL_TEXT);
builder.dataType(MYSQL_TEXT);
} else if (column.getColumnLength() < POWER_2_24) {
builder.nativeType(MysqlType.MEDIUMTEXT);
builder.columnType(MYSQL_MEDIUMTEXT);
builder.dataType(MYSQL_MEDIUMTEXT);
} else {
builder.nativeType(MysqlType.LONGTEXT);
builder.columnType(MYSQL_LONGTEXT);
builder.dataType(MYSQL_LONGTEXT);
}
break;
case DATE:
builder.nativeType(MysqlType.DATE);
builder.columnType(MYSQL_DATE);
builder.dataType(MYSQL_DATE);
break;
case TIME:
builder.nativeType(MysqlType.TIME);
builder.dataType(MYSQL_TIME);
if (version.isAtOrBefore(MySqlVersion.V_5_5)) {
builder.columnType(MYSQL_TIME);
} else if (column.getScale() != null && column.getScale() > 0) {
int timeScale = column.getScale();
if (timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
MAX_TIME_SCALE,
timeScale);
}
builder.columnType(String.format("%s(%s)", MYSQL_TIME, timeScale));
builder.scale(timeScale);
} else {
builder.columnType(MYSQL_TIME);
}
break;
case TIMESTAMP:
builder.nativeType(MysqlType.DATETIME);
builder.dataType(MYSQL_DATETIME);
if (version.isAtOrBefore(MySqlVersion.V_5_5)) {
builder.columnType(MYSQL_DATETIME);
} else if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("%s(%s)", MYSQL_DATETIME, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(MYSQL_DATETIME);
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.MYSQL,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertFloat() {
Column column =
PhysicalColumn.builder().name("test").dataType(BasicType.FLOAT_TYPE).build();
BasicTypeDefine<MysqlType> typeDefine =
MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(MysqlType.FLOAT, typeDefine.getNativeType());
Assertions.assertEquals(MySqlTypeConverter.MYSQL_FLOAT, typeDefine.getColumnType());
Assertions.assertEquals(MySqlTypeConverter.MYSQL_FLOAT, typeDefine.getDataType());
} |
@Override
public int getNumberOfQueuedBuffers() {
if (findCurrentNettyPayloadQueue()) {
return getBacklog();
}
return 0;
} | @Test
void testGetNumberOfQueuedBuffers() {
assertThat(tieredStorageResultSubpartitionView.getNumberOfQueuedBuffers()).isEqualTo(1);
assertThat(tieredStorageResultSubpartitionView.unsynchronizedGetNumberOfQueuedBuffers())
.isEqualTo(1);
} |
@Override
public Iterator<Record> iterator() {
return iterator(BufferSupplier.NO_CACHING);
} | @Test
public void testZStdCompressionTypeWithV0OrV1() {
SimpleRecord[] simpleRecords = new SimpleRecord[] {
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes())
};
// Check V0
try {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V0, 0L,
Compression.zstd().build(), TimestampType.CREATE_TIME, simpleRecords);
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(1L);
batch.iterator();
fail("Can't reach here");
} catch (IllegalArgumentException e) {
assertEquals("ZStandard compression is not supported for magic 0", e.getMessage());
}
// Check V1
try {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.zstd().build(), TimestampType.CREATE_TIME, simpleRecords);
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(1L);
batch.iterator();
fail("Can't reach here");
} catch (IllegalArgumentException e) {
assertEquals("ZStandard compression is not supported for magic 1", e.getMessage());
}
} |
@VisibleForTesting
MailTemplateDO validateMailTemplate(String templateCode) {
// Get the mail template; for efficiency, fetch it from the cache
MailTemplateDO template = mailTemplateService.getMailTemplateByCodeFromCache(templateCode);
// the mail template does not exist
if (template == null) {
throw exception(MAIL_TEMPLATE_NOT_EXISTS);
}
return template;
} | @Test
public void testValidateMailTemplateValid_notExists() {
// prepare parameters
String templateCode = RandomUtils.randomString();
// mock methods
// invoke and assert the exception
assertServiceException(() -> mailSendService.validateMailTemplate(templateCode),
MAIL_TEMPLATE_NOT_EXISTS);
} |
public static IpAddress valueOf(int value) {
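// ByteBuffer writes the int in big-endian (network) byte order, matching the IPv4 wire format.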
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new IpAddress(Version.INET, bytes);
} | @Test
public void testEqualityIPv4() {
new EqualsTester()
.addEqualityGroup(IpAddress.valueOf("1.2.3.4"),
IpAddress.valueOf("1.2.3.4"))
.addEqualityGroup(IpAddress.valueOf("1.2.3.5"),
IpAddress.valueOf("1.2.3.5"))
.addEqualityGroup(IpAddress.valueOf("0.0.0.0"),
IpAddress.valueOf("0.0.0.0"))
.addEqualityGroup(IpAddress.valueOf("255.255.255.255"),
IpAddress.valueOf("255.255.255.255"))
.testEquals();
} |
public static Expression convert(Predicate[] predicates) {
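// All predicates are AND-ed together; a single unconvertible predicate fails the whole conversion.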
Expression expression = Expressions.alwaysTrue();
for (Predicate predicate : predicates) {
Expression converted = convert(predicate);
Preconditions.checkArgument(
converted != null, "Cannot convert Spark predicate to Iceberg expression: %s", predicate);
expression = Expressions.and(expression, converted);
}
return expression;
} | @Test
public void testNotEqualToNull() {
String col = "col";
NamedReference namedReference = FieldReference.apply(col);
LiteralValue value = new LiteralValue(null, DataTypes.IntegerType);
org.apache.spark.sql.connector.expressions.Expression[] attrAndValue =
new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, value};
org.apache.spark.sql.connector.expressions.Expression[] valueAndAttr =
new org.apache.spark.sql.connector.expressions.Expression[] {value, namedReference};
Predicate notEq1 = new Predicate("<>", attrAndValue);
assertThatThrownBy(() -> SparkV2Filters.convert(notEq1))
.isInstanceOf(NullPointerException.class)
.hasMessageContaining("Expression is always false");
Predicate notEq2 = new Predicate("<>", valueAndAttr);
assertThatThrownBy(() -> SparkV2Filters.convert(notEq2))
.isInstanceOf(NullPointerException.class)
.hasMessageContaining("Expression is always false");
} |
public static List<String> parseAddressList(String addressInfo) {
if (StringUtils.isBlank(addressInfo)) {
return Collections.emptyList();
}
List<String> addressList = new ArrayList<>();
String[] addresses = addressInfo.split(ADDRESS_SEPARATOR);
for (String address : addresses) {
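// URI.getAuthority() reduces each entry to [user-info@]host[:port], dropping scheme and path.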
URI uri = URI.create(address.trim());
addressList.add(uri.getAuthority());
}
return addressList;
} | @Test
public void testEmptyStr() {
List<String> result = AddressUtils.parseAddressList("");
assertThat(result).isNotNull();
assertThat(result).isEmpty();
} |
public <OutputT extends @NonNull Object> CsvIOParse<T> withCustomRecordParsing(
String fieldName, SerializableFunction<String, OutputT> customRecordParsingFn) {
Map<String, SerializableFunction<String, Object>> customProcessingMap =
getConfigBuilder().getOrCreateCustomProcessingMap();
customProcessingMap.put(fieldName, customRecordParsingFn::apply);
getConfigBuilder().setCustomProcessingMap(customProcessingMap);
return this;
} | @Test
public void givenCustomParsingError_emits() {
PCollection<String> records =
csvRecords(pipeline, "instant,instantList", "2024-01-23T10:00:05.000Z,BAD CELL");
CsvIOParse<TimeContaining> underTest =
underTest(
TIME_CONTAINING_SCHEMA,
CSVFormat.DEFAULT
.withHeader("instant", "instantList")
.withAllowDuplicateHeaderNames(false),
new HashMap<>(),
timeContainingFromRowFn(),
TIME_CONTAINING_CODER)
.withCustomRecordParsing("instantList", instantListParsingLambda());
CsvIOParseResult<TimeContaining> result = records.apply(underTest);
PAssert.that(result.getOutput()).empty();
PAssert.thatSingleton(result.getErrors().apply(Count.globally())).isEqualTo(1L);
pipeline.run();
} |
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf =
getConf() == null ? new YarnConfiguration() : new YarnConfiguration(
getConf());
boolean isHAEnabled =
yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
YarnConfiguration.DEFAULT_RM_HA_ENABLED);
if (args.length < 1) {
printUsage("", isHAEnabled);
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = args[i++];
exitCode = 0;
if ("-help".equals(cmd)) {
if (i < args.length) {
printUsage(args[i], isHAEnabled);
} else {
printHelp("", isHAEnabled);
}
return exitCode;
}
if (USAGE.containsKey(cmd)) {
if (isHAEnabled) {
return super.run(args);
}
System.out.println("Cannot run " + cmd
+ " when ResourceManager HA is not enabled");
return -1;
}
//
// verify that we have enough command line parameters
//
String subClusterId = StringUtils.EMPTY;
if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
"-refreshNodesResources".equals(cmd) ||
"-refreshServiceAcl".equals(cmd) ||
"-refreshUserToGroupsMappings".equals(cmd) ||
"-refreshSuperUserGroupsConfiguration".equals(cmd) ||
"-refreshClusterMaxPriority".equals(cmd)) {
subClusterId = parseSubClusterId(args, isHAEnabled);
// If we enable Federation mode, the number of args may be either one or three.
// Example: -refreshQueues or -refreshQueues -subClusterId SC-1
if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
printUsage(cmd, isHAEnabled);
return exitCode;
} else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
// If Federation mode is not enabled, then the number of args can only be one.
// Example: -refreshQueues
printUsage(cmd, isHAEnabled);
return exitCode;
}
}
// If it is federation mode, we will print federation mode information
if (isYarnFederationEnabled(getConf())) {
System.out.println("Using YARN Federation mode.");
}
try {
if ("-refreshQueues".equals(cmd)) {
exitCode = refreshQueues(subClusterId);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
} else if ("-refreshNodesResources".equals(cmd)) {
exitCode = refreshNodesResources(subClusterId);
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings(subClusterId);
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
} else if ("-refreshAdminAcls".equals(cmd)) {
exitCode = refreshAdminAcls(subClusterId);
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcls(subClusterId);
} else if ("-refreshClusterMaxPriority".equals(cmd)) {
exitCode = refreshClusterMaxPriority(subClusterId);
} else if ("-getGroups".equals(cmd)) {
String[] usernames = Arrays.copyOfRange(args, i, args.length);
exitCode = getGroups(usernames);
} else if ("-updateNodeResource".equals(cmd)) {
exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
} else if ("-addToClusterNodeLabels".equals(cmd)) {
exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-removeFromClusterNodeLabels".equals(cmd)) {
exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-replaceLabelsOnNode".equals(cmd)) {
exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("", isHAEnabled);
}
} catch (IllegalArgumentException arge) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd, isHAEnabled);
} catch (RemoteException e) {
//
// This is an error returned by the Hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
}
} catch (Exception e) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (null != localNodeLabelsManager) {
localNodeLabelsManager.stop();
}
return exitCode;
} | @Test
public void testRemoveLabelsOnNodes() throws Exception {
// Successfully replace labels
dummyNodeLabelsManager
.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
String[] args = { "-replaceLabelsOnNode", "node1=x node2=y",
"-directlyAccessNodeLabelStore" };
assertTrue(0 == rmAdminCLI.run(args));
args = new String[] { "-replaceLabelsOnNode", "node1= node2=",
"-directlyAccessNodeLabelStore" };
assertTrue("Labels should get replaced even '=' is used ",
0 == rmAdminCLI.run(args));
} |
@Override
public void search(K q, double radius, List<Neighbor<K, V>> neighbors) {
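// SimHash signatures are compared by Hamming distance, so the radius must be a positive whole number of bits.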
if (radius <= 0 || radius != (int) radius) {
throw new IllegalArgumentException("The parameter radius has to be an integer: " + radius);
}
long fpq = simhash.hash(q);
Set<Integer> candidates = getCandidates(q);
for (int index : candidates) {
int distance = HammingDistance.d(fpq, signatures.get(index));
if (distance <= radius) {
neighbors.add(new Neighbor<>(keys.get(index), data.get(index), index, distance));
}
}
} | @Test
public void test() throws IOException {
System.out.println("SNLSH");
SNLSH<String[], String> lsh = createLSH(texts);
ArrayList<Neighbor<String[], String>> neighbors = new ArrayList<>();
lsh.search(tokenize(texts[0]), 3, neighbors);
assertEquals(2, neighbors.size());
assertEquals(0, neighbors.get(0).index);
assertEquals(1, neighbors.get(1).index);
neighbors.clear();
lsh.search(tokenize(texts[1]), 3, neighbors);
assertEquals(2, neighbors.size());
assertEquals(0, neighbors.get(0).index);
assertEquals(1, neighbors.get(1).index);
neighbors.clear();
lsh.search(tokenize(texts[2]), 3, neighbors);
assertEquals(1, neighbors.size());
assertEquals(2, neighbors.get(0).index);
neighbors.clear();
lsh.search(tokenize(texts[3]), 3, neighbors);
assertEquals(1, neighbors.size());
assertEquals(3, neighbors.get(0).index);
} |
@Override
@Deprecated
public ByteOrder order() {
return unwrap().order();
} | @Test
public void shouldHaveSameByteOrder() {
ByteBuf buf = buffer(1);
assertSame(BIG_ENDIAN, unmodifiableBuffer(buf).order());
buf = buf.order(LITTLE_ENDIAN);
assertSame(LITTLE_ENDIAN, unmodifiableBuffer(buf).order());
} |
public String decode(String s) throws CannotDecodeException
{
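// Decodes escape sequences of the form <encodingChar><two hex digits> back to the
// original character, e.g. "~2E" -> "." when '~' is the encoding char.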
if (s == null)
{
return null;
}
StringBuilder sb = new StringBuilder();
int len = s.length();
for (int i = 0; i < len; i++)
{
char c = s.charAt(i);
if (c == _encodingChar)
{
if (i + 3 > len)
{
throw new CannotDecodeException(s + " - Failed to decode incomplete escaped char at offset " + i);
}
String asciiHex = s.substring(i + 1, i + 3);
try
{
Integer asciiInt = Integer.parseInt(asciiHex, 16);
sb.append((char) asciiInt.intValue());
}
catch (NumberFormatException ex)
{
throw new CannotDecodeException(s + " - Failed to decode escaped char at offset " + i);
}
i += 2;
}
else
{
sb.append(c);
}
}
return sb.toString();
} | @Test
public void testSingleCharDecode() throws Exception
{
Assert.assertEquals(TEST_ENCODING_1.decode("~2E"), ".");
Assert.assertEquals(TEST_ENCODING_1.decode("~5B"), "[");
Assert.assertEquals(TEST_ENCODING_1.decode("~5D"), "]");
Assert.assertEquals(TEST_ENCODING_1.decode("~7E"), "~");
Assert.assertEquals(TEST_ENCODING_1.decode("~10"), "\020"); // first char that does not require 0 padding
Assert.assertEquals(TEST_ENCODING_1.decode("~0F"), "\017"); // last char that requires 0 padding
} |
@Override
public String getModuleName() {
return Modules.Encrypt.MODULE_NAME;
} | @Test
public void getModuleName() {
SAHelper.initSensors(mApplication);
SAEncryptProtocolImpl encryptProtocol = new SAEncryptProtocolImpl();
encryptProtocol.install(SensorsDataAPI.sharedInstance(mApplication).getSAContextManager());
Assert.assertEquals(encryptProtocol.getModuleName(), Modules.Encrypt.MODULE_NAME);
} |
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide if to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
// results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
} | @Test
public void testWithRegexpCondition() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.conditionType(REGEX)
.conditionValue("^hello")
.build();
// Extractor runs if the message matches the condition regexp.
final Message msg1 = createMessage("hello world");
extractor.runExtractor(msg1);
assertThat(msg1.hasField("target")).isTrue();
// Extractor does not run if the message does not match the condition regexp.
final Message msg2 = createMessage("the hello");
extractor.runExtractor(msg2);
assertThat(msg2.hasField("target")).isFalse();
} |
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException {
/*
* Creates a new instance because the object is not immutable.
*/
new Latin1StringsParser().doParse(stream, handler, metadata, context);
} | @Test
public void testParse() throws Exception {
String testStr =
"These are Latin1 accented scripts: \u00C2 \u00C3 \u00C9 \u00DC \u00E2 " +
"\u00E3 \u00E9 \u00FC";
String smallStr = "ab";
byte[] iso8859Bytes = testStr.getBytes(ISO_8859_1);
byte[] utf8Bytes = testStr.getBytes(UTF_8);
byte[] utf16Bytes = testStr.getBytes(UTF_16);
byte[] zeros = new byte[10];
byte[] smallString = smallStr.getBytes(ISO_8859_1);
byte[] trashBytes = {0x00, 0x01, 0x02, 0x03, 0x1E, 0x1F, (byte) 0xFF};
ByteArrayOutputStream baos = new ByteArrayOutputStream();
baos.write(iso8859Bytes);
baos.write(zeros);
baos.write(utf8Bytes);
baos.write(trashBytes);
baos.write(utf16Bytes);
baos.write(zeros);
baos.write(smallString);
Parser parser = new Latin1StringsParser();
ContentHandler handler = new BodyContentHandler();
try (InputStream stream = new ByteArrayInputStream(baos.toByteArray())) {
parser.parse(stream, handler, new Metadata(), new ParseContext());
}
String result = handler.toString();
String expected = testStr + "\n" + testStr + "\n" + testStr + "\n";
// Test if result contains only the test string appended 3 times
assertTrue(result.equals(expected));
} |
public void createView(View view, boolean replace, boolean ifNotExists) {
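// ifNotExists takes precedence over replace: IF NOT EXISTS never overwrites,
// OR REPLACE always does, and a plain CREATE fails if the name is taken.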
if (ifNotExists) {
relationsStorage.putIfAbsent(view.name(), view);
} else if (replace) {
relationsStorage.put(view.name(), view);
} else if (!relationsStorage.putIfAbsent(view.name(), view)) {
throw QueryException.error("Mapping or view already exists: " + view.name());
}
} | @Test
public void when_createsViewIfNotExists_then_succeeds() {
// given
View view = view();
given(relationsStorage.putIfAbsent(view.name(), view)).willReturn(true);
// when
catalog.createView(view, false, true);
// then
verify(relationsStorage).putIfAbsent(eq(view.name()), isA(View.class));
} |
public static String md5Hex(byte[] bytes) throws NoSuchAlgorithmException {
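// The thread-local digest is removed after each call, presumably so it does not leak across pooled threads.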
try {
MessageDigest messageDigest = MESSAGE_DIGEST_LOCAL.get();
if (messageDigest != null) {
return encodeHexString(messageDigest.digest(bytes));
}
throw new NoSuchAlgorithmException("MessageDigest get MD5 instance error");
} finally {
MESSAGE_DIGEST_LOCAL.remove();
}
} | @Test
public void assertMd5Hex2() {
String md5 = "503840dc3af3cdb39749cd099e4dfeff";
String message = "dynamic-threadpool-example";
Assert.isTrue(md5.equals(Md5Util.md5Hex(message, "UTF-8")));
} |
@ExceptionHandler(RuntimeException.class)
protected ResponseEntity<?> handleRuntimeException(final RuntimeException runtimeException) {
CustomError customError = CustomError.builder()
.httpStatus(HttpStatus.NOT_FOUND)
.header(CustomError.Header.API_ERROR.getName())
.message(runtimeException.getMessage())
.build();
return new ResponseEntity<>(customError, HttpStatus.NOT_FOUND);
} | @Test
void givenRuntimeException_whenHandleRuntimeException_thenRespondWithNotFound() {
// Given
RuntimeException ex = new RuntimeException("Runtime exception message");
CustomError expectedError = CustomError.builder()
.httpStatus(HttpStatus.NOT_FOUND)
.header(CustomError.Header.API_ERROR.getName())
.message("Runtime exception message")
.build();
// When
ResponseEntity<?> responseEntity = globalExceptionHandler.handleRuntimeException(ex);
// Then
assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
CustomError actualError = (CustomError) responseEntity.getBody();
checkCustomError(expectedError, actualError);
} |
public static <T> Either<String, T> resolveImportDMN(Import importElement, Collection<T> dmns, Function<T, QName> idExtractor) {
final String importerDMNNamespace = ((Definitions) importElement.getParent()).getNamespace();
final String importerDMNName = ((Definitions) importElement.getParent()).getName();
final String importNamespace = importElement.getNamespace();
final String importName = importElement.getName();
final String importLocationURI = importElement.getLocationURI(); // This is optional
final String importModelName = importElement.getAdditionalAttributes().get(TImport.MODELNAME_QNAME);
LOGGER.debug("Resolving an Import in DMN Model with name={} and namespace={}. " +
"Importing a DMN model with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
List<T> matchingDMNList = dmns.stream()
.filter(m -> idExtractor.apply(m).getNamespaceURI().equals(importNamespace))
.toList();
if (matchingDMNList.size() == 1) {
T located = matchingDMNList.get(0);
// Check if the located DMN Model in the NS, correspond for the import `drools:modelName`.
if (importModelName == null || idExtractor.apply(located).getLocalPart().equals(importModelName)) {
LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
"with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofRight(located);
} else {
LOGGER.error("DMN Model with name={} and namespace={} can't import a DMN with namespace={}, name={}, modelName={}, " +
"located within namespace only {} but does not match for the actual modelName",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located));
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s can't import a DMN with namespace=%s, name=%s, modelName=%s, " +
"located within namespace only %s but does not match for the actual modelName",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located)));
}
} else {
List<T> usingNSandName = matchingDMNList.stream()
.filter(dmn -> idExtractor.apply(dmn).getLocalPart().equals(importModelName))
.toList();
if (usingNSandName.size() == 1) {
LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
"with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofRight(usingNSandName.get(0));
} else if (usingNSandName.isEmpty()) {
LOGGER.error("DMN Model with name={} and namespace={} failed to import a DMN with namespace={} name={} locationURI={}, modelName={}.",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s failed to import a DMN with namespace=%s name=%s locationURI=%s, modelName=%s. ",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName));
} else {
LOGGER.error("DMN Model with name={} and namespace={} detected a collision ({} elements) trying to import a DMN with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, usingNSandName.size(), importNamespace, importName, importLocationURI, importModelName);
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s detected a collision trying to import a DMN with %s namespace, " +
"%s name and modelName %s. There are %s DMN files with the same namespace in your project. " +
"Please change the DMN namespaces and make them unique to fix this issue.",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, usingNSandName.size()));
}
}
} | @Test
void nSnoModelNameWithAlias() {
final Import i = makeImport("ns1", "mymodel", null);
final List<QName> available = Arrays.asList(new QName("ns1", "m1"),
new QName("ns2", "m2"),
new QName("ns3", "m3"));
final Either<String, QName> result = ImportDMNResolverUtil.resolveImportDMN(i, available, Function.identity());
assertThat(result.isRight()).isTrue();
assertThat(result.getOrElse(null)).isEqualTo(new QName("ns1", "m1"));
} |
static public Entry buildMenuStructure(String xml) {
final Reader reader = new StringReader(xml);
return buildMenuStructure(reader);
} | @Test
public void givenXmlWithSameChildLevels_createsStructure() {
String xmlWithoutContent = "<FreeplaneUIEntries><Entry name='level1'/>"
+ "<Entry name='level2'/>"
+ "</FreeplaneUIEntries>";
Entry builtMenuStructure = XmlEntryStructureBuilder.buildMenuStructure(xmlWithoutContent);
Entry menuStructureWithChildEntry = new Entry();
final Entry childEntry = new Entry();
childEntry.setName("level1");
menuStructureWithChildEntry.addChild(childEntry);
final Entry child2Entry = new Entry();
child2Entry.setName("level2");
menuStructureWithChildEntry.addChild(child2Entry);
assertThat(builtMenuStructure, equalTo(menuStructureWithChildEntry));
} |
public Set<MessageQueue> fetchSubscribeMessageQueues(String topic) throws MQClientException {
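// On a local cache miss, refresh the topic route from the name server and retry the lookup once.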
Set<MessageQueue> result = this.rebalanceImpl.getTopicSubscribeInfoTable().get(topic);
if (null == result) {
this.mQClientFactory.updateTopicRouteInfoFromNameServer(topic);
result = this.rebalanceImpl.getTopicSubscribeInfoTable().get(topic);
}
if (null == result) {
throw new MQClientException("The topic[" + topic + "] not exist", null);
}
return parseSubscribeMessageQueues(result);
} | @Test
public void testFetchSubscribeMessageQueues() throws MQClientException {
Set<MessageQueue> actual = defaultMQPushConsumerImpl.fetchSubscribeMessageQueues(defaultTopic);
assertNotNull(actual);
Assert.assertEquals(1, actual.size());
MessageQueue next = actual.iterator().next();
assertEquals(defaultTopic, next.getTopic());
assertEquals(defaultBroker, next.getBrokerName());
assertEquals(0, next.getQueueId());
} |
@VisibleForTesting
void updateValueMeta() throws KettleException {
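// Replaces each output-row value meta with the aggregate meta of the same name,
// keeping the field at its original position in the output row.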
List<ValueMetaInterface> outputValueMetaList = data.outputRowMeta.getValueMetaList();
List<ValueMetaInterface> aggMetaValueMetaList = data.aggMeta.getValueMetaList();
for ( int outputIndex = 0; outputIndex < outputValueMetaList.size(); ++outputIndex ) {
for ( int aggIndex = 0; aggIndex < aggMetaValueMetaList.size(); ++aggIndex ) {
if ( aggMetaValueMetaList.get( aggIndex ).getName().equals( outputValueMetaList.get( outputIndex ).getName() ) ) {
data.outputRowMeta.removeValueMeta( outputValueMetaList.get( outputIndex ).getName() );
data.outputRowMeta.addValueMeta( outputIndex, aggMetaValueMetaList.get( aggIndex ) );
}
}
}
} | @Test
public void updateValueMetaNoMatchTest() throws KettleException {
ValueMetaString stringMetaFromOutput = new ValueMetaString( "stringMeta" );
ValueMetaBinary binaryMetaFromOutput = new ValueMetaBinary( "binaryMeta" );
ValueMetaBinary binaryMetaFromAgg = new ValueMetaBinary( "binaryMeta2" );
ValueMetaInteger integerMetaFromOutput = new ValueMetaInteger( "integerMeta" );
memGroupByData.outputRowMeta.addValueMeta( stringMetaFromOutput );
memGroupByData.outputRowMeta.addValueMeta( binaryMetaFromOutput );
memGroupByData.outputRowMeta.addValueMeta( integerMetaFromOutput );
memGroupByData.aggMeta.addValueMeta( binaryMetaFromAgg );
doCallRealMethod().when( memGroupBy ).updateValueMeta();
memGroupBy.updateValueMeta();
assertTrue( memGroupByData.outputRowMeta.getValueMetaList().contains( binaryMetaFromOutput ) );
assertFalse( memGroupByData.outputRowMeta.getValueMetaList().contains( binaryMetaFromAgg ) );
} |
@Override
public void run() {
MetricsContainerImpl metricsContainer = new MetricsContainerImpl(transform.getFullName());
try (Closeable metricsScope = MetricsEnvironment.scopedMetricsContainer(metricsContainer)) {
Collection<ModelEnforcement<T>> enforcements = new ArrayList<>();
for (ModelEnforcementFactory enforcementFactory : modelEnforcements) {
ModelEnforcement<T> enforcement = enforcementFactory.forBundle(inputBundle, transform);
enforcements.add(enforcement);
}
@Nullable
TransformEvaluator<T> evaluator = evaluatorRegistry.forApplication(transform, inputBundle);
if (evaluator == null) {
onComplete.handleEmpty(transform);
// Nothing to do
return;
}
processElements(evaluator, metricsContainer, enforcements);
finishBundle(evaluator, metricsContainer, enforcements);
} catch (Exception e) {
onComplete.handleException(inputBundle, e);
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
throw new RuntimeException(e);
} catch (Error err) {
LOG.error("Error occurred within {}", this, err);
onComplete.handleError(err);
throw err;
} finally {
// Report the physical metrics from the end of this step.
context.getMetrics().commitPhysical(inputBundle, metricsContainer.getCumulative());
transformEvaluationState.complete(this);
}
} | @Test
public void callWithNullInputBundleFinishesBundleAndCompletes() throws Exception {
final TransformResult<Object> result = StepTransformResult.withoutHold(createdProducer).build();
final AtomicBoolean finishCalled = new AtomicBoolean(false);
TransformEvaluator<Object> evaluator =
new TransformEvaluator<Object>() {
@Override
public void processElement(WindowedValue<Object> element) throws Exception {
throw new IllegalArgumentException("Shouldn't be called");
}
@Override
public TransformResult<Object> finishBundle() throws Exception {
finishCalled.set(true);
return result;
}
};
when(registry.forApplication(createdProducer, null)).thenReturn(evaluator);
DirectTransformExecutor<Object> executor =
new DirectTransformExecutor<>(
evaluationContext,
registry,
Collections.emptyList(),
null,
createdProducer,
completionCallback,
transformEvaluationState);
executor.run();
assertThat(finishCalled.get(), is(true));
assertThat(completionCallback.handledResult, equalTo(result));
assertThat(completionCallback.handledException, is(nullValue()));
} |
@Nullable
static Boolean minIsrBasedConcurrencyAdjustment(CruiseControlRequestContext requestContext) {
String parameterString = caseSensitiveParameterName(requestContext.getParameterMap(), MIN_ISR_BASED_CONCURRENCY_ADJUSTMENT_PARAM);
if (parameterString == null) {
return null;
}
return Boolean.parseBoolean(requestContext.getParameter(parameterString));
} | @Test
public void testMinIsrBasedConcurrencyAdjustment() {
String firstResponse = Boolean.TRUE.toString();
String secondResponse = Boolean.FALSE.toString();
// Mock for (1) default response (2) response for valid input with true/false.
CruiseControlRequestContext mockRequest = EasyMock.mock(CruiseControlRequestContext.class);
EasyMock.expect(mockRequest.getParameterMap()).andReturn(Collections.emptyMap())
.andReturn(Collections.singletonMap(ParameterUtils.MIN_ISR_BASED_CONCURRENCY_ADJUSTMENT_PARAM, new String[]{firstResponse}))
.andReturn(Collections.singletonMap(ParameterUtils.MIN_ISR_BASED_CONCURRENCY_ADJUSTMENT_PARAM, new String[]{secondResponse}));
EasyMock.expect(mockRequest.getParameter(ParameterUtils.MIN_ISR_BASED_CONCURRENCY_ADJUSTMENT_PARAM))
.andReturn(firstResponse).andReturn(secondResponse);
// Verify default response.
EasyMock.replay(mockRequest);
Boolean minIsrBasedConcurrencyAdjustment = ParameterUtils.minIsrBasedConcurrencyAdjustment(mockRequest);
Assert.assertNull(minIsrBasedConcurrencyAdjustment);
// Verify response for valid input.
minIsrBasedConcurrencyAdjustment = ParameterUtils.minIsrBasedConcurrencyAdjustment(mockRequest);
Assert.assertNotNull(minIsrBasedConcurrencyAdjustment);
Assert.assertTrue(minIsrBasedConcurrencyAdjustment);
minIsrBasedConcurrencyAdjustment = ParameterUtils.minIsrBasedConcurrencyAdjustment(mockRequest);
Assert.assertNotNull(minIsrBasedConcurrencyAdjustment);
Assert.assertFalse(minIsrBasedConcurrencyAdjustment);
EasyMock.verify(mockRequest);
} |
public boolean contains(double latitude, double longitude) {
return this.minLatitude <= latitude && this.maxLatitude >= latitude
&& this.minLongitude <= longitude && this.maxLongitude >= longitude;
} | @Test
public void containsCoordinatesTest() {
BoundingBox boundingBox = new BoundingBox(MIN_LATITUDE, MIN_LONGITUDE, MAX_LATITUDE, MAX_LONGITUDE);
Assert.assertTrue(boundingBox.contains(MIN_LATITUDE, MIN_LONGITUDE));
Assert.assertTrue(boundingBox.contains(MAX_LATITUDE, MAX_LONGITUDE));
Assert.assertFalse(boundingBox.contains(MIN_LONGITUDE, MIN_LONGITUDE));
Assert.assertFalse(boundingBox.contains(MAX_LATITUDE, MAX_LATITUDE));
} |
DnsOverXmppMiniDnsResolver(DnsClient dnsClient, DnssecClient dnssecClient) {
this.dnsClient = dnsClient;
this.dnssecClient = dnssecClient;
} | @Test
public void dnsOverXmppMiniDnsResolverTest() throws IOException {
TestDnsDataSource dnsSource = new TestDnsDataSource();
TestDnsDataSource dnssecSource = new TestDnsDataSource();
DnsClient dnsClient = new DnsClient(NoopDnsCache.INSTANCE);
dnsClient.setDataSource(dnsSource);
DnssecClient dnssecClient = new DnssecClient(NoopDnsCache.INSTANCE);
dnssecClient.setDataSource(dnssecSource);
DnsOverXmppMiniDnsResolver doxResolver = new DnsOverXmppMiniDnsResolver(dnsClient, dnssecClient);
Question question = new Question("example.org", TYPE.A);
{
DnsMessage nondnssecQuery = question.asQueryMessage();
doxResolver.resolve(nondnssecQuery);
assertTrue(dnsSource.getAndResetWasQueried());
assertFalse(dnssecSource.getAndResetWasQueried());
}
{
DnsMessage.Builder dnssecQueryBuilder = question.asMessageBuilder();
dnssecQueryBuilder.getEdnsBuilder().setDnssecOk();
DnsMessage dnssecQuery = dnssecQueryBuilder.build();
DnssecValidationFailedException dnssecValidationFailedException = null;
try {
doxResolver.resolve(dnssecQuery);
} catch (DnssecValidationFailedException e) {
dnssecValidationFailedException = e;
}
// This exception is expected since we don't have a real DNS source.
assertNotNull(dnssecValidationFailedException);
assertFalse(dnsSource.getAndResetWasQueried());
assertTrue(dnssecSource.getAndResetWasQueried());
}
} |
public FEELFnResult<Object> invoke(@ParameterName("input") String input, @ParameterName("pattern") String pattern,
@ParameterName( "replacement" ) String replacement ) {
return invoke(input, pattern, replacement, null);
} | @Test
void invokeWithoutFlagsPatternMatches() {
FunctionTestUtil.assertResult(replaceFunction.invoke("testString", "^test", "ttt"), "tttString");
FunctionTestUtil.assertResult(replaceFunction.invoke("testStringtest", "^test", "ttt"), "tttStringtest");
} |
public boolean isObjectExists(String objectName) {
try {
minioClient.statObject(StatObjectArgs.builder()
.bucket(bucketName)
.object(objectName)
.build()
);
return true;
} catch (Exception e) {
return false;
}
} | @Test
void isObjectExists() {
boolean exists = minioService.isObjectExists("fuck-you.jpeg");
System.out.println("exists = " + exists);
assert !exists;
} |
@Override
public void open(Configuration parameters) throws Exception {
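// Registers the maintenance metric counters and the keyed state that survives restarts:
// next evaluation time, accumulated table changes, and per-task last trigger times.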
this.rateLimiterTriggeredCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
this.concurrentRunThrottledCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
this.nothingToTriggerCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
this.triggerCounters =
taskNames.stream()
.map(
name ->
getRuntimeContext()
.getMetricGroup()
.addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
.counter(TableMaintenanceMetrics.TRIGGERED))
.collect(Collectors.toList());
this.nextEvaluationTimeState =
getRuntimeContext()
.getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
this.accumulatedChangesState =
getRuntimeContext()
.getListState(
new ListStateDescriptor<>(
"triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
this.lastTriggerTimesState =
getRuntimeContext()
.getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
tableLoader.open();
} | @Test
void testLockCheckDelay() throws Exception {
TableLoader tableLoader = sql.tableLoader(TABLE_NAME);
TriggerManager manager = manager(tableLoader, 1, DELAY);
try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
harness(manager)) {
testHarness.open();
addEventAndCheckResult(testHarness, TableChange.builder().commitCount(2).build(), 1);
// Create a lock to prevent execution, and check that there is no result
assertThat(lock.tryLock()).isTrue();
addEventAndCheckResult(testHarness, TableChange.builder().commitCount(2).build(), 1);
long currentTime = testHarness.getProcessingTime();
// Remove the lock, and still no trigger
lock.unlock();
assertThat(testHarness.extractOutputValues()).hasSize(1);
// Check that the trigger fired after the delay
testHarness.setProcessingTime(currentTime + DELAY);
assertThat(testHarness.extractOutputValues()).hasSize(2);
}
} |
public void analyze(ConnectContext context) {
dbName = AnalyzerUtils.getOrDefaultDatabase(dbName, context);
FeNameFormat.checkLabel(labelName);
} | @Test(expected = SemanticException.class)
public void testNoDb() throws SemanticException {
new Expectations() {
{
analyzer.getDefaultDb();
minTimes = 0;
result = null;
}
};
LabelName label = new LabelName("", "testLabel");
label.analyze(new ConnectContext());
Assert.fail("No exception throws");
} |
@Config("function-implementation-type")
public SqlFunctionLanguageConfig setFunctionImplementationType(String implementationType)
{
this.functionImplementationType = FunctionImplementationType.valueOf(implementationType.toUpperCase());
return this;
} | @Test
public void testDefault()
{
assertRecordedDefaults(recordDefaults(SqlFunctionLanguageConfig.class)
.setFunctionImplementationType("SQL"));
} |
@Override
public void writeBytes(Slice source)
{
writeBytes(source, 0, source.length());
} | @Test
public void testWriteBytes()
{
DataSize chunkSize = new DataSize(700, BYTE);
DataSize maxCompressionSize = new DataSize(chunkSize.toBytes() + 3, BYTE); // 3 accounts for the chunk header size
DataSize dataSize = new DataSize(2 * 700 + 10, BYTE);
byte[] testData = createTestData(dataSize);
List<DataSize> expectedChunkSizes = buildExpectedChunks(chunkSize, dataSize);
List<List<DataSize>> allWriteChunks = buildWriteChunksCombos(dataSize, 3);
for (List<DataSize> writeChunks : allWriteChunks) {
assertWriteBytes(testData, maxCompressionSize, writeChunks, expectedChunkSizes);
}
} |
@Override
public NSImage documentIcon(final String extension, final Integer size) {
NSImage image = this.load(extension, size);
if(null == image) {
return this.cache(extension,
this.convert(extension, workspace.iconForFileType(extension), size), size);
}
return image;
} | @Test
public void testDocumentIconNoExtension() {
final NSImage icon = new NSImageIconCache().documentIcon("", 64);
assertNotNull(icon);
assertTrue(icon.isValid());
assertFalse(icon.isTemplate());
assertEquals(64, icon.size().width.intValue());
assertEquals(64, icon.size().height.intValue());
} |
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterReplicaMap() {
Iterable<RedisClusterNode> res = clusterGetNodes();
Set<RedisClusterNode> masters = new HashSet<RedisClusterNode>();
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
RedisClusterNode redisClusterNode = iterator.next();
if (redisClusterNode.isMaster()) {
masters.add(redisClusterNode);
}
}
Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<RedisClusterNode, Collection<RedisClusterNode>>();
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
RedisClusterNode redisClusterNode = iterator.next();
for (RedisClusterNode masterNode : masters) {
if (redisClusterNode.getMasterId() != null
&& redisClusterNode.getMasterId().equals(masterNode.getId())) {
Collection<RedisClusterNode> list = result.get(masterNode);
if (list == null) {
list = new ArrayList<RedisClusterNode>();
result.put(masterNode, list);
}
list.add(redisClusterNode);
}
}
}
return result;
} | @Test
public void testClusterGetMasterSlaveMap() {
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterReplicaMap();
assertThat(map).hasSize(3);
for (Collection<RedisClusterNode> slaves : map.values()) {
assertThat(slaves).hasSize(1);
}
} |
public static <E, K, U> Map<K, Map<U, List<E>>> groupBy2Key(Collection<E> collection, Function<E, K> key1, Function<E, U> key2) {
return groupBy2Key(collection, key1, key2, false);
} | @Test
public void testGroupBy2Key() {
Map<Long, Map<Long, List<Student>>> map = CollStreamUtil.groupBy2Key(null, Student::getTermId, Student::getClassId);
assertEquals(Collections.EMPTY_MAP, map);
List<Student> list = new ArrayList<>();
map = CollStreamUtil.groupBy2Key(list, Student::getTermId, Student::getClassId);
assertEquals(Collections.EMPTY_MAP, map);
list.add(new Student(1, 1, 1, "张三"));
list.add(new Student(1, 2, 2, "李四"));
list.add(new Student(1, 2, 3, "王五"));
list.add(new Student(2, 1, 1, "擎天柱"));
list.add(new Student(2, 2, 2, "威震天"));
list.add(new Student(2, 2, 3, "霸天虎"));
map = CollStreamUtil.groupBy2Key(list, Student::getTermId, Student::getClassId);
Map<Long, Map<Long, List<Student>>> compare = new HashMap<>();
Map<Long, List<Student>> map1 = new HashMap<>();
List<Student> list11 = new ArrayList<>();
list11.add(new Student(1, 1, 1, "张三"));
map1.put(1L, list11);
compare.put(1L, map1);
List<Student> list12 = new ArrayList<>();
list12.add(new Student(1, 2, 2, "李四"));
list12.add(new Student(1, 2, 3, "王五"));
map1.put(2L, list12);
compare.put(2L, map1);
Map<Long, List<Student>> map2 = new HashMap<>();
List<Student> list21 = new ArrayList<>();
list21.add(new Student(2, 1, 1, "擎天柱"));
map2.put(1L, list21);
compare.put(2L, map2);
List<Student> list22 = new ArrayList<>();
list22.add(new Student(2, 2, 2, "威震天"));
list22.add(new Student(2, 2, 3, "霸天虎"));
map2.put(2L, list22);
compare.put(2L, map2);
assertEquals(compare, map);
} |
public void setFlags(String flagsAsString) {
String[] flagsArray = flagsAsString.split(",");
this.flags = new Flag[flagsArray.length];
for (int i = 0; i < flagsArray.length; i++) {
this.flags[i] = Flag.valueOf(flagsArray[i]);
}
} | @Test
public void embeddedCacheWithFlagsTest() throws Exception {
InfinispanEmbeddedConfiguration configuration = new InfinispanEmbeddedConfiguration();
configuration.setFlags(Flag.IGNORE_RETURN_VALUES);
try (InfinispanEmbeddedManager manager = new InfinispanEmbeddedManager(configuration)) {
manager.start();
BasicCache<Object, Object> cache = manager.getCache("default");
assertNotNull(cache);
assertTrue(cache instanceof Cache);
assertTrue(cache instanceof AdvancedCache);
String key = UUID.randomUUID().toString();
assertNull(cache.put(key, "val1"));
// TODO: as we are testing a local cache, the IGNORE_RETURN_VALUES flag
// does not take effect here, so this assertion stays disabled:
// assertNull(cache.put(key, "val2"));
}
} |
public static Map<String, String> getProps(String properties) {
Map<String, String> configs = new HashMap<>();
if (StringUtils.isNotEmpty(properties)) {
for (String property : properties.split(";")) {
if (StringUtils.isNotEmpty(property)) {
int delimiterPosition = property.indexOf(":");
String key = property.substring(0, delimiterPosition);
String value = property.substring(delimiterPosition + 1);
configs.put(key, value);
}
}
}
return configs;
} | @Test
void givenNullOrEmpty_whenGetConfig_thenEmptyMap() {
assertThat(PropertyUtils.getProps(null)).as("null property").isEmpty();
assertThat(PropertyUtils.getProps("")).as("empty property").isEmpty();
assertThat(PropertyUtils.getProps(";")).as("ends with ;").isEmpty();
} |
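For illustration, the same "key:value;key:value" parsing can be reproduced as a dependency-free sketch (the class and method names below are made up for the example):

import java.util.HashMap;
import java.util.Map;

public class PropsParseSketch {

    // Parses "host:localhost;port:9092" into {host=localhost, port=9092},
    // splitting entries on ';' and each entry on its first ':'.
    static Map<String, String> parse(String properties) {
        Map<String, String> configs = new HashMap<>();
        if (properties == null || properties.isEmpty()) {
            return configs;
        }
        for (String property : properties.split(";")) {
            int delimiter = property.indexOf(':');
            if (delimiter > 0) { // skip empty entries and entries without a ':'
                configs.put(property.substring(0, delimiter), property.substring(delimiter + 1));
            }
        }
        return configs;
    }

    public static void main(String[] args) {
        System.out.println(parse("host:localhost;port:9092")); // {port=9092, host=localhost}
        System.out.println(parse(";"));                        // {} -- matches the ";" test case
    }
}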
@PostConstruct
public void synchronize() throws IOException {
try {
synchronizeConfig();
} catch (NonTransientDataAccessException | StaleStateException e) {
logger.error("Database exception while synchronizing configurations", e);
}
} | @Test
public void test() throws IOException {
Configuration conf1 = new Configuration();
conf1.setName("1");
conf1.setValue("1");
conf1.setDefaultValue("2"); // updated defaultValue
Configuration conf2 = new Configuration();
conf2.setName("2");
conf2.setValue("200");
conf2.setDefaultValue("2"); // existing conf
Configuration conf3 = new Configuration();
conf3.setName("3");
conf3.setValue("3");
Configuration conf4 = new Configuration();
conf4.setName("4");
conf4.setValue("4");
conf4.setDefaultValue("4"); // new conf
List<Configuration> db = new ArrayList<>(); // removed conf
db.add(conf1);
db.add(conf2);
db.add(conf3);
Mockito.when(repo.findAll()).thenReturn(db);
Mockito.when(repo.save(Mockito.isA(Configuration.class))).thenReturn(null);
service.synchronize();
Mockito.verify(repo, Mockito.times(1)).delete(conf3);
Mockito.verify(repo, Mockito.times(0)).delete(conf1);
Mockito.verify(repo, Mockito.times(0)).delete(conf2);
Mockito.verify(repo, Mockito.times(0)).save(conf2);
Mockito.verify(repo, Mockito.times(1)).save(conf1);
} |
public ClosestPointOfApproach closestPointOfApproach() {
checkState(
point1.time().equals(point2.time()),
"This function can only be applied to points at the same Instant"
);
Vector initialVectorBetweenPoints = vectorBetweenPointsInNm();
Vector velocityDifference = velocityInKnots(point2).minus(velocityInKnots(point1));
//will be negative if aircraft are diverging
double timeOfMinDistanceInHours = timeToProjectedMinDistanceInHours(
initialVectorBetweenPoints,
velocityDifference
);
if (!Double.isFinite(timeOfMinDistanceInHours) || timeOfMinDistanceInHours <= 0) {
return new ClosestPointOfApproach(Duration.ZERO, lateralDistance());
}
long timeOfMinDistanceInMillis = (long) (timeOfMinDistanceInHours * 1000 * 60 * 60);
Vector vectorBetweenPointsAtCpa = initialVectorBetweenPoints.plus(
velocityDifference.times(timeOfMinDistanceInHours)
);
return new ClosestPointOfApproach(
Duration.ofMillis(timeOfMinDistanceInMillis),
Distance.ofNauticalMiles(vectorBetweenPointsAtCpa.magnitude())
);
} | @Test
public void closestPointOfApproachRequiresPointsFromTheSameTime() {
Point p1 = Point.builder().latLong(0.0, 0.0).time(EPOCH).build();
Point p2 = Point.builder().latLong(0.0, 0.0).time(EPOCH.plusSeconds(1)).build();
PointPair points = new PointPair(p1, p2);
assertThrows(
IllegalStateException.class,
() -> points.closestPointOfApproach(),
"The points have to be from the same time."
);
} |
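For reference, the projected time of minimum distance used above is the textbook closest-point-of-approach result: with relative position r and relative velocity v, |r + t·v| is minimized at t* = -(r·v)/(v·v), which comes out negative when the tracks are diverging. A minimal 2-D sketch, assuming timeToProjectedMinDistanceInHours computes exactly this (the helper below is illustrative, not the project's code):

public class CpaSketch {

    // Relative position r (NM) and relative velocity v (knots) between two aircraft.
    // Returns the time (hours) at which |r + t*v| is minimized: t* = -(r.v)/(v.v).
    static double timeOfMinDistanceHours(double rx, double ry, double vx, double vy) {
        double vDotV = vx * vx + vy * vy;
        if (vDotV == 0.0) {
            return Double.POSITIVE_INFINITY; // identical velocities: separation never changes
        }
        return -(rx * vx + ry * vy) / vDotV;
    }

    public static void main(String[] args) {
        // Head-on closure: 10 NM apart, closing at 2 knots -> CPA in 5 hours.
        System.out.println(timeOfMinDistanceHours(10, 0, -2, 0)); // 5.0
    }
}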
@Override
public String getName() {
return ANALYZER_NAME;
} | @Test
public void testGetName() {
assertEquals("pip Analyzer", analyzer.getName());
} |
DistributedHerderRequest addRequest(Callable<Void> action, Callback<Void> callback) {
return addRequest(0, action, callback);
} | @Test
public void testRequestProcessingOrder() {
Callable<Void> action = mock(Callable.class);
Callback<Void> callback = mock(Callback.class);
final DistributedHerder.DistributedHerderRequest req1 = herder.addRequest(100, action, callback);
final DistributedHerder.DistributedHerderRequest req2 = herder.addRequest(10, action, callback);
final DistributedHerder.DistributedHerderRequest req3 = herder.addRequest(200, action, callback);
final DistributedHerder.DistributedHerderRequest req4 = herder.addRequest(200, action, callback);
assertEquals(req2, herder.requests.pollFirst()); // lowest delay
assertEquals(req1, herder.requests.pollFirst()); // next lowest delay
assertEquals(req3, herder.requests.pollFirst()); // same delay as req4, but added first
assertEquals(req4, herder.requests.pollFirst());
} |
@Udf(description = "Returns Euler's number e raised to the power of an INT value.")
public Double exp(
@UdfParameter(
value = "exponent",
description = "the exponent to raise e to."
) final Integer exponent
) {
return exp(exponent == null ? null : exponent.doubleValue());
} | @Test
public void shouldHandlePositive() {
assertThat(udf.exp(1), is(2.718281828459045));
assertThat(udf.exp(1L), is(2.718281828459045));
assertThat(udf.exp(1.0), is(2.718281828459045));
} |
public Properties getProperties()
{
return properties;
} | @Test
public void testUriWithClientTags()
throws SQLException
{
String clientTags = "c1,c2";
PrestoDriverUri parameters = createDriverUri("presto://localhost:8080?clientTags=" + clientTags);
Properties properties = parameters.getProperties();
assertEquals(properties.getProperty(CLIENT_TAGS.getKey()), clientTags);
} |
@Override
public String getHelpMessage() {
return HELP;
} | @Test
public void shouldGetHelp() {
assertThat(command.getHelpMessage(),
containsString("server:" + System.lineSeparator() + "\tShow the current server"));
assertThat(command.getHelpMessage(),
containsString("server <server>:" + System.lineSeparator()
+ "\tChange the current server to <server>"));
} |
public static String truncateMessageLineLength(Object message) {
return truncateMessageLineLength(message, MAX_TRUNCATED_LENGTH);
} | @Test
public void truncateShortLines() throws Exception {
for (int i = 0; i < 20; i++) {
String s = "";
for (int lines = 0; lines < ThreadLocalRandom.current().nextInt(5, 10); lines++) {
s += "\n";
s += CommonUtils.randomAlphaNumString(
ThreadLocalRandom.current().nextInt(1, LogUtils.MAX_TRUNCATED_LENGTH + 1));
}
assertEquals(s, LogUtils.truncateMessageLineLength(s));
}
} |
@SuppressWarnings("deprecation")
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> left,
final KStreamHolder<K> right,
final StreamStreamJoin<K> join,
final RuntimeBuildContext buildContext,
final StreamJoinedFactory streamJoinedFactory) {
final QueryContext queryContext = join.getProperties().getQueryContext();
final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
final LogicalSchema leftSchema;
final LogicalSchema rightSchema;
final Formats rightFormats;
final Formats leftFormats;
if (join.getJoinType().equals(RIGHT)) {
leftFormats = join.getRightInternalFormats();
rightFormats = join.getLeftInternalFormats();
leftSchema = right.getSchema();
rightSchema = left.getSchema();
} else {
leftFormats = join.getLeftInternalFormats();
rightFormats = join.getRightInternalFormats();
leftSchema = left.getSchema();
rightSchema = right.getSchema();
}
final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
leftSchema,
leftFormats.getKeyFeatures(),
leftFormats.getValueFeatures()
);
final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
leftFormats.getValueFormat(),
leftPhysicalSchema,
stacker.push(LEFT_SERDE_CTX).getQueryContext()
);
final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from(
rightSchema,
rightFormats.getKeyFeatures(),
rightFormats.getValueFeatures()
);
final Serde<GenericRow> rightSerde = buildContext.buildValueSerde(
rightFormats.getValueFormat(),
rightPhysicalSchema,
stacker.push(RIGHT_SERDE_CTX).getQueryContext()
);
final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
leftFormats.getKeyFormat(),
leftPhysicalSchema,
queryContext
);
final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create(
keySerde,
leftSerde,
rightSerde,
StreamsUtil.buildOpName(queryContext),
StreamsUtil.buildOpName(queryContext)
);
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
JoinWindows joinWindows;
// The optional grace period indicates whether the user specified the GRACE PERIOD syntax in
// the join window. If so, we call the newer KStreams API ofTimeDifferenceAndGrace(), which
// enables the "spurious" results bugfix for left/outer joins (see KAFKA-10847).
if (join.getGraceMillis().isPresent()) {
joinWindows = JoinWindows.ofTimeDifferenceAndGrace(
join.getBeforeMillis(),
join.getGraceMillis().get());
} else {
joinWindows = JoinWindows.of(join.getBeforeMillis());
}
joinWindows = joinWindows.after(join.getAfterMillis());
final KStream<K, GenericRow> result;
switch (join.getJoinType()) {
case LEFT:
result = left.getStream().leftJoin(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case RIGHT:
result = right.getStream().leftJoin(
left.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case OUTER:
result = left.getStream().outerJoin(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case INNER:
result = left.getStream().join(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
default:
throw new IllegalStateException("invalid join type");
}
return left.withStream(result, joinParams.getSchema());
} | @Test
public void shouldDoInnerJoinWithGrace() {
// Given:
givenInnerJoin(L_KEY, Optional.of(GRACE));
// When:
final KStreamHolder<Struct> result = join.build(planBuilder, planInfo);
// Then:
verify(leftKStream).join(
same(rightKStream),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 0)),
eq(WINDOWS_WITH_GRACE),
same(joined)
);
verifyNoMoreInteractions(leftKStream, rightKStream, resultKStream);
assertThat(result.getStream(), is(resultKStream));
assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
} |
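A compact sketch of just the window-construction branch above, assuming the before/after/grace values arrive as java.time.Duration: JoinWindows.ofTimeDifferenceAndGrace() (Kafka Streams 3.0+) opts into the KAFKA-10847 left/outer-join fix, while the deprecated JoinWindows.of() keeps the old semantics.

import java.time.Duration;
import java.util.Optional;
import org.apache.kafka.streams.kstream.JoinWindows;

public class JoinWindowsSketch {

    // Mirrors the branch above: use ofTimeDifferenceAndGrace() only when the
    // user supplied a GRACE PERIOD, otherwise fall back to the deprecated of().
    @SuppressWarnings("deprecation")
    static JoinWindows windows(Duration before, Duration after, Optional<Duration> grace) {
        JoinWindows w = grace.isPresent()
                ? JoinWindows.ofTimeDifferenceAndGrace(before, grace.get())
                : JoinWindows.of(before);
        return w.after(after);
    }
}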
@Override
public DelayMeasurementCreate decode(ObjectNode json,
CodecContext context) {
if (json == null || !json.isObject()) {
return null;
}
JsonNode dmNode = json.get(DM);
Version version = Version.Y17312011;
if (dmNode.get(VERSION) != null) {
version = Version.valueOf(dmNode.get(VERSION).asText());
}
DmType dmCfgType = DmType.DMDMM;
if (dmNode.get(DM_CFG_TYPE) != null) {
dmCfgType = DmType.valueOf(dmNode.get(DM_CFG_TYPE).asText(DMDMM));
}
MepId remoteMepId = MepId.valueOf(
nullIsIllegal(dmNode.get(REMOTE_MEP_ID), REMOTE_MEP_ID + " is required")
.shortValue());
Priority prio = Priority.valueOf(nullIsIllegal(dmNode.get(PRIORITY),
PRIORITY + " is required in the format 'PRIOn'").asText());
try {
DmCreateBuilder builder = DefaultDelayMeasurementCreate
.builder(dmCfgType, version, remoteMepId, prio);
if (dmNode.get(MEASUREMENTS_ENABLED) != null) {
context.codec(MeasurementOption.class)
.decode((ArrayNode) (dmNode.get(MEASUREMENTS_ENABLED)), context)
.forEach(builder::addToMeasurementsEnabled);
}
if (dmNode.get(BINS_PER_FD_INTERVAL) != null) {
builder = builder.binsPerFdInterval(
(short) dmNode.get(BINS_PER_FD_INTERVAL).asInt());
}
if (dmNode.get(BINS_PER_IFDV_INTERVAL) != null) {
builder = builder.binsPerIfdvInterval(
(short) dmNode.get(BINS_PER_IFDV_INTERVAL).asInt());
}
if (dmNode.get(IFDV_SELECTION_OFFSET) != null) {
builder = builder.ifdvSelectionOffset(
(short) dmNode.get(IFDV_SELECTION_OFFSET).asInt());
}
if (dmNode.get(BINS_PER_FDR_INTERVAL) != null) {
builder = builder.binsPerFdrInterval(
(short) dmNode.get(BINS_PER_FDR_INTERVAL).asInt());
}
if (dmNode.get(FRAME_SIZE) != null) {
builder = (DmCreateBuilder) builder.frameSize(
(short) dmNode.get(FRAME_SIZE).asInt());
}
if (dmNode.get(MESSAGE_PERIOD_MS) != null) {
builder = (DmCreateBuilder) builder.messagePeriod(Duration.ofMillis(
dmNode.get(MESSAGE_PERIOD_MS).asInt()));
}
if (dmNode.get(MEASUREMENT_INTERVAL_MINS) != null) {
builder = (DmCreateBuilder) builder.measurementInterval(
Duration.ofMinutes(
dmNode.get(MEASUREMENT_INTERVAL_MINS).asInt()));
}
if (dmNode.get(ALIGN_MEASUREMENT_INTERVALS) != null) {
builder = (DmCreateBuilder) builder.alignMeasurementIntervals(
dmNode.get(ALIGN_MEASUREMENT_INTERVALS).asBoolean());
}
if (dmNode.get(ALIGN_MEASUREMENT_OFFSET_MINS) != null) {
builder = (DmCreateBuilder) builder.alignMeasurementOffset(Duration.ofMinutes(
dmNode.get(ALIGN_MEASUREMENT_OFFSET_MINS).asInt()));
}
if (dmNode.get(START_TIME) != null) {
builder = (DmCreateBuilder) builder.startTime(context.codec(StartTime.class)
.decode((ObjectNode) dmNode.get(START_TIME), context));
}
if (dmNode.get(STOP_TIME) != null) {
builder = (DmCreateBuilder) builder.stopTime(context.codec(StopTime.class)
.decode((ObjectNode) dmNode.get(STOP_TIME), context));
}
return builder.build();
} catch (SoamConfigException e) {
throw new IllegalArgumentException(e);
}
} | @Test
public void testDecodeObjectNodeCodecContext1()
throws JsonProcessingException, IOException {
String moStr = "{\"dm\": {}}";
InputStream input = new ByteArrayInputStream(
moStr.getBytes(StandardCharsets.UTF_8));
JsonNode cfg = mapper.readTree(input);
try {
context.codec(DelayMeasurementCreate.class)
.decode((ObjectNode) cfg, context);
fail("Expecting an exception");
} catch (IllegalArgumentException e) {
assertEquals("remoteMepId is required", e.getMessage());
}
} |
@Override
public void onRemovedJobGraph(JobID jobId) {
runIfStateIs(State.RUNNING, () -> handleRemovedJobGraph(jobId));
} | @Test
void onRemovedJobGraph_terminatesRunningJob() throws Exception {
jobGraphStore =
TestingJobGraphStore.newBuilder()
.setInitialJobGraphs(Collections.singleton(JOB_GRAPH))
.build();
final CompletableFuture<JobID> terminateJobFuture = new CompletableFuture<>();
final TestingDispatcherGatewayService testingDispatcherService =
TestingDispatcherGatewayService.newBuilder()
.setOnRemovedJobGraphFunction(
jobID -> {
terminateJobFuture.complete(jobID);
return FutureUtils.completedVoidFuture();
})
.build();
dispatcherServiceFactory =
createFactoryBasedOnGenericSupplier(() -> testingDispatcherService);
final ExecutorService executorService = Executors.newSingleThreadExecutor();
try (final SessionDispatcherLeaderProcess dispatcherLeaderProcess =
createDispatcherLeaderProcess()) {
dispatcherLeaderProcess.start();
// wait for the dispatcher process to be created
dispatcherLeaderProcess.getDispatcherGateway().get();
// now remove the Job from the JobGraphStore and notify the dispatcher service
jobGraphStore.globalCleanupAsync(JOB_GRAPH.getJobID(), executorService).join();
dispatcherLeaderProcess.onRemovedJobGraph(JOB_GRAPH.getJobID());
assertThat(terminateJobFuture.get()).isEqualTo(JOB_GRAPH.getJobID());
} finally {
assertThat(executorService.shutdownNow()).isEmpty();
}
} |
@Override
public AssessmentResult verify(
final String siteKey,
final Action action,
final String token,
final String ip)
throws IOException {
final DynamicCaptchaConfiguration config = dynamicConfigurationManager.getConfiguration().getCaptchaConfiguration();
final String body = String.format("response=%s&secret=%s&remoteip=%s",
URLEncoder.encode(token, StandardCharsets.UTF_8),
URLEncoder.encode(this.apiKey, StandardCharsets.UTF_8),
ip);
final HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create("https://hcaptcha.com/siteverify"))
.header("Content-Type", "application/x-www-form-urlencoded")
.POST(HttpRequest.BodyPublishers.ofString(body))
.build();
final HttpResponse<String> response;
try {
response = this.client.sendAsync(request, HttpResponse.BodyHandlers.ofString()).join();
} catch (CompletionException e) {
logger.warn("failed to make http request to hCaptcha: {}", e.getMessage());
throw new IOException(ExceptionUtils.unwrap(e));
}
if (response.statusCode() != Response.Status.OK.getStatusCode()) {
logger.warn("failure submitting token to hCaptcha (code={}): {}", response.statusCode(), response);
throw new IOException("hCaptcha http failure : " + response.statusCode());
}
final HCaptchaResponse hCaptchaResponse = SystemMapper.jsonMapper()
.readValue(response.body(), HCaptchaResponse.class);
logger.debug("received hCaptcha response: {}", hCaptchaResponse);
if (!hCaptchaResponse.success) {
for (String errorCode : hCaptchaResponse.errorCodes) {
Metrics.counter(INVALID_REASON_COUNTER_NAME,
"action", action.getActionName(),
"reason", errorCode).increment();
}
return AssessmentResult.invalid();
}
// hcaptcha uses the inverse scheme of recaptcha (for hcaptcha, a low score is less risky)
final float score = 1.0f - hCaptchaResponse.score;
if (score < 0.0f || score > 1.0f) {
logger.error("Invalid score {} from hcaptcha response {}", hCaptchaResponse.score, hCaptchaResponse);
return AssessmentResult.invalid();
}
final BigDecimal threshold = config.getScoreFloorByAction().getOrDefault(action, config.getScoreFloor());
final AssessmentResult assessmentResult = AssessmentResult.fromScore(score, threshold.floatValue());
for (String reason : hCaptchaResponse.scoreReasons) {
Metrics.counter(ASSESSMENT_REASON_COUNTER_NAME,
"action", action.getActionName(),
"reason", reason,
"score", assessmentResult.getScoreString()).increment();
}
return assessmentResult;
} | @Test
public void errorResponse() throws IOException, InterruptedException {
final FaultTolerantHttpClient httpClient = mockResponder(503, "");
final HCaptchaClient client = new HCaptchaClient("fake", httpClient, mockConfig(true, 0.5));
assertThrows(IOException.class, () -> client.verify(SITE_KEY, Action.CHALLENGE, TOKEN, null));
} |
public Buffer copyTo(OutputStream out) throws IOException {
return copyTo(out, 0, size);
} | @Test public void copyTo() throws Exception {
Buffer source = new Buffer();
source.writeUtf8("party");
Buffer target = new Buffer();
source.copyTo(target, 1, 3);
assertEquals("art", target.readUtf8());
assertEquals("party", source.readUtf8());
} |
ObjectFactory loadObjectFactory() {
Class<? extends ObjectFactory> objectFactoryClass = options.getObjectFactoryClass();
ClassLoader classLoader = classLoaderSupplier.get();
ServiceLoader<ObjectFactory> loader = ServiceLoader.load(ObjectFactory.class, classLoader);
if (objectFactoryClass == null) {
return loadSingleObjectFactoryOrDefault(loader);
}
return loadSelectedObjectFactory(loader, objectFactoryClass);
} | @Test
void test_case_7() {
io.cucumber.core.backend.Options options = () -> OtherFactory.class;
ObjectFactoryServiceLoader loader = new ObjectFactoryServiceLoader(
() -> new ServiceLoaderTestClassLoader(ObjectFactory.class,
DefaultObjectFactory.class,
OtherFactory.class,
YetAnotherFactory.class),
options);
assertThat(loader.loadObjectFactory(), instanceOf(OtherFactory.class));
} |
public void onChange(Multimap<QProfileName, ActiveRuleChange> changedProfiles, long startDate, long endDate) {
if (config.getBoolean(DISABLE_NOTIFICATION_ON_BUILT_IN_QPROFILES).orElse(false)) {
return;
}
BuiltInQPChangeNotificationBuilder builder = new BuiltInQPChangeNotificationBuilder();
changedProfiles.keySet().stream()
.map(changedProfile -> {
String profileName = changedProfile.getName();
Language language = languages.get(changedProfile.getLanguage());
Collection<ActiveRuleChange> activeRuleChanges = changedProfiles.get(changedProfile);
int newRules = (int) activeRuleChanges.stream().map(ActiveRuleChange::getType).filter(ACTIVATED::equals).count();
int updatedRules = (int) activeRuleChanges.stream().map(ActiveRuleChange::getType).filter(UPDATED::equals).count();
int removedRules = (int) activeRuleChanges.stream().map(ActiveRuleChange::getType).filter(DEACTIVATED::equals).count();
return Profile.newBuilder()
.setProfileName(profileName)
.setLanguageKey(language.getKey())
.setLanguageName(language.getName())
.setNewRules(newRules)
.setUpdatedRules(updatedRules)
.setRemovedRules(removedRules)
.setStartDate(startDate)
.setEndDate(endDate)
.build();
})
.forEach(builder::addProfile);
notificationManager.scheduleForSending(builder.build());
} | @Test
public void avoid_notification_if_configured_in_settings() {
settings.setProperty(DISABLE_NOTIFICATION_ON_BUILT_IN_QPROFILES, true);
Multimap<QProfileName, ActiveRuleChange> profiles = ArrayListMultimap.create();
Languages languages = new Languages();
addProfile(profiles, languages, ACTIVATED);
BuiltInQualityProfilesUpdateListener underTest = new BuiltInQualityProfilesUpdateListener(notificationManager, languages, settings.asConfig());
underTest.onChange(profiles, 0, 1);
verifyNoInteractions(notificationManager);
} |
@Override
public OAuth2AccessTokenDO refreshAccessToken(String refreshToken, String clientId) {
// Look up the refresh token
OAuth2RefreshTokenDO refreshTokenDO = oauth2RefreshTokenMapper.selectByRefreshToken(refreshToken);
if (refreshTokenDO == null) {
throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "Invalid refresh token");
}
// Verify that the client matches
OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
if (ObjectUtil.notEqual(clientId, refreshTokenDO.getClientId())) {
throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "Client id of the refresh token is incorrect");
}
// Remove the related access tokens
List<OAuth2AccessTokenDO> accessTokenDOs = oauth2AccessTokenMapper.selectListByRefreshToken(refreshToken);
if (CollUtil.isNotEmpty(accessTokenDOs)) {
oauth2AccessTokenMapper.deleteBatchIds(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getId));
oauth2AccessTokenRedisDAO.deleteList(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getAccessToken));
}
// If the refresh token has expired, delete it
if (DateUtils.isExpired(refreshTokenDO.getExpiresTime())) {
oauth2RefreshTokenMapper.deleteById(refreshTokenDO.getId());
throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "Refresh token has expired");
}
// Create a new access token
return createOAuth2AccessToken(refreshTokenDO, clientDO);
} | @Test
public void testRefreshAccessToken_null() {
// Prepare parameters
String refreshToken = randomString();
String clientId = randomString();
// Mock methods (none needed)
// Invoke and assert the expected exception
assertServiceException(() -> oauth2TokenService.refreshAccessToken(refreshToken, clientId),
new ErrorCode(400, "Invalid refresh token"));
} |
@Override
protected void removeRange(int fromIndex, int toIndex) {
if (fromIndex == toIndex) {
return;
}
notifyRemoval(fromIndex, toIndex - fromIndex);
super.removeRange(fromIndex, toIndex);
} | @Test
public void testRemoveEmptyRange() {
modelList.removeRange(1, 1);
verifyNoMoreInteractions(observer);
} |
@Override
public List<MailAccountDO> getMailAccountList() {
return mailAccountMapper.selectList();
} | @Test
public void testGetMailAccountList() {
// Mock data
MailAccountDO dbMailAccount01 = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount01);
MailAccountDO dbMailAccount02 = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount02);
// Prepare parameters
// Invoke
List<MailAccountDO> list = mailAccountService.getMailAccountList();
// Assert
assertEquals(2, list.size());
assertPojoEquals(dbMailAccount01, list.get(0));
assertPojoEquals(dbMailAccount02, list.get(1));
} |
public static synchronized ShowResultSet process(List<AlterClause> alterClauses, Database db,
OlapTable olapTable) throws UserException {
Preconditions.checkArgument(alterClauses.size() == 1);
AlterClause alterClause = alterClauses.get(0);
Preconditions.checkState(alterClause instanceof CompactionClause);
CompactionClause compactionClause = (CompactionClause) alterClause;
if (RunMode.isSharedDataMode()) {
Locker locker = new Locker();
locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
try {
List<Partition> allPartitions = findAllPartitions(olapTable, compactionClause);
for (Partition partition : allPartitions) {
for (PhysicalPartition physicalPartition : partition.getSubPartitions()) {
PartitionIdentifier partitionIdentifier =
new PartitionIdentifier(db.getId(), olapTable.getId(), physicalPartition.getId());
CompactionMgr compactionManager = GlobalStateMgr.getCurrentState().getCompactionMgr();
compactionManager.triggerManualCompaction(partitionIdentifier);
}
}
} finally {
locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
}
} else {
ArrayListMultimap<Long, Long> backendToTablets = ArrayListMultimap.create();
AgentBatchTask batchTask = new AgentBatchTask();
Locker locker = new Locker();
locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
try {
List<Partition> allPartitions = findAllPartitions(olapTable, compactionClause);
for (Partition partition : allPartitions) {
for (PhysicalPartition physicalPartition : partition.getSubPartitions()) {
for (MaterializedIndex index : physicalPartition.getMaterializedIndices(
MaterializedIndex.IndexExtState.VISIBLE)) {
for (Tablet tablet : index.getTablets()) {
for (Long backendId : ((LocalTablet) tablet).getBackendIds()) {
backendToTablets.put(backendId, tablet.getId());
}
}
}
}
}
} catch (Exception e) {
throw new UserException(e.getMessage());
} finally {
locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
}
for (Long backendId : backendToTablets.keySet()) {
CompactionTask task = new CompactionTask(null, backendId,
db.getId(),
olapTable.getId(),
backendToTablets.get(backendId),
((CompactionClause) alterClause).isBaseCompaction()
);
// add task to send
batchTask.addTask(task);
}
if (batchTask.getTaskNum() > 0) {
for (AgentTask task : batchTask.getAllTasks()) {
AgentTaskQueue.addTask(task);
}
AgentTaskExecutor.submit(batchTask);
LOG.debug("tablet[{}] send compaction task. num: {}", batchTask.getTaskNum());
}
}
return null;
} | @Test(expected = IllegalStateException.class)
public void testProcessNonCompactionClause() {
AlterClause nonCompactionClause = mock(AlterClause.class);
List<AlterClause> alterList = Collections.singletonList((nonCompactionClause));
try {
compactionHandler.process(alterList, db, olapTable);
} catch (UserException e) {
Assert.fail("process should not throw user exceptions here");
}
} |
public Collection<SimpleTableSegment> extractNotExistTableFromRoutineBody(final RoutineBodySegment routineBody) {
Collection<SimpleTableSegment> result = new LinkedList<>();
for (ValidStatementSegment each : routineBody.getValidStatements()) {
Optional<CreateTableStatement> createTable = each.getCreateTable();
if (createTable.isPresent() && !createTable.get().isIfNotExists()) {
result.add(createTable.get().getTable());
}
}
return result;
} | @Test
void assertNotExistTableFromRoutineBody() {
RoutineBodySegment routineBodySegment = new RoutineBodySegment(0, 3);
ValidStatementSegment validStatement = new ValidStatementSegment(0, 1);
validStatement.setSqlStatement(mock(SQLStatement.class));
routineBodySegment.getValidStatements().add(validStatement);
ValidStatementSegment newValidStatement = new ValidStatementSegment(0, 1);
newValidStatement.setSqlStatement(mock(CreateTableStatement.class));
routineBodySegment.getValidStatements().add(newValidStatement);
Collection<SimpleTableSegment> nonExistingTables = tableExtractor.extractNotExistTableFromRoutineBody(routineBodySegment);
assertThat(nonExistingTables.size(), is(1));
} |
public static boolean isClientException(SofaRpcException exception) {
int errorType = exception.getErrorType();
return errorType >= 200 && errorType < 300;
} | @Test
public void isClientException() throws Exception {
SofaRpcException exception = new SofaRpcException(RpcErrorType.SERVER_BUSY, "111");
Assert.assertFalse(ExceptionUtils.isClientException(exception));
exception = new SofaRpcException(RpcErrorType.CLIENT_TIMEOUT, "111");
Assert.assertTrue(ExceptionUtils.isClientException(exception));
} |
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
wrapped().putAll(entries);
for (final KeyValue<Bytes, byte[]> entry : entries) {
final byte[] valueAndTimestamp = entry.value;
log(entry.key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp));
}
} | @Test
public void shouldLogChangesOnPutAll() {
store.putAll(Arrays.asList(KeyValue.pair(hi, rawThere),
KeyValue.pair(hello, rawWorld)));
assertThat(collector.collected().size(), equalTo(2));
assertThat(collector.collected().get(0).key(), equalTo(hi));
assertThat(collector.collected().get(0).value(), equalTo(there.value()));
assertThat(collector.collected().get(0).timestamp(), equalTo(there.timestamp()));
assertThat(collector.collected().get(1).key(), equalTo(hello));
assertThat(collector.collected().get(1).value(), equalTo(world.value()));
assertThat(collector.collected().get(1).timestamp(), equalTo(world.timestamp()));
} |
@Override
public Optional<ComputationConfig> fetchConfig(String computationId) {
Preconditions.checkArgument(
!computationId.isEmpty(),
"computationId is empty. Cannot fetch computation config without a computationId.");
GetConfigResponse response =
applianceComputationConfigFetcher.fetchConfig(
GetConfigRequest.newBuilder().addComputations(computationId).build());
if (response == null) {
return Optional.empty();
}
for (Windmill.GetConfigResponse.SystemNameToComputationIdMapEntry entry :
response.getSystemNameToComputationIdMapList()) {
systemNameToComputationIdMap.put(entry.getSystemName(), entry.getComputationId());
}
return createComputationConfig(
// We are only fetching the config for 1 computation, so we should only be getting that
// computation back.
Iterables.getOnlyElement(response.getCloudWorksList()),
transformUserNameToStateFamilyByComputationId(response),
response.getNameMapList().stream()
.collect(toImmutableMap(NameMapEntry::getUserName, NameMapEntry::getSystemName)));
} | @Test
public void testGetComputationConfig_onFetchConfigError() {
StreamingApplianceComputationConfigFetcher configLoader =
createStreamingApplianceConfigLoader();
RuntimeException e = new RuntimeException("something bad happened.");
when(mockWindmillServer.getConfig(any())).thenThrow(e);
Throwable fetchConfigError =
assertThrows(RuntimeException.class, () -> configLoader.fetchConfig("someComputationId"));
assertThat(fetchConfigError).isSameInstanceAs(e);
} |
public static HKDF extractedFrom(byte[] salt, byte[] ikm) {
validateExtractionParams(salt, ikm);
/*
RFC 5869, Section 2.2, Extract:
HKDF-Extract(salt, IKM) -> PRK
Options:
Hash a hash function; HashLen denotes the length of the
hash function output in octets
Inputs:
salt optional salt value (a non-secret random value);
if not provided, it is set to a string of HashLen zeros.
IKM input keying material
Output:
PRK a pseudorandom key (of HashLen octets)
The output PRK is calculated as follows:
PRK = HMAC-Hash(salt, IKM)
*/
var mac = createKeyedHmacSha256(salt); // Note: HKDF is initially keyed on the salt, _not_ on ikm!
mac.update(ikm);
return new HKDF(/*PRK = */ mac.doFinal());
} | @Test
void missing_salt_to_salted_factory_function_throws_exception() {
var ikm = unhex("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b");
assertThrows(NullPointerException.class, () -> HKDF.extractedFrom(null, ikm));
assertThrows(IllegalArgumentException.class, () -> HKDF.extractedFrom(new byte[0], ikm));
} |
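The extract step above is plain HMAC-SHA256 keyed on the salt, so it can be sanity-checked with the JCE alone against the RFC 5869 Appendix A.1 test vector (salt = 0x000102…0c, IKM = 22 bytes of 0x0b, PRK beginning 0x077709…). A minimal sketch, assuming Java 17+ for HexFormat; createKeyedHmacSha256 above is presumed equivalent to the Mac setup here:

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.util.HexFormat;

public class HkdfExtractSketch {

    // PRK = HMAC-SHA256(key = salt, data = IKM), per RFC 5869 Section 2.2.
    static byte[] extract(byte[] salt, byte[] ikm) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(new SecretKeySpec(salt, "HmacSHA256"));
        return mac.doFinal(ikm);
    }

    public static void main(String[] args) throws Exception {
        HexFormat hex = HexFormat.of();
        byte[] salt = hex.parseHex("000102030405060708090a0b0c");
        byte[] ikm = hex.parseHex("0b".repeat(22));
        // RFC 5869 A.1 expects PRK = 077709362c2e32df0ddc3f0dc47bba63...
        System.out.println(hex.formatHex(extract(salt, ikm)));
    }
}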
static Schema getSchema(Class<? extends Message> clazz) {
return getSchema(ProtobufUtil.getDescriptorForClass(clazz));
} | @Test
public void testEmptySchema() {
assertEquals(
TestProtoSchemas.EMPTY_SCHEMA,
ProtoSchemaTranslator.getSchema(Proto3SchemaMessages.Empty.class));
} |
@Nullable public String linkLocalIp() {
// uses synchronized variant of double-checked locking as getting the endpoint can be expensive
if (linkLocalIp != null) return linkLocalIp;
synchronized (this) {
if (linkLocalIp == null) {
linkLocalIp = produceLinkLocalIp();
}
}
return linkLocalIp;
} | @Test void linkLocalIp_sameInstance() {
Platform platform = new Platform.Jre7();
assertThat(platform.linkLocalIp()).isSameAs(platform.linkLocalIp());
} |
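The method above is the synchronized variant of double-checked locking: an unsynchronized fast path once the field is populated, plus a lock so the expensive produceLinkLocalIp() runs at most once. Omitting volatile is defensible there only because String is immutable with final fields; a generic reusable form of the idiom would use volatile, as in this sketch (names illustrative):

import java.util.function.Supplier;

public class LazyValue<T> {

    private final Supplier<T> supplier;
    private volatile T value; // volatile makes the idiom safe for arbitrary T

    LazyValue(Supplier<T> supplier) {
        this.supplier = supplier;
    }

    T get() {
        T result = value;        // unsynchronized fast path
        if (result != null) {
            return result;
        }
        synchronized (this) {    // slow path: compute at most once
            if (value == null) {
                value = supplier.get();
            }
            return value;
        }
    }
}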
@Override
protected void requestSubpartitions() throws IOException {
boolean retriggerRequest = false;
boolean notifyDataAvailable = false;
// The lock is required to request only once in the presence of retriggered requests.
synchronized (requestLock) {
checkState(!isReleased, "LocalInputChannel has been released already");
if (subpartitionView == null) {
LOG.debug(
"{}: Requesting LOCAL subpartitions {} of partition {}. {}",
this,
consumedSubpartitionIndexSet,
partitionId,
channelStatePersister);
try {
ResultSubpartitionView subpartitionView =
partitionManager.createSubpartitionView(
partitionId, consumedSubpartitionIndexSet, this);
if (subpartitionView == null) {
throw new IOException("Error requesting subpartition.");
}
// make the subpartition view visible
this.subpartitionView = subpartitionView;
// check if the channel was released in the meantime
if (isReleased) {
subpartitionView.releaseAllResources();
this.subpartitionView = null;
} else {
notifyDataAvailable = true;
}
} catch (PartitionNotFoundException notFound) {
if (increaseBackoff()) {
retriggerRequest = true;
} else {
throw notFound;
}
}
}
}
if (notifyDataAvailable) {
notifyDataAvailable(this.subpartitionView);
}
// Do this outside of the lock scope as this might lead to a
// deadlock with a concurrent release of the channel via the
// input gate.
if (retriggerRequest) {
inputGate.retriggerPartitionRequest(partitionId.getPartitionId(), channelInfo);
}
} | @Test
void testRetriggerPartitionRequestWhilePartitionNotFound() throws Exception {
final SingleInputGate inputGate = createSingleInputGate(1);
final LocalInputChannel localChannel =
createLocalInputChannel(inputGate, new ResultPartitionManager(), 1, 1);
inputGate.setInputChannels(localChannel);
localChannel.requestSubpartitions();
// The timer should be initialized at the first time of retriggering partition request.
assertThat(inputGate.getRetriggerLocalRequestTimer()).isNotNull();
} |
public int getQueueInfoFailedRetrieved() {
return numGetQueueInfoFailedRetrieved.value();
} | @Test
public void testGetQueueInfoFailed() {
long totalBadBefore = metrics.getQueueInfoFailedRetrieved();
badSubCluster.getQueueInfo();
Assert.assertEquals(totalBadBefore + 1,
metrics.getQueueInfoFailedRetrieved());
} |
public static void bindEnvironment(ScriptEngine engine, String requestContent, Map<String, Object> requestContext,
StateStore stateStore) {
// Delegate to the extended overload, passing no additional binding argument.
bindEnvironment(engine, requestContent, requestContext, stateStore, null);
} | @Test
void testRequestContentIsBound() {
String script = """
return mockRequest.requestContent;
""";
ScriptEngineManager sem = new ScriptEngineManager();
String body = "content";
try {
// Evaluating request with script coming from operation dispatcher rules.
ScriptEngine se = sem.getEngineByExtension("groovy");
ScriptEngineBinder.bindEnvironment(se, body, null, null);
String result = (String) se.eval(script);
assertEquals(body, result);
} catch (Exception e) {
fail("Exception should no be thrown");
}
} |
public static <T> T getFirst(Iterable<T> iterable) {
if (iterable instanceof List) {
final List<T> list = (List<T>) iterable;
return CollUtil.isEmpty(list) ? null: list.get(0);
}
return getFirst(getIter(iterable));
} | @Test
public void getFirstTest() {
assertNull(IterUtil.getFirst((Iterable<Object>) null));
assertNull(IterUtil.getFirst(CollUtil.newArrayList()));
assertEquals("1", IterUtil.getFirst(CollUtil.newArrayList("1", "2", "3")));
final ArrayDeque<String> deque = new ArrayDeque<>();
deque.add("3");
deque.add("4");
assertEquals("3", IterUtil.getFirst(deque));
} |
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
@Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
return containsAtLeastEntriesIn(accumulateMultimap(k0, v0, rest));
} | @Test
public void containsAtLeastFailureWithEmptyStringMissing() {
expectFailureWhenTestingThat(ImmutableMultimap.of("key", "value")).containsAtLeast("", "a");
assertFailureKeys("missing", "---", "expected to contain at least", "but was");
assertFailureValue("missing", "{\"\" (empty String)=[a]}");
} |
public String getNetworkAddress() {
return this.startAddress.getHostAddress();
} | @Test
public void getNetworkAddress() {
assertThat(ipSubnet.getNetworkAddress()).isEqualTo(networkAddress);
} |
public int available() {
return buffer != null ? buffer.length - pos : 0;
} | @Test
public void testAvailable() {
int available = out.available();
out.buffer = null;
int availableWhenBufferNull = out.available();
assertEquals(10, available);
assertEquals(0, availableWhenBufferNull);
} |
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowFunctionStatusStatement) {
return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowProcedureStatusStatement) {
return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowTablesStatement) {
return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType()));
}
return Optional.empty();
} | @Test
void assertCreateWithOtherSelectStatementForNoResource() {
initProxyContext(Collections.emptyMap());
MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class);
when(selectStatement.getFrom()).thenReturn(Optional.empty());
ProjectionsSegment projectionsSegment = mock(ProjectionsSegment.class);
when(projectionsSegment.getProjections()).thenReturn(Collections.singletonList(new ExpressionProjectionSegment(0, 10, "CURRENT_DATE()")));
when(selectStatement.getProjections()).thenReturn(projectionsSegment);
when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement);
Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "select CURRENT_DATE()", null, Collections.emptyList());
assertTrue(actual.isPresent());
assertThat(actual.get(), instanceOf(NoResourceShowExecutor.class));
} |
public Iterable<TimestampedValue<T>> read() {
checkState(
!isClosed,
"OrderedList user state is no longer usable because it is closed for %s",
requestTemplate.getStateKey());
return readRange(Instant.ofEpochMilli(Long.MIN_VALUE), Instant.ofEpochMilli(Long.MAX_VALUE));
} | @Test
public void testNoPersistedValues() throws Exception {
FakeBeamFnStateClient fakeClient = new FakeBeamFnStateClient(Collections.emptyMap());
OrderedListUserState<String> userState =
new OrderedListUserState<>(
Caches.noop(),
fakeClient,
"instructionId",
createOrderedListStateKey("A"),
StringUtf8Coder.of());
assertThat(userState.read(), is(emptyIterable()));
} |
Collection<IndexRelationType> getRelations() {
return relations.values();
} | @Test
@UseDataProvider("indexWithAndWithoutRelations")
public void getRelations_returns_empty_if_no_relation_added(Index index) {
NewIndex<?> newIndex = new SimplestNewIndex(IndexType.main(index, "foo"), defaultSettingsConfiguration);
assertThat(newIndex.getRelations()).isEmpty();
} |
@VisibleForTesting
static ExternalResourceInfoProvider createStaticExternalResourceInfoProvider(
Map<String, Long> externalResourceAmountMap,
Map<String, ExternalResourceDriver> externalResourceDrivers) {
final Map<String, Set<? extends ExternalResourceInfo>> externalResources = new HashMap<>();
for (Map.Entry<String, ExternalResourceDriver> externalResourceDriverEntry :
externalResourceDrivers.entrySet()) {
final String resourceName = externalResourceDriverEntry.getKey();
final ExternalResourceDriver externalResourceDriver =
externalResourceDriverEntry.getValue();
if (externalResourceAmountMap.containsKey(resourceName)) {
try {
final Set<? extends ExternalResourceInfo> externalResourceInfos;
externalResourceInfos =
externalResourceDriver.retrieveResourceInfo(
externalResourceAmountMap.get(resourceName));
externalResources.put(resourceName, externalResourceInfos);
} catch (Exception e) {
LOG.warn(
"Failed to retrieve information of external resource {}.",
resourceName,
e);
}
} else {
LOG.warn("Could not found legal amount configuration for {}.", resourceName);
}
}
return new StaticExternalResourceInfoProvider(externalResources);
} | @Test
public void testGetExternalResourceInfoProvider() {
final Map<String, Long> externalResourceAmountMap = new HashMap<>();
final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>();
externalResourceAmountMap.put(RESOURCE_NAME_1, RESOURCE_AMOUNT_1);
externalResourceDrivers.put(RESOURCE_NAME_1, new TestingExternalResourceDriver());
final StaticExternalResourceInfoProvider externalResourceInfoProvider =
(StaticExternalResourceInfoProvider)
ExternalResourceUtils.createStaticExternalResourceInfoProvider(
externalResourceAmountMap, externalResourceDrivers);
assertNotNull(externalResourceInfoProvider.getExternalResources().get(RESOURCE_NAME_1));
} |
@Override
public UserSession authenticate(HttpRequest request, HttpResponse response) {
UserAuthResult userAuthResult = loadUser(request, response);
if (nonNull(userAuthResult.getUserDto())) {
if (TOKEN.equals(userAuthResult.getAuthType())) {
return userSessionFactory.create(userAuthResult.getUserDto(), userAuthResult.getTokenDto());
}
boolean isAuthenticatedBrowserSession = JWT.equals(userAuthResult.getAuthType());
return userSessionFactory.create(userAuthResult.getUserDto(), isAuthenticatedBrowserSession);
} else if (GITHUB_WEBHOOK.equals(userAuthResult.getAuthType())) {
return userSessionFactory.createGithubWebhookUserSession();
}
return userSessionFactory.createAnonymous();
} | @Test
public void authenticate_from_basic_token() {
when(request.getHeader("Authorization")).thenReturn("Basic dGVzdDo=");
when(userTokenAuthentication.getUserToken("test")).thenReturn(A_USER_TOKEN);
when(userTokenAuthentication.authenticate(request)).thenReturn(Optional.of(new UserAuthResult(A_USER, A_USER_TOKEN, UserAuthResult.AuthType.TOKEN)));
when(httpHeadersAuthentication.authenticate(request, response)).thenReturn(Optional.empty());
when(jwtHttpHandler.validateToken(request, response)).thenReturn(Optional.empty());
UserSession userSession = underTest.authenticate(request, response);
assertThat(userSession.getUuid()).isEqualTo(A_USER.getUuid());
assertThat(userSession.isAuthenticatedBrowserSession()).isFalse();
verify(jwtHttpHandler).validateToken(request, response);
verify(userTokenAuthentication).authenticate(request);
verify(response, never()).setStatus(anyInt());
} |
public GsonAzureRepoList getRepos(String serverUrl, String token, @Nullable String projectName) {
String url = Stream.of(getTrimmedUrl(serverUrl), projectName, "_apis/git/repositories?" + API_VERSION_3)
.filter(StringUtils::isNotBlank)
.collect(joining("/"));
return doGet(token, url, r -> buildGson().fromJson(r.body().charStream(), GsonAzureRepoList.class));
} | @Test
public void get_repos_without_project_name() throws InterruptedException {
enqueueResponse(200, "{ \"value\": [], \"count\": 0 }");
GsonAzureRepoList repos = underTest.getRepos(server.url("").toString(), "token", null);
RecordedRequest request = server.takeRequest(10, TimeUnit.SECONDS);
String azureDevOpsUrlCall = request.getRequestUrl().toString();
assertThat(azureDevOpsUrlCall).isEqualTo(server.url("") + "_apis/git/repositories?api-version=3.0");
assertThat(request.getMethod()).isEqualTo("GET");
assertThat(repos.getValues()).isEmpty();
} |
@VisibleForTesting
ZonedDateTime parseZoned(final String text, final ZoneId zoneId) {
final TemporalAccessor parsed = formatter.parse(text);
final ZoneId parsedZone = parsed.query(TemporalQueries.zone());
ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply(
ObjectUtils.defaultIfNull(parsedZone, zoneId));
for (final TemporalField override : ChronoField.values()) {
if (parsed.isSupported(override)) {
if (!resolved.isSupported(override)) {
throw new KsqlException(
"Unsupported temporal field in timestamp: " + text + " (" + override + ")");
}
final long value = parsed.getLong(override);
if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) {
if (!parsed.isSupported(ChronoField.YEAR)) {
throw new KsqlException("Leap day cannot be parsed without supplying the year field");
}
// eagerly override year, to avoid mismatch with epoch year, which is not a leap year
resolved = resolved.withYear(parsed.get(ChronoField.YEAR));
}
resolved = resolved.with(override, value);
}
}
return resolved;
} | @Test
public void shouldParseLeapDay() {
// Given
final String format = "yyyy-MM-dd'T'HH:mm:ss.SSS";
final String timestamp = "2012-12-31T23:59:58.660";
// When
final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID);
// Then
assertThat(ts, is(sameInstant(NEW_YEARS_EVE_2012)));
} |
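The eager year override in parseZoned exists because the resolver's base (DEFAULT_ZONED_DATE_TIME, anchored at the epoch year 1970, which is not a leap year) cannot represent Feb 29 / day-of-year 366 without the parsed year applied first. The effect can be reproduced with plain java.time, independently of this parser:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.format.ResolverStyle;
import java.time.temporal.ChronoField;

public class LeapDaySketch {

    private static DateTimeFormatter monthDayWithDefaultYear(int year) {
        return new DateTimeFormatterBuilder()
                .appendPattern("MM-dd")
                .parseDefaulting(ChronoField.YEAR, year)
                .toFormatter()
                .withResolverStyle(ResolverStyle.STRICT);
    }

    public static void main(String[] args) {
        // Resolving Feb 29 against a default year of 1970 (not a leap year) fails...
        try {
            LocalDate.parse("02-29", monthDayWithDefaultYear(1970));
        } catch (Exception e) {
            System.out.println("unresolvable: " + e.getMessage());
        }
        // ...while supplying a leap year first (as the parser above does) succeeds.
        System.out.println(LocalDate.parse("02-29", monthDayWithDefaultYear(2012))); // 2012-02-29
    }
}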
@VisibleForTesting
String importSingleAlbum(UUID jobId, TokensAndUrlAuthData authData, PhotoAlbum inputAlbum)
throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException {
// Set up album
GoogleAlbum googleAlbum = new GoogleAlbum();
googleAlbum.setTitle(GooglePhotosImportUtils.cleanAlbumTitle(inputAlbum.getName()));
GoogleAlbum responseAlbum =
getOrCreatePhotosInterface(jobId, authData).createAlbum(googleAlbum);
return responseAlbum.getId();
} | @Test
public void importAlbumWithITString()
throws PermissionDeniedException, InvalidTokenException, IOException, UploadErrorException {
String albumId = "Album Id";
String albumName = "Album Name";
String albumDescription = "Album Description";
PhotoAlbum albumModel = new PhotoAlbum(albumId, albumName, albumDescription);
PortabilityJob portabilityJob = Mockito.mock(PortabilityJob.class);
Mockito.when(portabilityJob.userLocale()).thenReturn("it");
JobStore jobStore = Mockito.mock(JobStore.class);
Mockito.when(jobStore.findJob(uuid)).thenReturn(portabilityJob);
GoogleAlbum responseAlbum = new GoogleAlbum();
responseAlbum.setId(NEW_ALBUM_ID);
Mockito.when(googlePhotosInterface.createAlbum(any(GoogleAlbum.class)))
.thenReturn(responseAlbum);
GooglePhotosImporter sut =
new GooglePhotosImporter(
null, jobStore, null, null, googlePhotosInterface, connectionProvider, monitor, 1.0);
sut.importSingleAlbum(uuid, null, albumModel);
ArgumentCaptor<GoogleAlbum> albumArgumentCaptor = ArgumentCaptor.forClass(GoogleAlbum.class);
Mockito.verify(googlePhotosInterface).createAlbum(albumArgumentCaptor.capture());
assertEquals(albumArgumentCaptor.getValue().getTitle(), albumName);
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
if(directory.isRoot()) {
final AttributedList<Path> list = new AttributedList<>();
list.add(MYFILES_NAME);
list.add(SHARED_NAME);
listener.chunk(directory, list);
return list;
}
else if(new SimplePathPredicate(SHARED_NAME).test(directory)) {
return new SharedWithMeListService(session, fileid).list(directory, listener);
}
else {
return new GraphItemListService(session, fileid).list(directory, listener);
}
} | @Test
public void testListMyFiles() throws Exception {
final AttributedList<Path> list = new OneDriveListService(session, fileid).list(OneDriveListService.MYFILES_NAME, new DisabledListProgressListener());
assertFalse(list.isEmpty());
for(Path f : list) {
assertEquals(OneDriveListService.MYFILES_NAME, f.getParent());
}
} |
static List<InetAddress> resolve(String host, HostResolver hostResolver) throws UnknownHostException {
InetAddress[] addresses = hostResolver.resolve(host);
List<InetAddress> result = filterPreferredAddresses(addresses);
if (log.isDebugEnabled())
log.debug("Resolved host {} as {}", host, result.stream().map(InetAddress::getHostAddress).collect(Collectors.joining(",")));
return result;
} | @Test
public void testResolveDnsLookup() throws UnknownHostException {
InetAddress[] addresses = new InetAddress[] {
InetAddress.getByName("198.51.100.0"), InetAddress.getByName("198.51.100.5")
};
HostResolver hostResolver = new AddressChangeHostResolver(addresses, addresses);
assertEquals(asList(addresses), ClientUtils.resolve("kafka.apache.org", hostResolver));
} |
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
// Check the request path and set the cache-control header if we find
// it matches what we're looking for.
if (request instanceof HttpServletRequest) {
if (isCacheableResourceRequest((HttpServletRequest)request)) {
HttpServletResponse httpServletResponse = (HttpServletResponse) response;
//
// Set the expiry to one year.
//
// Note that this does NOT mean that the browser will never send a request
// for these resources. If you click reload in the browser (by default in Chrome) it will
// send an If-Modified-Since request to the server (at a minimum), which means you at
// least have the request overhead even if it results in a 304 response. Setting the
// Cache-Control header helps for normal browsing (clicking on links, bookmarks etc),
// in which case the local cache is fully used (no If-Modified-Since requests for
// non-stale resources).
//
httpServletResponse.setHeader("Cache-Control", "public, max-age=31536000");
response = new HttpServletResponseWrapper(httpServletResponse) {
@Override
public void setHeader(String name, String value) {
// Block the setting of the legacy HTTP/1.0 "Expires" header.
// Note that, strictly speaking, this should not be required because
// the HTTP spec dictates that the Cache-Control header takes priority.
// Lets eliminate it anyway in case a browser/intermediary doesn't comply.
if (!name.equalsIgnoreCase("Expires")) {
super.setHeader(name, value);
}
}
};
}
}
// continue to execute the filter chain as normal
chain.doFilter(request, response);
} | @Test
public void test_cache_control_not_set() throws IOException, ServletException {
Mockito.when(servletRequest.getPathInfo()).thenReturn("/a/bc.js");
resourceCacheControl.doFilter(servletRequest, servletResponse, filterChain);
Mockito.verify(servletResponse, Mockito.never()).setHeader("Cache-Control", "public, max-age=31536000");
} |
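The anonymous wrapper above can be lifted into a small named class; here is a sketch of just the Expires-blocking behavior (written against the jakarta.servlet API as an assumption; the original may target javax.servlet):

import jakarta.servlet.http.HttpServletResponse;
import jakarta.servlet.http.HttpServletResponseWrapper;

public class NoExpiresResponse extends HttpServletResponseWrapper {

    public NoExpiresResponse(HttpServletResponse response) {
        super(response);
    }

    @Override
    public void setHeader(String name, String value) {
        // Cache-Control takes priority over Expires per the HTTP spec;
        // drop Expires anyway for non-compliant browsers/intermediaries.
        if (!"Expires".equalsIgnoreCase(name)) {
            super.setHeader(name, value);
        }
    }
}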
@Override
public int partition(RowData row, int numPartitions) {
// reuse the sortKey and rowDataWrapper
sortKey.wrap(rowDataWrapper.wrap(row));
return SketchUtil.partition(sortKey, numPartitions, rangeBounds, comparator);
} | @Test
public void testRangePartitioningWithRangeBounds() {
SketchRangePartitioner partitioner =
new SketchRangePartitioner(TestFixtures.SCHEMA, SORT_ORDER, RANGE_BOUNDS);
GenericRowData row =
GenericRowData.of(StringData.fromString("data"), 0L, StringData.fromString("2023-06-20"));
for (long id = 0; id < MAX_ID; ++id) {
row.setField(1, id);
int partition = partitioner.partition(row, NUM_PARTITIONS);
assertThat(partition).isGreaterThanOrEqualTo(0).isLessThan(NUM_PARTITIONS);
int expectedPartition = id == 0L ? 0 : (int) ((id - 1) / RANGE_STEP);
assertThat(partition).isEqualTo(expectedPartition);
}
} |
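The expectedPartition arithmetic in this test presumes range bounds at multiples of RANGE_STEP, with a key at or below a bound landing in that bound's partition. A generic sketch of such a lookup, assuming SketchUtil.partition is essentially a binary search over the sorted bounds (an illustration, not the library's code):

import java.util.Arrays;
import java.util.Comparator;

public class RangePartitionSketch {

    // numPartitions = sortedBounds.length + 1; keys <= sortedBounds[i] (and above
    // the previous bound) go to partition i, keys above every bound to the last one.
    static <T> int partition(T key, T[] sortedBounds, Comparator<T> cmp) {
        int pos = Arrays.binarySearch(sortedBounds, key, cmp);
        // binarySearch returns -(insertionPoint) - 1 when the key is absent.
        return pos >= 0 ? pos : -pos - 1;
    }

    public static void main(String[] args) {
        Long[] bounds = {100L, 200L, 300L}; // 4 partitions
        System.out.println(partition(100L, bounds, Comparator.naturalOrder())); // 0
        System.out.println(partition(101L, bounds, Comparator.naturalOrder())); // 1
        System.out.println(partition(999L, bounds, Comparator.naturalOrder())); // 3
    }
}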
public DataTableDiff calculateDiffs() {
Map<Integer, Delta> deltasByLine = createDeltasByLine();
return createTableDiff(deltasByLine);
} | @Test
void should_not_fail_with_out_of_memory() {
DataTable expected = TableParser.parse("" +
"| I'm going to work |\n");
List<List<String>> actual = new ArrayList<>();
actual.add(singletonList("I just woke up"));
actual.add(singletonList("I'm going to work"));
new TableDiffer(expected, DataTable.create(actual)).calculateDiffs();
} |
@Override
public RowData nextRecord(RowData reuse) {
// return the next row
row.setRowId(this.nextRow++);
return row;
} | @Test
void testReadFileWithSelectFields() throws IOException {
FileInputSplit[] splits = createSplits(testFileFlat, 4);
long cnt = 0;
long totalF0 = 0;
Map<String, Object> partSpec = new HashMap<>();
partSpec.put("f1", 1);
partSpec.put("f3", 3L);
partSpec.put("f5", "f5");
partSpec.put("f8", BigDecimal.valueOf(5.333));
partSpec.put("f13", "f13");
// read all splits
for (FileInputSplit split : splits) {
try (OrcColumnarRowSplitReader reader =
createReader(
new int[] {8, 1, 3, 0, 5, 2},
new DataType[] {
/* 0 */ DataTypes.INT(),
/* 1 */ DataTypes.INT(), // part-1
/* 2 */ DataTypes.STRING(),
/* 3 */ DataTypes.BIGINT(), // part-2
/* 4 */ DataTypes.STRING(),
/* 5 */ DataTypes.STRING(), // part-3
/* 6 */ DataTypes.STRING(),
/* 7 */ DataTypes.INT(),
/* 8 */ DataTypes.DECIMAL(10, 5), // part-4
/* 9 */ DataTypes.STRING(),
                            /* 11 */ DataTypes.INT(),
                            /* 12 */ DataTypes.INT(),
                            /* 13 */ DataTypes.STRING(), // part-5
                            /* 14 */ DataTypes.INT()
},
partSpec,
split)) {
// read and count all rows
while (!reader.reachedEnd()) {
RowData row = reader.nextRecord(null);
// data values
assertThat(row.isNullAt(3)).isFalse();
assertThat(row.isNullAt(5)).isFalse();
totalF0 += row.getInt(3);
assertThat(row.getString(5).toString()).isNotNull();
// part values
assertThat(row.isNullAt(0)).isFalse();
assertThat(row.isNullAt(1)).isFalse();
assertThat(row.isNullAt(2)).isFalse();
assertThat(row.isNullAt(4)).isFalse();
assertThat(row.getDecimal(0, 10, 5))
.isEqualTo(DecimalDataUtils.castFrom(5.333, 10, 5));
assertThat(row.getInt(1)).isEqualTo(1);
assertThat(row.getLong(2)).isEqualTo(3);
assertThat(row.getString(4).toString()).isEqualTo("f5");
cnt++;
}
}
}
// check that all rows have been read
assertThat(cnt).isEqualTo(1920800);
assertThat(totalF0).isEqualTo(1844737280400L);
} |
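The two expected totals are internally consistent: 1,844,737,280,400 is exactly n(n+1)/2 for n = 1,920,800, i.e. the sum 1 + 2 + ... + n. That strongly suggests f0 simply enumerates the rows of the flat test file, though the generator isn't shown here, so treat that as an inference:

// Sanity check of the expected totals, assuming f0 runs 1..n over the n rows
// of the flat test file (an inference -- the file generator isn't shown here).
long n = 1_920_800L;
long expectedTotalF0 = n * (n + 1) / 2;
System.out.println(expectedTotalF0); // 1844737280400, matching totalF0 above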
public static Set<Integer> mapColumnToNode(Set<Integer> columnIndexes, List<OrcType> orcTypes)
{
requireNonNull(columnIndexes, "columnIndexes is null");
requireNonNull(orcTypes, "orcTypes is null");
if (columnIndexes.isEmpty()) {
return ImmutableSet.of();
}
OrcType rootType = orcTypes.get(0);
int fieldCount = rootType.getFieldCount();
return columnIndexes.stream()
.filter(columnIndex -> columnIndex < fieldCount)
.map(rootType::getFieldTypeIndex)
.collect(toImmutableSet());
} | @Test
public void testMapColumnToNodeEmpty()
{
Set<Integer> actual = mapColumnToNode(ImmutableSet.of(), ImmutableList.of());
assertTrue(actual.isEmpty());
} |
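What the mapping does in the non-empty case: each top-level column index is translated to the index of that field's type node in the flattened type list. A toy illustration with a hypothetical stand-in for OrcType (the real Presto class is built by the ORC reader, so its constructor isn't reproduced here):

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Toy stand-in: the root struct knows, for each field, the index of the
// field's type node in the flattened pre-order type list.
record ToyRootType(List<Integer> fieldTypeIndexes) {
    int getFieldCount() { return fieldTypeIndexes.size(); }
    int getFieldTypeIndex(int field) { return fieldTypeIndexes.get(field); }
}

class MapColumnToNodeDemo {
    public static void main(String[] args) {
        // Root struct with 3 fields whose type nodes sit at positions 1, 2 and 5
        // of the flattened list (field 1 could be a struct occupying nodes 2-4).
        ToyRootType root = new ToyRootType(List.of(1, 2, 5));
        Set<Integer> nodes = Set.of(0, 2).stream()
                .filter(c -> c < root.getFieldCount())
                .map(root::getFieldTypeIndex)
                .collect(Collectors.toSet());
        System.out.println(nodes); // [1, 5] (iteration order unspecified)
    }
}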
@Override
public void updateApiErrorLogProcess(Long id, Integer processStatus, Long processUserId) {
ApiErrorLogDO errorLog = apiErrorLogMapper.selectById(id);
if (errorLog == null) {
throw exception(API_ERROR_LOG_NOT_FOUND);
}
if (!ApiErrorLogProcessStatusEnum.INIT.getStatus().equals(errorLog.getProcessStatus())) {
throw exception(API_ERROR_LOG_PROCESSED);
}
// Mark the log as processed
apiErrorLogMapper.updateById(ApiErrorLogDO.builder().id(id).processStatus(processStatus)
.processUserId(processUserId).processTime(LocalDateTime.now()).build());
} | @Test
public void testUpdateApiErrorLogProcess_processed() {
// Prepare data: a log already in DONE state
ApiErrorLogDO apiErrorLogDO = randomPojo(ApiErrorLogDO.class,
o -> o.setProcessStatus(ApiErrorLogProcessStatusEnum.DONE.getStatus()));
apiErrorLogMapper.insert(apiErrorLogDO);
// Prepare parameters
Long id = apiErrorLogDO.getId();
Integer processStatus = randomEle(ApiErrorLogProcessStatusEnum.values()).getStatus();
Long processUserId = randomLongId();
// Invoke and assert the expected exception
assertServiceException(() ->
apiErrorLogService.updateApiErrorLogProcess(id, processStatus, processUserId),
API_ERROR_LOG_PROCESSED);
} |
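For contrast, a minimal sketch of the happy path: a log still in INIT state gets marked, and the row reflects the new status. It assumes the same in-memory mapper wiring as the test above and the usual Lombok getters on ApiErrorLogDO:

@Test
public void testUpdateApiErrorLogProcess_success() {
    // Prepare data: only a log in INIT state may be processed
    ApiErrorLogDO apiErrorLogDO = randomPojo(ApiErrorLogDO.class,
            o -> o.setProcessStatus(ApiErrorLogProcessStatusEnum.INIT.getStatus()));
    apiErrorLogMapper.insert(apiErrorLogDO);
    Long processUserId = randomLongId();
    // Invoke
    apiErrorLogService.updateApiErrorLogProcess(apiErrorLogDO.getId(),
            ApiErrorLogProcessStatusEnum.DONE.getStatus(), processUserId);
    // Assert: status, processing user and processing time were all written back
    ApiErrorLogDO dbLog = apiErrorLogMapper.selectById(apiErrorLogDO.getId());
    assertEquals(ApiErrorLogProcessStatusEnum.DONE.getStatus(), dbLog.getProcessStatus());
    assertEquals(processUserId, dbLog.getProcessUserId());
    assertNotNull(dbLog.getProcessTime());
}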
public Set<String> getReferencedSearchFiltersIds(final Collection<UsesSearchFilters> searchFiltersOwners) {
return searchFiltersOwners
.stream()
.map(UsesSearchFilters::filters)
.filter(Objects::nonNull)
.flatMap(Collection::stream)
.filter(usedSearchFilter -> usedSearchFilter instanceof ReferencedSearchFilter)
.map(usedSearchFilter -> (ReferencedSearchFilter) usedSearchFilter)
.map(ReferencedSearchFilter::id)
.collect(Collectors.toSet());
} | @Test
void testGetReferencedSearchFiltersIdsReturnsEmptyCollectionOnEmptyOwners() {
final Set<String> referencedSearchFiltersIds = toTest.getReferencedSearchFiltersIds(ImmutableSet.of());
assertTrue(referencedSearchFiltersIds.isEmpty());
} |
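A hedged sketch of the non-empty case, stubbing the interfaces with Mockito so no concrete filter classes are needed (mock-based, so it exercises only the stream pipeline, not real filter objects):

@Test
void testGetReferencedSearchFiltersIdsExtractsIdsAndSkipsOtherFilters() {
    // A referenced filter contributes its id...
    ReferencedSearchFilter referenced = mock(ReferencedSearchFilter.class);
    when(referenced.id()).thenReturn("filter-42");
    // ...while any other UsedSearchFilter implementation is ignored.
    UsedSearchFilter inline = mock(UsedSearchFilter.class);
    UsesSearchFilters owner = mock(UsesSearchFilters.class);
    when(owner.filters()).thenReturn(List.of(referenced, inline));
    assertEquals(Set.of("filter-42"), toTest.getReferencedSearchFiltersIds(List.of(owner)));
}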