focal_method | test_case
---|---|
@Deprecated
private static String compatibleTypeDeclare(String declare) {
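// Map Java-style primitive type names to their SQL equivalents; any other declaration passes through unchanged.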
switch (declare.trim().toUpperCase()) {
case "LONG":
return "BIGINT";
case "SHORT":
return "SMALLINT";
case "BYTE":
return "TINYINT";
default:
return declare;
}
} | @Test
public void testCompatibleTypeDeclare() {
SeaTunnelDataType<?> longType =
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType("c_long", "long");
Assertions.assertEquals(BasicType.LONG_TYPE, longType);
SeaTunnelDataType<?> shortType =
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType("c_short", "short");
Assertions.assertEquals(BasicType.SHORT_TYPE, shortType);
SeaTunnelDataType<?> byteType =
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType("c_byte", "byte");
Assertions.assertEquals(BasicType.BYTE_TYPE, byteType);
ArrayType<?, ?> longArrayType =
(ArrayType<?, ?>)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_long_array", "array<long>");
Assertions.assertEquals(ArrayType.LONG_ARRAY_TYPE, longArrayType);
ArrayType<?, ?> shortArrayType =
(ArrayType<?, ?>)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_short_array", "array<short>");
Assertions.assertEquals(ArrayType.SHORT_ARRAY_TYPE, shortArrayType);
ArrayType<?, ?> byteArrayType =
(ArrayType<?, ?>)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_byte_array", "array<byte>");
Assertions.assertEquals(ArrayType.BYTE_ARRAY_TYPE, byteArrayType);
MapType<?, ?> longMapType =
(MapType<?, ?>)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_long_map", "map<long, long>");
Assertions.assertEquals(BasicType.LONG_TYPE, longMapType.getKeyType());
Assertions.assertEquals(BasicType.LONG_TYPE, longMapType.getValueType());
MapType<?, ?> shortMapType =
(MapType<?, ?>)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_short_map", "map<short, short>");
Assertions.assertEquals(BasicType.SHORT_TYPE, shortMapType.getKeyType());
Assertions.assertEquals(BasicType.SHORT_TYPE, shortMapType.getValueType());
MapType<?, ?> byteMapType =
(MapType<?, ?>)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_byte_map", "map<byte, byte>");
Assertions.assertEquals(BasicType.BYTE_TYPE, byteMapType.getKeyType());
Assertions.assertEquals(BasicType.BYTE_TYPE, byteMapType.getValueType());
SeaTunnelRowType longRow =
(SeaTunnelRowType)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_long_row", "{c = long}");
Assertions.assertEquals(BasicType.LONG_TYPE, longRow.getFieldType(0));
SeaTunnelRowType shortRow =
(SeaTunnelRowType)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_short_row", "{c = short}");
Assertions.assertEquals(BasicType.SHORT_TYPE, shortRow.getFieldType(0));
SeaTunnelRowType byteRow =
(SeaTunnelRowType)
SeaTunnelDataTypeConvertorUtil.deserializeSeaTunnelDataType(
"c_byte_row", "{c = byte}");
Assertions.assertEquals(BasicType.BYTE_TYPE, byteRow.getFieldType(0));
} |
@Override
public ObjectNode encode(LispSegmentAddress address, CodecContext context) {
checkNotNull(address, "LispSegmentAddress cannot be null");
final ObjectNode result = context.mapper().createObjectNode()
.put(INSTANCE_ID, address.getInstanceId());
if (address.getAddress() != null) {
final JsonCodec<MappingAddress> addressCodec =
context.codec(MappingAddress.class);
ObjectNode addressNode = addressCodec.encode(address.getAddress(), context);
result.set(ADDRESS, addressNode);
}
return result;
} | @Test
public void testLispSegmentAddressEncode() {
LispSegmentAddress address = new LispSegmentAddress.Builder()
.withInstanceId(INSTANCE_ID)
.withAddress(MappingAddresses.ipv4MappingAddress(IPV4_PREFIX))
.build();
ObjectNode addressJson = segmentAddressCodec.encode(address, context);
assertThat("errors in encoding segment address JSON",
addressJson, LispSegmentAddressJsonMatcher.matchesSegmentAddress(address));
} |
@Nullable
public static EpoxyModel<?> getModelFromPayload(List<Object> payloads, long modelId) {
if (payloads.isEmpty()) {
return null;
}
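// Each payload is a DiffPayload holding either a single changed model or a map of changed models keyed by id; handle both shapes.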
for (Object payload : payloads) {
DiffPayload diffPayload = (DiffPayload) payload;
if (diffPayload.singleModel != null) {
if (diffPayload.singleModel.id() == modelId) {
return diffPayload.singleModel;
}
} else {
EpoxyModel<?> modelForId = diffPayload.modelsById.get(modelId);
if (modelForId != null) {
return modelForId;
}
}
}
return null;
} | @Test
public void getMultipleModelsFromMultipleDiffPayloads() {
TestModel model1Payload1 = new TestModel(1);
TestModel model2Payload1 = new TestModel(2);
DiffPayload diffPayload1 = diffPayloadWithModels(model1Payload1, model2Payload1);
TestModel model1Payload2 = new TestModel(3);
TestModel model2Payload2 = new TestModel(4);
DiffPayload diffPayload2 = diffPayloadWithModels(model1Payload2, model2Payload2);
List<Object> payloads = payloadsWithDiffPayloads(diffPayload1, diffPayload2);
EpoxyModel<?> model1FromPayload1 = getModelFromPayload(payloads, model1Payload1.id());
EpoxyModel<?> model2FromPayload1 = getModelFromPayload(payloads, model2Payload1.id());
EpoxyModel<?> model1FromPayload2 = getModelFromPayload(payloads, model1Payload2.id());
EpoxyModel<?> model2FromPayload2 = getModelFromPayload(payloads, model2Payload2.id());
assertEquals(model1Payload1, model1FromPayload1);
assertEquals(model2Payload1, model2FromPayload1);
assertEquals(model1Payload2, model1FromPayload2);
assertEquals(model2Payload2, model2FromPayload2);
} |
public IterableSubject asList() {
return checkNoNeedToDisplayBothValues("asList()").that(Shorts.asList(checkNotNull(actual)));
} | @Test
public void asListWithoutCastingFails() {
expectFailureWhenTestingThat(array(1, 1, 0)).asList().containsAtLeast(1, 0);
assertFailureKeys(
"value of",
"missing (2)",
"though it did contain (3)",
"---",
"expected to contain at least",
"but was");
} |
@Override
public Object getValue( Node node ) throws KettleException {
switch ( storageType ) {
case STORAGE_TYPE_NORMAL:
String valueString = XMLHandler.getNodeValue( node );
if ( Utils.isEmpty( valueString ) ) {
return null;
}
// Handle Content -- only when not NULL
//
switch ( getType() ) {
case TYPE_STRING:
return valueString;
case TYPE_NUMBER:
return Double.parseDouble( valueString );
case TYPE_INTEGER:
return Long.parseLong( valueString );
case TYPE_DATE:
return XMLHandler.stringToDate( valueString );
case TYPE_TIMESTAMP:
return XMLHandler.stringToTimestamp( valueString );
case TYPE_BIGNUMBER:
return new BigDecimal( valueString );
case TYPE_BOOLEAN:
return "Y".equalsIgnoreCase( valueString );
case TYPE_BINARY:
return XMLHandler.stringToBinary( XMLHandler.getTagValue( node, "binary-value" ) );
default:
throw new KettleException( toString() + " : Unable to de-serialize '" + valueString
+ "' from XML for data type " + getType() );
}
case STORAGE_TYPE_BINARY_STRING:
// Handle binary string content -- only when not NULL
// In this case, we opt not to convert anything at all for speed.
// That way, we can save on CPU power.
// Since the streams can be compressed, volume shouldn't be an issue at
// all.
//
String binaryString = XMLHandler.getTagValue( node, "binary-string" );
if ( Utils.isEmpty( binaryString ) ) {
return null;
}
return XMLHandler.stringToBinary( binaryString );
case STORAGE_TYPE_INDEXED:
String indexString = XMLHandler.getTagValue( node, "index-value" );
if ( Utils.isEmpty( indexString ) ) {
return null;
}
return Integer.parseInt( indexString );
default:
throw new KettleException( toString() + " : Unknown storage type " + getStorageType() );
}
} | @Test
public void testGetValueFromNode() throws Exception {
ValueMetaBase valueMetaBase = null;
Node xmlNode = null;
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_STRING );
xmlNode = XMLHandler.loadXMLString( "<value-data>String val</value-data>" ).getFirstChild();
assertEquals( "String val", valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_NUMBER );
xmlNode = XMLHandler.loadXMLString( "<value-data>689.2</value-data>" ).getFirstChild();
assertEquals( 689.2, valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_NUMBER );
xmlNode = XMLHandler.loadXMLString( "<value-data>689.2</value-data>" ).getFirstChild();
assertEquals( 689.2, valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_INTEGER );
xmlNode = XMLHandler.loadXMLString( "<value-data>68933</value-data>" ).getFirstChild();
assertEquals( 68933L, valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_DATE );
xmlNode = XMLHandler.loadXMLString( "<value-data>2017/11/27 08:47:10.000</value-data>" ).getFirstChild();
assertEquals( XMLHandler.stringToDate( "2017/11/27 08:47:10.000" ), valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_TIMESTAMP );
xmlNode = XMLHandler.loadXMLString( "<value-data>2017/11/27 08:47:10.123456789</value-data>" ).getFirstChild();
assertEquals( XMLHandler.stringToTimestamp( "2017/11/27 08:47:10.123456789" ), valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_BOOLEAN );
xmlNode = XMLHandler.loadXMLString( "<value-data>Y</value-data>" ).getFirstChild();
assertEquals( true, valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_BINARY );
byte[] bytes = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
String s = XMLHandler.encodeBinaryData( bytes );
xmlNode = XMLHandler.loadXMLString( "<value-data>test<binary-value>" + s + "</binary-value></value-data>" ).getFirstChild();
assertArrayEquals( bytes, (byte[]) valueMetaBase.getValue( xmlNode ) );
valueMetaBase = new ValueMetaBase( "test", ValueMetaInterface.TYPE_STRING );
xmlNode = XMLHandler.loadXMLString( "<value-data></value-data>" ).getFirstChild();
assertNull( valueMetaBase.getValue( xmlNode ) );
} |
@Override
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentImportExecutor,
TokensAndUrlAuthData authData,
PhotosContainerResource data)
throws Exception {
if (data == null) {
return ImportResult.OK;
}
AppleMediaInterface mediaInterface = factory
.getOrCreateMediaInterface(jobId, authData, appCredentials, exportingService, monitor);
// Uploads album metadata
final int albumCount =
mediaInterface.importAlbums(
jobId,
idempotentImportExecutor,
data.getAlbums().stream()
.map(MediaAlbum::photoToMediaAlbum)
.collect(Collectors.toList()),
DataVertical.PHOTOS.getDataType());
final Map<String, Long> importPhotosResult =
mediaInterface.importAllMedia(
jobId,
idempotentImportExecutor,
data.getPhotos(),
DataVertical.PHOTOS.getDataType());
// generate import result
final ImportResult result = ImportResult.OK;
final Map<String, Integer> counts =
new ImmutableMap.Builder<String, Integer>()
.put(PhotosContainerResource.ALBUMS_COUNT_DATA_NAME, albumCount)
.put(
PhotosContainerResource.PHOTOS_COUNT_DATA_NAME,
importPhotosResult.get(ApplePhotosConstants.COUNT_KEY).intValue())
.build();
return result
.copyWithBytes(importPhotosResult.get(ApplePhotosConstants.BYTES_KEY))
.copyWithCounts(counts);
} | @Test
public void importPhotosMultipleBatches() throws Exception {
// set up
final int photoCount = ApplePhotosConstants.maxNewMediaRequests + 1;
final List<PhotoModel> photos = createTestPhotos(photoCount);
final Map<String, Integer> dataIdToStatus =
photos.stream()
.collect(
Collectors.toMap(
PhotoModel::getDataId,
photoModel -> SC_OK));
setUpGetUploadUrlResponse(dataIdToStatus);
setUpUploadContentResponse(dataIdToStatus);
setUpCreateMediaResponse(dataIdToStatus);
// run test
PhotosContainerResource data = new PhotosContainerResource(null, photos);
final ImportResult importResult =
applePhotosImporter.importItem(uuid, executor, authData, data);
// verify correct methods were called
verify(mediaInterface, times(2)).getUploadUrl(anyString(), anyString(), anyList());
verify(mediaInterface)
.getUploadUrl(
uuid.toString(),
DataVertical.PHOTOS.getDataType(),
photos.subList(0, ApplePhotosConstants.maxNewMediaRequests).stream()
.map(PhotoModel::getDataId)
.collect(Collectors.toList()));
verify(mediaInterface)
.getUploadUrl(
uuid.toString(),
DataVertical.PHOTOS.getDataType(),
photos.subList(ApplePhotosConstants.maxNewMediaRequests, photoCount).stream()
.map(PhotoModel::getDataId)
.collect(Collectors.toList()));
verify(mediaInterface, times(2)).uploadContent(anyMap(), anyList());
verify(mediaInterface, times(2)).createMedia(anyString(), anyString(), anyList());
// check the result
assertThat(importResult.getCounts().isPresent()).isTrue();
assertThat(importResult.getCounts().get().get(ALBUMS_COUNT_DATA_NAME) == 0).isTrue();
assertThat(importResult.getCounts().get().get(PHOTOS_COUNT_DATA_NAME) == photoCount).isTrue();
assertThat(importResult.getBytes().get() == photoCount * PHOTOS_FILE_SIZE).isTrue();
final Map<String, Serializable> expectedKnownValue =
photos.stream()
.collect(
Collectors.toMap(
photoModel -> photoModel.getAlbumId() + "-" + photoModel.getDataId(),
photoModel -> MEDIA_RECORDID_BASE + photoModel.getDataId()));
checkKnownValues(expectedKnownValue);
} |
@Override
public List<String> getChildrenKeys(final String key) {
try {
List<String> result = client.getChildren().forPath(key);
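// ZooKeeper makes no ordering guarantee for children, so sort in reverse order for a stable result.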
result.sort(Comparator.reverseOrder());
return result;
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
ZookeeperExceptionHandler.handleException(ex);
return Collections.emptyList();
}
} | @Test
void assertGetChildrenKeys() throws Exception {
List<String> keys = Arrays.asList("/test/children/keys/1", "/test/children/keys/2");
when(getChildrenBuilder.forPath("/test/children/keys")).thenReturn(keys);
List<String> childrenKeys = REPOSITORY.getChildrenKeys("/test/children/keys");
assertThat(childrenKeys.size(), is(2));
} |
public static CommitTransactionRequest fromJson(String json) {
return JsonUtil.parse(json, CommitTransactionRequestParser::fromJson);
} | @Test
public void invalidTableIdentifier() {
assertThatThrownBy(
() ->
CommitTransactionRequestParser.fromJson(
"{\"table-changes\":[{\"ns1.table1\" : \"ns1.table1\"}]}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid table changes: table identifier is required");
assertThatThrownBy(
() ->
CommitTransactionRequestParser.fromJson(
"{\"table-changes\":[{\"identifier\" : {}}]}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing string: name");
assertThatThrownBy(
() ->
CommitTransactionRequestParser.fromJson(
"{\"table-changes\":[{\"identifier\" : { \"name\": 23}}]}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a string value: name: 23");
} |
@VisibleForTesting
void processIncludeView( Object[] outputRow ) {
// Views
if ( meta.isIncludeView() ) {
try {
String[] viewNames = data.db.getViews( data.realSchemaName, meta.isAddSchemaInOut() );
String[] viewNamesWithoutSchema = data.db.getViews( data.realSchemaName, false );
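// Fetch the views twice: once honoring the include-schema flag for output, and once without the schema for the isSystemTable check below.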
String objectType = BaseMessages.getString( PKG, "GetTableNamesDialog.ObjectType.View" );
for ( int i = 0; i < viewNames.length && !isStopped(); i++ ) {
Object[] outputRowView = outputRow.clone();
int outputIndex = data.totalpreviousfields;
String viewName = viewNames[i];
String viewNameWithoutSchema = viewNamesWithoutSchema[i];
outputRowView[outputIndex++] = viewName;
if ( !Utils.isEmpty( data.realObjectTypeFieldName ) ) {
outputRowView[outputIndex++] = objectType;
}
if ( !Utils.isEmpty( data.realIsSystemObjectFieldName ) ) {
outputRowView[outputIndex++] = Boolean.valueOf( data.db.isSystemTable( viewNameWithoutSchema ) );
}
if ( !Utils.isEmpty( data.realSQLCreationFieldName ) ) {
outputRowView[outputIndex++] = null;
}
data.rownr++;
putRow( data.outputRowMeta, outputRowView ); // copy row to output rowset(s);
logInfo( outputRowView );
}
} catch ( Exception e ) {
// Ignore
}
}
} | @Test
public void processIncludeViewIncludesSchemaTest() throws KettleException {
prepareIncludeViewTest( true );
getTableNamesSpy.processIncludeView( new Object[] { "", "", "", "" } );
// Regardless of whether include schema is true or false, calls to isSystemTable should be made
// with the table name without the schema concatenated
for ( String table : getTableNamesWithoutSchema() ) {
verify( database ).isSystemTable( table );
}
// getViews without the schema must be called exactly once, because the schema-less names are
// always needed for the isSystemTable check.
// Since includeSchema is set in the meta, a call to getViews including the schema is also made.
verify( database, times( 1 ) ).getViews( "schema", false );
verify( database, times( 1 ) ).getViews( "schema", true );
} |
public static <T> RetryTransformer<T> of(Retry retry) {
return new RetryTransformer<>(retry);
} | @Test
public void shouldThrowMaxRetriesExceptionAfterRetriesExhaustedWhenConfigured() throws InterruptedException {
RetryConfig config = RetryConfig.<String>custom()
.retryOnResult("retry"::equals)
.waitDuration(Duration.ofMillis(50))
.maxAttempts(3)
.failAfterMaxAttempts(true)
.build();
Retry retry = Retry.of("testName", config);
given(helloWorldService.returnHelloWorld())
.willReturn("retry");
Flowable.fromCallable(helloWorldService::returnHelloWorld)
.compose(RetryTransformer.of(retry))
.test()
.await()
.assertFailure(MaxRetriesExceededException.class, "retry");
then(helloWorldService).should(times(3)).returnHelloWorld();
} |
public static int getValueMetaLen() {
return VALUE_DATA_OFFSET;
} | @Test
void testValueSpacePutAndGet() {
for (int i = 0; i < 100; i++) {
int valueLen = ThreadLocalRandom.current().nextInt(100) + 1;
ValueSpace valueSpace = createValueSpace(valueLen);
int valueMetaLen = SkipListUtils.getValueMetaLen();
int totalValueSpaceLen = valueMetaLen + valueLen;
int offset = 100;
MemorySegment segment =
MemorySegmentFactory.allocateUnpooledSegment(totalValueSpaceLen + offset);
putValueSpace(valueSpace, segment, offset);
verifyGetValueSpace(valueSpace, segment, offset);
}
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void setChatMenuButton() {
BaseResponse response = bot.execute(new SetChatMenuButton().chatId(chatId)
.menuButton(new MenuButtonWebApp("webapp", new WebAppInfo("https://core.telegram.org"))));
assertTrue(response.isOk());
response = bot.execute(new SetChatMenuButton().chatId(chatId)
.menuButton(new MenuButtonCommands()));
assertTrue(response.isOk());
response = bot.execute(new SetChatMenuButton().chatId(chatId)
.menuButton(new MenuButtonDefault()));
assertTrue(response.isOk());
} |
public boolean fence(HAServiceTarget fromSvc) {
return fence(fromSvc, null);
} | @Test
public void testShortNameSshWithUser() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer("sshfence(user)");
assertFalse(fencer.fence(MOCK_TARGET));
} |
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
final String param = exchange.getAttribute(Constants.PARAM_TRANSFORM);
ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
assert shenyuContext != null;
MetaData metaData = exchange.getAttribute(Constants.META_DATA);
if (!checkMetaData(metaData)) {
LOG.error(" path is :{}, meta data have error.... {}", shenyuContext.getPath(), metaData);
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.META_DATA_ERROR);
return WebFluxResultUtils.result(exchange, error);
}
assert metaData != null;
if (StringUtils.isNoneBlank(metaData.getParameterTypes()) && StringUtils.isBlank(param)) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.GRPC_HAVE_BODY_PARAM);
return WebFluxResultUtils.result(exchange, error);
}
final ShenyuGrpcClient client = GrpcClientCache.getGrpcClient(selector.getId());
if (Objects.isNull(client)) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.GRPC_CLIENT_NULL);
return WebFluxResultUtils.result(exchange, error);
}
// load balance context
Context.current().withValue(GrpcConstants.GRPC_SELECTOR_ID, selector.getId()).attach();
Context.current().withValue(GrpcConstants.GRPC_RULE_ID, rule.getId()).attach();
Context.current().withValue(GrpcConstants.GRPC_REMOTE_ADDRESS,
Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress()).attach();
GrpcExtInfo extInfo = GsonUtils.getGson().fromJson(metaData.getRpcExt(), GrpcExtInfo.class);
CallOptions callOptions = CallOptions.DEFAULT.withDeadlineAfter(extInfo.timeout, TimeUnit.MILLISECONDS);
Map<String, Map<String, String>> rpcContext = exchange.getAttribute(Constants.GENERAL_CONTEXT);
Optional.ofNullable(rpcContext).map(context -> context.get(PluginEnum.GRPC.getName())).ifPresent(
context -> Context.current().withValue(RPC_CONTEXT_KEY, context).attach());
CompletableFuture<ShenyuGrpcResponse> result = client.call(metaData, callOptions, param, extInfo.methodType);
Context.current().detach(Context.ROOT);
return Mono.fromFuture(result.thenApply(ret -> {
exchange.getAttributes().put(Constants.RPC_RESULT, ret.getResults());
exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.SUCCESS.getName());
return ret;
})).onErrorMap(ShenyuException::new).then(chain.execute(exchange));
} | @Test
@SuppressWarnings("all")
public void testDoExecute() throws ClassNotFoundException, NoSuchFieldException, IllegalAccessException {
ServerWebExchange exchange = getServerWebExchange();
exchange.getAttributes().put(Constants.PARAM_TRANSFORM, "{message:1}");
exchange.getAttributes().put(Constants.META_DATA, getMetaData());
Class<?> grpcClientCacheClass = Class.forName("org.apache.shenyu.plugin.grpc.cache.GrpcClientCache");
Field clientCacheField = grpcClientCacheClass.getDeclaredField("CLIENT_CACHE");
clientCacheField.setAccessible(true);
Map<String, ShenyuGrpcClient> clientCacheMap = (Map<String, ShenyuGrpcClient>) clientCacheField.get(grpcClientCacheClass);
ShenyuGrpcClient mockClient = mock(ShenyuGrpcClient.class);
ShenyuGrpcResponse response = new ShenyuGrpcResponse();
response.getResults().add("success");
when(mockClient.call(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()))
.thenReturn(CompletableFuture.completedFuture(response));
clientCacheMap.put("/grpc", mockClient);
when(chain.execute(Mockito.any())).thenReturn(Mono.empty());
RuleData data = mock(RuleData.class);
StepVerifier.create(grpcPlugin.doExecute(exchange, chain, selector, data)).expectSubscription().verifyComplete();
} |
@Override
public short readShort(@Nonnull String fieldName) throws IOException {
FieldDefinition fd = cd.getField(fieldName);
if (fd == null) {
return 0;
}
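// Widen BYTE fields to short for compatibility; any other declared type is an incompatible class change.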
switch (fd.getType()) {
case SHORT:
return super.readShort(fieldName);
case BYTE:
return super.readByte(fieldName);
default:
throw createIncompatibleClassChangeError(fd, SHORT);
}
} | @Test
public void testReadShort() throws Exception {
int aByte = reader.readShort("byte");
int aShort = reader.readShort("short");
assertEquals(1, aByte);
assertEquals(3, aShort);
assertEquals(0, reader.readShort("NO SUCH FIELD"));
} |
@Override
@DSTransactional // Multiple data sources: use @DSTransactional to guarantee the local transaction and the data source switching
public Long createTenant(TenantSaveReqVO createReqVO) {
// Validate that the tenant name is not duplicated
validTenantNameDuplicate(createReqVO.getName(), null);
// Validate that the tenant website is not duplicated
validTenantWebsiteDuplicate(createReqVO.getWebsite(), null);
// Validate that the tenant package is not disabled
TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(createReqVO.getPackageId());
// Create the tenant
TenantDO tenant = BeanUtils.toBean(createReqVO, TenantDO.class);
tenantMapper.insert(tenant);
// Create the tenant's administrator
TenantUtils.execute(tenant.getId(), () -> {
// Create the role
Long roleId = createRole(tenantPackage);
// Create the user and assign the role
Long userId = createUser(roleId, createReqVO);
// Update the tenant's administrator
tenantMapper.updateById(new TenantDO().setId(tenant.getId()).setContactUserId(userId));
});
return tenant.getId();
} | @Test
public void testCreateTenant() {
// mock the package with id 100L
TenantPackageDO tenantPackage = randomPojo(TenantPackageDO.class, o -> o.setId(100L));
when(tenantPackageService.validTenantPackage(eq(100L))).thenReturn(tenantPackage);
// mock the role with id 200L
when(roleService.createRole(argThat(role -> {
assertEquals(RoleCodeEnum.TENANT_ADMIN.getName(), role.getName());
assertEquals(RoleCodeEnum.TENANT_ADMIN.getCode(), role.getCode());
assertEquals(0, role.getSort());
assertEquals("系统自动生成", role.getRemark());
return true;
}), eq(RoleTypeEnum.SYSTEM.getType()))).thenReturn(200L);
// mock the user with id 300L
when(userService.createUser(argThat(user -> {
assertEquals("yunai", user.getUsername());
assertEquals("yuanma", user.getPassword());
assertEquals("芋道", user.getNickname());
assertEquals("15601691300", user.getMobile());
return true;
}))).thenReturn(300L);
// Prepare the request parameters
TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
o.setContactName("芋道");
o.setContactMobile("15601691300");
o.setPackageId(100L);
o.setStatus(randomCommonStatus());
o.setWebsite("https://www.iocoder.cn");
o.setUsername("yunai");
o.setPassword("yuanma");
}).setId(null); // set to null so the later assertions are easier
// Invoke
Long tenantId = tenantService.createTenant(reqVO);
// Assert
assertNotNull(tenantId);
// Verify the persisted record's attributes are correct
TenantDO tenant = tenantMapper.selectById(tenantId);
assertPojoEquals(reqVO, tenant, "id");
assertEquals(300L, tenant.getContactUserId());
// verify the menu permissions were assigned
verify(permissionService).assignRoleMenu(eq(200L), same(tenantPackage.getMenuIds()));
// verify the role was assigned
verify(permissionService).assignUserRole(eq(300L), eq(singleton(200L)));
} |
public List<DirectEncryptedPseudonymType> provideDep(ProvideDEPsRequest request) throws BsnkException {
try {
return ((BSNKDEPPort) this.bindingProvider).bsnkProvideDEPs(request).getDirectEncryptedPseudonyms();
} catch (SOAPFaultException ex) {
if (ex.getCause().getMessage().equals("The signature or decryption was invalid")) {
throw new BsnkException("SignatureValidationFault", ex.getCause().getMessage(), ex.getCause());
}
throw new BsnkException("BSNKProvideDEPFault", ex.getMessage(), ex);
} catch (WebServiceException ex) {
throw new BsnkException("Could not send bsnkProvidePPPPCAOptimized", ex.getCause().getMessage(),
ex.getCause());
} catch (BSNKProvideDEPFault ex) {
throw new BsnkException("BSNKProvideDEPFault", ex.getCause().getMessage(), ex.getCause());
}
} | @Test
public void testValidResponseSuccess() {
setupWireMock();
try {
assertEquals(1, client.provideDep(request).size());
} catch (BsnkException ex) {
fail(ex.getMessage());
}
} |
public static void delete(final File file, final boolean ignoreFailures)
{
if (file.exists())
{
if (file.isDirectory())
{
final File[] files = file.listFiles();
if (null != files)
{
for (final File f : files)
{
delete(f, ignoreFailures);
}
}
}
if (!file.delete() && !ignoreFailures)
{
try
{
Files.delete(file.toPath());
}
catch (final IOException ex)
{
LangUtil.rethrowUnchecked(ex);
}
}
}
} | @Test
void deleteIgnoreFailuresNonExistingDirectory()
{
final File dir = tempDir.resolve("shadow-dir").toFile();
assertTrue(dir.mkdir());
assertTrue(dir.delete());
IoUtil.delete(dir, false);
assertFalse(dir.exists());
} |
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(
YarnConfiguration.PROXY_BIND_HOST,
YarnConfiguration.PROXY_ADDRESS,
YarnConfiguration.DEFAULT_PROXY_ADDRESS,
YarnConfiguration.DEFAULT_PROXY_PORT);
} | @Test
void testStart() {
webAppProxy.init(conf);
assertEquals(STATE.INITED, webAppProxy.getServiceState());
webAppProxy.start();
for (Service service : webAppProxy.getServices()) {
if (service instanceof WebAppProxy) {
assertEquals(proxyAddress, ((WebAppProxy) service).getBindAddress());
}
}
assertEquals(STATE.STARTED, webAppProxy.getServiceState());
} |
public static Row toBeamRow(GenericRecord record, Schema schema, ConversionOptions options) {
List<Object> valuesInOrder =
schema.getFields().stream()
.map(
field -> {
try {
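// Resolve the Beam field against the record's own Avro schema; fields absent from the record are converted from null.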
org.apache.avro.Schema.Field avroField =
record.getSchema().getField(field.getName());
Object value = avroField != null ? record.get(avroField.pos()) : null;
return convertAvroFormat(field.getType(), value, options);
} catch (Exception cause) {
throw new IllegalArgumentException(
"Error converting field " + field + ": " + cause.getMessage(), cause);
}
})
.collect(toList());
return Row.withSchema(schema).addValues(valuesInOrder).build();
} | @Test
public void testToBeamRow_enum() {
Row beamRow = BigQueryUtils.toBeamRow(ENUM_STRING_TYPE, BQ_ENUM_ROW);
assertEquals(ENUM_STRING_ROW, beamRow);
} |
static Collection<File> internalGetFileResources(String path, Pattern pattern) {
final File file = new File(path);
if (!file.isDirectory()) {
return Collections.emptySet();
}
return getFileResourcesFromDirectory(file, pattern);
} | @Test
public void internalGetResourcesNotExisting() {
String path = "." + File.separator + "target" + File.separator + "test-classes";
Pattern pattern = Pattern.compile(".*arg");
final Collection<File> retrieved = ResourceHelper.internalGetFileResources(path, pattern);
commonVerifyCollectionWithoutExpectedFile(retrieved);
} |
public Response downloadDumpFile(String topologyId, String hostPort, String fileName, String user) throws IOException {
String[] hostPortSplit = hostPort.split(":");
String host = hostPortSplit[0];
String portStr = hostPortSplit[1];
Path rawFile = logRoot.resolve(topologyId).resolve(portStr).resolve(fileName);
Path absFile = rawFile.toAbsolutePath().normalize();
if (!absFile.startsWith(logRoot) || !rawFile.normalize().toString().equals(rawFile.toString())) {
//Ensure filename doesn't contain ../ parts
return LogviewerResponseBuilder.buildResponsePageNotFound();
}
if (absFile.toFile().exists()) {
String workerFileRelativePath = String.join(File.separator, topologyId, portStr, WORKER_LOG_FILENAME);
if (resourceAuthorizer.isUserAllowedToAccessFile(user, workerFileRelativePath)) {
String downloadedFileName = host + "-" + topologyId + "-" + portStr + "-" + absFile.getFileName();
return LogviewerResponseBuilder.buildDownloadFile(downloadedFileName, absFile.toFile(), numFileDownloadExceptions);
} else {
return LogviewerResponseBuilder.buildResponseUnauthorizedUser(user);
}
} else {
return LogviewerResponseBuilder.buildResponsePageNotFound();
}
} | @Test
public void testDownloadDumpFile() throws IOException {
try (TmpPath rootPath = new TmpPath()) {
LogviewerProfileHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
Response topoAResponse = handler.downloadDumpFile("topoA", "localhost:1111", "worker.jfr", "user");
Response topoBResponse = handler.downloadDumpFile("topoB", "localhost:1111", "worker.txt", "user");
Utils.forceDelete(rootPath.toString());
assertThat(topoAResponse.getStatus(), is(Response.Status.OK.getStatusCode()));
assertThat(topoAResponse.getEntity(), not(nullValue()));
String topoAContentDisposition = topoAResponse.getHeaderString(HttpHeaders.CONTENT_DISPOSITION);
assertThat(topoAContentDisposition, containsString("localhost-topoA-1111-worker.jfr"));
assertThat(topoBResponse.getStatus(), is(Response.Status.OK.getStatusCode()));
assertThat(topoBResponse.getEntity(), not(nullValue()));
String topoBContentDisposition = topoBResponse.getHeaderString(HttpHeaders.CONTENT_DISPOSITION);
assertThat(topoBContentDisposition, containsString("localhost-topoB-1111-worker.txt"));
}
} |
public PointBuilder<T> time(Instant time) {
requireNonNull(time);
this.epochTime = time.toEpochMilli();
return this;
} | @Test
public void testTime() {
Instant time = Instant.EPOCH.plusSeconds(12);
Point<?> p1 = (new PointBuilder<>()).time(time)
.latLong(0.0, 0.0)
.build();
assertTrue(p1.time().equals(time));
} |
public List<StorageLocation> check(
final Configuration conf,
final Collection<StorageLocation> dataDirs)
throws InterruptedException, IOException {
final HashMap<StorageLocation, Boolean> goodLocations =
new LinkedHashMap<>();
final Set<StorageLocation> failedLocations = new HashSet<>();
final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures =
Maps.newHashMap();
final LocalFileSystem localFS = FileSystem.getLocal(conf);
final CheckContext context = new CheckContext(localFS, expectedPermission);
// Start parallel disk check operations on all StorageLocations.
for (StorageLocation location : dataDirs) {
goodLocations.put(location, true);
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(location, context);
if (olf.isPresent()) {
futures.put(location, olf.get());
}
}
if (maxVolumeFailuresTolerated >= dataDirs.size()) {
throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + ". Value configured is >= "
+ "to the number of configured volumes (" + dataDirs.size() + ").");
}
final long checkStartTimeMs = timer.monotonicNow();
// Retrieve the results of the disk checks.
for (Map.Entry<StorageLocation,
ListenableFuture<VolumeCheckResult>> entry : futures.entrySet()) {
// Determine how much time we can allow for this check to complete.
// The cumulative wait time cannot exceed maxAllowedTimeForCheck.
final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
final long timeLeftMs = Math.max(0,
maxAllowedTimeForCheckMs - waitSoFarMs);
final StorageLocation location = entry.getKey();
try {
final VolumeCheckResult result =
entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
switch (result) {
case HEALTHY:
break;
case DEGRADED:
LOG.warn("StorageLocation {} appears to be degraded.", location);
break;
case FAILED:
LOG.warn("StorageLocation {} detected as failed.", location);
failedLocations.add(location);
goodLocations.remove(location);
break;
default:
LOG.error("Unexpected health check result {} for StorageLocation {}",
result, location);
}
} catch (ExecutionException|TimeoutException e) {
LOG.warn("Exception checking StorageLocation " + location,
e.getCause());
failedLocations.add(location);
goodLocations.remove(location);
}
}
if (maxVolumeFailuresTolerated == DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
if (dataDirs.size() == failedLocations.size()) {
throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + goodLocations.size()
+ ", volumes configured: " + dataDirs.size()
+ ", volumes failed: " + failedLocations.size()
+ ", volume failures tolerated: " + maxVolumeFailuresTolerated);
}
} else {
if (failedLocations.size() > maxVolumeFailuresTolerated) {
throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + goodLocations.size()
+ ", volumes configured: " + dataDirs.size()
+ ", volumes failed: " + failedLocations.size()
+ ", volume failures tolerated: " + maxVolumeFailuresTolerated);
}
}
if (goodLocations.size() == 0) {
throw new DiskErrorException("All directories in "
+ DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
+ failedLocations);
}
return new ArrayList<>(goodLocations.keySet());
} | @Test(timeout=30000)
public void testBadConfiguration() throws Exception {
final List<StorageLocation> locations =
makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);
thrown.expect(HadoopIllegalArgumentException.class);
thrown.expectMessage("Invalid value configured");
StorageLocationChecker checker =
new StorageLocationChecker(conf, new FakeTimer());
checker.check(conf, locations);
} |
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> left,
final KTableHolder<K> right,
final StreamTableJoin<K> join,
final RuntimeBuildContext buildContext,
final JoinedFactory joinedFactory
) {
final Formats leftFormats = join.getInternalFormats();
final QueryContext queryContext = join.getProperties().getQueryContext();
final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
final LogicalSchema leftSchema = left.getSchema();
final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
leftSchema,
leftFormats.getKeyFeatures(),
leftFormats.getValueFeatures()
);
final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
leftFormats.getValueFormat(),
leftPhysicalSchema,
stacker.push(SERDE_CTX).getQueryContext()
);
final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
leftFormats.getKeyFormat(),
leftPhysicalSchema,
queryContext
);
final Joined<K, GenericRow, GenericRow> joined = joinedFactory.create(
keySerde,
leftSerde,
null,
StreamsUtil.buildOpName(queryContext)
);
final LogicalSchema rightSchema = right.getSchema();
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
final KStream<K, GenericRow> result;
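// Dispatch on the join type; only LEFT and INNER stream-table joins are supported here.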
switch (join.getJoinType()) {
case LEFT:
result = left.getStream().leftJoin(right.getTable(), joinParams.getJoiner(), joined);
break;
case INNER:
result = left.getStream().join(right.getTable(), joinParams.getJoiner(), joined);
break;
default:
throw new IllegalStateException("invalid join type");
}
return left.withStream(result, joinParams.getSchema());
} | @Test
public void shouldDoInnerJoin() {
// Given:
givenInnerJoin(L_KEY);
// When:
final KStreamHolder<Struct> result = join.build(planBuilder, planInfo);
// Then:
verify(leftKStream).join(
same(rightKTable),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 0)),
same(joined)
);
verifyNoMoreInteractions(leftKStream, rightKTable, resultStream);
assertThat(result.getStream(), is(resultStream));
assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
} |
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
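// Frame-decoding state machine: consume from the buffer until it runs short of bytes, then return and resume on the next call.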
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
} | @Test
public void testUnknownSpdyWindowUpdateFrameFlags() throws Exception {
short type = 9;
byte flags = (byte) 0xFF; // undefined flags
int length = 8;
int streamId = RANDOM.nextInt() & 0x7FFFFFFF;
int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
encodeControlFrameHeader(buf, type, flags, length);
buf.writeInt(streamId);
buf.writeInt(deltaWindowSize);
decoder.decode(buf);
verify(delegate).readWindowUpdateFrame(streamId, deltaWindowSize);
assertFalse(buf.isReadable());
buf.release();
} |
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
} | @Test
public void testUpgradeFromUnsupportedKafkaVersion(VertxTestContext context) {
String oldKafkaVersion = "2.8.0";
String oldInterBrokerProtocolVersion = "2.8";
String oldLogMessageFormatVersion = "2.8";
String kafkaVersion = VERSIONS.defaultVersion().version();
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, null, null),
mockNewCluster(
null,
mockSps(kafkaVersion),
mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion)));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion));
assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion));
async.flag();
})));
} |
public static Collection<MetaDataLoaderMaterial> getMetaDataLoaderMaterials(final Collection<String> tableNames,
final GenericSchemaBuilderMaterial material, final boolean checkMetaDataEnable) {
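// Group the requested tables by the data source that hosts them, then build one loader material per data source.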
Map<String, Collection<String>> dataSourceTableGroups = new LinkedHashMap<>();
Collection<DatabaseType> unsupportedThreeTierStorageStructureDatabaseTypes = getUnsupportedThreeTierStorageStructureDatabaseTypes(material.getStorageTypes().values());
DataNodes dataNodes = new DataNodes(material.getRules());
for (String each : tableNames) {
checkDataSourceTypeIncludeInstanceAndSetDatabaseTableMap(unsupportedThreeTierStorageStructureDatabaseTypes, dataNodes, each);
if (checkMetaDataEnable) {
addAllActualTableDataNode(material, dataSourceTableGroups, dataNodes, each);
} else {
addOneActualTableDataNode(material, dataSourceTableGroups, dataNodes, each);
}
}
Collection<MetaDataLoaderMaterial> result = new LinkedList<>();
for (Entry<String, Collection<String>> entry : dataSourceTableGroups.entrySet()) {
DatabaseType storageType = material.getStorageTypes().get(entry.getKey());
String defaultSchemaName = getDefaultSchemaNameByStorageType(storageType, material.getDefaultSchemaName());
result.add(new MetaDataLoaderMaterial(entry.getValue(), getDataSource(material, entry.getKey()), storageType, defaultSchemaName));
}
return result;
} | @Test
void assertGetSchemaMetaDataLoaderMaterialsWhenConfigCheckMetaDataEnable() {
ShardingSphereRule rule = mock(ShardingSphereRule.class);
DataNodeRuleAttribute ruleAttribute = mock(DataNodeRuleAttribute.class);
when(ruleAttribute.getDataNodesByTableName("t_order")).thenReturn(mockShardingDataNodes());
when(rule.getAttributes()).thenReturn(new RuleAttributes(ruleAttribute));
GenericSchemaBuilderMaterial material = new GenericSchemaBuilderMaterial(mock(DatabaseType.class), mockStorageTypes(), mockDataSourceMap(),
Arrays.asList(rule, mock(ShardingSphereRule.class)), mock(ConfigurationProperties.class), "sharding_db");
Collection<MetaDataLoaderMaterial> actual = SchemaMetaDataUtils.getMetaDataLoaderMaterials(Collections.singleton("t_order"), material, true);
assertThat(actual.size(), is(2));
Iterator<MetaDataLoaderMaterial> iterator = actual.iterator();
MetaDataLoaderMaterial firstMaterial = iterator.next();
assertThat(firstMaterial.getDefaultSchemaName(), is("sharding_db"));
assertThat(firstMaterial.getActualTableNames(), is(Collections.singletonList("t_order_0")));
MetaDataLoaderMaterial secondMaterial = iterator.next();
assertThat(secondMaterial.getDefaultSchemaName(), is("sharding_db"));
assertThat(secondMaterial.getActualTableNames(), is(Collections.singletonList("t_order_1")));
} |
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
if (!(notif instanceof ChangesOnMyIssuesNotification)) {
return null;
}
ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif;
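// Analysis changes are formatted against the first changed issue's project; other changes use the multi-project format.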
if (notification.getChange() instanceof AnalysisChange) {
checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification);
}
return formatMultiProject(notification);
} | @Test
public void format_sets_subject_with_project_name_and_branch_name_of_first_issue_in_set_when_change_from_Analysis() {
Set<ChangedIssue> changedIssues = IntStream.range(0, 2 + new Random().nextInt(4))
.mapToObj(i -> newChangedIssue(i + "", randomValidStatus(), newBranch("prj_" + i, "br_" + i), newRandomNotAHotspotRule("rule_" + i)))
.collect(toSet());
AnalysisChange analysisChange = newAnalysisChange();
EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(analysisChange, changedIssues));
Project project = changedIssues.iterator().next().getProject();
assertThat(emailMessage.getSubject()).isEqualTo("Analysis has changed some of your issues in " + project.getProjectName() + " (" + project.getBranchName().get() + ")");
} |
@Override
public void addPath(String word, int outputSymbol) {
MutableState state = getStartState();
if (state == null) {
throw new IllegalStateException("Start state cannot be null");
}
List<MutableArc> arcs = state.getArcs();
boolean isFound = false;
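// Follow an existing arc out of the start state whose target matches the word's first character, if any.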
for (MutableArc arc : arcs) {
if (arc.getNextState().getLabel() == word.charAt(0)) {
state = arc.getNextState();
isFound = true;
break;
}
}
int foundPos = -1;
if (isFound) {
Pair<MutableState, Integer> pair = findPointOfDiversion(state, word);
if (pair == null) {
// Word already exists
return;
}
foundPos = pair.getRight();
state = pair.getLeft();
}
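// Append fresh states for the remaining characters; only the arc reaching the final state carries the output symbol.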
for (int i = foundPos + 1; i < word.length(); i++) {
MutableState nextState = new MutableState();
nextState.setLabel(word.charAt(i));
int currentOutputSymbol = -1;
if (i == word.length() - 1) {
currentOutputSymbol = outputSymbol;
}
MutableArc mutableArc = new MutableArc(currentOutputSymbol, nextState);
state.addArc(mutableArc);
state = nextState;
}
state.setIsTerminal(true);
} | @Test
public void testRegexMatcherPrefix() {
MutableFST fst = new MutableFSTImpl();
fst.addPath("he", 127);
fst.addPath("hp", 136);
RoaringBitmapWriter<MutableRoaringBitmap> writer = RoaringBitmapWriter.bufferWriter().get();
RealTimeRegexpMatcher.regexMatch("h.*", fst, writer::add);
Assert.assertEquals(writer.get().getCardinality(), 2);
} |
@Override
public int compareTo(IndexKey o) {
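// Order by column name first, then break ties by index type id.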
if (_name.equals(o._name)) {
return _type.getId().compareTo(o._type.getId());
}
return _name.compareTo(o._name);
} | @Test
public void testCompareTo() {
List<IndexKey> iks = Arrays
.asList(new IndexKey("foo", StandardIndexes.inverted()), new IndexKey("bar", StandardIndexes.bloomFilter()),
new IndexKey("foo", StandardIndexes.forward()), new IndexKey("bar", StandardIndexes.dictionary()),
new IndexKey("baz", StandardIndexes.json()), new IndexKey("baz", StandardIndexes.fst()));
Collections.sort(iks);
assertEquals(iks, Arrays
.asList(new IndexKey("bar", StandardIndexes.bloomFilter()), new IndexKey("bar", StandardIndexes.dictionary()),
new IndexKey("baz", StandardIndexes.fst()), new IndexKey("baz", StandardIndexes.json()),
new IndexKey("foo", StandardIndexes.forward()), new IndexKey("foo", StandardIndexes.inverted())));
} |
@Override
@CanIgnoreReturnValue
public FileChannel truncate(long size) throws IOException {
Util.checkNotNegative(size, "size");
checkOpen();
checkWritable();
synchronized (this) {
boolean completed = false;
try {
if (!beginBlocking()) {
return this; // AsynchronousCloseException will be thrown
}
file.writeLock().lockInterruptibly();
try {
file.truncate(size);
if (position > size) {
position = size;
}
file.setLastModifiedTime(fileSystemState.now());
completed = true;
} finally {
file.writeLock().unlock();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
endBlocking(completed);
}
}
return this;
} | @Test
public void testTruncateNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
try {
channel.truncate(-1);
fail();
} catch (IllegalArgumentException expected) {
}
} |
@Override
public String execute(CommandContext commandContext, String[] args) {
Channel channel = commandContext.getRemote();
if (ArrayUtils.isEmpty(args)) {
return "Please input service name, eg: \r\ncd XxxService\r\ncd com.xxx.XxxService";
}
String message = args[0];
StringBuilder buf = new StringBuilder();
if ("/".equals(message) || "..".equals(message)) {
String service = channel.attr(SERVICE_KEY).getAndRemove();
buf.append("Cancelled default service ").append(service).append('.');
} else {
boolean found = false;
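// Match the requested name against each exporter's interface simple name, fully qualified name, URL path, or service key.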
for (Exporter<?> exporter : dubboProtocol.getExporters()) {
if (message.equals(exporter.getInvoker().getInterface().getSimpleName())
|| message.equals(exporter.getInvoker().getInterface().getName())
|| message.equals(exporter.getInvoker().getUrl().getPath())
|| message.equals(exporter.getInvoker().getUrl().getServiceKey())) {
found = true;
break;
}
}
if (found) {
channel.attr(SERVICE_KEY).set(message);
buf.append("Used the ")
.append(message)
.append(" as default.\r\nYou can cancel default service by command: cd /");
} else {
buf.append("No such service ").append(message);
}
}
return buf.toString();
} | @Test
void testChangeServiceNotExport() {
String result = change.execute(mockCommandContext, new String[] {"demo"});
assertEquals("No such service demo", result);
} |
@Override
public boolean hasOperatePermission(CaseInsensitiveString username, UserRoleMatcher userRoleMatcher, boolean everyoneIsAllowedToOperateIfNoAuthIsDefined) {
return this.getAuthorizationPart().hasOperatePermission(username, userRoleMatcher, everyoneIsAllowedToOperateIfNoAuthIsDefined);
} | @Test
public void shouldUseDefaultPermissionsForOperatePermissionIfAuthorizationIsNotDefined_When2ConfigParts() {
BasicPipelineConfigs filePart = new BasicPipelineConfigs();
filePart.setOrigin(new FileConfigOrigin());
assertThat(new MergePipelineConfigs(filePart, new BasicPipelineConfigs())
.hasOperatePermission(new CaseInsensitiveString("anyone"), null, true), is(true));
assertThat(new MergePipelineConfigs(filePart, new BasicPipelineConfigs())
.hasOperatePermission(new CaseInsensitiveString("anyone"), null, false), is(false));
} |
@Override
public void remove(String objectName) {
writeLock.lock();
try {
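// Drop every index entry whose mapped value equals the removed object's name.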
indexKeyObjectNamesMap.values().removeIf(objectName::equals);
} finally {
writeLock.unlock();
}
} | @Test
void remove() {
var spec =
PrimaryKeySpecUtils.primaryKeyIndexSpec(IndexEntryContainerTest.FakeExtension.class);
var descriptor = new IndexDescriptor(spec);
var entry = new IndexEntryImpl(descriptor);
entry.addEntry(List.of("slug-1"), "fake-name-1");
assertThat(entry.indexedKeys()).containsExactly("slug-1");
assertThat(entry.entries()).hasSize(1);
entry.removeEntry("slug-1", "fake-name-1");
assertThat(entry.indexedKeys()).isEmpty();
assertThat(entry.entries()).isEmpty();
} |
@Override
public Connection getConnection() {
return null;
} | @Test
void assertGetConnection() {
assertNull(metaData.getConnection());
} |
@Override
public Optional<Long> getTimeUntilBackupRequestNano()
{
_costLimiter.arrive();
synchronized (_lock)
{
if (_histogramReady)
{
return Optional.of(Math.max(_minBackupDelayNano, _histogram.getValueAtPercentile(_percentile)));
} else
{
return Optional.empty();
}
}
} | @Test
public void testValuesOutOfRange()
{
BoundedCostBackupRequestsStrategy strategy = new BoundedCostBackupRequestsStrategy(5, 64, 1024, 128, 0);
BackupRequestsSimulator simulator = new BackupRequestsSimulator(new PoissonEventsArrival(200, TimeUnit.SECONDS),
new GaussianResponseTimeDistribution(BoundedCostBackupRequestsStrategy.HIGH,
2 * BoundedCostBackupRequestsStrategy.HIGH, BoundedCostBackupRequestsStrategy.HIGH, TimeUnit.NANOSECONDS),
strategy);
simulator.simulate(ITERATIONS);
assertTrue(((100d * simulator.getNumberOfBackupRequestsMade()) / ITERATIONS) < 5 + EXPECTED_PRECISSION);
assertTrue(strategy.getTimeUntilBackupRequestNano().get() >= BoundedCostBackupRequestsStrategy.HIGH);
} |
@Override
public int hashCode() {
return Objects.hash(negativeSign, positiveSign, zeroDigit, decimalMark, minDecimals, decimalGroups, shift,
roundingMode, Arrays.hashCode(codes), codeSeparator, codePrefixed);
} | @Test
public void testHashCode() {
MonetaryFormat mf1 = new MonetaryFormat(true);
MonetaryFormat mf2 = new MonetaryFormat(true);
assertEquals(mf1.hashCode(), mf2.hashCode());
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldThrowOnInsertHeaders() {
// Given:
givenSourceStreamWithSchema(SCHEMA_WITH_HEADERS, SerdeFeatures.of(), SerdeFeatures.of());
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
allColumnNames(SCHEMA_WITH_HEADERS),
ImmutableList.of(
new StringLiteral("key"),
new StringLiteral("str"),
new LongLiteral(2L),
new NullLiteral()
)
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getMessage(), is("Cannot insert into HEADER columns: HEAD0"));
} |
@Override
public String pluginNamed() {
return PluginEnum.LOGGING_CLICK_HOUSE.getName();
} | @Test
public void testPluginNamed() {
Assertions.assertEquals(loggingClickHousePluginDataHandler.pluginNamed(), "loggingClickHouse");
} |
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
checkMaybeCompatible(source, target);
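// An optional source may produce nulls; projecting onto a required target therefore needs a target default value.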
if (source.isOptional() && !target.isOptional()) {
if (target.defaultValue() != null) {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return target.defaultValue();
}
} else {
throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
}
} else {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return null;
}
}
} | @Test
public void testNumericTypeProjection() {
Schema[] promotableSchemas = {Schema.INT8_SCHEMA, Schema.INT16_SCHEMA, Schema.INT32_SCHEMA, Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA, Schema.FLOAT64_SCHEMA};
Schema[] promotableOptionalSchemas = {Schema.OPTIONAL_INT8_SCHEMA, Schema.OPTIONAL_INT16_SCHEMA, Schema.OPTIONAL_INT32_SCHEMA, Schema.OPTIONAL_INT64_SCHEMA,
Schema.OPTIONAL_FLOAT32_SCHEMA, Schema.OPTIONAL_FLOAT64_SCHEMA};
Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 1.2345};
Map<Object, List<?>> expectedProjected = new HashMap<>();
expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 127, 127, 127L, 127.F, 127.));
expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 255.F, 255.));
expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.));
expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.));
expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2));
expectedProjected.put(values[5], Collections.singletonList(1.2345));
Object promoted;
for (int i = 0; i < promotableSchemas.length; ++i) {
Schema source = promotableSchemas[i];
List<?> expected = expectedProjected.get(values[i]);
for (int j = i; j < promotableSchemas.length; ++j) {
Schema target = promotableSchemas[j];
promoted = SchemaProjector.project(source, values[i], target);
if (target.type() == Type.FLOAT64) {
assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6);
} else {
assertEquals(expected.get(j - i), promoted);
}
}
for (int j = i; j < promotableOptionalSchemas.length; ++j) {
Schema target = promotableOptionalSchemas[j];
promoted = SchemaProjector.project(source, values[i], target);
if (target.type() == Type.FLOAT64) {
assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6);
} else {
assertEquals(expected.get(j - i), promoted);
}
}
}
for (int i = 0; i < promotableOptionalSchemas.length; ++i) {
Schema source = promotableSchemas[i];
List<?> expected = expectedProjected.get(values[i]);
for (int j = i; j < promotableOptionalSchemas.length; ++j) {
Schema target = promotableOptionalSchemas[j];
promoted = SchemaProjector.project(source, values[i], target);
if (target.type() == Type.FLOAT64) {
assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6);
} else {
assertEquals(expected.get(j - i), promoted);
}
}
}
Schema[] nonPromotableSchemas = {Schema.BOOLEAN_SCHEMA, Schema.BYTES_SCHEMA, Schema.STRING_SCHEMA};
for (Schema promotableSchema: promotableSchemas) {
for (Schema nonPromotableSchema: nonPromotableSchemas) {
Object dummy = new Object();
assertThrows(DataException.class, () -> SchemaProjector.project(promotableSchema, dummy, nonPromotableSchema),
"Cannot promote " + promotableSchema.type() + " to " + nonPromotableSchema.type());
}
}
} |
public HostAndPort getHttpBindAddress() {
return httpBindAddress
.requireBracketsForIPv6()
.withDefaultPort(GRAYLOG_DEFAULT_PORT);
} | @Test
public void testHttpBindAddressWithDefaultPort() throws RepositoryException, ValidationException {
jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_bind_address", "example.com")))
.addConfigurationBean(configuration)
.process();
assertThat(configuration.getHttpBindAddress()).isEqualTo(HostAndPort.fromParts("example.com", 9000));
} |
@Override
public <T> T run(Supplier<T> toRun, Function<Throwable, T> fallback) {
Supplier<T> toRunDecorator = decorator.decorateSupplier(toRun);
try {
return toRunDecorator.get();
}
catch (CallAbortedException e) {
LOGGER.debug("PolarisCircuitBreaker CallAbortedException: {}", e.getMessage());
PolarisCircuitBreakerUtils.reportStatus(consumerAPI, conf, e);
return fallback.apply(e);
}
catch (Exception e) {
return fallback.apply(e);
}
} | @Test
public void run() {
this.contextRunner.run(context -> {
PolarisCircuitBreakerFactory polarisCircuitBreakerFactory = context.getBean(PolarisCircuitBreakerFactory.class);
CircuitBreaker cb = polarisCircuitBreakerFactory.create(SERVICE_CIRCUIT_BREAKER);
PolarisCircuitBreakerConfigBuilder.PolarisCircuitBreakerConfiguration configuration =
polarisCircuitBreakerFactory.configBuilder(SERVICE_CIRCUIT_BREAKER).build();
polarisCircuitBreakerFactory.configureDefault(id -> configuration);
assertThat(cb.run(() -> "foobar")).isEqualTo("foobar");
assertThat((String) cb.run(() -> {
throw new RuntimeException("boom");
}, t -> "fallback")).isEqualTo("fallback");
});
} |
public void asyncAddData(T data, AddDataCallback callback, Object ctx){
if (!batchEnabled){
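// Batching disabled: reject writes on a closing/closed writer, otherwise append the serialized entry directly.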
if (state == State.CLOSING || state == State.CLOSED){
callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
return;
}
ByteBuf byteBuf = dataSerializer.serialize(data);
managedLedger.asyncAddEntry(byteBuf, DisabledBatchCallback.INSTANCE,
AsyncAddArgs.newInstance(callback, ctx, System.currentTimeMillis(), byteBuf));
return;
}
CompletableFuture
.runAsync(
() -> internalAsyncAddData(data, callback, ctx), singleThreadExecutorForWrite)
.exceptionally(e -> {
log.warn("Execute 'internalAsyncAddData' fail", e);
return null;
});
} | @Test
public void testFailWhenAddData() throws Exception {
int batchedWriteMaxSize = 1024;
TxnLogBufferedWriter.DataSerializer dataSerializer =
new WrongDataSerializer(batchedWriteMaxSize, true, true, true);
int writeCount = 100;
var callbackWithCounter = createCallBackWithCounter();
// Create TxnLogBufferedWriter.
var txnLogBufferedWriterContext = createTxnBufferedWriterContextWithMetrics(
dataSerializer, Integer.MAX_VALUE, batchedWriteMaxSize, Integer.MAX_VALUE);
var txnLogBufferedWriter = txnLogBufferedWriterContext.txnLogBufferedWriter;
// Add some data.
for (int i = 0; i < writeCount; i++){
txnLogBufferedWriter.asyncAddData(i, callbackWithCounter.callback, i);
}
// Wait for all data write finish.
Awaitility.await().atMost(2, TimeUnit.SECONDS).until(
() -> {
return callbackWithCounter.failureCounter.get() == writeCount;
}
);
assertEquals(txnLogBufferedWriterContext.mockedManagedLedger.writeCounter.get(), 0);
// cleanup.
releaseTxnLogBufferedWriterContext(txnLogBufferedWriterContext);
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void getGameHighScores() {
GameHighScore[] scores = bot.execute(new GetGameHighScores(chatId, "AgAAAPrwAQCj_Q4D2s-51_8jsuU")).result();
GameHighScoreTest.check(scores);
scores = bot.execute(new GetGameHighScores(chatId, chatId, 8162)).result();
GameHighScoreTest.check(scores);
} |
public void clear() {
for (Section s : sections) {
s.clear();
}
} | @Test
public void testClear() {
ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
.expectedItems(2)
.concurrencyLevel(1)
.autoShrink(true)
.mapIdleFactor(0.25f)
.build();
assertEquals(map.capacity(), 4);
assertTrue(map.put(1, 1, 11, 11));
assertTrue(map.put(2, 2, 22, 22));
assertTrue(map.put(3, 3, 33, 33));
assertEquals(map.capacity(), 8);
map.clear();
assertEquals(map.capacity(), 4);
} |
@Override
public Mono<Void> execute(final ServerWebExchange exchange, final ShenyuPluginChain chain) {
initCacheConfig();
final String pluginName = named();
PluginData pluginData = BaseDataCache.getInstance().obtainPluginData(pluginName);
// early exit
if (Objects.isNull(pluginData) || !pluginData.getEnabled()) {
return chain.execute(exchange);
}
final String path = getRawPath(exchange);
List<SelectorData> selectors = BaseDataCache.getInstance().obtainSelectorData(pluginName);
if (CollectionUtils.isEmpty(selectors)) {
return handleSelectorIfNull(pluginName, exchange, chain);
}
SelectorData selectorData = obtainSelectorDataCacheIfEnabled(path);
// handle Selector
if (Objects.nonNull(selectorData) && StringUtils.isBlank(selectorData.getId())) {
return handleSelectorIfNull(pluginName, exchange, chain);
}
if (Objects.isNull(selectorData)) {
selectorData = trieMatchSelector(exchange, pluginName, path);
if (Objects.isNull(selectorData)) {
selectorData = defaultMatchSelector(exchange, selectors, path);
if (Objects.isNull(selectorData)) {
return handleSelectorIfNull(pluginName, exchange, chain);
}
}
}
printLog(selectorData, pluginName);
if (!selectorData.getContinued()) {
// selector not continued: execute directly without matching rules
return doExecute(exchange, chain, selectorData, defaultRuleData(selectorData));
}
List<RuleData> rules = BaseDataCache.getInstance().obtainRuleData(selectorData.getId());
if (CollectionUtils.isEmpty(rules)) {
return handleRuleIfNull(pluginName, exchange, chain);
}
if (selectorData.getType() == SelectorTypeEnum.FULL_FLOW.getCode()) {
// full flow: use the last rule
RuleData rule = rules.get(rules.size() - 1);
printLog(rule, pluginName);
return doExecute(exchange, chain, selectorData, rule);
}
// LRU map as L1 cache; the cache is enabled by default.
// If the L1 cache misses, fall back to the trie-based L2 cache.
// If the L2 cache also misses, execute the default matching strategy.
RuleData ruleData = obtainRuleDataCacheIfEnabled(path);
if (Objects.nonNull(ruleData) && Objects.isNull(ruleData.getId())) {
return handleRuleIfNull(pluginName, exchange, chain);
}
if (Objects.isNull(ruleData)) {
// L1 cache has no data; try to get it through the trie cache
ruleData = trieMatchRule(exchange, selectorData, path);
// trie cache missed; execute the default strategy
if (Objects.isNull(ruleData)) {
ruleData = defaultMatchRule(exchange, rules, path);
if (Objects.isNull(ruleData)) {
return handleRuleIfNull(pluginName, exchange, chain);
}
}
}
printLog(ruleData, pluginName);
return doExecute(exchange, chain, selectorData, ruleData);
} | @Test
public void executePluginIsNullTest() {
StepVerifier.create(testShenyuPlugin.execute(exchange, shenyuPluginChain)).expectSubscription().verifyComplete();
verify(shenyuPluginChain).execute(exchange);
} |
@POST
@Path("/{connector}/restart")
@Operation(summary = "Restart the specified connector")
public Response restartConnector(final @PathParam("connector") String connector,
final @Context HttpHeaders headers,
final @DefaultValue("false") @QueryParam("includeTasks") @Parameter(description = "Whether to also restart tasks") Boolean includeTasks,
final @DefaultValue("false") @QueryParam("onlyFailed") @Parameter(description = "Whether to only restart failed tasks/connectors")Boolean onlyFailed,
final @Parameter(hidden = true) @QueryParam("forward") Boolean forward) throws Throwable {
RestartRequest restartRequest = new RestartRequest(connector, onlyFailed, includeTasks);
String forwardingPath = "/connectors/" + connector + "/restart";
if (restartRequest.forceRestartConnectorOnly()) {
// For backward compatibility, just restart the connector instance and return OK with no body
FutureCallback<Void> cb = new FutureCallback<>();
herder.restartConnector(connector, cb);
requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, null, forward);
return Response.noContent().build();
}
// In all other cases, submit the async restart request and return connector state
FutureCallback<ConnectorStateInfo> cb = new FutureCallback<>();
herder.restartConnectorAndTasks(restartRequest, cb);
Map<String, String> queryParameters = new HashMap<>();
queryParameters.put("includeTasks", includeTasks.toString());
queryParameters.put("onlyFailed", onlyFailed.toString());
ConnectorStateInfo stateInfo = requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, queryParameters, null, new TypeReference<ConnectorStateInfo>() {
}, new IdentityTranslator<>(), forward);
return Response.accepted().entity(stateInfo).build();
} | @Test
public void testRestartConnectorAndTasksLeaderRedirect() throws Throwable {
RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, true, false);
final ArgumentCaptor<Callback<ConnectorStateInfo>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackNotLeaderException(cb).when(herder)
.restartConnectorAndTasks(eq(restartRequest), cb.capture());
when(restClient.httpRequest(eq(LEADER_URL + "connectors/" + CONNECTOR_NAME + "/restart?forward=true&includeTasks=" + restartRequest.includeTasks() + "&onlyFailed=" + restartRequest.onlyFailed()), eq("POST"), isNull(), isNull(), any()))
.thenReturn(new RestClient.HttpResponse<>(202, new HashMap<>(), null));
Response response = connectorsResource.restartConnector(CONNECTOR_NAME, NULL_HEADERS, restartRequest.includeTasks(), restartRequest.onlyFailed(), null);
assertEquals(Response.Status.ACCEPTED.getStatusCode(), response.getStatus());
} |
void removeSubscription(final Subscription subscription)
{
clientLock.lock();
try
{
if (isTerminating || isClosed)
{
return;
}
if (!subscription.isClosed())
{
ensureNotReentrant();
subscription.internalClose(EXPLICIT_CLOSE_LINGER_NS);
final long registrationId = subscription.registrationId();
if (subscription == resourceByRegIdMap.remove(registrationId))
{
asyncCommandIdSet.add(driverProxy.removeSubscription(registrationId));
}
}
}
finally
{
clientLock.unlock();
}
} | @Test
void shouldThrowAeronExceptionOnAttemptToRemoveWrongResourceUsingSubscriptionRegistrationId()
{
final long registrationId = 42;
conductor.resourceByRegIdMap.put(registrationId, "test resource");
final AeronException exception =
assertThrowsExactly(AeronException.class, () -> conductor.removeSubscription(registrationId));
assertEquals("ERROR - registration id is not a Subscription: String", exception.getMessage());
} |
@Override
public Local create(final Path file) {
return this.create(new UUIDRandomStringService().random(), file);
} | @Test
public void testTemporaryPath() {
final Path file = new Path("/f1/f2/t.txt", EnumSet.of(Path.Type.file));
file.attributes().setDuplicate(true);
file.attributes().setVersionId("1");
final Local local = new DefaultTemporaryFileService().create(file);
assertEquals("t.txt", file.getName());
assertEquals("t.txt", local.getName());
assertEquals("1744270094", local.getParent().getName());
assertEquals("f2", local.getParent().getParent().getName());
assertEquals("f1", local.getParent().getParent().getParent().getName());
} |
public static TableIdentifier toIcebergTableIdentifier(SnowflakeIdentifier identifier) {
Preconditions.checkArgument(
identifier.type() == SnowflakeIdentifier.Type.TABLE,
"SnowflakeIdentifier must be type TABLE, got '%s'",
identifier);
return TableIdentifier.of(
identifier.databaseName(), identifier.schemaName(), identifier.tableName());
} | @Test
public void testToIcebergTableIdentifierWrongType() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(
() ->
NamespaceHelpers.toIcebergTableIdentifier(
SnowflakeIdentifier.ofSchema("DB1", "SCHEMA1")))
.withMessageContaining("must be type TABLE");
} |
public <T> void update(T[][] observations, int iterations, ToIntFunction<T> ordinal) {
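// Map each observation to its ordinal symbol index, then delegate to the int[][] overload.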
update(
Arrays.stream(observations)
.map(sequence -> Arrays.stream(sequence).mapToInt(ordinal).toArray())
.toArray(int[][]::new),
iterations);
} | @Test
public void testUpdate() {
System.out.println("update");
MathEx.setSeed(19650218); // to get repeatable results.
EmpiricalDistribution initial = new EmpiricalDistribution(pi);
EmpiricalDistribution[] transition = new EmpiricalDistribution[a.length];
for (int i = 0; i < transition.length; i++) {
transition[i] = new EmpiricalDistribution(a[i]);
}
EmpiricalDistribution[] emission = new EmpiricalDistribution[b.length];
for (int i = 0; i < emission.length; i++) {
emission[i] = new EmpiricalDistribution(b[i]);
}
int[][] sequences = new int[5000][];
int[][] labels = new int[5000][];
for (int i = 0; i < sequences.length; i++) {
sequences[i] = new int[30 * (MathEx.randomInt(5) + 1)];
labels[i] = new int[sequences[i].length];
int state = (int) initial.rand();
sequences[i][0] = (int) emission[state].rand();
labels[i][0] = state;
for (int j = 1; j < sequences[i].length; j++) {
state = (int) transition[state].rand();
sequences[i][j] = (int) emission[state].rand();
labels[i][j] = state;
}
}
double[] expPi2 = {0.47245901561967496, 0.527540984380325};
double[][] expA2 = {{0.8006, 0.1994}, {0.1986, 0.8014}};
double[][] expB2 = {{0.6008, 0.3992}, {0.3997, 0.6003}};
HMM model = new HMM(pi, Matrix.of(a), Matrix.of(b));
model.update(sequences, 100);
System.out.println(model);
double[] pi2 = model.getInitialStateProbabilities();
for (int i = 0; i < pi.length; i++) {
assertEquals(expPi2[i], pi2[i], 1E-4);
}
Matrix a2 = model.getStateTransitionProbabilities();
for (int i = 0; i < a.length; i++) {
for (int j = 0; j < a[i].length; j++) {
assertEquals(expA2[i][j], a2.get(i, j), 1E-4);
}
}
Matrix b2 = model.getSymbolEmissionProbabilities();
for (int i = 0; i < b.length; i++) {
for (int j = 0; j < b[i].length; j++) {
assertEquals(expB2[i][j], b2.get(i, j), 1E-4);
}
}
} |
@Override
public GroupAssignment assign(
GroupSpec groupSpec,
SubscribedTopicDescriber subscribedTopicDescriber
) throws PartitionAssignorException {
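// Empty groups get an empty assignment; otherwise pick the homogeneous or heterogeneous path based on the subscription type.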
if (groupSpec.memberIds().isEmpty()) {
return new GroupAssignment(Collections.emptyMap());
} else if (groupSpec.subscriptionType() == SubscriptionType.HOMOGENEOUS) {
return assignHomogeneousGroup(groupSpec, subscribedTopicDescriber);
} else {
return assignHeterogeneousGroup(groupSpec, subscribedTopicDescriber);
}
} | @Test
public void testReassignmentWhenMultipleSubscriptionsRemovedAfterInitialAssignmentWithThreeMembersTwoTopics() {
Map<Uuid, TopicMetadata> topicMetadata = new HashMap<>();
topicMetadata.put(topic1Uuid, new TopicMetadata(
topic1Uuid,
topic1Name,
3,
Collections.emptyMap()
));
topicMetadata.put(topic2Uuid, new TopicMetadata(
topic2Uuid,
topic2Name,
3,
Collections.emptyMap()
));
topicMetadata.put(topic3Uuid, new TopicMetadata(
topic3Uuid,
topic3Name,
2,
Collections.emptyMap()
));
// Let initial subscriptions be A -> T1, T2 // B -> T2 // C -> T2, T3
// Change the subscriptions to A -> T1 // B -> T1, T2, T3 // C -> T2
Map<String, MemberSubscriptionAndAssignmentImpl> members = new TreeMap<>();
members.put(memberA, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(topic1Uuid),
new Assignment(mkAssignment(
mkTopicAssignment(topic1Uuid, 0, 1, 2),
mkTopicAssignment(topic2Uuid, 0)
))
));
members.put(memberB, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(topic1Uuid, topic2Uuid, topic3Uuid),
new Assignment(mkAssignment(
mkTopicAssignment(topic2Uuid, 1)
))
));
members.put(memberC, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(topic2Uuid),
new Assignment(mkAssignment(
mkTopicAssignment(topic2Uuid, 2),
mkTopicAssignment(topic3Uuid, 0, 1)
))
));
GroupSpec groupSpec = new GroupSpecImpl(
members,
HETEROGENEOUS,
invertedTargetAssignment(members)
);
SubscribedTopicDescriberImpl subscribedTopicMetadata = new SubscribedTopicDescriberImpl(topicMetadata);
GroupAssignment computedAssignment = assignor.assign(
groupSpec,
subscribedTopicMetadata
);
Map<String, Map<Uuid, Set<Integer>>> expectedAssignment = new HashMap<>();
expectedAssignment.put(memberA, mkAssignment(
mkTopicAssignment(topic1Uuid, 0, 1)
));
expectedAssignment.put(memberB, mkAssignment(
mkTopicAssignment(topic1Uuid, 2),
mkTopicAssignment(topic2Uuid, 0, 1),
mkTopicAssignment(topic3Uuid, 0, 1)
));
expectedAssignment.put(memberC, mkAssignment(
mkTopicAssignment(topic2Uuid, 2)
));
assertAssignment(expectedAssignment, computedAssignment);
} |
public String getQuery() throws Exception {
return getQuery(weatherConfiguration.getLocation());
} | @Test
public void testSingleIdStationQuery() throws Exception {
WeatherConfiguration weatherConfiguration = new WeatherConfiguration();
weatherConfiguration.setIds("52");
weatherConfiguration.setMode(WeatherMode.JSON);
weatherConfiguration.setLanguage(WeatherLanguage.nl);
weatherConfiguration.setAppid(APPID);
weatherConfiguration.setWeatherApi(WeatherApi.Station);
WeatherQuery weatherQuery = new WeatherQuery(weatherConfiguration);
weatherConfiguration.setGeoLocationProvider(geoLocationProvider);
String query = weatherQuery.getQuery();
assertThat(query,
is("http://api.openweathermap.org/data/2.5/station?id=52&lang=nl&APPID=9162755b2efa555823cfe0451d7fff38"));
} |
@Override
public Response toResponse(Throwable e) {
if (log.isDebugEnabled()) {
log.debug("Uncaught exception in REST call: ", e);
} else if (log.isInfoEnabled()) {
log.info("Uncaught exception in REST call: {}", e.getMessage());
}
if (e instanceof NotFoundException) {
return buildResponse(Response.Status.NOT_FOUND, e);
} else if (e instanceof InvalidRequestException) {
return buildResponse(Response.Status.BAD_REQUEST, e);
} else if (e instanceof InvalidTypeIdException) {
return buildResponse(Response.Status.NOT_IMPLEMENTED, e);
} else if (e instanceof JsonMappingException) {
return buildResponse(Response.Status.BAD_REQUEST, e);
} else if (e instanceof ClassNotFoundException) {
return buildResponse(Response.Status.NOT_IMPLEMENTED, e);
} else if (e instanceof SerializationException) {
return buildResponse(Response.Status.BAD_REQUEST, e);
} else if (e instanceof RequestConflictException) {
return buildResponse(Response.Status.CONFLICT, e);
} else {
return buildResponse(Response.Status.INTERNAL_SERVER_ERROR, e);
}
} | @Test
public void testToResponseInvalidTypeIdException() {
RestExceptionMapper mapper = new RestExceptionMapper();
JsonParser parser = null;
JavaType type = null;
Response resp = mapper.toResponse(InvalidTypeIdException.from(parser, "dummy msg", type, "dummy typeId"));
assertEquals(resp.getStatus(), Response.Status.NOT_IMPLEMENTED.getStatusCode());
} |
public static ServiceConfiguration convertFrom(PulsarConfiguration conf, boolean ignoreNonExistMember)
throws RuntimeException {
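// Reflectively copy every @FieldContext-annotated field shared with ServiceConfiguration; fields that do not exist there are kept as properties.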
try {
final ServiceConfiguration convertedConf = ServiceConfiguration.class
.getDeclaredConstructor().newInstance();
Field[] confFields = conf.getClass().getDeclaredFields();
Properties sourceProperties = conf.getProperties();
Properties targetProperties = convertedConf.getProperties();
Arrays.stream(confFields).forEach(confField -> {
try {
confField.setAccessible(true);
Field convertedConfField = ServiceConfiguration.class.getDeclaredField(confField.getName());
if (!Modifier.isStatic(convertedConfField.getModifiers())
&& convertedConfField.getDeclaredAnnotation(FieldContext.class) != null) {
convertedConfField.setAccessible(true);
convertedConfField.set(convertedConf, confField.get(conf));
}
} catch (NoSuchFieldException e) {
if (!ignoreNonExistMember) {
throw new IllegalArgumentException(
"Exception caused while converting configuration: " + e.getMessage());
}
// add unknown fields to properties
try {
String propertyName = confField.getName();
if (!sourceProperties.containsKey(propertyName) && confField.get(conf) != null) {
targetProperties.put(propertyName, confField.get(conf));
}
} catch (Exception ignoreException) {
// should not happen
}
} catch (IllegalAccessException e) {
throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage());
}
});
// Copy the remaining properties into the new config
targetProperties.putAll(sourceProperties);
return convertedConf;
} catch (InstantiationException | IllegalAccessException
| InvocationTargetException | NoSuchMethodException e) {
throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage());
}
} | @Test
public void testConfigurationConverting() {
MockConfiguration mockConfiguration = new MockConfiguration();
ServiceConfiguration serviceConfiguration = PulsarConfigurationLoader.convertFrom(mockConfiguration);
// check that the fields were converted correctly
assertEquals(serviceConfiguration.getMetadataStoreUrl(), "zk:localhost:2181");
assertEquals(serviceConfiguration.getConfigurationMetadataStoreUrl(), "zk:localhost:2184");
assertEquals(serviceConfiguration.getBrokerServicePort().get(), Integer.valueOf(7650));
assertEquals(serviceConfiguration.getBrokerServicePortTls().get(), Integer.valueOf((7651)));
assertEquals(serviceConfiguration.getWebServicePort().get(), Integer.valueOf((9080)));
assertEquals(serviceConfiguration.getWebServicePortTls().get(), Integer.valueOf((9443)));
} |
private IcebergUUIDObjectInspector() {
super(TypeInfoFactory.stringTypeInfo);
} | @Test
public void testIcebergUUIDObjectInspector() {
IcebergUUIDObjectInspector oi = IcebergUUIDObjectInspector.get();
assertThat(oi.getCategory()).isEqualTo(ObjectInspector.Category.PRIMITIVE);
assertThat(oi.getPrimitiveCategory())
.isEqualTo(PrimitiveObjectInspector.PrimitiveCategory.STRING);
assertThat(oi.getTypeInfo()).isEqualTo(TypeInfoFactory.stringTypeInfo);
assertThat(oi.getTypeName()).isEqualTo(TypeInfoFactory.stringTypeInfo.getTypeName());
assertThat(oi.getJavaPrimitiveClass()).isEqualTo(String.class);
assertThat(oi.getPrimitiveWritableClass()).isEqualTo(Text.class);
assertThat(oi.copyObject(null)).isNull();
assertThat(oi.getPrimitiveJavaObject(null)).isNull();
assertThat(oi.getPrimitiveWritableObject(null)).isNull();
assertThat(oi.convert(null)).isNull();
UUID uuid = UUID.randomUUID();
String uuidStr = uuid.toString();
Text text = new Text(uuidStr);
assertThat(oi.getPrimitiveJavaObject(text)).isEqualTo(uuidStr);
assertThat(oi.getPrimitiveWritableObject(uuidStr)).isEqualTo(text);
assertThat(oi.convert(uuidStr)).isEqualTo(uuid);
Text copy = (Text) oi.copyObject(text);
assertThat(copy).isEqualTo(text);
assertThat(copy).isNotSameAs(text);
assertThat(oi.preferWritable()).isFalse();
} |
public List<String> split(String in) {
final StringBuilder result = new StringBuilder();
final char[] chars = in.toCharArray();
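// Pad operator characters with spaces (pairing two adjacent non-parenthesis operators) so the split isolates them as tokens.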
for (int i = 0; i < chars.length; i++) {
final char c = chars[i];
if (CHAR_OPERATORS.contains(String.valueOf(c))) {
if (i < chars.length - 2 && CHAR_OPERATORS.contains(String.valueOf(chars[i + 1]))
&& !("(".equals(String.valueOf(chars[i + 1])) || ")".equals(String.valueOf(chars[i + 1])))) {
result.append(" ").append(c).append(chars[i + 1]).append(" ");
i++;
} else {
result.append(" ").append(c).append(" ");
}
} else {
result.append(c);
}
}
final String[] tokens = result.toString().split(SPLIT_EXPRESSION);
final List<String> list = new ArrayList<>();
for (int i = 0; i < tokens.length; i++) {
tokens[i] = tokens[i].trim();
if (!tokens[i].equals("")) {
list.add(tokens[i]);
}
}
return list;
} | @Test
public void split3() {
List<String> tokens = parser.split("((a and b))");
assertEquals(Arrays.asList("(", "(", "a", "and", "b", ")", ")"), tokens);
} |
@SuppressWarnings("unchecked")
public static <T> T getObjectWithKey(String key, RequestContext requestContext, Class<T> clazz)
{
final Object object = requestContext.getLocalAttr(key);
return (clazz.isInstance(object)) ? (T) object : null;
} | @Test
public void testGetObjectWithKey()
{
_requestContext.putLocalAttr(KEY, VALUE);
Assert.assertEquals(RequestContextUtil.getObjectWithKey(KEY, _requestContext, String.class), VALUE);
} |
public String filterNamespaceName(String namespaceName) {
if (namespaceName.toLowerCase().endsWith(".properties")) {
int dotIndex = namespaceName.lastIndexOf(".");
return namespaceName.substring(0, dotIndex);
}
return namespaceName;
} | @Test
public void testFilterNamespaceNameWithMultiplePropertiesSuffix() throws Exception {
String someName = "a.properties.properties";
assertEquals("a.properties", namespaceUtil.filterNamespaceName(someName));
} |
public String filterNamespaceName(String namespaceName) {
if (namespaceName.toLowerCase().endsWith(".properties")) {
int dotIndex = namespaceName.lastIndexOf(".");
return namespaceName.substring(0, dotIndex);
}
return namespaceName;
} | @Test
public void testFilterNamespaceNameUnchanged() throws Exception {
String someName = "a.xml";
assertEquals(someName, namespaceUtil.filterNamespaceName(someName));
} |
public <T> T parse(String input, Class<T> cls) {
return readFlow(input, cls, type(cls));
} | @Test
void noDefault() throws IOException {
Flow flow = this.parse("flows/valids/parallel.yaml");
String s = mapper.writeValueAsString(flow);
assertThat(s, not(containsString("\"-c\"")));
assertThat(s, containsString("\"deleted\":false"));
} |
public DataSource<T> loadDataSource(Path csvPath, String responseName) throws IOException {
return loadDataSource(csvPath, Collections.singleton(responseName));
} | @Test
public void testLoadMultiOutput() throws IOException {
URL path = CSVLoaderTest.class.getResource("/org/tribuo/data/csv/test-multioutput.csv");
Set<String> responses = new HashSet<>(Arrays.asList("R1", "R2"));
CSVLoader<MockMultiOutput> loader = new CSVLoader<>(new MockMultiOutputFactory());
DataSource<MockMultiOutput> source = loader.loadDataSource(path, responses);
MutableDataset<MockMultiOutput> data = new MutableDataset<>(source);
assertEquals(6, data.size());
Example<MockMultiOutput> example = data.getExample(0);
MockMultiOutput y = example.getOutput();
assertTrue(y.contains("R1"));
assertFalse(y.contains("R2"));
assertEquals(1.0, example.lookup("A").getValue(), 1e-7);
//
// Row #1: R1=True, R2=True
assertTrue(data.getExample(1).getOutput().contains("R1"));
assertTrue(data.getExample(1).getOutput().contains("R2"));
//
// Row #2: R1=False and R2=False.
// In this case, the labelSet is empty and the labelString is the empty string.
assertEquals(0, data.getExample(2).getOutput().getLabelSet().size());
assertEquals("", data.getExample(2).getOutput().getLabelString());
assertTrue(data.getExample(2).validateExample());
URL singlePath = CSVLoaderTest.class.getResource("/org/tribuo/data/csv/test-multioutput-singlecolumn.csv");
DataSource<MockMultiOutput> singleSource = loader.loadDataSource(singlePath, "Label");
MutableDataset<MockMultiOutput> singleData = new MutableDataset<>(singleSource);
assertEquals(6, singleData.size());
for (int i = 0; i < 6; i++) {
assertEquals(data.getExample(i).getOutput().getLabelString(), singleData.getExample(i).getOutput().getLabelString());
}
} |
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
final String name = builder.newProcessorName(TRANSFORM_NAME);
return flatTransform(transformerSupplier, Named.as(name), stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNameOnFlatTransformWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatTransform(flatTransformerSupplier, Named.as("flatTransform"), (String) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name"));
} |
@Override
public int hashCode() {
return name.hashCode();
} | @Test
public void test_hash() {
assertEquals(new Option<>("foo", String.class).hashCode(), new Option<>("foo", String.class).hashCode());
// hash is based on the name only
assertEquals(new Option<>("foo", String.class).hashCode(), new Option<>("foo", Integer.class).hashCode());
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testEthGetLogsWithNumericBlockRange() throws Exception {
web3j.ethGetLogs(
new EthFilter(
DefaultBlockParameter.valueOf(Numeric.toBigInt("0xe8")),
DefaultBlockParameter.valueOf("latest"),
""))
.send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"eth_getLogs\","
+ "\"params\":[{\"topics\":[],\"fromBlock\":\"0xe8\","
+ "\"toBlock\":\"latest\",\"address\":[\"\"]}],\"id\":1}");
} |
private void performChecking(Context context, ResourceWrapper r) throws BlockException {
// If user has set a degrade rule for the resource, the default rule will not be activated
if (DegradeRuleManager.hasConfig(r.getName())) {
return;
}
List<CircuitBreaker> circuitBreakers = DefaultCircuitBreakerRuleManager.getDefaultCircuitBreakers(r.getName());
if (circuitBreakers == null || circuitBreakers.isEmpty()) {
return;
}
for (CircuitBreaker cb : circuitBreakers) {
if (!cb.tryPass(context)) {
throw new DegradeException(cb.getRule().getLimitApp(), cb.getRule());
}
}
} | @Test
public void testPerformChecking() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
DefaultCircuitBreakerSlot defaultCircuitBreakerSlot = mock(DefaultCircuitBreakerSlot.class);
Context context = mock(Context.class);
String resA = "resA";
Method pCMethod = DefaultCircuitBreakerSlot.class.getDeclaredMethod("performChecking", Context.class, ResourceWrapper.class);
pCMethod.setAccessible(true);
pCMethod.invoke(defaultCircuitBreakerSlot, context, new StringResourceWrapper(resA, EntryType.IN));
} |
public CruiseConfig deserializeConfig(String content) throws Exception {
String md5 = md5Hex(content);
Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
LOGGER.debug("[Config Save] Updating config cache with new XML");
CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
setMd5(configForEdit, md5);
configForEdit.setOrigins(new FileConfigOrigin());
return configForEdit;
} | @Test
void shouldLoadTasksWithOnCancel() throws Exception {
CruiseConfig config = xmlLoader.deserializeConfig(TASKS_WITH_ON_CANCEL);
JobConfig job = config.jobConfigByName("pipeline1", "mingle", "cardlist", true);
Task task = job.tasks().findFirstByType(AntTask.class);
assertThat(task.hasCancelTask()).isTrue();
assertThat(task.cancelTask()).isEqualTo(new ExecTask("kill.rb", "", "utils"));
Task task2 = job.tasks().findFirstByType(ExecTask.class);
assertThat(task2.hasCancelTask()).isFalse();
} |
public static String getTmpSegmentNamePrefix(String segmentName) {
return segmentName + TMP;
} | @Test
public void testGenerateSegmentFilePrefix() {
String segmentName = "segment";
assertEquals(SegmentCompletionUtils.getTmpSegmentNamePrefix(segmentName), "segment.tmp.");
} |
private Mono<ServerResponse> listNotificationPreferences(ServerRequest request) {
var username = request.pathVariable("username");
return listReasonTypeNotifierMatrix(username)
.flatMap(matrix -> ServerResponse.ok().bodyValue(matrix));
} | @Test
void listNotificationPreferences() {
when(client.list(eq(ReasonType.class), eq(null), any())).thenReturn(Flux.empty());
when(client.list(eq(NotifierDescriptor.class), eq(null), any())).thenReturn(Flux.empty());
when(userNotificationPreferenceService.getByUser(any())).thenReturn(Mono.empty());
webTestClient.post()
.uri("/userspaces/{username}/notification-preferences", "guqing")
.exchange()
.expectStatus()
.isOk();
} |
@VisibleForTesting
static int getIdForInsertionRequest(EditorInfo info) {
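// Derive a stable request id from the editor's field id and package name hash; 0 when there is no editor info.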
return info == null
? 0
: Arrays.hashCode(new int[] {info.fieldId, info.packageName.hashCode()});
} | @Test
public void testCallsRemoteInsertionWithCorrectArguments() {
simulateFinishInputFlow();
EditorInfo info = createEditorInfoTextWithSuggestionsForSetUp();
EditorInfoCompat.setContentMimeTypes(info, new String[] {"image/gif"});
simulateOnStartInputFlow(false, info);
Mockito.verify(mRemoteInsertion, Mockito.never())
.startMediaRequest(Mockito.any(), Mockito.anyInt(), Mockito.any());
mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.IMAGE_MEDIA_POPUP);
Mockito.verify(mRemoteInsertion)
.startMediaRequest(
Mockito.eq(new String[] {"image/gif"}),
Mockito.eq(AnySoftKeyboardMediaInsertion.getIdForInsertionRequest(info)),
Mockito.any());
} |
@Override
public Class<? extends ModuleDefine> module() {
return AlarmModule.class;
} | @Test
public void module() {
assertEquals(AlarmModule.class, moduleProvider.module());
} |
boolean fillIssueInFileLocation(NewIssueLocation newIssueLocation, Location location) {
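// Map the SARIF location onto the issue: resolve the file URI to an InputFile and, if found, set the text range.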
PhysicalLocation physicalLocation = location.getPhysicalLocation();
String fileUri = getFileUriOrThrow(location);
Optional<InputFile> file = findFile(fileUri);
if (file.isEmpty()) {
return false;
}
InputFile inputFile = file.get();
newIssueLocation.on(inputFile);
regionMapper.mapRegion(physicalLocation.getRegion(), inputFile).ifPresent(newIssueLocation::at);
return true;
} | @Test
public void fillIssueInFileLocation_ifNullArtifactLocation_throws() {
when(location.getPhysicalLocation().getArtifactLocation()).thenReturn(null);
assertThatIllegalArgumentException()
.isThrownBy(() -> locationMapper.fillIssueInFileLocation(newIssueLocation, location))
.withMessage(EXPECTED_MESSAGE_URI_MISSING);
} |
List<MethodSpec> buildFunctions(AbiDefinition functionDefinition)
throws ClassNotFoundException {
return buildFunctions(functionDefinition, true);
} | @Test
public void testBuildFunctionTransactionAndCall() throws Exception {
AbiDefinition functionDefinition =
new AbiDefinition(
false,
Arrays.asList(new NamedType("param", "uint8")),
"functionName",
Arrays.asList(new NamedType("result", "int8")),
"type",
false);
List<MethodSpec> methodSpecs =
solidityFunctionWrapperBoth.buildFunctions(functionDefinition);
String expectedSend =
"public org.web3j.protocol.core.RemoteFunctionCall<org.web3j.protocol.core.methods.response.TransactionReceipt> send_functionName(\n"
+ " java.math.BigInteger param) {\n"
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " FUNC_FUNCTIONNAME, \n"
+ " java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param)), \n"
+ " java.util.Collections.<org.web3j.abi.TypeReference<?>>emptyList());\n"
+ " return executeRemoteCallTransaction(function);\n"
+ "}\n";
String expectedCall =
"public org.web3j.protocol.core.RemoteFunctionCall<java.math.BigInteger> call_functionName(\n"
+ " java.math.BigInteger param) {\n"
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
+ " java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param)), \n"
+ " java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.generated.Int8>() {}));\n"
+ " return executeRemoteCallSingleValueReturn(function, java.math.BigInteger.class);\n"
+ "}\n";
assertEquals(2, methodSpecs.size());
assertEquals(expectedSend, methodSpecs.get(0).toString());
assertEquals(expectedCall, methodSpecs.get(1).toString());
} |
@Override
public String processType() {
return TYPE;
} | @Test
void processType() {
assertEquals(DistroClientDataProcessor.TYPE, distroClientDataProcessor.processType());
} |
public static void deletePath(String path, BrokerDesc brokerDesc) throws UserException {
TNetworkAddress address = getAddress(brokerDesc);
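// Send the delete RPC through the pooled broker client; a non-OK status becomes a UserException.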
try {
TBrokerDeletePathRequest tDeletePathRequest = new TBrokerDeletePathRequest(
TBrokerVersion.VERSION_ONE, path, brokerDesc.getProperties());
TBrokerOperationStatus tOperationStatus = ThriftRPCRequestExecutor.call(
ThriftConnectionPool.brokerPool,
address,
client -> client.deletePath(tDeletePathRequest));
if (tOperationStatus.getStatusCode() != TBrokerOperationStatusCode.OK) {
throw new UserException("Broker delete path failed. path=" + path + ", broker=" + address
+ ", msg=" + tOperationStatus.getMessage());
}
} catch (TException e) {
LOG.warn("Broker read path exception, path={}, address={}, exception={}", path, address, e);
throw new UserException("Broker read path exception. path=" + path + ",broker=" + address);
}
} | @Test
public void testDeletePath(@Mocked TFileBrokerService.Client client, @Mocked GlobalStateMgr globalStateMgr,
@Injectable BrokerMgr brokerMgr) throws AnalysisException, TException {
// delete response
TBrokerOperationStatus status = new TBrokerOperationStatus();
status.statusCode = TBrokerOperationStatusCode.OK;
FsBroker fsBroker = new FsBroker("127.0.0.1", 99999);
new MockUp<ThriftConnectionPool<TFileBrokerService.Client>>() {
@Mock
public TFileBrokerService.Client borrowObject(TNetworkAddress address, int timeoutMs) throws Exception {
return client;
}
@Mock
public void returnObject(TNetworkAddress address, TFileBrokerService.Client object) {
return;
}
@Mock
public void invalidateObject(TNetworkAddress address, TFileBrokerService.Client object) {
return;
}
};
try (MockedStatic<ThriftRPCRequestExecutor> thriftConnectionPoolMockedStatic =
Mockito.mockStatic(ThriftRPCRequestExecutor.class)) {
thriftConnectionPoolMockedStatic.when(()
-> ThriftRPCRequestExecutor.call(Mockito.any(), Mockito.any(), Mockito.any()))
.thenReturn(status);
BrokerDesc brokerDesc = new BrokerDesc("broker0", Maps.newHashMap());
byte[] configs = "{'label': 'label0'}".getBytes(StandardCharsets.UTF_8);
String destFilePath = "hdfs://127.0.0.1:10000/starrocks/jobs/1/label6/9/configs/jobconfig.json";
try {
BrokerUtil.deletePath("hdfs://127.0.0.1:10000/starrocks/jobs/1/label6/9", brokerDesc);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
} |
@Deprecated
public static Schema parse(File file) throws IOException {
return new Parser().parse(file);
} | @Test
void testParserNullValidate() {
new Schema.Parser((NameValidator) null).parse("{\"type\":\"record\",\"name\":\"\",\"fields\":[]}"); // Empty name
} |
@Override
public Object invokeReflectively(EvaluationContext ctx, Object[] params) {
// use reflection to call the appropriate invoke method
try {
boolean isNamedParams = params.length > 0 && params[0] instanceof NamedParameter;
if (!isCustomFunction()) {
CandidateMethod cm = getCandidateMethod(ctx, params, isNamedParams);
if (cm != null) {
Object result = cm.actualMethod.invoke(this, cm.actualParams);
if (result instanceof Either) {
@SuppressWarnings("unchecked")
Either<FEELEvent, Object> either = (Either<FEELEvent, Object>) result;
return getEitherResult(ctx,
either,
() -> Stream.of(cm.actualMethod.getParameters()).map(p -> p.getAnnotation(ParameterName.class).value()).collect(Collectors.toList()),
() -> Arrays.asList(cm.actualParams));
}
return result;
} else {
// CandidateMethod cm can also be null when reflection fails on platforms that do not
// support getClass().getDeclaredMethods()
String ps = getClass().toString();
logger.error("Unable to find function '" + getName() + "( " + ps.substring(1, ps.length() - 1) +
" )'");
ctx.notifyEvt(() -> new FEELEventBase(Severity.ERROR, "Unable to find function '" + getName() +
"( " + ps.substring(1, ps.length() - 1) + " )'", null));
}
} else {
if (isNamedParams) {
// This is inherently fragile: it assumes that if the first parameter is a NamedParameter
// and the function is a CustomFunction, then all parameters are NamedParameters
NamedParameter[] namedParams =
Arrays.stream(params).map(NamedParameter.class::cast).toArray(NamedParameter[]::new);
params = BaseFEELFunctionHelper.rearrangeParameters(namedParams,
this.getParameters().get(0).stream().map(Param::getName).collect(Collectors.toList()));
}
Object result = invoke(ctx, params);
if (result instanceof Either) {
@SuppressWarnings("unchecked")
Either<FEELEvent, Object> either = (Either<FEELEvent, Object>) result;
final Object[] usedParams = params;
Object eitherResult = getEitherResult(ctx,
either,
() -> IntStream.range(0, usedParams.length).mapToObj(i -> "arg"
+ i).collect(Collectors.toList()),
() -> Arrays.asList(usedParams));
return BaseFEELFunctionHelper.normalizeResult(eitherResult);
}
return BaseFEELFunctionHelper.normalizeResult(result);
}
} catch (Exception e) {
logger.error("Error trying to call function " + getName() + ".", e);
ctx.notifyEvt(() -> new FEELEventBase(Severity.ERROR, "Error trying to call function " + getName() + ".",
e));
}
return null;
} | @Test
void invokeReflectiveCustomFunction() {
List<FEELFunction.Param> parameters = List.of(new FEELFunction.Param("foo", BuiltInType.UNKNOWN),
new FEELFunction.Param("person's age", BuiltInType.UNKNOWN));
BaseNode left = new InfixOpNode(InfixOperator.EQ,
new NameRefNode(BuiltInType.UNKNOWN, "foo"),
new NullNode(""),
"foo = null");
BaseNode right = new InfixOpNode(InfixOperator.LT,
new NameRefNode(BuiltInType.UNKNOWN, "person's age"),
new NumberNode(BigDecimal.valueOf(18), "18"),
"person's age < 18");
BaseNode body = new InfixOpNode(InfixOperator.AND, left, right, "foo = null and person's age < 18");
BaseFEELFunction toTest = new CustomFEELFunction("<anonymous>",
parameters,
body,
ctx);
Object[] params = {new NamedParameter("foo", null),
new NamedParameter("person's age", 16)};
Object retrieved = toTest.invokeReflectively(ctx, params);
assertNotNull(retrieved);
assertInstanceOf(Boolean.class, retrieved);
assertTrue((Boolean) retrieved);
params = new Object[]{new NamedParameter("foo", null),
new NamedParameter("person's age", 19)};
retrieved = toTest.invokeReflectively(ctx, params);
assertNotNull(retrieved);
assertInstanceOf(Boolean.class, retrieved);
assertFalse((Boolean) retrieved);
} |
public EnumSet<RepositoryFilePermission> processCheckboxes() {
return processCheckboxes( false );
} | @Test
public void testProcessCheckboxesManageCheckedEnableAppropriateTrue() {
when( readCheckbox.isChecked() ).thenReturn( false );
when( writeCheckbox.isChecked() ).thenReturn( false );
when( deleteCheckbox.isChecked() ).thenReturn( false );
when( manageCheckbox.isChecked() ).thenReturn( true );
assertEquals( EnumSet.of( RepositoryFilePermission.READ, RepositoryFilePermission.WRITE,
RepositoryFilePermission.DELETE, RepositoryFilePermission.ACL_MANAGEMENT ), permissionsCheckboxHandler
.processCheckboxes( true ) );
verify( readCheckbox, times( 1 ) ).setDisabled( true );
verify( writeCheckbox, times( 1 ) ).setDisabled( true );
verify( deleteCheckbox, times( 1 ) ).setDisabled( true );
verify( manageCheckbox, times( 1 ) ).setDisabled( false );
} |
@Override
public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc,
boolean addFieldName, boolean addCr ) {
String retval = "";
String fieldname = v.getName();
int length = v.getLength();
int precision = v.getPrecision();
if ( addFieldName ) {
retval += fieldname + " ";
}
int type = v.getType();
switch ( type ) {
case ValueMetaInterface.TYPE_TIMESTAMP:
case ValueMetaInterface.TYPE_DATE:
retval += "TIMESTAMP";
break;
case ValueMetaInterface.TYPE_BOOLEAN:
retval += "CHAR(1)";
break;
case ValueMetaInterface.TYPE_NUMBER:
case ValueMetaInterface.TYPE_INTEGER:
case ValueMetaInterface.TYPE_BIGNUMBER:
if ( length <= 0 && precision <= 0 ) {
retval += "DOUBLE";
} else {
retval += "DECIMAL";
if ( length > 0 ) {
retval += "(" + length;
if ( precision > 0 ) {
retval += ", " + precision;
}
retval += ")";
}
}
break;
case ValueMetaInterface.TYPE_STRING:
if ( length > getMaxVARCHARLength() || length >= DatabaseMeta.CLOB_LENGTH ) {
retval += "CLOB";
} else {
retval += "VARCHAR";
if ( length > 0 ) {
retval += "(" + length;
} else {
retval += "("; // Maybe use some default DB String length?
}
retval += ")";
}
break;
default:
retval += " UNKNOWN";
break;
}
if ( addCr ) {
retval += Const.CR;
}
return retval;
} | @Test
public void testGetFieldDefinition() {
assertEquals( "FOO TIMESTAMP",
nativeMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, true, false ) );
assertEquals( "TIMESTAMP",
nativeMeta.getFieldDefinition( new ValueMetaTimestamp( "FOO" ), "", "", false, false, false ) );
assertEquals( "CHAR(1)",
nativeMeta.getFieldDefinition( new ValueMetaBoolean( "FOO" ), "", "", false, false, false ) );
assertEquals( "DOUBLE",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO" ), "", "", false, false, false ) );
assertEquals( "DECIMAL(5)",
nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 5, 0 ), "", "", false, false, false ) );
assertEquals( "DECIMAL(5, 3)",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 5, 3 ), "", "", false, false, false ) );
assertEquals( "DECIMAL",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 0, 3 ), "", "", false, false, false ) ); // This is a bug
assertEquals( "CLOB",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", DatabaseMeta.CLOB_LENGTH + 1, 0 ), "", "", false, false, false ) );
assertEquals( String.format( "VARCHAR(%d)", ( nativeMeta.getMaxVARCHARLength() - 1 ) ),
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", nativeMeta.getMaxVARCHARLength() - 1, 0 ), "", "", false, false, false ) );
assertEquals( "CLOB",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", nativeMeta.getMaxVARCHARLength() + 1, 0 ), "", "", false, false, false ) );
assertEquals( "CLOB",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", DatabaseMeta.CLOB_LENGTH - 1, 0 ), "", "", false, false, false ) );
assertEquals( " UNKNOWN",
nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, false ) );
assertEquals( " UNKNOWN" + System.getProperty( "line.separator" ),
nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, true ) );
} |
@Override
public void process() {
JMeterContext context = getThreadContext();
Sampler sam = context.getCurrentSampler();
SampleResult res = context.getPreviousResult();
HTTPSamplerBase sampler;
HTTPSampleResult result;
if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
log.info("Can't apply HTML Link Parser when the previous" + " sampler run is not an HTTP Request.");
return;
} else {
sampler = (HTTPSamplerBase) sam;
result = (HTTPSampleResult) res;
}
List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
String responseText = result.getResponseDataAsString();
int index = responseText.indexOf('<'); // $NON-NLS-1$
if (index == -1) {
index = 0;
}
if (log.isDebugEnabled()) {
log.debug("Check for matches against: "+sampler.toString());
}
Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
addAnchorUrls(html, result, sampler, potentialLinks);
addFormUrls(html, result, sampler, potentialLinks);
addFramesetUrls(html, result, sampler, potentialLinks);
if (!potentialLinks.isEmpty()) {
HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
if (log.isDebugEnabled()) {
log.debug("Selected: "+url.toString());
}
sampler.setDomain(url.getDomain());
sampler.setPath(url.getPath());
if (url.getMethod().equals(HTTPConstants.POST)) {
for (JMeterProperty jMeterProperty : sampler.getArguments()) {
Argument arg = (Argument) jMeterProperty.getObjectValue();
modifyArgument(arg, url.getArguments());
}
} else {
sampler.setArguments(url.getArguments());
}
sampler.setProtocol(url.getProtocol());
} else {
log.debug("No matches found");
}
} | @Test
public void testSimpleParse2() throws Exception {
HTTPSamplerBase config = makeUrlConfig("/index\\.html");
HTTPSamplerBase context = makeContext("http://www.apache.org/subdir/previous.html");
String responseText = "<html><head><title>Test page</title></head><body>"
+ "<a href=\"/index.html\">Goto index page</a>" + "hfdfjiudfjdfjkjfkdjf"
+ "<b>bold text</b><a href=lowerdir/index.html>lower</a>" + "</body></html>";
HTTPSampleResult result = new HTTPSampleResult();
result.setResponseData(responseText, null);
result.setSampleLabel(context.toString());
result.setURL(context.getUrl());
jmctx.setCurrentSampler(context);
jmctx.setCurrentSampler(config);
jmctx.setPreviousResult(result);
parser.process();
String newUrl = config.getUrl().toString();
Assertions.assertTrue("http://www.apache.org/index.html".equals(newUrl)
|| "http://www.apache.org/subdir/lowerdir/index.html".equals(newUrl));
} |
@Override
public void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback, InternalRequestSignature requestSignature) {
log.trace("Submitting put task configuration request {}", connName);
if (requestNotSignedProperly(requestSignature, callback)) {
return;
}
addRequest(
() -> {
if (!isLeader())
callback.onCompletion(new NotLeaderException("Only the leader may write task configurations.", leaderUrl()), null);
else if (!configState.contains(connName))
callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
else {
writeTaskConfigs(connName, configs);
callback.onCompletion(null, null);
}
return null;
},
forwardErrorAndTickThreadStages(callback)
);
} | @Test
public void testPutTaskConfigsSignatureNotRequiredV0() {
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
Callback<Void> taskConfigCb = mock(Callback.class);
List<String> stages = expectRecordStages(taskConfigCb);
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, null);
// Expect a wakeup call after the request to write task configs is added to the herder's request queue
verify(member).wakeup();
verifyNoMoreInteractions(member, taskConfigCb);
assertEquals(
singletonList("awaiting startup"),
stages
);
} |
public void connect() throws ConnectException {
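// Convenience overload: connect with no-op callbacks (their shapes suggest message, error, and close handlers).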
connect(s -> {}, t -> {}, () -> {});
} | @Test
public void testReconnectIfFirstConnectionFailed() throws Exception {
when(webSocketClient.connectBlocking()).thenReturn(false);
assertThrows(
ConnectException.class,
() -> {
service.connect();
});
service.connect();
// reconnectBlocking() should be called if the first attempt failed; calling
// connectBlocking() a second time on a real webSocketClient would throw
// IllegalStateException
verify(webSocketClient, atMostOnce()).reconnectBlocking();
} |
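
// Hedged sketch (not the project's actual implementation) of the retry behaviour the
// test above verifies: a failed first connectBlocking() is followed by
// reconnectBlocking() on the next attempt, because calling connectBlocking() twice on
// a real org.java_websocket.client.WebSocketClient throws IllegalStateException.
// The class name, field names, and the use of java.net.ConnectException are assumptions.
import java.net.ConnectException;
import org.java_websocket.client.WebSocketClient;

class ReconnectingConnector {
    private final WebSocketClient webSocketClient;
    private volatile boolean attemptedBefore;

    ReconnectingConnector(WebSocketClient client) {
        this.webSocketClient = client;
    }

    void connect() throws ConnectException, InterruptedException {
        // first attempt uses connectBlocking(); later attempts must use reconnectBlocking()
        boolean ok = attemptedBefore
                ? webSocketClient.reconnectBlocking()
                : webSocketClient.connectBlocking();
        attemptedBefore = true;
        if (!ok) {
            throw new ConnectException("WebSocket handshake failed");
        }
    }
}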
@Override
public Publisher<Exchange> toStream(String name, Object data) {
return doRequest(
name,
ReactiveStreamsHelper.convertToExchange(context, data));
} | @Test
public void testToStream() throws Exception {
context.addRoutes(new RouteBuilder() {
public void configure() {
from("reactive-streams:reactive").setBody().constant("123");
}
});
context.start();
Publisher<Exchange> publisher = crs.toStream("reactive", new DefaultExchange(context));
Exchange res = Flowable.fromPublisher(publisher).blockingFirst();
assertNotNull(res);
String content = res.getIn().getBody(String.class);
assertNotNull(content);
assertEquals("123", content);
} |
public static Set<Metric> mapFromDataProvider(TelemetryDataProvider<?> provider) {
switch (provider.getDimension()) {
        case INSTALLATION -> {
            return mapInstallationMetric(provider);
        }
        case PROJECT -> {
            return mapProjectMetric(provider);
        }
        case USER -> {
            return mapUserMetric(provider);
        }
        case LANGUAGE -> {
            return mapLanguageMetric(provider);
        }
        default -> throw new IllegalArgumentException("Dimension: " + provider.getDimension() + " not yet implemented.");
    }
} | @Test
void mapFromDataProvider_whenAdhocInstallationProviderWithoutValue_shouldNotMapToMetric() {
TestTelemetryAdhocBean provider = new TestTelemetryAdhocBean(Dimension.INSTALLATION, false); // Force the value so that nothing is returned
Set<Metric> metrics = TelemetryMetricsMapper.mapFromDataProvider(provider);
List<InstallationMetric> userMetrics = retrieveList(metrics);
assertThat(userMetrics).isEmpty();
} |
public static void validateApplicationConfig(ApplicationConfig config) {
if (config == null) {
return;
}
if (!config.isValid()) {
throw new IllegalStateException("No application config found or it's not a valid config! "
+ "Please add <dubbo:application name=\"...\" /> to your spring config.");
}
// backward compatibility
ScopeModel scopeModel = ScopeModelUtil.getOrDefaultApplicationModel(config.getScopeModel());
PropertiesConfiguration configuration = scopeModel.modelEnvironment().getPropertiesConfiguration();
String wait = configuration.getProperty(SHUTDOWN_WAIT_KEY);
if (wait != null && wait.trim().length() > 0) {
System.setProperty(SHUTDOWN_WAIT_KEY, wait.trim());
} else {
wait = configuration.getProperty(SHUTDOWN_WAIT_SECONDS_KEY);
if (wait != null && wait.trim().length() > 0) {
System.setProperty(SHUTDOWN_WAIT_SECONDS_KEY, wait.trim());
}
}
checkName(NAME, config.getName());
checkMultiName(OWNER, config.getOwner());
checkName(ORGANIZATION, config.getOrganization());
checkName(ARCHITECTURE, config.getArchitecture());
checkName(ENVIRONMENT, config.getEnvironment());
checkParameterName(config.getParameters());
checkQosDependency(config);
} | @Test
void testValidateApplicationConfig() throws Exception {
try (MockedStatic<ConfigValidationUtils> mockedStatic = Mockito.mockStatic(ConfigValidationUtils.class)) {
mockedStatic
.when(() -> ConfigValidationUtils.validateApplicationConfig(any()))
.thenCallRealMethod();
ApplicationConfig config = new ApplicationConfig();
Assertions.assertThrows(IllegalStateException.class, () -> {
ConfigValidationUtils.validateApplicationConfig(config);
});
config.setName("testName");
config.setOwner("testOwner");
config.setOrganization("testOrg");
config.setArchitecture("testArchitecture");
config.setEnvironment("test");
Map<String, String> map = new HashMap<>();
map.put("k1", "v1");
map.put("k2", "v2");
config.setParameters(map);
ConfigValidationUtils.validateApplicationConfig(config);
mockedStatic.verify(
() -> {
ConfigValidationUtils.checkName(any(), any());
},
times(4));
mockedStatic.verify(
() -> {
ConfigValidationUtils.checkMultiName(any(), any());
},
times(1));
mockedStatic.verify(
() -> {
ConfigValidationUtils.checkParameterName(any());
},
times(1));
}
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testReadCommittedLagMetric() {
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
Map<String, String> tags = new HashMap<>();
tags.put("topic", tp0.topic());
tags.put("partition", String.valueOf(tp0.partition()));
MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);
Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);
// recordsFetchLagMax should be initialized to NaN
assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON);
// recordsFetchLagMax should be lso - fetchOffset after receiving an empty FetchResponse
fetchRecords(tidp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0);
assertEquals(50, (Double) recordsFetchLagMax.metricValue(), EPSILON);
KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
assertEquals(50, (Double) partitionLag.metricValue(), EPSILON);
// recordsFetchLagMax should be lso - offset of the last message after receiving a non-empty FetchResponse
MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE,
TimestampType.CREATE_TIME, 0L);
for (int v = 0; v < 3; v++)
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
fetchRecords(tidp0, builder.build(), Errors.NONE, 200L, 150L, 0);
assertEquals(147, (Double) recordsFetchLagMax.metricValue(), EPSILON);
assertEquals(147, (Double) partitionLag.metricValue(), EPSILON);
// verify de-registration of partition lag
subscriptions.unsubscribe();
sendFetches();
assertFalse(allMetrics.containsKey(partitionLagMetric));
} |
public Mono<Object> genericInvoker(final String body, final MetaData metaData, final ServerWebExchange exchange) throws ShenyuException {
ConsumerConfig<GenericService> reference = ApplicationConfigCache.getInstance().get(metaData.getPath());
if (Objects.isNull(reference) || StringUtils.isEmpty(reference.getInterfaceId())) {
ApplicationConfigCache.getInstance().invalidate(metaData.getPath());
reference = ApplicationConfigCache.getInstance().initRef(metaData);
}
Pair<String[], Object[]> pair;
if (StringUtils.isBlank(metaData.getParameterTypes()) || ParamCheckUtils.bodyIsEmpty(body)) {
pair = new ImmutablePair<>(new String[]{}, new Object[]{});
} else {
pair = sofaParamResolveService.buildParameter(body, metaData.getParameterTypes());
}
CompletableFuture<Object> future = new CompletableFuture<>();
RpcInvokeContext.getContext().setResponseCallback(new SofaResponseCallback<>() {
@Override
public void onAppResponse(final Object o, final String s, final RequestBase requestBase) {
future.complete(o);
}
@Override
public void onAppException(final Throwable throwable, final String s, final RequestBase requestBase) {
future.completeExceptionally(throwable);
}
@Override
public void onSofaException(final SofaRpcException e, final String s, final RequestBase requestBase) {
future.completeExceptionally(e);
}
});
GenericService genericService = reference.refer();
genericService.$genericInvoke(metaData.getMethodName(), pair.getLeft(), pair.getRight());
return Mono.fromFuture(future.thenApply(ret -> {
if (Objects.isNull(ret)) {
ret = Constants.SOFA_RPC_RESULT_EMPTY;
}
GenericObject genericObject = (GenericObject) ret;
exchange.getAttributes().put(Constants.RPC_RESULT, genericObject.getFields());
exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.SUCCESS.getName());
return ret;
})).onErrorMap(ShenyuException::new);
} | @Test
@SuppressWarnings("all")
public void testGenericInvoker() throws IllegalAccessException {
ConsumerConfig consumerConfig = mock(ConsumerConfig.class);
GenericService genericService = mock(GenericService.class);
when(consumerConfig.refer()).thenReturn(genericService);
when(consumerConfig.getInterfaceId()).thenReturn(PATH);
when(genericService.$genericInvoke(METHOD_NAME, LEFT, RIGHT)).thenReturn(null);
ApplicationConfigCache applicationConfigCache = ApplicationConfigCache.getInstance();
final Field cacheField = FieldUtils.getDeclaredField(ApplicationConfigCache.class, "cache", true);
assertNotNull(cacheField);
final Object cache = cacheField.get(applicationConfigCache);
assertTrue(cache instanceof LoadingCache);
((LoadingCache) cache).put(PATH, consumerConfig);
SofaProxyService sofaProxyService = new SofaProxyService(new SofaParamResolveServiceImpl());
sofaProxyService.genericInvoker("", metaData, exchange);
RpcInvokeContext.getContext().getResponseCallback().onAppResponse("success", null, null);
final SofaRegisterConfig sofaRegisterConfig = new SofaRegisterConfig();
sofaRegisterConfig.setThreadpool(Constants.SHARED);
applicationConfigCache.init(sofaRegisterConfig);
} |
public static void skipFully(InputStream in, long len) throws IOException {
long amt = len;
while (amt > 0) {
long ret = in.skip(amt);
if (ret == 0) {
// skip may return 0 even if we're not at EOF. Luckily, we can
// use the read() method to figure out if we're at the end.
int b = in.read();
if (b == -1) {
throw new EOFException("Premature EOF from inputStream after " +
        "skipping " + (len - amt) + " byte(s).");
}
ret = 1;
}
amt -= ret;
}
} | @Test
public void testSkipFully() throws IOException {
byte[] inArray = {0, 1, 2, 3, 4};
ByteArrayInputStream in = new ByteArrayInputStream(inArray);
try {
in.mark(inArray.length);
IOUtils.skipFully(in, 2);
IOUtils.skipFully(in, 2);
try {
    IOUtils.skipFully(in, 2);
    fail("expected to get an EOFException");
} catch (EOFException e) {
    assertEquals("Premature EOF from inputStream " +
        "after skipping 1 byte(s).", e.getMessage());
}
in.reset();
try {
    IOUtils.skipFully(in, 20);
    fail("expected to get an EOFException");
} catch (EOFException e) {
    assertEquals("Premature EOF from inputStream " +
        "after skipping 5 byte(s).", e.getMessage());
}
in.reset();
IOUtils.skipFully(in, 5);
try {
    IOUtils.skipFully(in, 10);
    fail("expected to get an EOFException");
} catch (EOFException e) {
    assertEquals("Premature EOF from inputStream " +
        "after skipping 0 byte(s).", e.getMessage());
}
} finally {
in.close();
}
} |
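
// Minimal usage sketch for the skipFully() contract shown above: unlike
// InputStream.skip(), it either skips exactly the requested count or throws
// EOFException. IOUtils here is the class defining skipFully above (e.g.
// org.apache.hadoop.io.IOUtils); the import and demo class name are assumptions.
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public class SkipFullyDemo {
    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4});
        IOUtils.skipFully(in, 3);          // stream is now positioned at byte 3
        System.out.println(in.read());     // prints 3
        try {
            IOUtils.skipFully(in, 10);     // only 1 byte left -> EOFException
        } catch (EOFException expected) {
            System.out.println(expected.getMessage());
        }
    }
}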
@WithSpan
@Override
public SearchResponse apply(SearchResponse searchResponse) {
final List<ResultMessageSummary> summaries = searchResponse.messages().stream()
.map(summary -> {
if (requireAllFields && !usedVariables.stream().allMatch(variable -> summary.message().containsKey(variable.name))) {
return summary;
}
final String formattedString = template.transform(summary.message(), Locale.ENGLISH);
if (formattedString == null) {
return summary;
}
final Message message = messageFactory.createMessage(ImmutableMap.copyOf(summary.message()));
message.addField(targetField, formattedString);
return summary.toBuilder().message(message.getFields()).build();
})
.collect(Collectors.toList());
return searchResponse.toBuilder().messages(summaries).build();
} | @Test
public void formatAllowEmptyValues() {
final DecoratorImpl decorator = getDecoratorConfig("${field_a}: ${field_b}", "message", false);
final FormatStringDecorator formatStringDecorator = new FormatStringDecorator(decorator, templateEngine, messageFactory);
final SearchResponse searchResponse = getSearchResponse();
final SearchResponse response = formatStringDecorator.apply(searchResponse);
assertThat(response.messages().size()).isEqualTo(4);
assertThat(response.messages().get(0).message().get("message")).isEqualTo("1: b");
assertThat(response.messages().get(1).message().get("message")).isEqualTo("1:");
assertThat(response.messages().get(2).message().get("message")).isEqualTo(": b");
assertThat(response.messages().get(3).message().get("message")).isEqualTo(":");
} |
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>();
final List<Path> containers = new ArrayList<>();
for(Path file : files.keySet()) {
if(containerService.isContainer(file)) {
containers.add(file);
continue;
}
callback.delete(file);
final Path bucket = containerService.getContainer(file);
if(file.getType().contains(Path.Type.upload)) {
// In-progress multipart upload
try {
multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(NotfoundException ignored) {
log.warn(String.format("Ignore failure deleting multipart upload %s", file));
}
}
else {
final List<ObjectKeyAndVersion> keys = new ArrayList<>();
// S3 always returns 204 even if the key does not exist; it does not return 404 for non-existing keys
keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId()));
if(map.containsKey(bucket)) {
map.get(bucket).addAll(keys);
}
else {
map.put(bucket, keys);
}
}
}
// Iterate over all containers and delete list of keys
for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) {
final Path container = entry.getKey();
final List<ObjectKeyAndVersion> keys = entry.getValue();
this.delete(container, keys, prompt);
}
for(Path file : containers) {
callback.delete(file);
// Finally delete bucket itself
try {
final String bucket = containerService.getContainer(file).getName();
session.getClient().deleteBucket(bucket);
session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
} | @Test
public void testDeleteContainer() throws Exception {
final Path container = new Path(new AsciiRandomStringService().random(), EnumSet.of(Path.Type.volume, Path.Type.directory));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(container, new TransferStatus());
assertTrue(new S3FindFeature(session, acl).find(container));
new S3MultipleDeleteFeature(session, acl).delete(Arrays.asList(container,
new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file))), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void handleRequest(HttpServerExchange httpServerExchange) {
if (!httpServerExchange.getRequestMethod().equals(HttpString.tryFromString("GET"))) {
httpServerExchange.setStatusCode(HTTP_METHOD_NOT_ALLOWED);
httpServerExchange.getResponseSender().send("");
} else {
// For now if this endpoint is reachable then the service is up.
// There is no hard dependency that could be down.
httpServerExchange.setStatusCode(HTTP_OK);
httpServerExchange.getResponseHeaders().put(Headers.CONTENT_TYPE, "application/json");
httpServerExchange.getResponseSender().send(STATUS_UP);
}
} | @Test
void methodNotAllowed() {
var sut = new HealthEndpoint();
// when
var httpServerExchange = mock(HttpServerExchange.class);
var sender = mock(Sender.class);
when(httpServerExchange.getResponseSender()).thenReturn(sender);
when(httpServerExchange.getRequestMethod()).thenReturn(HttpString.tryFromString("POST"));
sut.handleRequest(httpServerExchange);
// then
verify(httpServerExchange).setStatusCode(Status.METHOD_NOT_ALLOWED.getStatusCode());
} |
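
// Hedged sketch of wiring the handler above into an embedded Undertow server. The
// host, port, and demo class name are illustrative, and HealthEndpoint is assumed to
// implement io.undertow.server.HttpHandler (its handleRequest signature matches).
import io.undertow.Undertow;

public class HealthServerDemo {
    public static void main(String[] args) {
        Undertow server = Undertow.builder()
                .addHttpListener(8080, "localhost")
                .setHandler(new HealthEndpoint())
                .build();
        server.start(); // GET http://localhost:8080/ returns the UP payload; other methods get 405
    }
}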
public static Map<String, Integer> indexByName(Types.StructType struct) {
IndexByName indexer = new IndexByName();
visit(struct, indexer);
return indexer.byName();
} | @Test
public void testValidateSchemaViaIndexByName() {
Types.NestedField nestedType =
Types.NestedField.required(
1,
"a",
Types.StructType.of(
required(2, "b", Types.StructType.of(required(3, "c", Types.BooleanType.get()))),
required(4, "b.c", Types.BooleanType.get())));
assertThatThrownBy(() -> TypeUtil.indexByName(Types.StructType.of(nestedType)))
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("Invalid schema: multiple fields for name a.b.c");
} |
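
// Illustrative sketch of indexByName() on a valid nested struct: it maps dotted
// field paths to field ids (assumes Apache Iceberg's api module on the classpath;
// the demo class name is an assumption).
import java.util.Map;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;

public class IndexByNameDemo {
    public static void main(String[] args) {
        Types.StructType struct = Types.StructType.of(
                Types.NestedField.required(1, "id", Types.LongType.get()),
                Types.NestedField.optional(2, "location", Types.StructType.of(
                        Types.NestedField.required(3, "lat", Types.DoubleType.get()))));
        Map<String, Integer> byName = TypeUtil.indexByName(struct);
        System.out.println(byName.get("id"));           // 1
        System.out.println(byName.get("location.lat")); // 3
    }
}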
@Override
public Thread newThread(Runnable r) {
Thread t = newThread(FastThreadLocalRunnable.wrap(r), prefix + nextId.incrementAndGet());
try {
if (t.isDaemon() != daemon) {
t.setDaemon(daemon);
}
if (t.getPriority() != priority) {
t.setPriority(priority);
}
} catch (Exception ignored) {
// Doesn't matter even if failed to set.
}
return t;
} | @Test
@Timeout(value = 2000, unit = TimeUnit.MILLISECONDS)
public void testDescendantThreadGroups() throws InterruptedException {
final SecurityManager current = System.getSecurityManager();
boolean securityManagerSet = false;
try {
try {
// install security manager that only allows parent thread groups to mess with descendant thread groups
System.setSecurityManager(new SecurityManager() {
@Override
public void checkAccess(ThreadGroup g) {
final ThreadGroup source = Thread.currentThread().getThreadGroup();
if (source != null) {
if (!source.parentOf(g)) {
throw new SecurityException("source group is not an ancestor of the target group");
}
super.checkAccess(g);
}
}
// so we can restore the security manager at the end of the test
@Override
public void checkPermission(Permission perm) {
}
});
} catch (UnsupportedOperationException e) {
Assumptions.assumeFalse(true, "Setting SecurityManager not supported");
}
securityManagerSet = true;
// holder for the thread factory, plays the role of a global singleton
final AtomicReference<DefaultThreadFactory> factory = new AtomicReference<DefaultThreadFactory>();
final AtomicInteger counter = new AtomicInteger();
final Runnable task = new Runnable() {
@Override
public void run() {
counter.incrementAndGet();
}
};
final AtomicReference<Throwable> interrupted = new AtomicReference<Throwable>();
// create the thread factory while running inside the "brother" thread group; if the
// factory were sticky about thread groups, it would be tied to that group forever.
// We then create a thread from the factory to run a "task" for us.
final Thread first = new Thread(new ThreadGroup("brother"), new Runnable() {
@Override
public void run() {
factory.set(new DefaultThreadFactory("test", false, Thread.NORM_PRIORITY, null));
final Thread t = factory.get().newThread(task);
t.start();
try {
t.join();
} catch (InterruptedException e) {
interrupted.set(e);
Thread.currentThread().interrupt();
}
}
});
first.start();
first.join();
assertNull(interrupted.get());
// now we will use factory again, this time from a sibling thread group sister
// if DefaultThreadFactory is "sticky" about thread groups, a security manager
// that forbids sibling thread groups from messing with each other will strike this down
final Thread second = new Thread(new ThreadGroup("sister"), new Runnable() {
@Override
public void run() {
final Thread t = factory.get().newThread(task);
t.start();
try {
t.join();
} catch (InterruptedException e) {
interrupted.set(e);
Thread.currentThread().interrupt();
}
}
});
second.start();
second.join();
assertNull(interrupted.get());
assertEquals(2, counter.get());
} finally {
if (securityManagerSet) {
System.setSecurityManager(current);
}
}
} |
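
// Minimal usage sketch for the factory above (assuming Netty's
// io.netty.util.concurrent.DefaultThreadFactory); the pool name, daemon flag, and
// demo class name are illustrative. Threads come out named like "worker-1-1", "worker-1-2".
import io.netty.util.concurrent.DefaultThreadFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FactoryDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(
                2, new DefaultThreadFactory("worker", true)); // daemon worker threads
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}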
@Override
public Serde<GenericRow> create(
final FormatInfo format,
final PersistenceSchema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> srClientFactory,
final String loggerNamePrefix,
final ProcessingLogContext processingLogContext,
final Optional<TrackedCallback> tracker
) {
final Serde<List<?>> formatSerde =
innerFactory.createFormatSerde("Value", format, schema, ksqlConfig, srClientFactory, false);
final Serde<GenericRow> genericRowSerde = toGenericRowSerde(formatSerde, schema);
final Serde<GenericRow> loggingSerde = innerFactory.wrapInLoggingSerde(
genericRowSerde,
loggerNamePrefix,
processingLogContext,
queryId);
final Serde<GenericRow> serde = tracker
.map(callback -> innerFactory.wrapInTrackingSerde(loggingSerde, callback))
.orElse(loggingSerde);
serde.configure(Collections.emptyMap(), false);
return serde;
} | @Test
public void shouldConfigureLoggingSerde() {
// When:
factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
Optional.empty());
// Then:
verify(loggingSerde).configure(ImmutableMap.of(), false);
} |
@Override
protected void encode(
ChannelHandlerContext ctx, AddressedEnvelope<M, InetSocketAddress> msg, List<Object> out) throws Exception {
assert out.isEmpty();
encoder.encode(ctx, msg.content(), out);
if (out.size() != 1) {
throw new EncoderException(
StringUtil.simpleClassName(encoder) + " must produce only one message.");
}
Object content = out.get(0);
if (content instanceof ByteBuf) {
// Replace the ByteBuf with a DatagramPacket.
out.set(0, new DatagramPacket((ByteBuf) content, msg.recipient(), msg.sender()));
} else {
throw new EncoderException(
StringUtil.simpleClassName(encoder) + " must produce only ByteBuf.");
}
} | @Test
public void testEncode() {
testEncode(false);
} |
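
// Hedged pipeline sketch matching the encoder above (Netty's DatagramPacketEncoder):
// it wraps a message encoder so each ByteBuf it produces is re-packaged as a
// DatagramPacket addressed to the envelope's recipient. The demo class name is assumed.
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.handler.codec.DatagramPacketEncoder;
import io.netty.handler.codec.string.StringEncoder;

public class DatagramEncoderDemo {
    public static void main(String[] args) {
        NioDatagramChannel ch = new NioDatagramChannel();
        ch.pipeline().addLast(new DatagramPacketEncoder<>(new StringEncoder()));
        // Writing an AddressedEnvelope<String, InetSocketAddress> through this channel
        // now emits a DatagramPacket instead of a raw ByteBuf.
    }
}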
public static String finalSigningKeyStringWithDefaultInfo(String secret, String region) {
String signDate = LocalDateTime.now(UTC_0).format(V4_SIGN_DATE_FORMATTER);
return finalSigningKeyString(secret, signDate, region, RamConstants.SIGNATURE_V4_PRODUCE,
RamConstants.SIGNATURE_V4_METHOD);
} | @Test
void testFinalSigningKeyStringWithDefaultInfo() {
assertNotNull(CalculateV4SigningKeyUtil.finalSigningKeyStringWithDefaultInfo("", "cn-hangzhou"));
} |
@Override
public Result reconcile(Request request) {
return client.fetch(ReverseProxy.class, request.name())
.map(reverseProxy -> {
if (isDeleted(reverseProxy)) {
cleanUpResourcesAndRemoveFinalizer(request.name());
return new Result(false, null);
}
addFinalizerIfNecessary(reverseProxy);
registerReverseProxy(reverseProxy);
return new Result(false, null);
})
.orElse(new Result(false, null));
} | @Test
void reconcileRemoval() {
// fix gh-2937
ReverseProxy reverseProxy = new ReverseProxy();
reverseProxy.setMetadata(new Metadata());
reverseProxy.getMetadata().setName("fake-reverse-proxy");
reverseProxy.getMetadata().setDeletionTimestamp(Instant.now());
reverseProxy.getMetadata()
.setLabels(Map.of(PluginConst.PLUGIN_NAME_LABEL_NAME, "fake-plugin"));
reverseProxy.setRules(List.of());
doNothing().when(routerFunctionRegistry).remove(anyString(), anyString());
when(client.fetch(ReverseProxy.class, "fake-reverse-proxy"))
.thenReturn(Optional.of(reverseProxy));
reverseProxyReconciler.reconcile(new Reconciler.Request("fake-reverse-proxy"));
verify(routerFunctionRegistry, never()).register(anyString(), any(ReverseProxy.class));
verify(routerFunctionRegistry, times(1))
.remove(eq("fake-plugin"), eq("fake-reverse-proxy"));
} |
public void evaluate(List<AuthorizationContext> contexts) {
if (CollectionUtils.isEmpty(contexts)) {
return;
}
contexts.forEach(this.authorizationStrategy::evaluate);
} | @Test
public void evaluate8() {
if (MixAll.isMac()) {
return;
}
User user = User.of("test", "test");
this.authenticationMetadataManager.createUser(user).join();
Acl acl = AuthTestHelper.buildAcl("User:test", "Topic:test*", "Pub", "192.168.0.0/24", Decision.DENY);
this.authorizationMetadataManager.createAcl(acl).join();
Assert.assertThrows(AuthorizationException.class, () -> {
Subject subject = Subject.of("User:test");
Resource resource = Resource.ofTopic("test");
Action action = Action.PUB;
String sourceIp = "192.168.0.1";
DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp);
context.setRpcCode("10");
this.evaluator.evaluate(Collections.singletonList(context));
});
Assert.assertThrows(AuthorizationException.class, () -> {
Subject subject = Subject.of("User:test");
Resource resource = Resource.ofTopic("abc");
Action action = Action.PUB;
String sourceIp = "192.168.0.1";
DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp);
context.setRpcCode("10");
this.evaluator.evaluate(Collections.singletonList(context));
});
acl = AuthTestHelper.buildAcl("User:test", PolicyType.DEFAULT, "Topic:*", "Pub", null, Decision.ALLOW);
this.authorizationMetadataManager.updateAcl(acl).join();
{
Subject subject = Subject.of("User:test");
Resource resource = Resource.ofTopic("abc");
Action action = Action.PUB;
String sourceIp = "192.168.0.1";
DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp);
context.setRpcCode("10");
this.evaluator.evaluate(Collections.singletonList(context));
}
} |
public static ByteBuf wrappedBuffer(byte[] array) {
if (array.length == 0) {
return EMPTY_BUFFER;
}
return new UnpooledHeapByteBuf(ALLOC, array, array.length);
} | @Test
public void testGetBytesByteBuffer() {
byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'};
// Ensure the destination buffer is bigger than what is wrapped in the ByteBuf.
final ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1);
final ByteBuf wrappedBuffer = wrappedBuffer(bytes);
try {
assertThrows(IndexOutOfBoundsException.class, new Executable() {
@Override
public void execute() throws Throwable {
wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer);
}
});
} finally {
wrappedBuffer.release();
}
} |
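
// Hedged sketch of the zero-copy behaviour implied by wrappedBuffer(byte[]): the
// returned ByteBuf shares the array, so later writes to the array are visible
// through the buffer. Assumes Netty's io.netty.buffer.Unpooled; the demo class
// name is illustrative.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WrappedBufferDemo {
    public static void main(String[] args) {
        byte[] backing = {'a', 'b', 'c'};
        ByteBuf buf = Unpooled.wrappedBuffer(backing);
        backing[0] = 'z';                          // mutate the shared backing array
        System.out.println((char) buf.getByte(0)); // prints 'z', not 'a'
        buf.release();                             // wrapped buffers are still reference-counted
    }
}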