focal_method | test_case |
---|---|
public static <T> CompressedSource<T> from(FileBasedSource<T> sourceDelegate) {
return new CompressedSource<>(sourceDelegate, CompressionMode.AUTO);
} | @Test
public void testUnsplittable() throws IOException {
String baseName = "test-input";
File compressedFile = tmpFolder.newFile(baseName + ".gz");
byte[] input = generateInput(10000);
writeFile(compressedFile, input, Compression.GZIP);
CompressedSource<Byte> source =
CompressedSource.from(new ByteSource(compressedFile.getPath(), 1));
List<Byte> expected = Lists.newArrayList();
for (byte i : input) {
expected.add(i);
}
PipelineOptions options = PipelineOptionsFactory.create();
BoundedReader<Byte> reader = source.createReader(options);
List<Byte> actual = Lists.newArrayList();
for (boolean hasNext = reader.start(); hasNext; hasNext = reader.advance()) {
actual.add(reader.getCurrent());
// checkpoint every 9 elements
if (actual.size() % 9 == 0) {
Double fractionConsumed = reader.getFractionConsumed();
assertNotNull(fractionConsumed);
assertNull(reader.splitAtFraction(fractionConsumed));
}
}
assertEquals(expected.size(), actual.size());
assertEquals(Sets.newHashSet(expected), Sets.newHashSet(actual));
} |
@Override
protected void handleEndTxn(CommandEndTxn command) {
checkArgument(state == State.Connected);
final long requestId = command.getRequestId();
final int txnAction = command.getTxnAction().getValue();
TxnID txnID = new TxnID(command.getTxnidMostBits(), command.getTxnidLeastBits());
final TransactionCoordinatorID tcId = TransactionCoordinatorID.get(command.getTxnidMostBits());
if (!checkTransactionEnableAndSendError(requestId)) {
return;
}
TransactionMetadataStoreService transactionMetadataStoreService =
service.pulsar().getTransactionMetadataStoreService();
verifyTxnOwnership(txnID)
.thenCompose(isOwner -> {
if (!isOwner) {
return failedFutureTxnNotOwned(txnID);
}
return transactionMetadataStoreService.endTransaction(txnID, txnAction, false);
})
.whenComplete((v, ex) -> {
if (ex == null) {
commandSender.sendEndTxnResponse(requestId, txnID, txnAction);
} else {
ex = handleTxnException(ex, BaseCommand.Type.END_TXN.name(), requestId);
commandSender.sendEndTxnErrorResponse(requestId, txnID,
BrokerServiceException.getClientErrorCode(ex), ex.getMessage());
transactionMetadataStoreService.handleOpFail(ex, tcId);
}
});
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void shouldFailHandleEndTxn() throws Exception {
ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS);
Field stateUpdater = ServerCnx.class.getDeclaredField("state");
stateUpdater.setAccessible(true);
stateUpdater.set(serverCnx, ServerCnx.State.Failed);
serverCnx.handleEndTxn(any());
} |
@Override
public int partition(Integer bucketId, int numPartitions) {
Preconditions.checkNotNull(bucketId, BUCKET_NULL_MESSAGE);
Preconditions.checkArgument(bucketId >= 0, BUCKET_LESS_THAN_LOWER_BOUND_MESSAGE, bucketId);
Preconditions.checkArgument(
bucketId < maxNumBuckets, BUCKET_GREATER_THAN_UPPER_BOUND_MESSAGE, bucketId, maxNumBuckets);
if (numPartitions <= maxNumBuckets) {
return bucketId % numPartitions;
} else {
return getPartitionWithMoreWritersThanBuckets(bucketId, numPartitions);
}
} | @Test
public void testPartitionerBucketIdOutOfRangeFail() {
PartitionSpec partitionSpec = TableSchemaType.ONE_BUCKET.getPartitionSpec(DEFAULT_NUM_BUCKETS);
BucketPartitioner bucketPartitioner = new BucketPartitioner(partitionSpec);
int negativeBucketId = -1;
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> bucketPartitioner.partition(negativeBucketId, 1))
.withMessage(BUCKET_LESS_THAN_LOWER_BOUND_MESSAGE, negativeBucketId);
int tooBigBucketId = DEFAULT_NUM_BUCKETS;
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> bucketPartitioner.partition(tooBigBucketId, 1))
.withMessage(BUCKET_GREATER_THAN_UPPER_BOUND_MESSAGE, tooBigBucketId, DEFAULT_NUM_BUCKETS);
} |
@PostMapping("/authorize")
@Operation(summary = "Request authorization", description = "Used for the code (authorization code) mode or the implicit mode; invoked when [Submit] is clicked on the sso.vue single sign-on page")
@Parameters({
@Parameter(name = "response_type", required = true, description = "Response type", example = "code"),
@Parameter(name = "client_id", required = true, description = "Client ID", example = "tudou"),
@Parameter(name = "scope", description = "Authorization scope", example = "userinfo.read"), // passed as a Map<String, Boolean>; Spring MVC does not yet support binding parameters in that shape
@Parameter(name = "redirect_uri", required = true, description = "Redirect URI", example = "https://www.iocoder.cn"),
@Parameter(name = "auto_approve", required = true, description = "Whether the user approves", example = "true"),
@Parameter(name = "state", example = "1")
})
public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType,
@RequestParam("client_id") String clientId,
@RequestParam(value = "scope", required = false) String scope,
@RequestParam("redirect_uri") String redirectUri,
@RequestParam(value = "auto_approve") Boolean autoApprove,
@RequestParam(value = "state", required = false) String state) {
@SuppressWarnings("unchecked")
Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class);
scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap());
// 0. Verify the user is already logged in. Implemented via Spring Security
// 1.1 Verify responseType is either code or token
OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType);
// 1.2 Verify the redirectUri domain is valid + verify the scope is within the client's authorized scopes
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null,
grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri);
// 2.1 If autoApprove is true, this is scenario one (try automatic approval)
if (Boolean.TRUE.equals(autoApprove)) {
// If automatic approval is not possible, return an empty url so the frontend does not redirect
if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) {
return success(null);
}
} else { // 2.2 Otherwise this is scenario two (the user explicitly approved or denied)
// If the approval check does not pass, redirect to an error link
if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) {
return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state,
"access_denied", "User denied access"));
}
}
// 3.1 For the code (authorization code) mode, issue an authorization code and redirect
List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue);
if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) {
return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
// 3.2 For the token (implicit) mode, issue an accessToken and redirect
return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
} | @Test // autoApprove = false, approved + code
public void testApproveOrDeny_approveWithCode() {
// Prepare parameters
String responseType = "code";
String clientId = randomString();
String scope = "{\"read\": true, \"write\": false}";
String redirectUri = "https://www.iocoder.cn";
String state = "test";
// mock the method (client)
OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId(clientId).setAdditionalInformation(null);
when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("authorization_code"),
eq(asSet("read", "write")), eq(redirectUri))).thenReturn(client);
// mock the method (scenario two)
when(oauth2ApproveService.updateAfterApproval(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId),
eq(MapUtil.builder(new LinkedHashMap<String, Boolean>()).put("read", true).put("write", false).build())))
.thenReturn(true);
// mock the method (authorization code)
String authorizationCode = "test_code";
when(oauth2GrantService.grantAuthorizationCodeForCode(isNull(), eq(UserTypeEnum.ADMIN.getValue()),
eq(clientId), eq(ListUtil.toList("read")), eq(redirectUri), eq(state))).thenReturn(authorizationCode);
// Invoke
CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId,
scope, redirectUri, false, state);
// Assert
assertEquals(0, result.getCode());
assertEquals("https://www.iocoder.cn?code=test_code&state=test", result.getData());
} |
@Override
public void describe(SensorDescriptor descriptor) {
descriptor
.name("Xoo Significant Code Ranges Sensor")
.onlyOnLanguages(Xoo.KEY);
} | @Test
public void testDescriptor() {
sensor.describe(new DefaultSensorDescriptor());
} |
@Override
public List<PartitionKey> getPrunedPartitions(Table table, ScalarOperator predicate, long limit, TableVersionRange version) {
IcebergTable icebergTable = (IcebergTable) table;
String dbName = icebergTable.getRemoteDbName();
String tableName = icebergTable.getRemoteTableName();
if (version.end().isEmpty()) {
return new ArrayList<>();
}
PredicateSearchKey key = PredicateSearchKey.of(dbName, tableName, version.end().get(), predicate);
triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit);
List<PartitionKey> partitionKeys = new ArrayList<>();
List<FileScanTask> icebergSplitTasks = splitTasks.get(key);
if (icebergSplitTasks == null) {
throw new StarRocksConnectorException("Missing iceberg split task for table:[{}.{}]. predicate:[{}]",
dbName, tableName, predicate);
}
Set<List<String>> scannedPartitions = new HashSet<>();
PartitionSpec spec = icebergTable.getNativeTable().spec();
List<Column> partitionColumns = icebergTable.getPartitionColumnsIncludeTransformed();
boolean existPartitionTransformedEvolution = ((IcebergTable) table).hasPartitionTransformedEvolution();
for (FileScanTask fileScanTask : icebergSplitTasks) {
org.apache.iceberg.PartitionData partitionData = (org.apache.iceberg.PartitionData) fileScanTask.file().partition();
List<String> values = PartitionUtil.getIcebergPartitionValues(
spec, partitionData, existPartitionTransformedEvolution);
if (values.size() != partitionColumns.size()) {
// skip entries affected by partition evolution or non-identity columns.
continue;
}
if (scannedPartitions.contains(values)) {
continue;
} else {
scannedPartitions.add(values);
}
try {
List<com.starrocks.catalog.Type> srTypes = new ArrayList<>();
for (PartitionField partitionField : spec.fields()) {
if (partitionField.transform().isVoid()) {
continue;
}
if (!partitionField.transform().isIdentity()) {
Type sourceType = spec.schema().findType(partitionField.sourceId());
Type resultType = partitionField.transform().getResultType(sourceType);
if (resultType == Types.DateType.get()) {
resultType = Types.IntegerType.get();
}
srTypes.add(fromIcebergType(resultType));
continue;
}
srTypes.add(icebergTable.getColumn(icebergTable.getPartitionSourceName(spec.schema(),
partitionField)).getType());
}
if (existPartitionTransformedEvolution) {
srTypes = partitionColumns.stream()
.map(Column::getType)
.collect(Collectors.toList());
}
partitionKeys.add(createPartitionKeyWithType(values, srTypes, table.getType()));
} catch (Exception e) {
LOG.error("create partition key failed.", e);
throw new StarRocksConnectorException(e.getMessage());
}
}
return partitionKeys;
} | @Test
public void testTransformedPartitionPrune() {
IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG);
List<Column> columns = Lists.newArrayList(new Column("k1", INT), new Column("ts", DATETIME));
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db_name",
"table_name", "", columns, mockedNativeTableD, Maps.newHashMap());
org.apache.iceberg.PartitionKey partitionKey = new org.apache.iceberg.PartitionKey(SPEC_D_5, SCHEMA_D);
partitionKey.set(0, 438292);
DataFile tsDataFiles =
DataFiles.builder(SPEC_D_5)
.withPath("/path/to/data-d.parquet")
.withFileSizeInBytes(20)
.withPartition(partitionKey)
.withRecordCount(2)
.build();
mockedNativeTableD.newAppend().appendFile(tsDataFiles).commit();
mockedNativeTableD.refresh();
TableVersionRange version = TableVersionRange.withEnd(Optional.of(
mockedNativeTableD.currentSnapshot().snapshotId()));
List<PartitionKey> partitionKeys = metadata.getPrunedPartitions(icebergTable, null, -1, version);
Assert.assertEquals("438292", partitionKeys.get(0).getKeys().get(0).getStringValue());
} |
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponentsRecursive();
} | @Test
public void linearOneWay() {
// 0 -> 1 -> 2
g.edge(0, 1).setDistance(1).set(speedEnc, 10, 0);
g.edge(1, 2).setDistance(1).set(speedEnc, 10, 0);
ConnectedComponents result = EdgeBasedTarjanSCC.findComponentsRecursive(g, fwdAccessFilter, false);
assertEquals(4, result.getEdgeKeys());
assertEquals(4, result.getTotalComponents());
assertEquals(0, result.getComponents().size());
// we only have two directed edges here, but we always calculate the component indices for all edge keys and
// here every (directed) edge belongs to its own component
assertEquals(4, result.getSingleEdgeComponents().cardinality());
assertEquals(IntArrayList.from(), result.getBiggestComponent());
} |
@Subscribe
public void onVarbitChanged(VarbitChanged varbitChanged)
{
if (varbitChanged.getVarpId() == VarPlayer.CANNON_AMMO)
{
int old = cballsLeft;
cballsLeft = varbitChanged.getValue();
if (cballsLeft > old)
{
cannonBallNotificationSent = false;
}
if (!cannonBallNotificationSent && cballsLeft > 0 && config.lowWarningThreshold() >= cballsLeft)
{
notifier.notify(config.showCannonNotifications(), String.format("Your cannon has %d cannon balls remaining!", cballsLeft));
cannonBallNotificationSent = true;
}
}
else if (varbitChanged.getVarpId() == VarPlayer.CANNON_COORD)
{
WorldPoint c = WorldPoint.fromCoord(varbitChanged.getValue());
cannonPosition = buildCannonWorldArea(c);
}
else if (varbitChanged.getVarpId() == VarPlayer.CANNON_STATE)
{
cannonPlaced = varbitChanged.getValue() == 4;
if (cannonPlaced)
{
addCounter();
}
else
{
removeCounter();
}
}
} | @Test
public void testThresholdNotificationShouldNotifyOnce()
{
when(config.showCannonNotifications()).thenReturn(Notification.ON);
when(config.lowWarningThreshold()).thenReturn(10);
for (int cballs = 15; cballs >= 8; --cballs)
{
cannonAmmoChanged.setValue(cballs);
plugin.onVarbitChanged(cannonAmmoChanged);
}
verify(notifier, times(1)).notify(Notification.ON, "Your cannon has 10 cannon balls remaining!");
} |
public void removeWorkerFromMap(long workerId, String workerIpPort) {
try (LockCloseable lock = new LockCloseable(rwLock.writeLock())) {
workerToNode.remove(workerId);
workerToId.remove(workerIpPort);
}
LOG.info("remove worker {} success from StarMgr", workerIpPort);
} | @Test
public void testRemoveWorkerFromMap() {
String workerHost = "127.0.0.1:8090";
Map<String, Long> mockWorkerToId = Maps.newHashMap();
mockWorkerToId.put(workerHost, 5L);
Deencapsulation.setField(starosAgent, "workerToId", mockWorkerToId);
Assert.assertEquals(5L, starosAgent.getWorkerId(workerHost));
starosAgent.removeWorkerFromMap(5L, workerHost);
ExceptionChecker.expectThrows(NullPointerException.class, () -> starosAgent.getWorkerId(workerHost));
} |
public static Set<String> minus(Collection<String> list1, Collection<String> list2) {
HashSet<String> s1 = new HashSet<>(list1);
s1.removeAll(list2);
return s1;
} | @Test
public void testMinus() {
String topicName1 = "persistent://my-property/my-ns/pattern-topic-1";
String topicName2 = "persistent://my-property/my-ns/pattern-topic-2";
String topicName3 = "persistent://my-property/my-ns/pattern-topic-3";
String topicName4 = "persistent://my-property/my-ns/pattern-topic-4";
String topicName5 = "persistent://my-property/my-ns/pattern-topic-5";
String topicName6 = "persistent://my-property/my-ns/pattern-topic-6";
List<String> oldNames = Lists.newArrayList(topicName1, topicName2, topicName3, topicName4);
List<String> newNames = Lists.newArrayList(topicName3, topicName4, topicName5, topicName6);
Set<String> addedNames = TopicList.minus(newNames, oldNames);
Set<String> removedNames = TopicList.minus(oldNames, newNames);
assertTrue(addedNames.size() == 2 &&
addedNames.contains(topicName5) &&
addedNames.contains(topicName6));
assertTrue(removedNames.size() == 2 &&
removedNames.contains(topicName1) &&
removedNames.contains(topicName2));
// two completely different lists: should return the contents of the first list.
Set<String> addedNames2 = TopicList.minus(addedNames, removedNames);
assertTrue(addedNames2.size() == 2 &&
addedNames2.contains(topicName5) &&
addedNames2.contains(topicName6));
// two identical lists: should return an empty set.
Set<String> addedNames3 = TopicList.minus(addedNames, addedNames);
assertEquals(addedNames3.size(), 0);
// minus with an empty list: addedNames2.size() = 2, addedNames3.size() = 0
Set<String> addedNames4 = TopicList.minus(addedNames2, addedNames3);
assertEquals(addedNames2.size(), addedNames4.size());
addedNames4.forEach(name -> assertTrue(addedNames2.contains(name)));
Set<String> addedNames5 = TopicList.minus(addedNames3, addedNames2);
assertEquals(addedNames5.size(), 0);
} |
static boolean isFullWidth(int codePoint) {
int value = UCharacter.getIntPropertyValue(codePoint, UProperty.EAST_ASIAN_WIDTH);
switch (value) {
case UCharacter.EastAsianWidth.NEUTRAL:
case UCharacter.EastAsianWidth.AMBIGUOUS:
case UCharacter.EastAsianWidth.HALFWIDTH:
case UCharacter.EastAsianWidth.NARROW:
return false;
case UCharacter.EastAsianWidth.FULLWIDTH:
case UCharacter.EastAsianWidth.WIDE:
return true;
default:
throw new RuntimeException("unknown UProperty.EAST_ASIAN_WIDTH: " + value);
}
} | @Test
void testCharFullWidth() {
char[] chars = new char[] {'A', 'a', ',', '中', ',', 'こ'};
boolean[] expected = new boolean[] {false, false, false, true, true, true};
for (int i = 0; i < chars.length; i++) {
assertThat(TableauStyle.isFullWidth(Character.codePointAt(chars, i)))
.isEqualTo(expected[i]);
}
} |
@VisibleForTesting
void forceFreeMemory()
{
memoryManager.close();
} | @Test
public void testForceFreeMemory()
throws Throwable
{
PartitionedOutputBuffer buffer = createPartitionedBuffer(
createInitialEmptyOutputBuffers(PARTITIONED)
.withBuffer(FIRST, 0)
.withNoMoreBufferIds(),
sizeOfPages(10));
for (int i = 0; i < 5; i++) {
addPage(buffer, createPage(1), 0);
}
OutputBufferMemoryManager memoryManager = buffer.getMemoryManager();
assertTrue(memoryManager.getBufferedBytes() > 0);
buffer.forceFreeMemory();
assertEquals(memoryManager.getBufferedBytes(), 0);
// adding a page after forceFreeMemory() should be NOOP
addPage(buffer, createPage(1));
assertEquals(memoryManager.getBufferedBytes(), 0);
} |
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
} | @Test
public void testIntegerNotIn() {
boolean shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MIN_VALUE - 25, INT_MIN_VALUE - 24))
.eval(FILE);
assertThat(shouldRead).as("Should read: id below lower bound (5 < 30, 6 < 30)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MIN_VALUE - 2, INT_MIN_VALUE - 1))
.eval(FILE);
assertThat(shouldRead).as("Should read: id below lower bound (28 < 30, 29 < 30)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MIN_VALUE - 1, INT_MIN_VALUE))
.eval(FILE);
assertThat(shouldRead).as("Should read: id equal to lower bound (30 == 30)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MAX_VALUE - 4, INT_MAX_VALUE - 3))
.eval(FILE);
assertThat(shouldRead)
.as("Should read: id between lower and upper bounds (30 < 75 < 79, 30 < 76 < 79)")
.isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MAX_VALUE, INT_MAX_VALUE + 1))
.eval(FILE);
assertThat(shouldRead).as("Should read: id equal to upper bound (79 == 79)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MAX_VALUE + 1, INT_MAX_VALUE + 2))
.eval(FILE);
assertThat(shouldRead).as("Should read: id above upper bound (80 > 79, 81 > 79)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("id", INT_MAX_VALUE + 6, INT_MAX_VALUE + 7))
.eval(FILE);
assertThat(shouldRead).as("Should read: id above upper bound (85 > 79, 86 > 79)").isTrue();
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notIn("all_nulls", "abc", "def")).eval(FILE);
assertThat(shouldRead).as("Should read: notIn on all nulls column").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, notIn("some_nulls", "abc", "def")).eval(FILE);
assertThat(shouldRead).as("Should read: notIn on some nulls column").isTrue();
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notIn("no_nulls", "abc", "def")).eval(FILE);
assertThat(shouldRead).as("Should read: notIn on no nulls column").isTrue();
} |
public static String getServiceName(NetworkService networkService) {
if (isWebService(networkService) && networkService.hasSoftware()) {
return Ascii.toLowerCase(networkService.getSoftware().getName());
}
return Ascii.toLowerCase(networkService.getServiceName());
} | @Test
public void getServiceName_whenNonWebService_returnsServiceName() {
assertThat(
NetworkServiceUtils.getServiceName(
NetworkService.newBuilder()
.setNetworkEndpoint(forIpAndPort("127.0.0.1", 22))
.setServiceName("ssh")
.build()))
.isEqualTo("ssh");
} |
@Override
public Type classify(final Throwable e) {
Type type = Type.UNKNOWN;
if (e instanceof KsqlSerializationException
|| (e instanceof StreamsException
&& (ExceptionUtils.indexOfThrowable(e, KsqlSerializationException.class) != -1))) {
if (!hasInternalTopicPrefix(e)) {
type = Type.USER;
LOG.info(
"Classified error as USER error based on schema mismatch. Query ID: {} Exception: {}",
queryId,
e);
}
}
return type;
} | @Test
public void shouldClassifyKsqlSerializationExceptionWithRepartitionTopicAsUnknownError() {
// Given:
final String topic = "_confluent-ksql-default_query_CTAS_USERS_0-Aggregate-GroupBy-repartition";
final Exception e = new KsqlSerializationException(
topic,
"Error serializing message to topic: " + topic);
// When:
final Type type = new KsqlSerializationClassifier("").classify(e);
// Then:
assertThat(type, is(Type.UNKNOWN));
} |
public H3IndexResolution getResolution() {
return _resolution;
} | @Test
public void withDisabledFalse()
throws JsonProcessingException {
String confStr = "{\"disabled\": false}";
H3IndexConfig config = JsonUtils.stringToObject(confStr, H3IndexConfig.class);
assertFalse(config.isDisabled(), "Unexpected disabled");
assertNull(config.getResolution(), "Unexpected resolution");
} |
@Override
public void characters(char[] ch, int start, int length) throws SAXException {
advance(length);
super.characters(ch, start, length);
} | @Test
public void testTenCharactersPerByte() throws IOException {
try {
char[] ch = new char[10];
for (int i = 0; i < MANY_BYTES; i++) {
stream.read();
handler.characters(ch, 0, ch.length);
}
} catch (SAXException e) {
fail("Unexpected SAXException");
}
} |
@Override
public ImagesAndRegistryClient call()
throws IOException, RegistryException, LayerPropertyNotFoundException,
LayerCountMismatchException, BadContainerConfigurationFormatException,
CacheCorruptedException, CredentialRetrievalException {
EventHandlers eventHandlers = buildContext.getEventHandlers();
try (ProgressEventDispatcher progressDispatcher =
progressDispatcherFactory.create("pulling base image manifest", 4);
TimerEventDispatcher ignored1 = new TimerEventDispatcher(eventHandlers, DESCRIPTION)) {
// Skip this step if this is a scratch image
ImageReference imageReference = buildContext.getBaseImageConfiguration().getImage();
if (imageReference.isScratch()) {
Set<Platform> platforms = buildContext.getContainerConfiguration().getPlatforms();
Verify.verify(!platforms.isEmpty());
eventHandlers.dispatch(LogEvent.progress("Getting scratch base image..."));
ImmutableList.Builder<Image> images = ImmutableList.builder();
for (Platform platform : platforms) {
Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
imageBuilder.setArchitecture(platform.getArchitecture()).setOs(platform.getOs());
images.add(imageBuilder.build());
}
return new ImagesAndRegistryClient(images.build(), null);
}
eventHandlers.dispatch(
LogEvent.progress("Getting manifest for base image " + imageReference + "..."));
if (buildContext.isOffline()) {
List<Image> images = getCachedBaseImages();
if (!images.isEmpty()) {
return new ImagesAndRegistryClient(images, null);
}
throw new IOException(
"Cannot run Jib in offline mode; " + imageReference + " not found in local Jib cache");
} else if (imageReference.getDigest().isPresent()) {
List<Image> images = getCachedBaseImages();
if (!images.isEmpty()) {
RegistryClient noAuthRegistryClient =
buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
// TODO: passing noAuthRegistryClient may be problematic. It may return 401 unauthorized
// if layers have to be downloaded.
// https://github.com/GoogleContainerTools/jib/issues/2220
return new ImagesAndRegistryClient(images, noAuthRegistryClient);
}
}
Optional<ImagesAndRegistryClient> mirrorPull =
tryMirrors(buildContext, progressDispatcher.newChildProducer());
if (mirrorPull.isPresent()) {
return mirrorPull.get();
}
try {
// First, try with no credentials. This works with public GCR images (but not Docker Hub).
// TODO: investigate if we should just pass credentials up front. However, this involves
// some risk. https://github.com/GoogleContainerTools/jib/pull/2200#discussion_r359069026
// contains some related discussions.
RegistryClient noAuthRegistryClient =
buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
return new ImagesAndRegistryClient(
pullBaseImages(noAuthRegistryClient, progressDispatcher.newChildProducer()),
noAuthRegistryClient);
} catch (RegistryUnauthorizedException ex) {
eventHandlers.dispatch(
LogEvent.lifecycle(
"The base image requires auth. Trying again for " + imageReference + "..."));
Credential credential =
RegistryCredentialRetriever.getBaseImageCredential(buildContext).orElse(null);
RegistryClient registryClient =
buildContext
.newBaseImageRegistryClientFactory()
.setCredential(credential)
.newRegistryClient();
String wwwAuthenticate = ex.getHttpResponseException().getHeaders().getAuthenticate();
if (wwwAuthenticate != null) {
eventHandlers.dispatch(
LogEvent.debug("WWW-Authenticate for " + imageReference + ": " + wwwAuthenticate));
registryClient.authPullByWwwAuthenticate(wwwAuthenticate);
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
} else {
// Not getting WWW-Authenticate is unexpected in practice, and we may just blame the
// server and fail. However, to keep some old behavior, try a few things as a last resort.
// TODO: consider removing this fallback branch.
if (credential != null && !credential.isOAuth2RefreshToken()) {
eventHandlers.dispatch(
LogEvent.debug("Trying basic auth as fallback for " + imageReference + "..."));
registryClient.configureBasicAuth();
try {
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
} catch (RegistryUnauthorizedException ignored) {
// Fall back to try bearer auth.
}
}
eventHandlers.dispatch(
LogEvent.debug("Trying bearer auth as fallback for " + imageReference + "..."));
registryClient.doPullBearerAuth();
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
}
}
}
} | @Test
public void testCall_allMirrorsFail()
throws InvalidImageReferenceException, IOException, RegistryException,
LayerPropertyNotFoundException, LayerCountMismatchException,
BadContainerConfigurationFormatException, CacheCorruptedException,
CredentialRetrievalException {
Mockito.when(imageConfiguration.getImage()).thenReturn(ImageReference.parse("registry/repo"));
Mockito.when(imageConfiguration.getImageRegistry()).thenReturn("registry");
Mockito.when(buildContext.getRegistryMirrors())
.thenReturn(ImmutableListMultimap.of("registry", "quay.io", "registry", "gcr.io"));
Mockito.when(buildContext.newBaseImageRegistryClientFactory(Mockito.any()))
.thenReturn(registryClientFactory);
Mockito.when(registryClient.pullManifest(Mockito.any()))
.thenThrow(new RegistryException("not found"));
Mockito.when(containerConfig.getPlatforms())
.thenReturn(ImmutableSet.of(new Platform("amd64", "linux")));
RegistryClient.Factory dockerHubRegistryClientFactory =
setUpWorkingRegistryClientFactoryWithV22ManifestTemplate();
Mockito.when(buildContext.newBaseImageRegistryClientFactory())
.thenReturn(dockerHubRegistryClientFactory);
ImagesAndRegistryClient result = pullBaseImageStep.call();
Assert.assertEquals(dockerHubRegistryClientFactory.newRegistryClient(), result.registryClient);
InOrder inOrder = Mockito.inOrder(eventHandlers);
inOrder
.verify(eventHandlers)
.dispatch(LogEvent.info("trying mirror quay.io for the base image"));
inOrder
.verify(eventHandlers)
.dispatch(LogEvent.debug("failed to get manifest from mirror quay.io: not found"));
inOrder
.verify(eventHandlers)
.dispatch(LogEvent.info("trying mirror gcr.io for the base image"));
inOrder
.verify(eventHandlers)
.dispatch(LogEvent.debug("failed to get manifest from mirror gcr.io: not found"));
} |
public RestException(Response.Status status, String message) {
this(status.getStatusCode(), message);
} | @Test
public void testRestException() {
RestException re = new RestException(Status.TEMPORARY_REDIRECT, "test rest exception");
RestException testException = new RestException(re);
assertEquals(Status.TEMPORARY_REDIRECT.getStatusCode(), testException.getResponse().getStatus());
assertEquals(re.getResponse().getEntity(), testException.getResponse().getEntity());
} |
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
if (remaining.split("/").length > 1) {
throw new IllegalArgumentException("Invalid URI: " + URISupport.sanitizeUri(uri));
}
SplunkHECEndpoint answer = new SplunkHECEndpoint(uri, this, new SplunkHECConfiguration());
setProperties(answer, parameters);
answer.setSplunkURL(remaining);
return answer;
} | @Test
public void testTokenValid() throws Exception {
SplunkHECEndpoint endpoint = (SplunkHECEndpoint) component.createEndpoint(
"splunk-hec:localhost:18808?token=11111111-1111-1111-1111-111111111111");
endpoint.init();
assertEquals("11111111-1111-1111-1111-111111111111", endpoint.getConfiguration().getToken());
} |
public SourceWithMetadata lookupSource(int globalLineNumber, int sourceColumn)
throws IncompleteSourceWithMetadataException {
LineToSource lts = this.sourceReferences().stream()
.filter(lts1 -> lts1.includeLine(globalLineNumber))
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("can't find the config segment related to line " + globalLineNumber));
return new SourceWithMetadata(lts.source.getProtocol(), lts.source.getId(),
globalLineNumber + 1 - lts.startLine, sourceColumn, lts.source.getText());
} | @Test
public void testSourceAndLineRemapping_pipelineDefinedInSingleFileMultiLine() throws IncompleteSourceWithMetadataException {
final SourceWithMetadata swm = new SourceWithMetadata("file", "/tmp/1", 0, 0, PIPELINE_CONFIG_PART_1);
sut = new PipelineConfig(source, pipelineIdSym, toRubyArray(new SourceWithMetadata[]{swm}), SETTINGS);
assertEquals("return the same line of the queried L1", 1, (int) sut.lookupSource(1, 0).getLine());
assertEquals("return the same line of the queried L2", 2, (int) sut.lookupSource(2, 0).getLine());
} |
public CharSequence format(Monetary monetary) {
// determine maximum number of decimals that can be visible in the formatted string
// (if all decimal groups were to be used)
int max = minDecimals;
if (decimalGroups != null)
for (int group : decimalGroups)
max += group;
final int maxVisibleDecimals = max;
int smallestUnitExponent = monetary.smallestUnitExponent();
checkState(maxVisibleDecimals <= smallestUnitExponent, () ->
"maxVisibleDecimals cannot exceed " + smallestUnitExponent + ": " + maxVisibleDecimals);
// convert to decimal
long satoshis = Math.abs(monetary.getValue());
int decimalShift = smallestUnitExponent - shift;
DecimalNumber decimal = satoshisToDecimal(satoshis, roundingMode, decimalShift, maxVisibleDecimals);
long numbers = decimal.numbers;
long decimals = decimal.decimals;
// formatting
String decimalsStr = decimalShift > 0 ? String.format(Locale.US,
"%0" + Integer.toString(decimalShift) + "d", decimals) : "";
StringBuilder str = new StringBuilder(decimalsStr);
while (str.length() > minDecimals && str.charAt(str.length() - 1) == '0')
str.setLength(str.length() - 1); // trim trailing zero
int i = minDecimals;
if (decimalGroups != null) {
for (int group : decimalGroups) {
if (str.length() > i && str.length() < i + group) {
while (str.length() < i + group)
str.append('0');
break;
}
i += group;
}
}
if (str.length() > 0)
str.insert(0, decimalMark);
str.insert(0, numbers);
if (monetary.getValue() < 0)
str.insert(0, negativeSign);
else if (positiveSign != 0)
str.insert(0, positiveSign);
if (codes != null) {
if (codePrefixed) {
str.insert(0, codeSeparator);
str.insert(0, code());
} else {
str.append(codeSeparator);
str.append(code());
}
}
// Convert to non-arabic digits.
if (zeroDigit != '0') {
int offset = zeroDigit - '0';
for (int d = 0; d < str.length(); d++) {
char c = str.charAt(d);
if (Character.isDigit(c))
str.setCharAt(d, (char) (c + offset));
}
}
return str;
} | @Test
public void uBtcRounding() {
assertEquals("0", format(ZERO, 6, 0));
assertEquals("0.00", format(ZERO, 6, 2));
assertEquals("1000000", format(COIN, 6, 0));
assertEquals("1000000", format(COIN, 6, 0, 2));
assertEquals("1000000.0", format(COIN, 6, 1));
assertEquals("1000000.00", format(COIN, 6, 2));
final Coin justNot = COIN.subtract(SATOSHI);
assertEquals("1000000", format(justNot, 6, 0));
assertEquals("999999.99", format(justNot, 6, 0, 2));
assertEquals("1000000.0", format(justNot, 6, 1));
assertEquals("999999.99", format(justNot, 6, 2));
final Coin slightlyMore = COIN.add(SATOSHI);
assertEquals("1000000", format(slightlyMore, 6, 0));
assertEquals("1000000.01", format(slightlyMore, 6, 0, 2));
assertEquals("1000000.0", format(slightlyMore, 6, 1));
assertEquals("1000000.01", format(slightlyMore, 6, 2));
final Coin pivot = COIN.add(SATOSHI.multiply(5));
assertEquals("1000000.05", format(pivot, 6, 2));
assertEquals("1000000.05", format(pivot, 6, 0, 2));
assertEquals("1000000.1", format(pivot, 6, 1));
assertEquals("1000000.1", format(pivot, 6, 0, 1));
final Coin value = Coin.valueOf(1122334455667788L);
assertEquals("11223344556678", format(value, 6, 0));
assertEquals("11223344556677.88", format(value, 6, 2));
assertEquals("11223344556677.9", format(value, 6, 1));
assertEquals("11223344556677.88", format(value, 6, 2));
} |
static void setDefaultEnsemblePlacementPolicy(
ClientConfiguration bkConf,
ServiceConfiguration conf,
MetadataStore store
) {
bkConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store);
if (conf.isBookkeeperClientRackawarePolicyEnabled() || conf.isBookkeeperClientRegionawarePolicyEnabled()) {
if (conf.isBookkeeperClientRegionawarePolicyEnabled()) {
bkConf.setEnsemblePlacementPolicy(RegionAwareEnsemblePlacementPolicy.class);
bkConf.setProperty(
REPP_ENABLE_VALIDATION,
conf.getProperties().getProperty(REPP_ENABLE_VALIDATION, "true")
);
bkConf.setProperty(
REPP_REGIONS_TO_WRITE,
conf.getProperties().getProperty(REPP_REGIONS_TO_WRITE, null)
);
bkConf.setProperty(
REPP_MINIMUM_REGIONS_FOR_DURABILITY,
conf.getProperties().getProperty(REPP_MINIMUM_REGIONS_FOR_DURABILITY, "2")
);
bkConf.setProperty(
REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE,
conf.getProperties().getProperty(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE, "true")
);
} else {
bkConf.setEnsemblePlacementPolicy(RackawareEnsemblePlacementPolicy.class);
}
bkConf.setMinNumRacksPerWriteQuorum(conf.getBookkeeperClientMinNumRacksPerWriteQuorum());
bkConf.setEnforceMinNumRacksPerWriteQuorum(conf.isBookkeeperClientEnforceMinNumRacksPerWriteQuorum());
bkConf.setProperty(REPP_DNS_RESOLVER_CLASS,
conf.getProperties().getProperty(
REPP_DNS_RESOLVER_CLASS,
BookieRackAffinityMapping.class.getName()));
bkConf.setProperty(NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
conf.getProperties().getProperty(
NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
""));
}
if (conf.getBookkeeperClientIsolationGroups() != null && !conf.getBookkeeperClientIsolationGroups().isEmpty()) {
bkConf.setEnsemblePlacementPolicy(IsolatedBookieEnsemblePlacementPolicy.class);
bkConf.setProperty(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS,
conf.getBookkeeperClientIsolationGroups());
bkConf.setProperty(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS,
conf.getBookkeeperClientSecondaryIsolationGroups());
}
} | @Test
public void testSetDefaultEnsemblePlacementPolicyRackAwareEnabled() {
ClientConfiguration bkConf = new ClientConfiguration();
ServiceConfiguration conf = new ServiceConfiguration();
MetadataStore store = mock(MetadataStore.class);
assertNull(bkConf.getProperty(REPP_ENABLE_VALIDATION));
assertNull(bkConf.getProperty(REPP_REGIONS_TO_WRITE));
assertNull(bkConf.getProperty(REPP_MINIMUM_REGIONS_FOR_DURABILITY));
assertNull(bkConf.getProperty(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE));
assertNull(bkConf.getProperty(REPP_DNS_RESOLVER_CLASS));
assertNull(bkConf.getProperty(MIN_NUM_RACKS_PER_WRITE_QUORUM));
assertNull(bkConf.getProperty(ENFORCE_MIN_NUM_RACKS_PER_WRITE_QUORUM));
conf.setBookkeeperClientRegionawarePolicyEnabled(true);
BookKeeperClientFactoryImpl.setDefaultEnsemblePlacementPolicy(
bkConf,
conf,
store
);
assertTrue(bkConf.getBoolean(REPP_ENABLE_VALIDATION));
assertNull(bkConf.getString(REPP_REGIONS_TO_WRITE));
assertEquals(2, bkConf.getInt(REPP_MINIMUM_REGIONS_FOR_DURABILITY));
assertTrue(bkConf.getBoolean(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE));
assertEquals(
bkConf.getProperty(REPP_DNS_RESOLVER_CLASS),
BookieRackAffinityMapping.class.getName());
assertFalse(bkConf.getEnforceMinNumRacksPerWriteQuorum());
assertEquals(2, bkConf.getMinNumRacksPerWriteQuorum());
} |
@Override
public final void run() {
long valueCount = collector.getMergingValueCount();
if (valueCount == 0) {
return;
}
runInternal();
assert operationCount > 0 : "No merge operations have been invoked in AbstractContainerMerger";
try {
long timeoutMillis = Math.max(valueCount * TIMEOUT_FACTOR, MINIMAL_TIMEOUT_MILLIS);
if (!semaphore.tryAcquire(operationCount, timeoutMillis, TimeUnit.MILLISECONDS)) {
logger.warning("Split-brain healing for " + getLabel() + " didn't finish within the timeout...");
}
} catch (InterruptedException e) {
logger.finest("Interrupted while waiting for split-brain healing of " + getLabel() + "...");
Thread.currentThread().interrupt();
} finally {
collector.destroy();
}
} | @Test
@RequireAssertEnabled
@Category(SlowTest.class)
public void testMergerRun_whenMergeOperationBlocks_thenMergerFinishesEventually() {
TestMergeOperation operation = new TestMergeOperation(BLOCKS);
TestContainerMerger merger = new TestContainerMerger(collector, nodeEngine, operation);
merger.run();
operation.unblock();
assertTrue("Expected the merge operation to be invoked", operation.hasBeenInvoked);
assertTrue("Expected collected containers to be destroyed", collector.onDestroyHasBeenCalled);
} |
public static void isTrue(boolean expression, String message) {
if (expression == false) {
throw new IllegalArgumentException(message);
}
} | @Test
public void testIsTrueThrow() {
Assertions.assertThrows(IllegalArgumentException.class, () -> Utils.isTrue(false, "foo"));
} |
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength);
} | @Test
public void testContainsIpAddressIPv6() {
Ip6Prefix ipPrefix;
ipPrefix = Ip6Prefix.valueOf("1111:2222:3333:4444::/120");
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::")));
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::1")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4445::")));
assertFalse(ipPrefix.contains(Ip6Address.valueOf("::")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")));
ipPrefix = Ip6Prefix.valueOf("1111:2222:3333:4444::/128");
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::1")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4445::")));
assertFalse(ipPrefix.contains(Ip6Address.valueOf("::")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")));
ipPrefix = Ip6Prefix.valueOf("::/0");
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::")));
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::1")));
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4445::")));
assertTrue(ipPrefix.contains(Ip6Address.valueOf("::")));
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")));
ipPrefix =
Ip6Prefix.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128");
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4444::1")));
assertFalse(ipPrefix.contains(
Ip6Address.valueOf("1111:2222:3333:4445::")));
assertFalse(ipPrefix.contains(Ip6Address.valueOf("::")));
assertTrue(ipPrefix.contains(
Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")));
} |
public static void invoke(Object instance, String methodName)
throws SecurityException, IllegalArgumentException, JMeterException
{
Method m;
try {
m = ClassUtils.getPublicMethod(instance.getClass(), methodName, new Class [] {});
m.invoke(instance, (Object [])null);
} catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
throw new JMeterException(e);
}
} | @Test
public void testInvoke() throws Exception {
Dummy dummy = new Dummy();
ClassTools.invoke(dummy, "callMe");
assertTrue(dummy.wasCalled());
} |
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("uid") String uId,
@QueryParam("confstoretrieve") String confsToRetrieve,
@QueryParam("metricstoretrieve") String metricsToRetrieve,
@QueryParam("fields") String fields,
@QueryParam("metricslimit") String metricsLimit,
@QueryParam("metricstimestart") String metricsTimeStart,
@QueryParam("metricstimeend") String metricsTimeEnd) {
String url = req.getRequestURI() +
(req.getQueryString() == null ? "" :
QUERY_STRING_SEP + req.getQueryString());
UserGroupInformation callerUGI =
TimelineReaderWebServicesUtils.getUser(req);
LOG.info("Received URL {} from user {}",
url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
long startTime = Time.monotonicNow();
boolean succeeded = false;
init(res);
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
TimelineEntity entity = null;
try {
TimelineReaderContext context =
TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
entity = timelineReaderManager.getEntity(context,
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
checkAccessForGenericEntity(entity, callerUGI);
succeeded = true;
} catch (Exception e) {
handleException(e, url, startTime, "Either metricslimit or metricstime"
+ " start/end");
} finally {
long latency = Time.monotonicNow() - startTime;
METRICS.addGetEntitiesLatency(latency, succeeded);
LOG.info("Processed URL {} (Took {} ms.)", url, latency);
}
if (entity == null) {
LOG.info("Processed URL {} but entity not found" + " (Took {} ms.)",
url, (Time.monotonicNow() - startTime));
throw new NotFoundException("Timeline entity with uid: " + uId +
" is not found");
}
return entity;
} | @Test
void testGetEntityDefaultView() throws Exception {
Client client = createClient();
try {
URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
"timeline/clusters/cluster1/apps/app1/entities/app/id_1");
ClientResponse resp = getResponse(client, uri);
TimelineEntity entity = resp.getEntity(TimelineEntity.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entity);
assertEquals("id_1", entity.getId());
assertEquals("app", entity.getType());
assertEquals((Long) 1425016502000L, entity.getCreatedTime());
// Default view i.e. when no fields are specified, entity contains only
// entity id, entity type and created time.
assertEquals(0, entity.getConfigs().size());
assertEquals(0, entity.getMetrics().size());
} finally {
client.destroy();
}
} |
public static boolean matches(MetricsFilter filter, MetricKey key) {
if (filter == null) {
return true;
}
@Nullable String stepName = key.stepName();
if (stepName == null) {
if (!filter.steps().isEmpty()) {
// The filter specifies steps, but the metric is not associated with a step.
return false;
}
} else if (!matchesScope(stepName, filter.steps())) {
// The filter specifies steps that differ from the metric's step
return false;
}
// The filter's steps match the metric's step.
return matchesName(key.metricName(), filter.names());
} | @Test
public void testMatchStringNamespaceFilters() {
// MetricsFilter with a String-namespace + name filter. Without step filter.
// Successful match.
assertTrue(
MetricFiltering.matches(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.named("myNamespace", "myMetricName"))
.build(),
MetricKey.create("anyStep", MetricName.named("myNamespace", "myMetricName"))));
// Unsuccessful match.
assertFalse(
MetricFiltering.matches(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.named("myOtherNamespace", "myMetricName"))
.build(),
MetricKey.create("anyStep", MetricName.named("myNamespace", "myMetricname"))));
} |
public static Type convertType(TypeInfo typeInfo) {
switch (typeInfo.getOdpsType()) {
case BIGINT:
return Type.BIGINT;
case INT:
return Type.INT;
case SMALLINT:
return Type.SMALLINT;
case TINYINT:
return Type.TINYINT;
case FLOAT:
return Type.FLOAT;
case DECIMAL:
DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
case DOUBLE:
return Type.DOUBLE;
case CHAR:
CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
return ScalarType.createCharType(charTypeInfo.getLength());
case VARCHAR:
VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
return ScalarType.createVarcharType(varcharTypeInfo.getLength());
case STRING:
case JSON:
return ScalarType.createDefaultCatalogString();
case BINARY:
return Type.VARBINARY;
case BOOLEAN:
return Type.BOOLEAN;
case DATE:
return Type.DATE;
case TIMESTAMP:
case DATETIME:
return Type.DATETIME;
case MAP:
MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()),
convertType(mapTypeInfo.getValueTypeInfo()));
case ARRAY:
ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo;
return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo()));
case STRUCT:
StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
List<Type> fieldTypeList =
structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType)
.collect(Collectors.toList());
return new StructType(fieldTypeList);
default:
return Type.VARCHAR;
}
} | @Test
public void testConvertTypeCaseBinary() {
TypeInfo typeInfo = TypeInfoFactory.BINARY;
Type result = EntityConvertUtils.convertType(typeInfo);
assertEquals(Type.VARBINARY, result);
} |
@VisibleForTesting
Object evaluate(final GenericRow row) {
return term.getValue(new TermEvaluationContext(row));
} | @Test
public void shouldEvaluateBetween() {
// Given:
final Expression expression1 = new BetweenPredicate(
new IntegerLiteral(4), new IntegerLiteral(3), new IntegerLiteral(8)
);
final Expression expression2 = new BetweenPredicate(
new IntegerLiteral(0), new IntegerLiteral(3), new IntegerLiteral(8)
);
final Expression expression3 = new BetweenPredicate(
new StringLiteral("b"), new StringLiteral("a"), new StringLiteral("c")
);
final Expression expression4 = new BetweenPredicate(
new StringLiteral("z"), new StringLiteral("a"), new StringLiteral("c")
);
// When:
InterpretedExpression interpreter1 = interpreter(expression1);
InterpretedExpression interpreter2 = interpreter(expression2);
InterpretedExpression interpreter3 = interpreter(expression3);
InterpretedExpression interpreter4 = interpreter(expression4);
// Then:
assertThat(interpreter1.evaluate(ROW), is(true));
assertThat(interpreter2.evaluate(ROW), is(false));
assertThat(interpreter3.evaluate(ROW), is(true));
assertThat(interpreter4.evaluate(ROW), is(false));
} |
@Override
public void deactivate(String id, Boolean anonymize) {
userSession.checkLoggedIn().checkIsSystemAdministrator();
checkRequest(!id.equals(userSession.getUuid()), "Self-deactivation is not possible");
userService.deactivate(id, anonymize);
} | @Test
public void deactivate_whenAnonymizeIsNotSpecified_shouldDeactivateUserWithoutAnonymization() throws Exception {
userSession.logIn().setSystemAdministrator();
mockMvc.perform(delete(USER_ENDPOINT + "/userToDelete"))
.andExpect(status().isNoContent());
verify(userService).deactivate("userToDelete", false);
} |
@Deprecated
@VisibleForTesting
static native void nativeVerifyChunkedSums(
int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength,
String fileName, long basePos) throws ChecksumException; | @Test
@SuppressWarnings("deprecation")
public void testNativeVerifyChunkedSumsSuccess() throws ChecksumException {
allocateDirectByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.nativeVerifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, checksums.position(), data, data.position(), data.remaining(),
fileName, BASE_POSITION);
} |
public static String post(HttpURLConnection con,
Map<String, String> headers,
String requestBody,
Integer connectTimeoutMs,
Integer readTimeoutMs)
throws IOException, UnretryableException {
handleInput(con, headers, requestBody, connectTimeoutMs, readTimeoutMs);
return handleOutput(con);
} | @Test
public void testErrorResponseIsInvalidJson() throws IOException {
HttpURLConnection mockedCon = createHttpURLConnection("dummy");
when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read"));
when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream(
"non json error output".getBytes(StandardCharsets.UTF_8)));
when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR);
IOException ioe = assertThrows(IOException.class,
() -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null));
assertTrue(ioe.getMessage().contains("{non json error output}"));
} |
public static HttpRequest toNettyRequest(RestRequest request) throws Exception
{
HttpMethod nettyMethod = HttpMethod.valueOf(request.getMethod());
URL url = new URL(request.getURI().toString());
String path = url.getFile();
// RFC 2616, section 5.1.2:
// Note that the absolute path cannot be empty; if none is present in the original URI,
// it MUST be given as "/" (the server root).
if (path.isEmpty())
{
path = "/";
}
ByteBuf content = Unpooled.wrappedBuffer(request.getEntity().asByteBuffer());
HttpRequest nettyRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, nettyMethod, path, content);
nettyRequest.headers().set(HttpConstants.CONTENT_LENGTH, request.getEntity().length());
setHttpHeadersAndCookies(request, url, nettyRequest);
return nettyRequest;
} | @Test
public void testRestToNettyRequestWithMultipleCookies() throws Exception
{
RestRequestBuilder restRequestBuilder = new RestRequestBuilder(new URI(ANY_URI));
restRequestBuilder.setCookies(ANY_COOKIES);
RestRequest restRequest = restRequestBuilder.build();
HttpRequest nettyRequest = NettyRequestAdapter.toNettyRequest(restRequest);
Assert.assertEquals(nettyRequest.headers().get("Cookie"), ENCODED_COOKIES_HEADER_VALUE);
} |
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
if(proxy.isSupported(source, target)) {
return proxy.copy(source, target, status, callback, listener);
}
// Copy between encrypted and unencrypted data room
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// File key must be set for new upload
status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
}
final Path result = copy.copy(source, target, status, callback, listener);
nodeid.cache(target, null);
return result.withAttributes(new SDSAttributesFinderFeature(session, nodeid).find(result));
} | @Test
public void testCopyFileWithRename() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Path copy = new Path(new SDSDirectoryFeature(session, nodeid).mkdir(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSCopyFeature proxy = new SDSCopyFeature(session, nodeid);
final SDSDelegatingCopyFeature feature = new SDSDelegatingCopyFeature(session, nodeid, proxy);
assertFalse(proxy.isSupported(test, copy));
assertTrue(feature.isSupported(test, copy));
final Path target = feature.copy(test, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
assertNotEquals(test.attributes().getVersionId(), target.attributes().getVersionId());
assertEquals(target.attributes().getVersionId(), new SDSAttributesFinderFeature(session, nodeid).find(target).getVersionId());
assertTrue(new SDSFindFeature(session, nodeid).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(copy));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
if (stateManager.taskType() != TaskType.ACTIVE) {
throw new IllegalStateException("Tried to transition processor context to active but the state manager's " +
"type was " + stateManager.taskType());
}
this.streamTask = streamTask;
this.collector = recordCollector;
this.cache = newCache;
addAllFlushListenersToNewCache();
} | @Test
public void localTimestampedKeyValueStoreShouldNotAllowInitOrClose() {
foreachSetUp();
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
when(stateManager.getGlobalStore(anyString())).thenReturn(null);
final TimestampedKeyValueStore<String, Long> timestampedKeyValueStoreMock = mock(TimestampedKeyValueStore.class);
when(stateManager.getStore("LocalTimestampedKeyValueStore"))
.thenAnswer(answer -> timestampedKeyValueStoreMock(timestampedKeyValueStoreMock));
mockTimestampedKeyValueOperation(timestampedKeyValueStoreMock);
mockStateStoreFlush(timestampedKeyValueStoreMock);
context = buildProcessorContextImpl(streamsConfig, stateManager);
final StreamTask task = mock(StreamTask.class);
context.transitionToActive(task, null, null);
mockProcessorNodeWithLocalKeyValueStore();
doTest("LocalTimestampedKeyValueStore", (Consumer<TimestampedKeyValueStore<String, Long>>) store -> {
verifyStoreCannotBeInitializedOrClosed(store);
store.flush();
assertTrue(flushExecuted);
store.put("1", ValueAndTimestamp.make(1L, 2L));
assertTrue(putExecuted);
store.putIfAbsent("1", ValueAndTimestamp.make(1L, 2L));
assertTrue(putIfAbsentExecuted);
store.putAll(Collections.emptyList());
assertTrue(putAllExecuted);
store.delete("1");
assertTrue(deleteExecuted);
assertEquals(VALUE_AND_TIMESTAMP, store.get(KEY));
assertEquals(timestampedRangeIter, store.range("one", "two"));
assertEquals(timestampedAllIter, store.all());
assertEquals(VALUE, store.approximateNumEntries());
});
} |
public static HashSet<String> expand(String val) {
HashSet<String> set = new HashSet<>();
Matcher matcher = NUMERIC_RANGE_PATTERN.matcher(val);
if (!matcher.matches()) {
set.add(val);
return set;
}
String prequel = matcher.group(1);
String rangeStart = matcher.group(2);
String rangeEnd = matcher.group(3);
String epilog = matcher.group(4);
int rangeStartInt = Integer.parseInt(rangeStart);
int rangeEndInt = Integer.parseInt(rangeEnd);
if (rangeEndInt < rangeStartInt) {
throw new RuntimeException("Invalid range: start " + rangeStartInt +
" is higher than end " + rangeEndInt);
}
for (int i = rangeStartInt; i <= rangeEndInt; i++) {
set.add(String.format("%s%d%s", prequel, i, epilog));
}
return set;
} | @Test
public void testNoExpansionNeeded() {
assertEquals(Collections.singleton("foo"), StringExpander.expand("foo"));
assertEquals(Collections.singleton("bar"), StringExpander.expand("bar"));
assertEquals(Collections.singleton(""), StringExpander.expand(""));
} |
public static void checkNotNull(Object o) {
if (o == null) {
throw new NullPointerException();
}
} | @Test
public void testPreconditionsNull(){
Preconditions.checkNotNull("");
try{
Preconditions.checkNotNull(null);
} catch (NullPointerException e){
assertNull(e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here", 10);
try{
Preconditions.checkNotNull(null, "Message %s here", 10);
} catch (NullPointerException e){
assertEquals("Message 10 here", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there", 10, 20);
try{
Preconditions.checkNotNull(null, "Message %s here %s there", 10, 20);
} catch (NullPointerException e){
assertEquals("Message 10 here 20 there", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there %s more", 10, 20, 30);
try{
Preconditions.checkNotNull(null, "Message %s here %s there %s more", 10, 20, 30);
} catch (NullPointerException e){
assertEquals("Message 10 here 20 there 30 more", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here", 10L);
try{
Preconditions.checkNotNull(null, "Message %s here", 10L);
} catch (NullPointerException e){
assertEquals("Message 10 here", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there", 10L, 20L);
try{
Preconditions.checkNotNull(null, "Message %s here %s there", 10L, 20L);
} catch (NullPointerException e){
assertEquals("Message 10 here 20 there", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there %s more", 10L, 20L, 30L);
try{
Preconditions.checkNotNull(null, "Message %s here %s there %s more", 10L, 20L, 30L);
} catch (NullPointerException e){
assertEquals("Message 10 here 20 there 30 more", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there %s more", "A", "B", "C");
try{
Preconditions.checkNotNull(null, "Message %s here %s there %s more", "A", "B", "C");
} catch (NullPointerException e){
assertEquals("Message A here B there C more", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there %s more", new int[]{0,1}, new double[]{2.0, 3.0}, new boolean[]{true, false});
try{
Preconditions.checkNotNull(null, "Message %s here %s there %s more", new int[]{0,1}, new double[]{2.0, 3.0}, new boolean[]{true, false});
} catch (NullPointerException e){
assertEquals("Message [0, 1] here [2.0, 3.0] there [true, false] more", e.getMessage());
}
Preconditions.checkNotNull("", "Message %s here %s there", new String[]{"A", "B"}, new Object[]{1.0, "C"});
try{
Preconditions.checkNotNull(null, "Message %s here %s there", new String[]{"A", "B"}, new Object[]{1.0, "C"});
} catch (NullPointerException e){
assertEquals("Message [A, B] here [1.0, C] there", e.getMessage());
}
} |
public Span nextSpan(Message message) {
TraceContextOrSamplingFlags extracted =
extractAndClearTraceIdProperties(processorExtractor, message, message);
Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
// When an upstream context was not present, lookup keys were likely not added
if (extracted.context() == null && !result.isNoop()) {
// simplify code by re-using an existing MessagingRequest impl
tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
}
return result;
} | @Test void nextSpan_prefers_b3_header() {
TraceContext incoming = newTraceContext(SamplingFlags.NOT_SAMPLED);
setStringProperty(message, "b3", B3SingleFormat.writeB3SingleFormat(incoming));
Span child;
try (Scope scope = tracing.currentTraceContext().newScope(parent)) {
child = jmsTracing.nextSpan(message);
}
assertChildOf(child.context(), incoming);
assertThat(child.isNoop()).isTrue();
} |
@Override
public void readFully(final byte[] b) throws IOException {
if (read(b) == -1) {
throw new EOFException("End of stream reached");
}
} | @Test
public void testReadFullyForBOffLen() throws Exception {
byte[] readFull = new byte[10];
in.readFully(readFull, 0, 5);
for (int i = 0; i < 5; i++) {
assertEquals(readFull[i], in.data[i]);
}
} |
@Override
public boolean isOperator() {
if (expression != null) {
return expression.isOperator();
}
return false;
} | @Test
public void isOperator() {
when(expr.isAction()).thenReturn(true).thenReturn(false);
assertTrue(test.isAction());
assertFalse(test.isAction());
verify(expr, times(2)).isAction();
verifyNoMoreInteractions(expr);
} |
public long getTableOffset() {
return table_offset;
} | @Test
public void testGetTableOffset() {
assertEquals(TestParameters.VP_TBL_OFFSET, chmLzxcResetTable.getTableOffset());
} |
public synchronized long run(JobConfig jobConfig)
throws JobDoesNotExistException, ResourceExhaustedException {
long jobId = getNewJobId();
run(jobConfig, jobId);
return jobId;
} | @Test
public void runNonExistingJobConfig() throws Exception {
try {
mJobMaster.run(new DummyPlanConfig());
Assert.fail("cannot run non-existing job");
} catch (JobDoesNotExistException e) {
Assert.assertEquals(ExceptionMessage.JOB_DEFINITION_DOES_NOT_EXIST.getMessage("dummy"),
e.getMessage());
}
} |
@PUT
@Path("{id}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response updateFloatingIp(@PathParam("id") String id, InputStream input) throws IOException {
log.trace(String.format(MESSAGE, "UPDATE " + id));
String inputStr = IOUtils.toString(input, REST_UTF8);
if (!haService.isActive()
&& !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
return syncPut(haService, FLOATING_IPS, id, inputStr);
}
final NeutronFloatingIP floatingIp = (NeutronFloatingIP)
jsonToModelEntity(inputStr, NeutronFloatingIP.class);
adminService.updateFloatingIp(floatingIp);
return status(Response.Status.OK).build();
} | @Test
public void testUpdateFloatingIpWithUpdatingOperation() {
expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
replay(mockOpenstackHaService);
mockOpenstackRouterAdminService.updateFloatingIp(anyObject());
replay(mockOpenstackRouterAdminService);
final WebTarget wt = target();
InputStream jsonStream = OpenstackFloatingIpWebResourceTest.class
.getResourceAsStream("openstack-floatingip1.json");
Response response = wt.path(PATH + "/2f245a7b-796b-4f26-9cf9-9e82d248fda7")
.request(MediaType.APPLICATION_JSON_TYPE)
.put(Entity.json(jsonStream));
final int status = response.getStatus();
assertThat(status, is(200));
verify(mockOpenstackRouterAdminService);
} |
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
// In PostgresQL, change stream records are returned as JsonB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
} | @Test
public void testMappingInsertJsonRowNewRowToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"transactionId",
false,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(
new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
ModType.INSERT,
ValueCaptureType.NEW_ROW,
10L,
2L,
"transactionTag",
true,
null);
final String jsonString = recordToJson(dataChangeRecord, false, false);
assertNotNull(jsonString);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
@Override
public Object run() throws ZuulException {
// Exit the entries in order.
// The entries can be retrieved from the request context.
SentinelEntryUtils.tryExitFromCurrentContext();
return null;
} | @Test
public void testRun() throws Exception {
SentinelZuulPostFilter sentinelZuulPostFilter = new SentinelZuulPostFilter();
Object result = sentinelZuulPostFilter.run();
Assert.assertNull(result);
} |
@Override
public Input find(String id) throws NotFoundException {
if (!ObjectId.isValid(id)) {
throw new NotFoundException("Input id <" + id + "> is invalid!");
}
final DBObject o = get(org.graylog2.inputs.InputImpl.class, id);
if (o == null) {
throw new NotFoundException("Input <" + id + "> not found!");
}
return createFromDbObject(o);
} | @Test
@MongoDBFixtures("InputServiceImplTest.json")
public void findReturnsExistingInput() throws NotFoundException {
final Input input = inputService.find("54e3deadbeefdeadbeef0002");
assertThat(input.getId()).isEqualTo("54e3deadbeefdeadbeef0002");
} |
@Override
public void invoke() throws Exception {
// --------------------------------------------------------------------
// Initialize
// --------------------------------------------------------------------
initInputFormat();
LOG.debug(getLogString("Start registering input and output"));
try {
initOutputs(getEnvironment().getUserCodeClassLoader());
} catch (Exception ex) {
throw new RuntimeException(
"The initialization of the DataSource's outputs caused an error: "
+ ex.getMessage(),
ex);
}
LOG.debug(getLogString("Finished registering input and output"));
// --------------------------------------------------------------------
// Invoke
// --------------------------------------------------------------------
LOG.debug(getLogString("Starting data source operator"));
RuntimeContext ctx = createRuntimeContext();
final Counter numRecordsOut;
{
Counter tmpNumRecordsOut;
try {
InternalOperatorIOMetricGroup ioMetricGroup =
((InternalOperatorMetricGroup) ctx.getMetricGroup()).getIOMetricGroup();
ioMetricGroup.reuseInputMetricsForTask();
if (this.config.getNumberOfChainedStubs() == 0) {
ioMetricGroup.reuseOutputMetricsForTask();
}
tmpNumRecordsOut = ioMetricGroup.getNumRecordsOutCounter();
} catch (Exception e) {
LOG.warn("An exception occurred during the metrics setup.", e);
tmpNumRecordsOut = new SimpleCounter();
}
numRecordsOut = tmpNumRecordsOut;
}
Counter completedSplitsCounter = ctx.getMetricGroup().counter("numSplitsProcessed");
if (RichInputFormat.class.isAssignableFrom(this.format.getClass())) {
((RichInputFormat) this.format).setRuntimeContext(ctx);
LOG.debug(getLogString("Rich Source detected. Initializing runtime context."));
((RichInputFormat) this.format).openInputFormat();
LOG.debug(getLogString("Rich Source detected. Opening the InputFormat."));
}
ExecutionConfig executionConfig = getExecutionConfig();
boolean objectReuseEnabled = executionConfig.isObjectReuseEnabled();
LOG.debug(
"DataSourceTask object reuse: "
+ (objectReuseEnabled ? "ENABLED" : "DISABLED")
+ ".");
final TypeSerializer<OT> serializer = this.serializerFactory.getSerializer();
try {
// start all chained tasks
BatchTask.openChainedTasks(this.chainedTasks, this);
// get input splits to read
final Iterator<InputSplit> splitIterator = getInputSplits();
// for each assigned input split
while (!this.taskCanceled && splitIterator.hasNext()) {
// get start and end
final InputSplit split = splitIterator.next();
LOG.debug(getLogString("Opening input split " + split.toString()));
final InputFormat<OT, InputSplit> format = this.format;
// open input format
format.open(split);
LOG.debug(getLogString("Starting to read input from split " + split.toString()));
try {
final Collector<OT> output =
new CountingCollector<>(this.output, numRecordsOut);
if (objectReuseEnabled) {
OT reuse = serializer.createInstance();
// as long as there is data to read
while (!this.taskCanceled && !format.reachedEnd()) {
OT returned;
if ((returned = format.nextRecord(reuse)) != null) {
output.collect(returned);
}
}
} else {
// as long as there is data to read
while (!this.taskCanceled && !format.reachedEnd()) {
OT returned;
if ((returned = format.nextRecord(serializer.createInstance()))
!= null) {
output.collect(returned);
}
}
}
if (LOG.isDebugEnabled() && !this.taskCanceled) {
LOG.debug(getLogString("Closing input split " + split.toString()));
}
} finally {
// close. We close here such that a regular close throwing an exception marks a
// task as failed.
format.close();
}
completedSplitsCounter.inc();
} // end for all input splits
// close all chained tasks letting them report failure
BatchTask.closeChainedTasks(this.chainedTasks, this);
// close the output collector
this.output.close();
} catch (Exception ex) {
// close the input, but do not report any exceptions, since we already have another root
// cause
try {
this.format.close();
} catch (Throwable ignored) {
}
BatchTask.cancelChainedTasks(this.chainedTasks);
ex = ExceptionInChainedStubException.exceptionUnwrap(ex);
if (ex instanceof CancelTaskException) {
// forward canceling exception
throw ex;
} else if (!this.taskCanceled) {
// drop exception, if the task was canceled
BatchTask.logAndThrowException(ex, this);
}
} finally {
BatchTask.clearWriters(eventualOutputs);
// --------------------------------------------------------------------
// Closing
// --------------------------------------------------------------------
if (this.format != null
&& RichInputFormat.class.isAssignableFrom(this.format.getClass())) {
((RichInputFormat) this.format).closeInputFormat();
LOG.debug(getLogString("Rich Source detected. Closing the InputFormat."));
}
}
if (!this.taskCanceled) {
LOG.debug(getLogString("Finished data source operator"));
} else {
LOG.debug(getLogString("Data source operator cancelled"));
}
} | @Test
void testCancelDataSourceTask() throws IOException {
int keyCnt = 20;
int valCnt = 4;
super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addOutput(new NirvanaOutputList());
File tempTestFile = new File(tempFolder.toFile(), UUID.randomUUID().toString());
InputFilePreparator.prepareInputFile(
new UniformRecordGenerator(keyCnt, valCnt, false), tempTestFile, false);
final DataSourceTask<Record> testTask = new DataSourceTask<>(this.mockEnv);
super.registerFileInputTask(
testTask, MockDelayingInputFormat.class, tempTestFile.toURI().toString(), "\n");
Thread taskRunner =
new Thread() {
@Override
public void run() {
try {
testTask.invoke();
} catch (Exception ie) {
ie.printStackTrace();
fail("Task threw exception although it was properly canceled");
}
}
};
taskRunner.start();
TaskCancelThread tct = new TaskCancelThread(1, taskRunner, testTask);
tct.start();
try {
tct.join();
taskRunner.join();
} catch (InterruptedException ie) {
fail("Joining threads failed");
}
// assert that temp file was created
assertThat(tempTestFile).withFailMessage("Temp output file does not exist").exists();
} |
public static boolean isKafkaInvokeBySermant(StackTraceElement[] stackTrace) {
return isInvokeBySermant(KAFKA_CONSUMER_CLASS_NAME, KAFKA_CONSUMER_CONTROLLER_CLASS_NAME, stackTrace);
} | @Test
public void testInvokeBySermantWithNestedInvoke() {
StackTraceElement[] stackTrace = new StackTraceElement[5];
stackTrace[0] = new StackTraceElement("testClass0", "testMethod0", "testFileName0", 0);
stackTrace[1] = new StackTraceElement("testClass1", "testMethod1", "testFileName1", 1);
stackTrace[2] = new StackTraceElement("org.apache.kafka.clients.consumer.KafkaConsumer", "unsubscribe",
"testFileName2", 2);
stackTrace[3] = new StackTraceElement("org.apache.kafka.clients.consumer.KafkaConsumer", "subscribe",
"testFileName3", 3);
stackTrace[4] = new StackTraceElement("io.sermant.mq.prohibition.controller.kafka.KafkaConsumerController",
"testMethod4", "testFileName4", 4);
Assert.assertTrue(InvokeUtils.isKafkaInvokeBySermant(stackTrace));
} |
public PrefetchableIterable<T> get() {
checkState(
!isClosed,
"Bag user state is no longer usable because it is closed for %s",
request.getStateKey());
if (isCleared) {
// If we were cleared we should disregard old values.
return PrefetchableIterables.limit(Collections.unmodifiableList(newValues), newValues.size());
} else if (newValues.isEmpty()) {
// If we have no new values then just return the old values.
return oldValues;
}
return PrefetchableIterables.concat(
oldValues, Iterables.limit(Collections.unmodifiableList(newValues), newValues.size()));
} | @Test
public void testGet() throws Exception {
FakeBeamFnStateClient fakeClient =
new FakeBeamFnStateClient(
StringUtf8Coder.of(), ImmutableMap.of(key("A"), asList("A1", "A2", "A3")));
BagUserState<String> userState =
new BagUserState<>(
Caches.noop(), fakeClient, "instructionId", key("A"), StringUtf8Coder.of());
assertArrayEquals(
new String[] {"A1", "A2", "A3"}, Iterables.toArray(userState.get(), String.class));
userState.asyncClose();
assertThrows(IllegalStateException.class, () -> userState.get());
} |
public static void addNumEntriesImmMemTablesMetric(final StreamsMetricsImpl streamsMetrics,
final RocksDBMetricContext metricContext,
final Gauge<BigInteger> valueProvider) {
addMutableMetric(
streamsMetrics,
metricContext,
valueProvider,
NUMBER_OF_ENTRIES_IMMUTABLE_MEMTABLES,
NUMBER_OF_ENTRIES_IMMUTABLE_MEMTABLES_DESCRIPTION
);
} | @Test
public void shouldAddNumEntriesImmutableMemTablesMetric() {
final String name = "num-entries-imm-mem-tables";
final String description = "Total number of entries in the unflushed immutable memtables";
runAndVerifyMutableMetric(
name,
description,
() -> RocksDBMetrics.addNumEntriesImmMemTablesMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
);
} |
public static <T> T getAnnotationValue(AnnotatedElement annotationEle, Class<? extends Annotation> annotationType) throws UtilException {
return getAnnotationValue(annotationEle, annotationType, "value");
} | @Test
public void getAnnotationValueTest() {
final Object value = AnnotationUtil.getAnnotationValue(ClassWithAnnotation.class, AnnotationForTest.class);
assertTrue(value.equals("测试") || value.equals("repeat-annotation"));
} |
public void commitAsync(final Map<TopicIdPartition, Acknowledgements> acknowledgementsMap) {
final Cluster cluster = metadata.fetch();
final ResultHandler resultHandler = new ResultHandler(Optional.empty());
sessionHandlers.forEach((nodeId, sessionHandler) -> {
Node node = cluster.nodeById(nodeId);
if (node != null) {
Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = new HashMap<>();
acknowledgeRequestStates.putIfAbsent(nodeId, new Pair<>(null, null));
for (TopicIdPartition tip : sessionHandler.sessionPartitions()) {
Acknowledgements acknowledgements = acknowledgementsMap.get(tip);
if (acknowledgements != null) {
acknowledgementsMapForNode.put(tip, acknowledgements);
metricsManager.recordAcknowledgementSent(acknowledgements.size());
log.debug("Added async acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
AcknowledgeRequestState asyncRequestState = acknowledgeRequestStates.get(nodeId).getAsyncRequest();
if (asyncRequestState == null) {
acknowledgeRequestStates.get(nodeId).setAsyncRequest(new AcknowledgeRequestState(logContext,
ShareConsumeRequestManager.class.getSimpleName() + ":2",
Long.MAX_VALUE,
retryBackoffMs,
retryBackoffMaxMs,
sessionHandler,
nodeId,
acknowledgementsMapForNode,
this::handleShareAcknowledgeSuccess,
this::handleShareAcknowledgeFailure,
resultHandler,
AcknowledgeRequestType.COMMIT_ASYNC
));
} else {
Acknowledgements prevAcks = asyncRequestState.acknowledgementsToSend.putIfAbsent(tip, acknowledgements);
if (prevAcks != null) {
asyncRequestState.acknowledgementsToSend.get(tip).merge(acknowledgements);
}
}
}
}
}
});
resultHandler.completeIfEmpty();
} | @Test
public void testCommitAsync() {
buildRequestManager();
assignFromSubscribed(Collections.singleton(tp0));
// normal fetch
assertEquals(1, sendFetches());
assertFalse(shareConsumeRequestManager.hasCompletedFetches());
client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE));
networkClientDelegate.poll(time.timer(0));
assertTrue(shareConsumeRequestManager.hasCompletedFetches());
Acknowledgements acknowledgements = Acknowledgements.empty();
acknowledgements.add(1L, AcknowledgeType.ACCEPT);
acknowledgements.add(2L, AcknowledgeType.ACCEPT);
acknowledgements.add(3L, AcknowledgeType.REJECT);
shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements));
assertEquals(1, shareConsumeRequestManager.sendAcknowledgements());
client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE));
networkClientDelegate.poll(time.timer(0));
assertTrue(shareConsumeRequestManager.hasCompletedFetches());
assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0));
completedAcknowledgements.clear();
} |
@Override
public CompletableFuture<TxnOffsetCommitResponseData> commitTransactionalOffsets(
RequestContext context,
TxnOffsetCommitRequestData request,
BufferSupplier bufferSupplier
) {
if (!isActive.get()) {
return CompletableFuture.completedFuture(TxnOffsetCommitRequest.getErrorResponse(
request,
Errors.COORDINATOR_NOT_AVAILABLE
));
}
if (!isGroupIdNotEmpty(request.groupId())) {
return CompletableFuture.completedFuture(TxnOffsetCommitRequest.getErrorResponse(
request,
Errors.INVALID_GROUP_ID
));
}
return runtime.scheduleTransactionalWriteOperation(
"txn-commit-offset",
topicPartitionFor(request.groupId()),
request.transactionalId(),
request.producerId(),
request.producerEpoch(),
Duration.ofMillis(config.offsetCommitTimeoutMs()),
coordinator -> coordinator.commitTransactionalOffset(context, request),
context.apiVersion()
).exceptionally(exception -> handleOperationException(
"txn-commit-offset",
request,
exception,
(error, __) -> TxnOffsetCommitRequest.getErrorResponse(request, error)
));
} | @Test
public void testCommitTransactionalOffsetsWhenNotStarted() throws ExecutionException, InterruptedException {
CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
GroupCoordinatorService service = new GroupCoordinatorService(
new LogContext(),
createConfig(),
runtime,
new GroupCoordinatorMetrics(),
createConfigManager()
);
TxnOffsetCommitRequestData request = new TxnOffsetCommitRequestData()
.setGroupId("foo")
.setTransactionalId("transactional-id")
.setMemberId("member-id")
.setGenerationId(10)
.setTopics(Collections.singletonList(new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic()
.setName("topic")
.setPartitions(Collections.singletonList(new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100)))));
CompletableFuture<TxnOffsetCommitResponseData> future = service.commitTransactionalOffsets(
requestContext(ApiKeys.TXN_OFFSET_COMMIT),
request,
BufferSupplier.NO_CACHING
);
assertEquals(
new TxnOffsetCommitResponseData()
.setTopics(Collections.singletonList(new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic()
.setName("topic")
.setPartitions(Collections.singletonList(new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition()
.setPartitionIndex(0)
.setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()))))),
future.get()
);
} |
public static PDImageXObject createFromByteArray(PDDocument document, byte[] byteArray, String name) throws IOException
{
FileType fileType = FileTypeDetector.detectFileType(byteArray);
if (fileType == null)
{
throw new IllegalArgumentException("Image type not supported: " + name);
}
if (fileType == FileType.JPEG)
{
return JPEGFactory.createFromByteArray(document, byteArray);
}
if (fileType == FileType.PNG)
{
// Try to directly convert the image without recoding it.
PDImageXObject image = PNGConverter.convertPNGImage(document, byteArray);
if (image != null)
{
return image;
}
}
if (fileType == FileType.TIFF)
{
try
{
return CCITTFactory.createFromByteArray(document, byteArray);
}
catch (IOException ex)
{
LOG.debug("Reading as TIFF failed, setting fileType to PNG", ex);
// Plan B: try reading with ImageIO
// common exception:
// First image in tiff is not CCITT T4 or T6 compressed
fileType = FileType.PNG;
}
}
if (fileType == FileType.BMP || fileType == FileType.GIF || fileType == FileType.PNG)
{
ByteArrayInputStream bais = new ByteArrayInputStream(byteArray);
BufferedImage bim = ImageIO.read(bais);
return LosslessFactory.createFromImage(document, bim);
}
throw new IllegalArgumentException("Image type " + fileType + " not supported: " + name);
} | @Test
void testCreateFromByteArray() throws IOException, URISyntaxException
{
testCompareCreatedFromByteArrayWithCreatedByCCITTFactory("ccittg4.tif");
testCompareCreatedFromByteArrayWithCreatedByJPEGFactory("jpeg.jpg");
testCompareCreatedFromByteArrayWithCreatedByJPEGFactory("jpegcmyk.jpg");
testCompareCreatedFromByteArrayWithCreatedByLosslessFactory("gif.gif");
testCompareCreatedFromByteArrayWithCreatedByLosslessFactory("gif-1bit-transparent.gif");
testCompareCreatedFromByteArrayWithCreatedByLosslessFactory("png_indexed_8bit_alpha.png");
testCompareCreatedFromByteArrayWithCreatedByLosslessFactory("png.png");
testCompareCreatedFromByteArrayWithCreatedByLosslessFactory("lzw.tif");
} |
@Override
public boolean equals(Object toBeCompared) {
if (toBeCompared instanceof ControllerInfo) {
ControllerInfo that = (ControllerInfo) toBeCompared;
return Objects.equals(this.type, that.type) &&
Objects.equals(this.ip, that.ip) &&
Objects.equals(this.port, that.port);
}
return false;
} | @Test
public void testEquals() {
String target1 = "ptcp:6653:192.168.1.1";
ControllerInfo controllerInfo1 = new ControllerInfo(target1);
String target2 = "ptcp:6653:192.168.1.1";
ControllerInfo controllerInfo2 = new ControllerInfo(target2);
assertTrue("wrong equals method", controllerInfo1.equals(controllerInfo2));
} |
public abstract MySqlSplit toMySqlSplit(); | @Test
public void testRecordBinlogSplitState() throws Exception {
final MySqlBinlogSplit split =
getTestBinlogSplitWithOffset(
BinlogOffset.ofBinlogFilePosition("mysql-bin.000001", 4));
final MySqlBinlogSplitState mySqlSplitState = new MySqlBinlogSplitState(split);
mySqlSplitState.setStartingOffset(
BinlogOffset.ofBinlogFilePosition("mysql-bin.000001", 100));
assertEquals(
getTestBinlogSplitWithOffset(
BinlogOffset.ofBinlogFilePosition("mysql-bin.000001", 100)),
mySqlSplitState.toMySqlSplit());
mySqlSplitState.setStartingOffset(
BinlogOffset.ofBinlogFilePosition("mysql-bin.000001", 400));
assertEquals(
getTestBinlogSplitWithOffset(
BinlogOffset.ofBinlogFilePosition("mysql-bin.000001", 400)),
mySqlSplitState.toMySqlSplit());
} |
public SchemaMapping fromArrow(Schema arrowSchema) {
List<Field> fields = arrowSchema.getFields();
List<TypeMapping> parquetFields = fromArrow(fields);
MessageType parquetType =
addToBuilder(parquetFields, Types.buildMessage()).named("root");
return new SchemaMapping(arrowSchema, parquetType, parquetFields);
} | @Test(expected = UnsupportedOperationException.class)
public void testArrowTimeSecondToParquet() {
converter
.fromArrow(new Schema(asList(field("a", new ArrowType.Time(TimeUnit.SECOND, 32)))))
.getParquetSchema();
} |
static BlockStmt getComplexPartialScoreVariableDeclaration(final String variableName, final ComplexPartialScore complexPartialScore) {
final MethodDeclaration methodDeclaration = COMPLEX_PARTIAL_SCORE_TEMPLATE.getMethodsByName(GETKIEPMMLCOMPLEXPARTIALSCORE).get(0).clone();
final BlockStmt complexPartialScoreBody = methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
final VariableDeclarator variableDeclarator = getVariableDeclarator(complexPartialScoreBody, COMPLEX_PARTIAL_SCORE).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, COMPLEX_PARTIAL_SCORE, complexPartialScoreBody)));
variableDeclarator.setName(variableName);
final BlockStmt toReturn = new BlockStmt();
String nestedVariableName = String.format(VARIABLE_NAME_TEMPLATE, variableName, 0);
BlockStmt toAdd = getKiePMMLExpressionBlockStmt(nestedVariableName, complexPartialScore.getExpression());
toAdd.getStatements().forEach(toReturn::addStatement);
final ObjectCreationExpr objectCreationExpr = variableDeclarator.getInitializer()
.orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, COMPLEX_PARTIAL_SCORE, toReturn)))
.asObjectCreationExpr();
objectCreationExpr.getArguments().set(0, new StringLiteralExpr(variableName));
objectCreationExpr.getArguments().set(2, new NameExpr(nestedVariableName));
complexPartialScoreBody.getStatements().forEach(toReturn::addStatement);
return toReturn;
} | @Test
void getComplexPartialScoreVariableDeclarationWithFieldRef() throws IOException {
final String variableName = "variableName";
FieldRef fieldRef = new FieldRef();
fieldRef.setField("FIELD_REF");
ComplexPartialScore complexPartialScore = new ComplexPartialScore();
complexPartialScore.setExpression(fieldRef);
BlockStmt retrieved =
KiePMMLComplexPartialScoreFactory.getComplexPartialScoreVariableDeclaration(variableName,
complexPartialScore);
String text = getFileContent(TEST_02_SOURCE);
Statement expected = JavaParserUtils.parseBlock(String.format(text,fieldRef.getField(),
variableName));
assertThat(retrieved).isEqualTo(expected);
List<Class<?>> imports = Arrays.asList(KiePMMLFieldRef.class,
KiePMMLComplexPartialScore.class,
Collections.class);
commonValidateCompilationWithImports(retrieved, imports);
} |
@Override
public void collect(MetricsEmitter metricsEmitter) {
for (Map.Entry<MetricKey, KafkaMetric> entry : ledger.getMetrics()) {
MetricKey metricKey = entry.getKey();
KafkaMetric metric = entry.getValue();
try {
collectMetric(metricsEmitter, metricKey, metric);
} catch (Exception e) {
// catch and log to continue processing remaining metrics
log.error("Error processing Kafka metric {}", metricKey, e);
}
}
} | @Test
public void testCollectFilterWithDeltaTemporality() {
MetricName name1 = metrics.metricName("nonMeasurable", "group1", tags);
MetricName name2 = metrics.metricName("windowed", "group1", tags);
MetricName name3 = metrics.metricName("cumulative", "group1", tags);
metrics.addMetric(name1, (Gauge<Double>) (config, now) -> 99d);
Sensor sensor = metrics.sensor("test");
sensor.add(name2, new WindowedCount());
sensor.add(name3, new CumulativeSum());
testEmitter.onlyDeltaMetrics(true);
collector.collect(testEmitter);
List<SinglePointMetric> result = testEmitter.emittedMetrics();
// no-filter shall result in all 4 data metrics.
assertEquals(4, result.size());
testEmitter.reset();
testEmitter.reconfigurePredicate(k -> !k.key().name().endsWith(".count"));
collector.collect(testEmitter);
result = testEmitter.emittedMetrics();
// Drop metrics for Count type.
assertEquals(3, result.size());
testEmitter.reset();
testEmitter.reconfigurePredicate(k -> !k.key().name().endsWith(".nonmeasurable"));
collector.collect(testEmitter);
result = testEmitter.emittedMetrics();
// Drop non-measurable metric.
assertEquals(3, result.size());
testEmitter.reset();
testEmitter.reconfigurePredicate(key -> true);
collector.collect(testEmitter);
result = testEmitter.emittedMetrics();
// Again no filter.
assertEquals(4, result.size());
} |
@Override
public MatchType convert(@NotNull String type) {
if (type.contains(DELIMITER)) {
String[] matchType = type.split(DELIMITER);
return new MatchType(RateLimitType.valueOf(matchType[0].toUpperCase()), matchType[1]);
}
return new MatchType(RateLimitType.valueOf(type.toUpperCase()), null);
} | @Test
public void testConvertStringTypeHttpMethodOnly() {
MatchType matchType = target.convert("http_method");
assertThat(matchType).isNotNull();
assertThat(matchType.getType()).isEqualByComparingTo(RateLimitType.HTTP_METHOD);
assertThat(matchType.getMatcher()).isNull();
} |
@Override
protected void verifyConditions(ScesimModelDescriptor scesimModelDescriptor,
ScenarioRunnerData scenarioRunnerData,
ExpressionEvaluatorFactory expressionEvaluatorFactory,
Map<String, Object> requestContext) {
for (InstanceGiven input : scenarioRunnerData.getGivens()) {
FactIdentifier factIdentifier = input.getFactIdentifier();
List<ScenarioExpect> assertionOnFact = scenarioRunnerData.getExpects().stream()
.filter(elem -> !elem.isNewFact())
.filter(elem -> Objects.equals(elem.getFactIdentifier(), factIdentifier)).collect(toList());
// check if this fact has something to check
if (assertionOnFact.isEmpty()) {
continue;
}
getScenarioResultsFromGivenFacts(scesimModelDescriptor, assertionOnFact, input, expressionEvaluatorFactory).forEach(scenarioRunnerData::addResult);
}
} | @Test
public void verifyConditions_scenario1() {
List<InstanceGiven> scenario1Inputs = extractGivenValuesForScenario1();
List<ScenarioExpect> scenario1Outputs = runnerHelper.extractExpectedValues(scenario1.getUnmodifiableFactMappingValues());
ScenarioRunnerData scenarioRunnerData1 = new ScenarioRunnerData();
scenario1Inputs.forEach(scenarioRunnerData1::addGiven);
scenario1Outputs.forEach(scenarioRunnerData1::addExpect);
runnerHelper.verifyConditions(simulation.getScesimModelDescriptor(),
scenarioRunnerData1,
expressionEvaluatorFactory,
null);
assertThat(scenarioRunnerData1.getResults()).hasSize(1);
} |
public Map<String, String> build() {
Map<String, String> builder = new HashMap<>();
configureFileSystem(builder);
configureNetwork(builder);
configureCluster(builder);
configureSecurity(builder);
configureOthers(builder);
LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
builder.get(ES_HTTP_HOST_KEY), builder.get(ES_HTTP_PORT_KEY),
builder.get(ES_TRANSPORT_HOST_KEY), builder.get(ES_TRANSPORT_PORT_KEY));
return builder;
} | @Test
public void configureSecurity_givenClusterSearchPasswordProvided_addXpackParameters_file_exists() throws Exception {
Props props = minProps(true);
props.set(CLUSTER_SEARCH_PASSWORD.getKey(), "qwerty");
File keystore = temp.newFile("keystore.p12");
File truststore = temp.newFile("truststore.p12");
props.set(CLUSTER_ES_KEYSTORE.getKey(), keystore.getAbsolutePath());
props.set(CLUSTER_ES_TRUSTSTORE.getKey(), truststore.getAbsolutePath());
EsSettings settings = new EsSettings(props, new EsInstallation(props), system);
Map<String, String> outputParams = settings.build();
assertThat(outputParams)
.containsEntry("xpack.security.transport.ssl.enabled", "true")
.containsEntry("xpack.security.transport.ssl.supported_protocols", "TLSv1.3,TLSv1.2")
.containsEntry("xpack.security.transport.ssl.keystore.path", keystore.getName())
.containsEntry("xpack.security.transport.ssl.truststore.path", truststore.getName());
} |
@Override
public void judgeContinueToExecute(final SQLStatement statement) throws SQLException {
ShardingSpherePreconditions.checkState(statement instanceof CommitStatement || statement instanceof RollbackStatement,
() -> new SQLFeatureNotSupportedException("Current transaction is aborted, commands ignored until end of transaction block."));
} | @Test
void assertJudgeContinueToExecuteWithNotAllowedStatement() {
assertThrows(SQLFeatureNotSupportedException.class, () -> allowedSQLStatementHandler.judgeContinueToExecute(mock(SelectStatement.class)));
} |
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
if(!new LocalFindFeature(session).find(file)) {
throw new NotfoundException(file.getAbsolute());
}
if(status.isExists()) {
new LocalDeleteFeature(session).delete(Collections.singletonMap(renamed, status), new DisabledPasswordCallback(), callback);
}
if(!session.toPath(file).toFile().renameTo(session.toPath(renamed).toFile())) {
throw new LocalExceptionMappingService().map("Cannot rename {0}", new NoSuchFileException(file.getName()), file);
}
return renamed;
} | @Test(expected = NotfoundException.class)
public void testMoveNotFound() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path workdir = new LocalHomeFinderFeature().find();
final Path test = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
new LocalMoveFeature(session).move(test, new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
} |
@Override
public KeyValues getLowCardinalityKeyValues(DubboClientContext context) {
KeyValues keyValues = super.getLowCardinalityKeyValues(context.getInvocation());
return withRemoteHostPort(keyValues, context);
} | @Test
void testGetLowCardinalityKeyValues() throws NoSuchFieldException, IllegalAccessException {
RpcInvocation invocation = new RpcInvocation();
invocation.setMethodName("testMethod");
invocation.setAttachment("interface", "com.example.TestService");
invocation.setTargetServiceUniqueName("targetServiceName1");
Invoker<?> invoker = ObservationConventionUtils.getMockInvokerWithUrl();
invocation.setInvoker(invoker);
DubboClientContext context = new DubboClientContext(invoker, invocation);
KeyValues keyValues = dubboClientObservationConvention.getLowCardinalityKeyValues(context);
Assertions.assertEquals("testMethod", ObservationConventionUtils.getValueForKey(keyValues, "rpc.method"));
Assertions.assertEquals(
"targetServiceName1", ObservationConventionUtils.getValueForKey(keyValues, "rpc.service"));
Assertions.assertEquals("apache_dubbo", ObservationConventionUtils.getValueForKey(keyValues, "rpc.system"));
} |
public int estimateK(int k, int n) {
return (estimate && (n >= MIN_N))
? Math.min(k, (int)Math.ceil(estimateExactK(k, n, defaultP)))
: k;
} | @Test
void requireThatLargeKAreSane() {
// System.out.println(dumpProbability(10, 0.05));
TopKEstimator idealEstimator = new TopKEstimator(30, 0.9999);
TopKEstimator skewedEstimator = new TopKEstimator(30, 0.9999, 0.05);
int [] K = {10, 20, 40, 80, 100, 200, 400, 800, 1000, 2000, 4000, 8000, 10000, 20000, 40000, 80000, 100000};
int [] expectedWithZeroSkew = {6, 9, 14, 22, 26, 42, 71, 123, 148, 268, 496, 936, 1152, 2215, 4304, 8429, 10480};
int [] expectedWith5pSkew = {6, 10, 14, 23, 26, 43, 73, 128, 154, 280, 518, 979, 1205, 2319, 4509, 8837, 10989};
for (int i = 0; i < K.length; i++) {
assertEquals(expectedWithZeroSkew[i], idealEstimator.estimateK(K[i], 10));
assertEquals(expectedWith5pSkew[i], skewedEstimator.estimateK(K[i], 10));
}
String expected =
"Prob/Hits: 1.0000000000 0.9999000000 0.9999900000 0.9999990000 0.9999999000 0.9999999900 0.9999999990 0.9999999999\n" +
" 10: 10.000 6.000 7.000 8.000 9.000 10.000 10.000 10.000\n" +
" 20: 10.000 4.500 5.000 5.500 6.500 7.000 7.500 8.000\n" +
" 40: 10.000 3.500 4.000 4.250 4.750 5.250 5.500 6.000\n" +
" 80: 10.000 2.750 3.000 3.250 3.625 3.875 4.250 4.500\n" +
" 100: 10.000 2.600 2.800 3.100 3.300 3.600 3.900 4.200\n" +
" 200: 10.000 2.100 2.250 2.450 2.650 2.800 3.000 3.200\n" +
" 400: 10.000 1.775 1.900 2.025 2.150 2.275 2.425 2.575\n" +
" 800: 10.000 1.538 1.625 1.713 1.813 1.900 2.000 2.100\n" +
" 1000: 10.000 1.480 1.560 1.640 1.720 1.810 1.890 1.990\n" +
" 2000: 10.000 1.340 1.395 1.450 1.510 1.570 1.630 1.695\n" +
" 4000: 10.000 1.240 1.280 1.320 1.360 1.403 1.445 1.493\n" +
" 8000: 10.000 1.170 1.198 1.225 1.254 1.284 1.315 1.348\n" +
" 10000: 10.000 1.152 1.177 1.202 1.227 1.254 1.282 1.311\n" +
" 20000: 10.000 1.108 1.125 1.143 1.161 1.180 1.199 1.220\n" +
" 40000: 10.000 1.076 1.088 1.101 1.114 1.127 1.141 1.156\n" +
" 80000: 10.000 1.054 1.062 1.071 1.080 1.090 1.100 1.110\n" +
" 100000: 10.000 1.048 1.056 1.064 1.072 1.080 1.089 1.098\n";
assertEquals(expected, dumpProbability(10, 0.0));
String expectedSkew =
"Prob/Hits: 1.0000000000 0.9999000000 0.9999900000 0.9999990000 0.9999999000 0.9999999900 0.9999999990 0.9999999999\n" +
" 10: 10.000 6.000 7.000 8.000 9.000 10.000 10.000 10.000\n" +
" 20: 10.000 5.000 5.500 6.000 6.500 7.000 7.500 8.500\n" +
" 40: 10.000 3.500 4.000 4.500 4.750 5.250 5.750 6.250\n" +
" 80: 10.000 2.875 3.125 3.375 3.750 4.000 4.375 4.625\n" +
" 100: 10.000 2.600 2.900 3.100 3.400 3.700 4.000 4.300\n" +
" 200: 10.000 2.150 2.350 2.500 2.700 2.900 3.100 3.300\n" +
" 400: 10.000 1.825 1.950 2.075 2.225 2.350 2.500 2.650\n" +
" 800: 10.000 1.600 1.688 1.775 1.875 1.975 2.075 2.175\n" +
" 1000: 10.000 1.540 1.620 1.700 1.790 1.870 1.960 2.060\n" +
" 2000: 10.000 1.400 1.455 1.510 1.570 1.630 1.695 1.760\n" +
" 4000: 10.000 1.295 1.335 1.375 1.418 1.460 1.505 1.553\n" +
" 8000: 10.000 1.224 1.251 1.280 1.309 1.340 1.371 1.405\n" +
" 10000: 10.000 1.205 1.230 1.255 1.282 1.309 1.337 1.367\n" +
" 20000: 10.000 1.160 1.177 1.195 1.214 1.233 1.253 1.275\n" +
" 40000: 10.000 1.127 1.140 1.153 1.166 1.179 1.194 1.209\n" +
" 80000: 10.000 1.105 1.114 1.123 1.132 1.141 1.152 1.162\n" +
" 100000: 10.000 1.099 1.107 1.115 1.123 1.132 1.141 1.150\n";
assertEquals(expectedSkew, dumpProbability(10, 0.05));
} |
@Override
protected boolean isAccessAllowed(final ServletRequest servletRequest,
final ServletResponse servletResponse,
final Object o) {
return false;
} | @Test
public void testIsAccessAllowed() {
Object obj = mock(Object.class);
assertFalse(statelessAuthFilter.isAccessAllowed(httpServletRequest, httpServletResponse, obj));
} |
@Override public Repository getRepository() {
try {
// NOTE: this class formerly used a ranking system to prioritize the providers registered and would check them in order
// of priority for the first non-null repository. In practice, we only ever registered one at a time, spoon or PUC.
// As such, the priority ranking is gone and will need to be reintroduced if desired later.
Collection<KettleRepositoryProvider> repositoryProviders = PluginServiceLoader.loadServices( KettleRepositoryProvider.class );
return repositoryProviders.stream().map( KettleRepositoryProvider::getRepository ).filter( Objects::nonNull ).findFirst().orElse( null );
} catch ( KettlePluginException e ) {
logger.error( "Error getting repository", e );
}
return null;
} | @Test
public void testGetRepositorySingleNull() {
KettleRepositoryProvider provider = mock( KettleRepositoryProvider.class );
Collection<KettleRepositoryProvider> providerCollection = new ArrayList<>();
providerCollection.add( provider );
try ( MockedStatic<PluginServiceLoader> pluginServiceLoaderMockedStatic = Mockito.mockStatic( PluginServiceLoader.class ) ) {
pluginServiceLoaderMockedStatic.when( () -> PluginServiceLoader.loadServices( any() ) ).thenReturn( providerCollection );
assertNull( kettleRepositoryLocator.getRepository() );
verify( provider ).getRepository();
}
} |
public static Set<Class<? extends PipelineOptions>> getRegisteredOptions() {
return Collections.unmodifiableSet(CACHE.get().registeredOptions);
} | @Test
public void testDefaultRegistration() {
assertTrue(PipelineOptionsFactory.getRegisteredOptions().contains(PipelineOptions.class));
} |
@Override
public List<AdminUserDO> getUserList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return Collections.emptyList();
}
return userMapper.selectBatchIds(ids);
} | @Test
public void testGetUserList() {
// mock data
AdminUserDO user = randomAdminUserDO();
userMapper.insert(user);
// insert a record whose id does not match
userMapper.insert(randomAdminUserDO());
// prepare parameters
Collection<Long> ids = singleton(user.getId());
// invoke
List<AdminUserDO> result = userService.getUserList(ids);
// assert
assertEquals(1, result.size());
assertEquals(user, result.get(0));
} |
@SuppressWarnings("MethodMayBeStatic") // Non-static to support DI.
public long parse(final String text) {
final String date;
final String time;
final String timezone;
if (text.contains("T")) {
date = text.substring(0, text.indexOf('T'));
final String withTimezone = text.substring(text.indexOf('T') + 1);
timezone = getTimezone(withTimezone);
time = completeTime(withTimezone.substring(0, withTimezone.length() - timezone.length())
.replaceAll("Z$",""));
} else {
date = completeDate(text);
time = completeTime("");
timezone = "";
}
try {
final ZoneId zoneId = parseTimezone(timezone);
return PARSER.parse(date + "T" + time, zoneId);
} catch (final RuntimeException e) {
throw new KsqlException("Failed to parse timestamp '" + text
+ "': " + e.getMessage()
+ HELP_MESSAGE,
e
);
}
} | @Test
public void shouldParseDateWithHourMinute() {
// When:
assertThat(parser.parse("2020-12-02T13:59"), is(fullParse("2020-12-02T13:59:00.000+0000")));
assertThat(parser.parse("2020-12-02T13:59Z"), is(fullParse("2020-12-02T13:59:00.000+0000")));
} |
public static Builder custom() {
return new Builder();
} | @Test(expected = IllegalArgumentException.class)
public void zeroSlidingWindowSizeShouldFail2() {
custom().slidingWindowSize(0).build();
} |
@Override
@DSTransactional // multiple data sources: use @DSTransactional to guarantee the local transaction and the data source switching
public void updateTenant(TenantSaveReqVO updateReqVO) {
// validate that the tenant exists
TenantDO tenant = validateUpdateTenant(updateReqVO.getId());
// validate that the tenant name is not duplicated
validTenantNameDuplicate(updateReqVO.getName(), updateReqVO.getId());
// validate that the tenant website is not duplicated
validTenantWebsiteDuplicate(updateReqVO.getWebsite(), updateReqVO.getId());
// validate that the tenant package is not disabled
TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(updateReqVO.getPackageId());
// update the tenant
TenantDO updateObj = BeanUtils.toBean(updateReqVO, TenantDO.class);
tenantMapper.updateById(updateObj);
// if the package has changed, update the permissions of its roles
if (ObjectUtil.notEqual(tenant.getPackageId(), updateReqVO.getPackageId())) {
updateTenantRoleMenu(tenant.getId(), tenantPackage.getMenuIds());
}
} | @Test
public void testUpdateTenant_success() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setStatus(randomCommonStatus()));
tenantMapper.insert(dbTenant);// @Sql: insert an existing record first
// prepare parameters
TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
o.setId(dbTenant.getId()); // set the ID to update
o.setStatus(randomCommonStatus());
o.setWebsite(randomString());
});
// mock the tenant package
TenantPackageDO tenantPackage = randomPojo(TenantPackageDO.class,
o -> o.setMenuIds(asSet(200L, 201L)));
when(tenantPackageService.validTenantPackage(eq(reqVO.getPackageId()))).thenReturn(tenantPackage);
// mock all roles
RoleDO role100 = randomPojo(RoleDO.class, o -> o.setId(100L).setCode(RoleCodeEnum.TENANT_ADMIN.getCode()));
role100.setTenantId(dbTenant.getId());
RoleDO role101 = randomPojo(RoleDO.class, o -> o.setId(101L));
role101.setTenantId(dbTenant.getId());
when(roleService.getRoleList()).thenReturn(asList(role100, role101));
// mock the permissions of each role
when(permissionService.getRoleMenuListByRoleId(eq(101L))).thenReturn(asSet(201L, 202L));
// invoke
tenantService.updateTenant(reqVO);
// verify the record was updated correctly
TenantDO tenant = tenantMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, tenant);
// verify role-menu assignments
verify(permissionService).assignRoleMenu(eq(100L), eq(asSet(200L, 201L)));
verify(permissionService).assignRoleMenu(eq(101L), eq(asSet(201L)));
} |
protected List<Object> createAndFillList(ArrayNode json, List<Object> toReturn, String className, List<String> genericClasses) {
for (JsonNode node : json) {
if (isSimpleTypeNode(node)) {
String generic = genericClasses.get(genericClasses.size() - 1);
Object value = internalLiteralEvaluation(getSimpleTypeNodeTextValue(node), generic);
toReturn.add(value);
} else {
String genericClassName = ScenarioSimulationSharedUtils.isMap(className) ? className : genericClasses.get(genericClasses.size() - 1);
Object listElement = createObject(genericClassName, genericClasses);
Object returnedObject = createAndFillObject((ObjectNode) node, listElement, genericClassName, genericClasses);
toReturn.add(returnedObject);
}
}
return toReturn;
} | @Test
public void convertList() {
ArrayNode jsonNodes = new ArrayNode(factory);
ObjectNode objectNode = new ObjectNode(factory);
objectNode.put(VALUE, "data");
jsonNodes.add(objectNode);
List<Object> objects = expressionEvaluator.createAndFillList(jsonNodes, new ArrayList<>(), List.class.getCanonicalName(), List.of(String.class.getCanonicalName()));
assertThat(objects).containsExactly("data");
} |
public List<String> build() {
if (columnDefs.isEmpty()) {
throw new IllegalStateException("No column has been defined");
}
switch (dialect.getId()) {
case PostgreSql.ID:
return createPostgresQuery();
case Oracle.ID:
return createOracleQuery();
default:
return createMsSqlAndH2Queries();
}
} | @Test
public void update_columns_on_oracle() {
assertThat(createSampleBuilder(new Oracle()).build())
.containsOnly(
"ALTER TABLE issues MODIFY (value NUMERIC (30,20) NULL)",
"ALTER TABLE issues MODIFY (name VARCHAR2 (10 CHAR) NULL)");
} |
public static java.nio.file.Path getTargetPathIfContainsSymbolicPath(java.nio.file.Path path)
throws IOException {
java.nio.file.Path targetPath = path;
java.nio.file.Path suffixPath = Paths.get("");
while (path != null && path.getFileName() != null) {
if (Files.isSymbolicLink(path)) {
java.nio.file.Path linkedPath = path.toRealPath();
targetPath = Paths.get(linkedPath.toString(), suffixPath.toString());
break;
}
suffixPath = Paths.get(path.getFileName().toString(), suffixPath.toString());
path = path.getParent();
}
return targetPath;
} | @Test
void testGetTargetPathContainsMultipleSymbolicPath() throws IOException {
File linked1Dir = TempDirUtils.newFolder(temporaryFolder, "linked1");
java.nio.file.Path symlink1 = Paths.get(temporaryFolder.toString(), "symlink1");
Files.createSymbolicLink(symlink1, linked1Dir.toPath());
java.nio.file.Path symlink2 = Paths.get(symlink1.toString(), "symlink2");
File linked2Dir = TempDirUtils.newFolder(temporaryFolder, "linked2");
Files.createSymbolicLink(symlink2, linked2Dir.toPath());
java.nio.file.Path dirInLinked2 =
TempDirUtils.newFolder(linked2Dir.toPath(), "one").toPath().toRealPath();
// symlink3 point to another symbolic link: symlink2
java.nio.file.Path symlink3 = Paths.get(symlink1.toString(), "symlink3");
Files.createSymbolicLink(symlink3, symlink2);
java.nio.file.Path targetPath =
FileUtils.getTargetPathIfContainsSymbolicPath(
// path contains multiple symlink : xxx/symlink1/symlink3/one
symlink3.resolve("one"));
assertThat(targetPath).isEqualTo(dirInLinked2);
} |
public static boolean isExisting(final String jobId) {
return JOBS.containsKey(jobId);
} | @Test
void assertIsExisting() {
assertTrue(PipelineJobRegistry.isExisting("foo_job"));
} |
@Override
public boolean validateTree(ValidationContext validationContext) {
validate(validationContext);
return (onCancelConfig.validateTree(validationContext) && errors.isEmpty() && !configuration.hasErrors());
} | @Test
public void validateTreeShouldVerifyIfCancelTasksHasNestedCancelTask() {
PluggableTask pluggableTask = new PluggableTask(new PluginConfiguration(), new Configuration());
pluggableTask.onCancelConfig = mock(OnCancelConfig.class);
com.thoughtworks.go.domain.Task cancelTask = mock(com.thoughtworks.go.domain.Task.class);
when(pluggableTask.onCancelConfig.getTask()).thenReturn(cancelTask);
when(cancelTask.hasCancelTask()).thenReturn(true);
when(pluggableTask.onCancelConfig.validateTree(null)).thenReturn(true);
assertFalse(pluggableTask.validateTree(null));
assertThat(pluggableTask.errors().get("onCancelConfig").get(0), is("Cannot nest 'oncancel' within a cancel task"));
} |
public void validateUserHasAdministerIssuesPermission(String projectUuid) {
try (DbSession dbSession = dbClient.openSession(false)) {
String userUuid = Objects.requireNonNull(userSession.getUuid());
if (!dbClient.authorizationDao().selectEntityPermissions(dbSession, projectUuid, userUuid).contains(ISSUE_ADMIN)){
throw insufficientPrivilegesException();
}
}
} | @Test
public void givenUserDoesNotHaveAdministerIssuesPermission_whenValidateUserHasAdministerIssuesPermission_thenThrowForbiddenException() {
// given
String userUuid = "userUuid";
doReturn(userUuid).when(userSession).getUuid();
String projectUuid = "projectUuid";
DbSession dbSession = mockDbSession();
AuthorizationDao authorizationDao = mockAuthorizationDao();
doReturn(Set.of("permission1")).when(authorizationDao).selectEntityPermissions(dbSession, projectUuid, userUuid);
// when, then
assertThatThrownBy(() -> underTest.validateUserHasAdministerIssuesPermission(projectUuid))
.withFailMessage("Insufficient privileges")
.isInstanceOf(ForbiddenException.class);
verify(dbClient, times(1)).authorizationDao();
} |
public void useModules(String... names) {
checkNotNull(names, "names cannot be null");
Set<String> deduplicateNames = new HashSet<>();
for (String name : names) {
if (!loadedModules.containsKey(name)) {
throw new ValidationException(
String.format("No module with name '%s' exists", name));
}
if (!deduplicateNames.add(name)) {
throw new ValidationException(
String.format("Module '%s' appears more than once", name));
}
}
usedModules.clear();
usedModules.addAll(Arrays.asList(names));
} | @Test
void testUseModulesWithDuplicateModuleName() {
assertThatThrownBy(
() ->
manager.useModules(
CoreModuleFactory.IDENTIFIER, CoreModuleFactory.IDENTIFIER))
.isInstanceOf(ValidationException.class)
.hasMessage("Module 'core' appears more than once");
} |
@VisibleForTesting
void checkDestinationFolderField( String realDestinationFoldernameFieldName, SFTPPutData data ) throws KettleStepException {
realDestinationFoldernameFieldName = environmentSubstitute( realDestinationFoldernameFieldName );
if ( Utils.isEmpty( realDestinationFoldernameFieldName ) ) {
throw new KettleStepException( BaseMessages.getString(
PKG, "SFTPPut.Log.DestinatFolderNameFieldNameMissing" ) );
}
data.indexOfMoveToFolderFieldName = getInputRowMeta().indexOfValue( realDestinationFoldernameFieldName );
if ( data.indexOfMoveToFolderFieldName == -1 ) {
// move to folder field is missing
throw new KettleStepException( BaseMessages.getString(
PKG, "SFTPPut.Error.CanNotFindField", realDestinationFoldernameFieldName ) );
}
} | @Test( expected = KettleStepException.class )
public void checkDestinationFolderField_NameIsBlank() throws Exception {
SFTPPutData data = new SFTPPutData();
step.checkDestinationFolderField( "", data );
} |
public static Write write(String url, String token) {
checkNotNull(url, "url is required.");
checkNotNull(token, "token is required.");
return write(StaticValueProvider.of(url), StaticValueProvider.of(token));
} | @Test
@Category(NeedsRunner.class)
public void successfulSplunkIOMultiBatchNoParallelismTest() {
// Create server expectation for success.
mockServerListening(200);
int testPort = mockServerRule.getPort();
String url = Joiner.on(':').join("http://localhost", testPort);
String token = "test-token";
JsonObject fields = new JsonObject();
fields.addProperty("customfield", 1);
List<SplunkEvent> testEvents =
ImmutableList.of(
SplunkEvent.newBuilder()
.withEvent("test-event-1")
.withHost("test-host-1")
.withIndex("test-index-1")
.withSource("test-source-1")
.withSourceType("test-source-type-1")
.withTime(12345L)
.withFields(fields)
.create(),
SplunkEvent.newBuilder()
.withEvent("test-event-2")
.withHost("test-host-2")
.withIndex("test-index-2")
.withSource("test-source-2")
.withSourceType("test-source-type-2")
.withTime(12345L)
.withFields(fields)
.create());
PCollection<SplunkWriteError> actual =
pipeline
.apply("Create Input data", Create.of(testEvents))
.apply(
"SplunkIO",
SplunkIO.write(url, token).withParallelism(1).withBatchCount(testEvents.size()));
// All successful responses.
PAssert.that(actual).empty();
pipeline.run();
// Server received exactly one POST request.
mockServerClient.verify(HttpRequest.request(EXPECTED_PATH), VerificationTimes.once());
} |
public boolean overlapInTime() {
return timeOverlap().isPresent();
} | @Test
public void testOverlapInTime() {
Track<NopHit> fullTrack = Track.of(newArrayList(P1, P2, P3, P4, P5, P6));
Track<NopHit> earlyTrack = Track.of(newArrayList(P1, P2, P3));
Track<NopHit> endTrack = Track.of(newArrayList(P4, P5, P6));
Track<NopHit> endTrack_2 = Track.of(newArrayList(P3, P4, P5, P6));
assertTrue(overlapInTime(fullTrack, earlyTrack));
assertTrue(overlapInTime(earlyTrack, fullTrack));
assertTrue(overlapInTime(fullTrack, endTrack));
assertTrue(overlapInTime(endTrack, fullTrack));
assertTrue(overlapInTime(earlyTrack, endTrack_2));
assertTrue(overlapInTime(endTrack_2, earlyTrack));
assertFalse(overlapInTime(earlyTrack, endTrack));
assertFalse(overlapInTime(endTrack, earlyTrack));
} |
public static <T> List<T> sub(List<T> list, int start, int end) {
return ListUtil.sub(list, start, end);
} | @Test
public void subInput1PositiveNegativePositiveOutput1() {
// Arrange
final List<Integer> list = new ArrayList<>();
list.add(null);
final int start = 3;
final int end = -1;
final int step = 2;
// Act
final List<Integer> retval = CollUtil.sub(list, start, end, step);
// Assert result
final List<Integer> arrayList = new ArrayList<>();
arrayList.add(null);
assertEquals(arrayList, retval);
} |
public double calculateMinPercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
if (LOG.isTraceEnabled()) {
LOG.trace("Calculating min percentage used by. Used Mem: {} Total Mem: {}"
+ " Used Normalized Resources: {} Total Normalized Resources: {}", totalMemoryMb, usedMemoryMb,
toNormalizedMap(), used.toNormalizedMap());
}
double min = 1.0;
if (usedMemoryMb > totalMemoryMb) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalMemoryMb != 0.0) {
min = Math.min(min, usedMemoryMb / totalMemoryMb);
}
double totalCpu = getTotalCpu();
if (used.getTotalCpu() > totalCpu) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalCpu != 0.0) {
min = Math.min(min, used.getTotalCpu() / totalCpu);
}
if (used.otherResources.length > otherResources.length) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
for (int i = 0; i < otherResources.length; i++) {
if (otherResources[i] == 0.0) {
//Skip any resources where the total is 0, the percent used for this resource isn't meaningful.
//We fall back to prioritizing by cpu, memory and any other resources by ignoring this value
continue;
}
if (i >= used.otherResources.length) {
//Resources missing from used are using none of that resource
return 0;
}
if (used.otherResources[i] > otherResources[i]) {
String info = String.format("%s, %f > %f", getResourceNameForResourceIndex(i), used.otherResources[i], otherResources[i]);
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb, info);
}
min = Math.min(min, used.otherResources[i] / otherResources[i]);
}
return min * 100.0;
} | @Test
public void testCalculateMinUsageWithNoResourcesInTotal() {
NormalizedResources resources = new NormalizedResources(normalize(Collections.emptyMap()));
NormalizedResources usedResources = new NormalizedResources(normalize(Collections.emptyMap()));
double min = resources.calculateMinPercentageUsedBy(usedResources, 0, 0);
assertThat(min, is(100.0));
} |
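As a side note, the behaviour verified here follows from the general rule in calculateMinPercentageUsedBy: every resource with a non-zero total contributes its used/total ratio, the smallest ratio wins, and the result is reported as a percentage. The following stand-alone sketch (hypothetical class, map-based resource profiles, no subset validation) illustrates that rule, including the empty-profile case returning 100.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical simplified sketch of the "minimum percentage used" rule, using plain maps
// instead of NormalizedResources and skipping the used-is-subset-of-total validation.
public class MinPercentageSketch {

    static double minPercentageUsed(Map<String, Double> total, Map<String, Double> used) {
        double min = 1.0;
        for (Map.Entry<String, Double> entry : total.entrySet()) {
            double totalValue = entry.getValue();
            if (totalValue == 0.0) {
                continue; // a percentage is meaningless when none of the resource exists
            }
            double usedValue = used.getOrDefault(entry.getKey(), 0.0);
            min = Math.min(min, usedValue / totalValue);
        }
        return min * 100.0;
    }

    public static void main(String[] args) {
        Map<String, Double> total = new LinkedHashMap<>();
        total.put("memory.mb", 1024.0);
        total.put("cpu.pcore.percent", 400.0);

        Map<String, Double> used = new LinkedHashMap<>();
        used.put("memory.mb", 512.0);          // 50% of the memory
        used.put("cpu.pcore.percent", 100.0);  // 25% of the cpu

        System.out.println(minPercentageUsed(total, used));        // 25.0 - cpu is the scarcest resource
        System.out.println(minPercentageUsed(Map.of(), Map.of())); // 100.0 - matches the empty-total test above
    }
}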
@GetMapping("by-product-id/{productId:\\d+}")
public Mono<FavouriteProduct> findFavouriteProductByProductId(Mono<JwtAuthenticationToken> authenticationTokenMono,
@PathVariable("productId") int productId) {
return authenticationTokenMono.flatMap(token ->
this.favouriteProductsService.findFavouriteProductByProduct(productId, token.getToken().getSubject()));
} | @Test
void findFavouriteProductsByProductId_ReturnsFavouriteProducts() {
// given
doReturn(Mono.just(
new FavouriteProduct(UUID.fromString("fe87eef6-cbd7-11ee-aeb6-275dac91de02"), 1,
"5f1d5cf8-cbd6-11ee-9579-cf24d050b47c")
)).when(this.favouriteProductsService).findFavouriteProductByProduct(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
// when
StepVerifier.create(this.controller.findFavouriteProductByProductId(
Mono.just(new JwtAuthenticationToken(Jwt.withTokenValue("e30.e30")
.headers(headers -> headers.put("foo", "bar"))
.claim("sub", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c").build())), 1))
// then
.expectNext(
new FavouriteProduct(UUID.fromString("fe87eef6-cbd7-11ee-aeb6-275dac91de02"), 1,
"5f1d5cf8-cbd6-11ee-9579-cf24d050b47c")
)
.verifyComplete();
verify(this.favouriteProductsService)
.findFavouriteProductByProduct(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
verifyNoMoreInteractions(this.favouriteProductsService);
} |
public static byte[] decrypt3DES(byte[] data, byte[] key) {
return desTemplate(data, key, TripleDES_Algorithm, TripleDES_Transformation, false);
} | @Test
public void decrypt3DES() throws Exception {
TestCase.assertTrue(
Arrays.equals(
bytesDataDES3,
EncryptKit.decrypt3DES(bytesResDES3, bytesKeyDES3)
)
);
TestCase.assertTrue(
Arrays.equals(
bytesDataDES3,
EncryptKit.decryptHexString3DES(res3DES, bytesKeyDES3)
)
);
TestCase.assertTrue(
Arrays.equals(
bytesDataDES3,
EncryptKit.decryptBase64_3DES(Base64.getEncoder().encode(bytesResDES3), bytesKeyDES3)
)
);
} |
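For context, a decrypt3DES helper like the one above is usually a thin wrapper around the JCE; the sketch below shows one common shape of such a wrapper. It is an assumption, not the actual desTemplate implementation: the "DESede/ECB/NoPadding" transformation, the padding choice, and the 24-byte key expectation may all differ from the real TripleDES_Transformation constant.

import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

// Assumed JCE-based sketch of a Triple DES decryption helper; the real desTemplate
// may use a different transformation, padding, or key-handling strategy.
public class TripleDesSketch {

    static byte[] decrypt3Des(byte[] data, byte[] key) throws Exception {
        SecretKeySpec keySpec = new SecretKeySpec(key, "DESede"); // expects a 24-byte key
        Cipher cipher = Cipher.getInstance("DESede/ECB/NoPadding");
        cipher.init(Cipher.DECRYPT_MODE, keySpec);
        return cipher.doFinal(data);
    }
}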
public int getRefreshClusterMaxPriorityFailedRetrieved() {
return numRefreshClusterMaxPriorityFailedRetrieved.value();
} | @Test
public void testRefreshClusterMaxPriorityFailedRetrieved() {
long totalBadBefore = metrics.getRefreshClusterMaxPriorityFailedRetrieved();
badSubCluster.getRefreshClusterMaxPriorityFailed();
Assert.assertEquals(totalBadBefore + 1, metrics.getRefreshClusterMaxPriorityFailedRetrieved());
} |
@Override
public void writeTo(ByteBuf byteBuf) throws LispWriterException {
WRITER.writeTo(byteBuf, this);
} | @Test
public void testSerialization() throws LispReaderException, LispWriterException, LispParseError {
ByteBuf byteBuf = Unpooled.buffer();
ReplyWriter writer = new ReplyWriter();
writer.writeTo(byteBuf, reply1);
ReplyReader reader = new ReplyReader();
LispMapReply deserialized = reader.readFrom(byteBuf);
new EqualsTester().addEqualityGroup(reply1, deserialized).testEquals();
} |
@Override
public PageResult<BrokerageRecordDO> getBrokerageRecordPage(BrokerageRecordPageReqVO pageReqVO) {
return brokerageRecordMapper.selectPage(pageReqVO);
} | @Test
@Disabled // TODO replace the null values with the ones you need, then remove the @Disabled annotation
public void testGetBrokerageRecordPage() {
// mock data
BrokerageRecordDO dbBrokerageRecord = randomPojo(BrokerageRecordDO.class, o -> { // will be returned by the query later
o.setUserId(null);
o.setBizType(null);
o.setStatus(null);
o.setCreateTime(null);
});
brokerageRecordMapper.insert(dbBrokerageRecord);
// test userId mismatch
brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setUserId(null)));
// test bizType mismatch
brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setBizType(null)));
// test status mismatch
brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setStatus(null)));
// test createTime mismatch
brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setCreateTime(null)));
// prepare parameters
BrokerageRecordPageReqVO reqVO = new BrokerageRecordPageReqVO();
reqVO.setUserId(null);
reqVO.setBizType(null);
reqVO.setStatus(null);
reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
// invoke
PageResult<BrokerageRecordDO> pageResult = brokerageRecordService.getBrokerageRecordPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbBrokerageRecord, pageResult.getList().get(0));
} |
public Stream<Flow> keepLastVersion(Stream<Flow> stream) {
return keepLastVersionCollector(stream);
} | @Test
void sameRevisionWithDeletedSameRevision() {
Stream<Flow> stream = Stream.of(
create("test2", "test2", 1),
create("test", "test", 1),
create("test", "test2", 2),
create("test", "test3", 3),
create("test", "test2", 2).toDeleted()
);
List<Flow> collect = flowService.keepLastVersion(stream).toList();
assertThat(collect.size(), is(1));
assertThat(collect.getFirst().isDeleted(), is(false));
assertThat(collect.getFirst().getId(), is("test2"));
} |
public static List<ComponentDto> sortComponents(List<ComponentDto> components, ComponentTreeRequest wsRequest, List<MetricDto> metrics,
Table<String, MetricDto, ComponentTreeData.Measure> measuresByComponentUuidAndMetric) {
List<String> sortParameters = wsRequest.getSort();
if (sortParameters == null || sortParameters.isEmpty()) {
return components;
}
boolean isAscending = wsRequest.getAsc();
Map<String, Ordering<ComponentDto>> orderingsBySortField = ImmutableMap.<String, Ordering<ComponentDto>>builder()
.put(NAME_SORT, componentNameOrdering(isAscending))
.put(QUALIFIER_SORT, componentQualifierOrdering(isAscending))
.put(PATH_SORT, componentPathOrdering(isAscending))
.put(METRIC_SORT, metricValueOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric))
.put(METRIC_PERIOD_SORT, metricPeriodOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric))
.build();
String firstSortParameter = sortParameters.get(0);
Ordering<ComponentDto> primaryOrdering = orderingsBySortField.get(firstSortParameter);
if (sortParameters.size() > 1) {
for (int i = 1; i < sortParameters.size(); i++) {
String secondarySortParameter = sortParameters.get(i);
Ordering<ComponentDto> secondaryOrdering = orderingsBySortField.get(secondarySortParameter);
primaryOrdering = primaryOrdering.compound(secondaryOrdering);
}
}
primaryOrdering = primaryOrdering.compound(componentNameOrdering(true));
return primaryOrdering.immutableSortedCopy(components);
} | @Test
void sortComponent_whenMetricIsImpactDataType_shouldOrderByTotalAscending() {
components.add(newComponentWithoutSnapshotId("name-without-measure", "qualifier-without-measure", "path-without-measure"));
ComponentTreeRequest wsRequest = newRequest(singletonList(METRIC_SORT), true, DATA_IMPACT_METRIC_KEY);
List<ComponentDto> result = sortComponents(wsRequest);
assertThat(result).extracting("path")
.containsExactly("path-1", "path-2", "path-3", "path-4", "path-5", "path-6", "path-7", "path-8", "path-9", "path-without-measure");
} |
public final void containsKey(@Nullable Object key) {
check("keySet()").that(checkNotNull(actual).keySet()).contains(key);
} | @Test
public void containsKey() {
ImmutableMap<String, String> actual = ImmutableMap.of("kurt", "kluever");
assertThat(actual).containsKey("kurt");
} |
public static EnumBuilder<Schema> enumeration(String name) {
return builder().enumeration(name);
} | @Test
void enumWithDefault() {
List<String> symbols = Arrays.asList("a", "b");
String enumDefault = "a";
Schema expected = Schema.createEnum("myenum", null, null, symbols, enumDefault);
expected.addProp("p", "v");
Schema schema = SchemaBuilder.enumeration("myenum").prop("p", "v").defaultSymbol(enumDefault).symbols("a", "b");
assertEquals(expected, schema);
} |
public ProtocolHandler protocol(String protocol) {
ProtocolHandlerWithClassLoader h = handlers.get(protocol);
if (null == h) {
return null;
} else {
return h.getHandler();
}
} | @Test
public void testGetProtocol() {
assertSame(handler1, handlers.protocol(protocol1));
assertSame(handler2, handlers.protocol(protocol2));
assertNull(handlers.protocol(protocol3));
} |
public static Map<String, KiePMMLTableSourceCategory> getClassificationTableBuilders(final RegressionCompilationDTO compilationDTO) {
logger.trace("getRegressionTables {}", compilationDTO.getRegressionTables());
LinkedHashMap<String, KiePMMLTableSourceCategory> toReturn =
KiePMMLRegressionTableFactory.getRegressionTableBuilders(compilationDTO);
Map.Entry<String, String> regressionTableEntry = getClassificationTableBuilder(compilationDTO, toReturn);
toReturn.put(regressionTableEntry.getKey(), new KiePMMLTableSourceCategory(regressionTableEntry.getValue(),
""));
return toReturn;
} | @Test
void getClassificationTableBuilders() {
RegressionTable regressionTableProf = getRegressionTable(3.5, "professional");
RegressionTable regressionTableCler = getRegressionTable(27.4, "clerical");
OutputField outputFieldCat = getOutputField("CAT-1", ResultFeature.PROBABILITY, "CatPred-1");
OutputField outputFieldNum = getOutputField("NUM-1", ResultFeature.PROBABILITY, "NumPred-0");
OutputField outputFieldPrev = getOutputField("PREV", ResultFeature.PREDICTED_VALUE, null);
String targetField = "targetField";
DataField dataField = new DataField();
dataField.setName(targetField);
dataField.setOpType(OpType.CATEGORICAL);
DataDictionary dataDictionary = new DataDictionary();
dataDictionary.addDataFields(dataField);
RegressionModel regressionModel = new RegressionModel();
regressionModel.setNormalizationMethod(RegressionModel.NormalizationMethod.CAUCHIT);
regressionModel.addRegressionTables(regressionTableProf, regressionTableCler);
regressionModel.setModelName(getGeneratedClassName("RegressionModel"));
Output output = new Output();
output.addOutputFields(outputFieldCat, outputFieldNum, outputFieldPrev);
regressionModel.setOutput(output);
MiningField miningField = new MiningField();
miningField.setUsageType(MiningField.UsageType.TARGET);
miningField.setName(dataField.getName());
MiningSchema miningSchema = new MiningSchema();
miningSchema.addMiningFields(miningField);
regressionModel.setMiningSchema(miningSchema);
PMML pmml = new PMML();
pmml.setDataDictionary(dataDictionary);
pmml.addModels(regressionModel);
final CommonCompilationDTO<RegressionModel> source =
CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
pmml,
regressionModel,
new PMMLCompilationContextMock(),
"FILENAME");
final RegressionCompilationDTO compilationDTO =
RegressionCompilationDTO.fromCompilationDTORegressionTablesAndNormalizationMethod(source,
regressionModel.getRegressionTables(),
regressionModel.getNormalizationMethod());
Map<String, KiePMMLTableSourceCategory> retrieved =
KiePMMLClassificationTableFactory.getClassificationTableBuilders(compilationDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).hasSize(3);
retrieved.values().forEach(kiePMMLTableSourceCategory -> commonValidateKiePMMLRegressionTable(kiePMMLTableSourceCategory.getSource()));
Map<String, String> sources = retrieved.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
stringKiePMMLTableSourceCategoryEntry -> stringKiePMMLTableSourceCategoryEntry.getValue().getSource()));
commonValidateCompilation(sources);
} |
public boolean hasCwe() {
return !cwe.isEmpty();
} | @Test
@SuppressWarnings("squid:S2699")
public void testHasCwe() {
//already tested, this is just left so the IDE doesn't recreate it.
} |
@Override
public Type classify(final Throwable e) {
Type type = Type.UNKNOWN;
if (e instanceof KsqlFunctionException
|| (e instanceof StreamsException
&& ExceptionUtils.getRootCause(e) instanceof KsqlFunctionException)) {
type = Type.USER;
}
if (type == Type.USER) {
LOG.info(
"Classified error as USER error based on invalid user input. Query ID: {} Exception: {}",
queryId,
e);
}
return type;
} | @Test
public void shouldClassifyWrappedKsqlFunctionExceptionAsUserError() {
// Given:
final Exception e = new StreamsException(new KsqlFunctionException("foo"));
// When:
final Type type = new KsqlFunctionClassifier("").classify(e);
// Then:
assertThat(type, is(Type.USER));
} |