focal_method | test_case
---|---
public static String normalize(String path) {
if (path == null) {
return null;
}
// Compatibility with Windows shared directory paths: if the original path starts with \\, keep it as-is
if (path.startsWith("\\\\")) {
return path;
}
// Compatibility with Spring-style classpath paths: strip the prefix, case-insensitively
String pathToUse = StrUtil.removePrefixIgnoreCase(path, URLUtil.CLASSPATH_URL_PREFIX);
// Strip the file: prefix
pathToUse = StrUtil.removePrefixIgnoreCase(pathToUse, URLUtil.FILE_URL_PREFIX);
// Recognize the home directory form (~) and convert it to an absolute path
if (StrUtil.startWith(pathToUse, '~')) {
pathToUse = getUserHomePath() + pathToUse.substring(1);
}
// Normalize all separators to forward slashes, collapsing repeats
pathToUse = pathToUse.replaceAll("[/\\\\]+", StrUtil.SLASH);
// Trim leading whitespace; trailing whitespace is legal and is kept
pathToUse = StrUtil.trimStart(pathToUse);
// issue#IAB65V: strip trailing line breaks
pathToUse = StrUtil.trim(pathToUse, 1, (c)->c == '\n' || c == '\r');
String prefix = StrUtil.EMPTY;
int prefixIndex = pathToUse.indexOf(StrUtil.COLON);
if (prefixIndex > -1) {
// Possibly a Windows-style path
prefix = pathToUse.substring(0, prefixIndex + 1);
if (StrUtil.startWith(prefix, StrUtil.C_SLASH)) {
// Strip the leading slash from paths like /C:
prefix = prefix.substring(1);
}
if (false == prefix.contains(StrUtil.SLASH)) {
pathToUse = pathToUse.substring(prefixIndex + 1);
} else {
// A prefix containing / means this is not a Windows-style path
prefix = StrUtil.EMPTY;
}
}
if (pathToUse.startsWith(StrUtil.SLASH)) {
prefix += StrUtil.SLASH;
pathToUse = pathToUse.substring(1);
}
List<String> pathList = StrUtil.split(pathToUse, StrUtil.C_SLASH);
List<String> pathElements = new LinkedList<>();
int tops = 0;
String element;
for (int i = pathList.size() - 1; i >= 0; i--) {
element = pathList.get(i);
// Only handle elements other than ".", i.e. skip current-directory markers
if (false == StrUtil.DOT.equals(element)) {
if (StrUtil.DOUBLE_DOT.equals(element)) {
tops++;
} else {
if (tops > 0) {
// Pending ".." markers skip elements one at a time
tops--;
} else {
// Normal path element found.
pathElements.add(0, element);
}
}
}
}
// issue#1703@Github
if (tops > 0 && StrUtil.isEmpty(prefix)) {
// Only relative paths get the leading ".." restored; for absolute paths they are ignored
while (tops-- > 0) {
// Leftover ".." markers remain after the walk (the path starts with one or more ..), so restore them
pathElements.add(0, StrUtil.DOUBLE_DOT);
}
}
return prefix + CollUtil.join(pathElements, StrUtil.SLASH);
} | @Test
public void normalizeClassPathTest() {
assertEquals("", FileUtil.normalize("classpath:"));
} |
public void snapshotTimer(final long correlationId, final long deadline)
{
idleStrategy.reset();
while (true)
{
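// tryClaim returns a positive position on success; otherwise checkResultAndIdle() backs off and retries.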
final long result = publication.tryClaim(ENCODED_TIMER_LENGTH, bufferClaim);
if (result > 0)
{
timerEncoder
.wrapAndApplyHeader(bufferClaim.buffer(), bufferClaim.offset(), messageHeaderEncoder)
.correlationId(correlationId)
.deadline(deadline);
bufferClaim.commit();
break;
}
checkResultAndIdle(result);
}
} | @Test
void snapshotTimer()
{
final int offset = 18;
final int length = MessageHeaderEncoder.ENCODED_LENGTH + TimerEncoder.BLOCK_LENGTH;
final long correlationId = -901;
final long deadline = 12345678901L;
when(publication.tryClaim(eq(length), any()))
.thenReturn(BACK_PRESSURED, ADMIN_ACTION)
.thenAnswer(mockTryClaim(offset));
snapshotTaker.snapshotTimer(correlationId, deadline);
final InOrder inOrder = inOrder(idleStrategy, publication);
inOrder.verify(idleStrategy).reset();
inOrder.verify(publication).tryClaim(anyInt(), any());
inOrder.verify(idleStrategy).idle();
inOrder.verify(publication).tryClaim(anyInt(), any());
inOrder.verify(idleStrategy).idle();
inOrder.verify(publication).tryClaim(anyInt(), any());
inOrder.verifyNoMoreInteractions();
timerDecoder.wrapAndApplyHeader(buffer, offset + HEADER_LENGTH, messageHeaderDecoder);
assertEquals(correlationId, timerDecoder.correlationId());
assertEquals(deadline, timerDecoder.deadline());
} |
public void useModules(String... names) {
checkNotNull(names, "names cannot be null");
Set<String> deduplicateNames = new HashSet<>();
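// Validate existence and uniqueness up front so the used modules are only replaced when all names are valid.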
for (String name : names) {
if (!loadedModules.containsKey(name)) {
throw new ValidationException(
String.format("No module with name '%s' exists", name));
}
if (!deduplicateNames.add(name)) {
throw new ValidationException(
String.format("Module '%s' appears more than once", name));
}
}
usedModules.clear();
usedModules.addAll(Arrays.asList(names));
} | @Test
void testUseUnloadedModules() {
assertThatThrownBy(() -> manager.useModules(CoreModuleFactory.IDENTIFIER, "x"))
.isInstanceOf(ValidationException.class)
.hasMessage("No module with name 'x' exists");
} |
@Override
public HttpAction restore(final CallContext ctx, final String defaultUrl) {
val webContext = ctx.webContext();
val sessionStore = ctx.sessionStore();
val optRequestedUrl = sessionStore.get(webContext, Pac4jConstants.REQUESTED_URL);
HttpAction requestedAction = null;
if (optRequestedUrl.isPresent()) {
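// Clear the saved URL from the session so it is only restored once.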
sessionStore.set(webContext, Pac4jConstants.REQUESTED_URL, null);
val requestedUrl = optRequestedUrl.get();
if (requestedUrl instanceof String) {
requestedAction = new FoundAction((String) requestedUrl);
} else if (requestedUrl instanceof RedirectionAction) {
requestedAction = (RedirectionAction) requestedUrl;
}
}
if (requestedAction == null) {
requestedAction = new FoundAction(defaultUrl);
}
LOGGER.debug("requestedAction: {}", requestedAction.getMessage());
if (requestedAction instanceof FoundAction) {
return HttpActionHelper.buildRedirectUrlAction(webContext, ((FoundAction) requestedAction).getLocation());
} else {
return HttpActionHelper.buildFormPostContentAction(webContext, ((OkAction) requestedAction).getContent());
}
} | @Test
public void testRestoreNoRequestedUrl() {
val context = MockWebContext.create();
val sessionStore = new MockSessionStore();
val action = handler.restore(new CallContext(context, sessionStore), LOGIN_URL);
assertTrue(action instanceof FoundAction);
assertEquals(LOGIN_URL, ((FoundAction) action).getLocation());
assertFalse(sessionStore.get(context, Pac4jConstants.REQUESTED_URL).isPresent());
} |
static void populateOutputFields(final PMML4Result toUpdate,
final ProcessingDTO processingDTO) {
logger.debug("populateOutputFields {} {}", toUpdate, processingDTO);
for (KiePMMLOutputField outputField : processingDTO.getOutputFields()) {
Object variableValue = outputField.evaluate(processingDTO);
if (variableValue != null) {
String variableName = outputField.getName();
toUpdate.addResultVariable(variableName, variableValue);
processingDTO.addKiePMMLNameValue(new KiePMMLNameValue(variableName, variableValue));
}
}
} | @Test
void populateTransformedOutputFieldWithApplyDerivedFieldFromApply() {
// <DerivedField name="CUSTOM_FIELD" optype="continuous" dataType="double">
// <Apply function="+">
// <Constant>64.0</Constant>
// <Constant>36.0</Constant>
// </Apply>
// </DerivedField>
final KiePMMLConstant kiePMMLConstant1 = new KiePMMLConstant(PARAM_2, Collections.emptyList(), 64.0, null);
final KiePMMLConstant kiePMMLConstant2 = new KiePMMLConstant(PARAM_2, Collections.emptyList(), 36, null);
final KiePMMLApply kiePMMLApplyRef = KiePMMLApply.builder("NAMEREF", Collections.emptyList(), "+")
.withKiePMMLExpressions(Arrays.asList(kiePMMLConstant1, kiePMMLConstant2))
.build();
final KiePMMLDerivedField derivedField = KiePMMLDerivedField.builder(CUSTOM_FIELD, Collections.emptyList(),
DATA_TYPE.DOUBLE,
OP_TYPE.CONTINUOUS,
kiePMMLApplyRef).build();
// <Apply function="/">
// <FieldRef>CUSTOM_FIELD</FieldRef>
// <Constant>5.0</Constant>
// </Apply>
final KiePMMLFieldRef kiePMMLFieldRef = new KiePMMLFieldRef(CUSTOM_FIELD, Collections.emptyList(), null);
final KiePMMLConstant kiePMMLConstant3 = new KiePMMLConstant(PARAM_2, Collections.emptyList(), value2, null);
KiePMMLApply kiePMMLApply = KiePMMLApply.builder("NAME", Collections.emptyList(), "/")
.withKiePMMLExpressions(Arrays.asList(kiePMMLFieldRef, kiePMMLConstant3))
.build();
KiePMMLOutputField outputField = KiePMMLOutputField.builder(OUTPUT_NAME, Collections.emptyList())
.withResultFeature(RESULT_FEATURE.TRANSFORMED_VALUE)
.withKiePMMLExpression(kiePMMLApply)
.build();
// From TransformationDictionary
KiePMMLTransformationDictionary transformationDictionary = KiePMMLTransformationDictionary.builder(
"transformationDictionary", Collections.emptyList())
.withDerivedFields(Collections.singletonList(derivedField))
.build();
KiePMMLTestingModel kiePMMLModel1 = testingModelBuilder(outputField)
.withKiePMMLTransformationDictionary(transformationDictionary)
.build();
ProcessingDTO processingDTO1 = buildProcessingDTOWithEmptyNameValues(kiePMMLModel1);
PMML4Result toUpdate1 = new PMML4Result();
PostProcess.populateOutputFields(toUpdate1, processingDTO1);
assertThat(toUpdate1.getResultVariables()).isNotEmpty();
assertThat(toUpdate1.getResultVariables()).containsKey(OUTPUT_NAME);
assertThat(toUpdate1.getResultVariables().get(OUTPUT_NAME)).isEqualTo(value1 / value2);
// From LocalTransformations
KiePMMLLocalTransformations localTransformations = KiePMMLLocalTransformations.builder("localTransformations", Collections.emptyList())
.withDerivedFields(Collections.singletonList(derivedField))
.build();
KiePMMLTestingModel kiePMMLModel2 = testingModelBuilder(outputField)
.withKiePMMLLocalTransformations(localTransformations)
.build();
ProcessingDTO processingDTO2 = buildProcessingDTOWithEmptyNameValues(kiePMMLModel2);
PMML4Result toUpdate2 = new PMML4Result();
PostProcess.populateOutputFields(toUpdate2, processingDTO2);
assertThat(toUpdate2.getResultVariables()).isNotEmpty();
assertThat(toUpdate2.getResultVariables()).containsKey(OUTPUT_NAME);
assertThat(toUpdate2.getResultVariables().get(OUTPUT_NAME)).isEqualTo(value1 / value2);
} |
@Override
public <T> Invoker<T> buildInvokerChain(final Invoker<T> originalInvoker, String key, String group) {
Invoker<T> last = originalInvoker;
URL url = originalInvoker.getUrl();
List<ModuleModel> moduleModels = getModuleModelsFromUrl(url);
List<Filter> filters;
if (moduleModels != null && moduleModels.size() == 1) {
filters = ScopeModelUtil.getExtensionLoader(Filter.class, moduleModels.get(0))
.getActivateExtension(url, key, group);
} else if (moduleModels != null && moduleModels.size() > 1) {
filters = new ArrayList<>();
List<ExtensionDirector> directors = new ArrayList<>();
for (ModuleModel moduleModel : moduleModels) {
List<Filter> tempFilters = ScopeModelUtil.getExtensionLoader(Filter.class, moduleModel)
.getActivateExtension(url, key, group);
filters.addAll(tempFilters);
directors.add(moduleModel.getExtensionDirector());
}
filters = sortingAndDeduplication(filters, directors);
} else {
filters = ScopeModelUtil.getExtensionLoader(Filter.class, null).getActivateExtension(url, key, group);
}
if (!CollectionUtils.isEmpty(filters)) {
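// Build the chain from the tail so that the first filter in the list ends up outermost.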
for (int i = filters.size() - 1; i >= 0; i--) {
final Filter filter = filters.get(i);
final Invoker<T> next = last;
last = new CopyOfFilterChainNode<>(originalInvoker, next, filter);
}
return new CallbackRegistrationInvoker<>(last, filters);
}
return last;
} | @Test
void testBuildInvokerChainForRemoteReference() {
DefaultFilterChainBuilder defaultFilterChainBuilder = new DefaultFilterChainBuilder();
// verify that no filter is built by default
URL urlWithoutFilter = URL.valueOf("dubbo://127.0.0.1:20880/DemoService")
.addParameter(INTERFACE_KEY, DemoService.class.getName());
urlWithoutFilter = urlWithoutFilter.setScopeModel(ApplicationModel.defaultModel());
AbstractInvoker<DemoService> invokerWithoutFilter =
new AbstractInvoker<DemoService>(DemoService.class, urlWithoutFilter) {
@Override
protected Result doInvoke(Invocation invocation) {
return null;
}
};
Invoker<?> invokerAfterBuild =
defaultFilterChainBuilder.buildInvokerChain(invokerWithoutFilter, REFERENCE_FILTER_KEY, CONSUMER);
// Assertions.assertTrue(invokerAfterBuild instanceof AbstractInvoker);
// verify that if LogFilter is configured, LogFilter should exist in the filter chain
URL urlWithFilter = URL.valueOf("dubbo://127.0.0.1:20880/DemoService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.addParameter(REFERENCE_FILTER_KEY, "log");
urlWithFilter = urlWithFilter.setScopeModel(ApplicationModel.defaultModel());
AbstractInvoker<DemoService> invokerWithFilter =
new AbstractInvoker<DemoService>(DemoService.class, urlWithFilter) {
@Override
protected Result doInvoke(Invocation invocation) {
return null;
}
};
invokerAfterBuild =
defaultFilterChainBuilder.buildInvokerChain(invokerWithFilter, REFERENCE_FILTER_KEY, CONSUMER);
Assertions.assertTrue(invokerAfterBuild instanceof FilterChainBuilder.CallbackRegistrationInvoker);
} |
public static byte getProtocolTypeFromString(String str) {
switch (str.toLowerCase()) {
case PROTOCOL_NAME_TCP:
return IPv4.PROTOCOL_TCP;
case PROTOCOL_NAME_UDP:
return IPv4.PROTOCOL_UDP;
case PROTOCOL_NAME_ANY:
default:
return ARBITRARY_PROTOCOL;
}
} | @Test
public void testGetProtocolTypeFromString() {
assertEquals(IPv4.PROTOCOL_TCP, getProtocolTypeFromString(PROTOCOL_TCP_L));
assertEquals(IPv4.PROTOCOL_TCP, getProtocolTypeFromString(PROTOCOL_TCP_S));
assertEquals(IPv4.PROTOCOL_UDP, getProtocolTypeFromString(PROTOCOL_UDP_L));
assertEquals(IPv4.PROTOCOL_UDP, getProtocolTypeFromString(PROTOCOL_UDP_S));
assertEquals(0, getProtocolTypeFromString(PROTOCOL_ANY_L));
assertEquals(0, getProtocolTypeFromString(PROTOCOL_ANY_S));
} |
static <T> ConsumerImpl<T> newConsumerImpl(PulsarClientImpl client,
String topic,
ConsumerConfigurationData<T> conf,
ExecutorProvider executorProvider,
int partitionIndex,
boolean hasParentConsumer,
CompletableFuture<Consumer<T>> subscribeFuture,
MessageId startMessageId,
Schema<T> schema,
ConsumerInterceptors<T> interceptors,
boolean createTopicIfDoesNotExist) {
return newConsumerImpl(client, topic, conf, executorProvider, partitionIndex, hasParentConsumer, false,
subscribeFuture, startMessageId, schema, interceptors, createTopicIfDoesNotExist, 0);
} | @Test
public void testConsumerCreatedWhilePaused() throws InterruptedException {
PulsarClientImpl client = ClientTestFixtures.createPulsarClientMock(executorProvider, internalExecutor);
ClientConfigurationData clientConf = client.getConfiguration();
clientConf.setOperationTimeoutMs(100);
clientConf.setStatsIntervalSeconds(0);
String topic = "non-persistent://tenant/ns1/my-topic";
consumerConf.setStartPaused(true);
consumer = ConsumerImpl.newConsumerImpl(client, topic, consumerConf,
executorProvider, -1, false, new CompletableFuture<>(), null, null, null,
true);
Assert.assertTrue(consumer.paused);
} |
public abstract int getPriority(); | @Test
void testGetPriority() {
assertEquals(Integer.MIN_VALUE, abilityControlManager.getPriority());
} |
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(
config,
MigrationsUtil::getKsqlClient,
getMigrationsDir(getConfigFile(), config),
Clock.systemDefaultZone()
);
} | @Test
public void shouldApplyDropConnectorIfExistsStatement() throws Exception {
// Given:
command = PARSER.parse("-v", "3");
createMigrationFile(1, NAME, migrationsDir, COMMAND);
createMigrationFile(3, NAME, migrationsDir, DROP_CONNECTOR_IF_EXISTS);
givenCurrentMigrationVersion("1");
givenAppliedMigration(1, NAME, MigrationState.MIGRATED);
// When:
final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed(
Instant.ofEpochMilli(1000), ZoneId.systemDefault()));
// Then:
assertThat(result, is(0));
final InOrder inOrder = inOrder(ksqlClient);
verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED,
() -> inOrder.verify(ksqlClient).dropConnector("`WOOF`", true));
inOrder.verify(ksqlClient).close();
inOrder.verifyNoMoreInteractions();
} |
@Override
public double logp(int k) {
if (k <= 0) {
return Double.NEGATIVE_INFINITY;
} else {
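// Shifted geometric pmf: P(K = k) = (1 - p)^(k - 1) * p, computed in log space.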
return (k - 1) * Math.log(1 - p) + Math.log(p);
}
} | @Test
public void testLogP() {
System.out.println("logP");
ShiftedGeometricDistribution instance = new ShiftedGeometricDistribution(0.3);
instance.rand();
assertEquals(Math.log(0.3), instance.logp(1), 1E-6);
assertEquals(Math.log(0.21), instance.logp(2), 1E-6);
assertEquals(Math.log(0.147), instance.logp(3), 1E-6);
assertEquals(Math.log(0.1029), instance.logp(4), 1E-6);
assertEquals(Math.log(0.07203), instance.logp(5), 1E-6);
assertEquals(Math.log(0.008474257), instance.logp(11), 1E-6);
assertEquals(Math.log(0.0002393768), instance.logp(21), 1E-6);
} |
boolean connectedToRepository() {
return repository.isConnected();
} | @Test
public void connectedToRepositoryReturnsFalse() {
when( repository.isConnected() ).thenReturn( false );
assertFalse( timeoutHandler.connectedToRepository() );
} |
@Override
public String getString(int rowIndex, int columnIndex) {
if (columnIndex != 0) {
throw new IllegalArgumentException("Column index must always be 0 for aggregation result sets");
}
return _groupByResults.get(rowIndex).get("value").asText();
} | @Test
public void testGetString() {
// Run the test
final String result = _groupByResultSetUnderTest.getString(0, 0);
// Verify the results
assertEquals("1", result);
} |
public static String removeAllHtmlAttr(String content, String... tagNames) {
String regex;
for (String tagName : tagNames) {
regex = StrUtil.format("(?i)<{}[^>]*?>", tagName);
content = content.replaceAll(regex, StrUtil.format("<{}>", tagName));
}
return content;
} | @Test
public void removeAllHtmlAttrTest() {
final String html = "<div class=\"test_div\" width=\"120\"></div>";
final String result = HtmlUtil.removeAllHtmlAttr(html, "div");
assertEquals("<div></div>", result);
} |
@Override
public String getResourceInputNodeType() {
return DictionaryConst.NODE_TYPE_FILE_FIELD;
} | @Test
public void testGetResourceInputNodeType() throws Exception {
assertEquals( DictionaryConst.NODE_TYPE_FILE_FIELD, analyzer.getResourceInputNodeType() );
} |
public static Object matchEndpointKey(String path, Map<String, Object> map) {
if (isEmpty(map)) {
return null;
}
// Iterate over the key set to find the endpoint pattern that matches the request path
for (String endpoint : map.keySet()) {
if (StringUtils.matchPathToPattern(path, endpoint)) {
return map.get(endpoint);
}
}
return null;
} | @Test
public void testMatchEndpointKey() {
Map<String, Object> map = new HashMap<>();
map.put("/v1/cat/{petId}", "123");
map.put("/v1/dog/{petId}/uploadImage", "456");
map.put("/v1/fish/{petId}/uploadImage/{imageId}", "789");
Assert.assertEquals("123", CollectionUtils.matchEndpointKey("/v1/cat/123", map));
Assert.assertEquals("456", CollectionUtils.matchEndpointKey("/v1/dog/123/uploadImage", map));
Assert.assertEquals("789", CollectionUtils.matchEndpointKey("/v1/fish/123/uploadImage/456", map));
} |
@Override
public TenantDO getTenantByName(String name) {
return tenantMapper.selectByName(name);
} | @Test
public void testGetTenantByName() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setName("芋道"));
tenantMapper.insert(dbTenant); // @Sql: insert an existing record first
// invoke
TenantDO result = tenantService.getTenantByName("芋道");
// verify it exists
assertPojoEquals(result, dbTenant);
} |
public static String[] getEmptyPaddedStrings() {
if ( emptyPaddedSpacesStrings == null ) {
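// Lazily build a cache where index i holds a string of i spaces.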
emptyPaddedSpacesStrings = new String[250];
for ( int i = 0; i < emptyPaddedSpacesStrings.length; i++ ) {
emptyPaddedSpacesStrings[i] = rightPad( "", i );
}
}
return emptyPaddedSpacesStrings;
} | @Test
public void testGetEmptyPaddedStrings() {
final String[] strings = Const.getEmptyPaddedStrings();
for ( int i = 0; i < 250; i++ ) {
assertEquals( i, strings[i].length() );
}
} |
@Override
public boolean databaseExists(String databaseName) throws CatalogException {
checkArgument(!StringUtils.isNullOrEmpty(databaseName));
return listDatabases().contains(databaseName);
} | @Test
public void testDatabaseExists() {
assertTrue(catalog.databaseExists(TEST_DEFAULT_DATABASE));
assertFalse(catalog.databaseExists(NONE_EXIST_DATABASE));
} |
@Override
public CompletableFuture<MastershipRole> requestRoleFor(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_ID_NULL);
final Timer.Context timer = startTimer(requestRoleTimer);
return store.requestRole(networkId, deviceId)
.whenComplete((result, error) -> stopTimer(timer));
} | @Test
public void requestRoleFor() {
mastershipMgr1.setRole(NID_LOCAL, VDID1, MASTER);
mastershipMgr1.setRole(NID_OTHER, VDID2, MASTER);
//local should be master for one but standby for other
assertEquals("wrong role:", MASTER, Futures.getUnchecked(mastershipMgr1.requestRoleFor(VDID1)));
assertEquals("wrong role:", STANDBY, Futures.getUnchecked(mastershipMgr1.requestRoleFor(VDID2)));
} |
@Override
public boolean trySetCapacity(int capacity) {
return get(trySetCapacityAsync(capacity));
} | @Test
public void testConcurrentPut() throws InterruptedException {
final RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("bounded-queue:testConcurrentPut");
assertThat(queue1.trySetCapacity(10000)).isTrue();
ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() * 2);
for (int i = 0; i < 10000; i++) {
final int k = i;
executor.execute(new Runnable() {
@Override
public void run() {
queue1.add(k);
}
});
}
executor.shutdown();
assertThat(executor.awaitTermination(1, TimeUnit.MINUTES)).isTrue();
assertThat(queue1.size()).isEqualTo(10000);
} |
public String build( final String cellValue ) {
switch ( type ) {
case FORALL:
return buildForAll( cellValue );
case INDEXED:
return buildMulti( cellValue );
default:
return buildSingle( cellValue );
}
} | @Test
public void testSingleParamMultipleTimes() {
final String snippet = "something.param.getAnother($param).equals($param);";
final SnippetBuilder snip = new SnippetBuilder(snippet);
final String cellValue = "42";
final String result = snip.build(cellValue);
assertThat(result).isNotNull();
assertThat(result).isEqualTo("something.param.getAnother(42).equals(42);");
} |
@Override
public void onIssue(Component component, DefaultIssue issue) {
if (issue.isNew()) {
// analyzer can provide some tags. They must be merged with rule tags
Rule rule = ruleRepository.getByKey(issue.ruleKey());
issue.setTags(union(issue.tags(), rule.getTags()));
}
} | @Test
public void copy_tags_if_new_external_issue() {
externalRule.setTags(Sets.newHashSet("es_lint", "java"));
externalIssue.setNew(true);
underTest.onIssue(mock(Component.class), externalIssue);
assertThat(externalIssue.tags()).containsExactly("es_lint", "java");
} |
@Override
public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs,
ListConsumerGroupOffsetsOptions options) {
SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> future =
ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet());
ListConsumerGroupOffsetsHandler handler =
new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext);
invokeDriver(handler, future, options.timeoutMs);
return new ListConsumerGroupOffsetsResult(future.all());
} | @Test
public void testListConsumerGroupOffsetsRetriableErrors() throws Exception {
// Retriable errors should be retried
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(
offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Collections.emptyMap()));
/*
 * We need to return two responses here: one NOT_COORDINATOR error for the list consumer offsets
 * call made against a coordinator that has moved. This retries the whole operation, so we must
 * respond with another FindCoordinatorResponse.
 *
 * The same applies to the following COORDINATOR_NOT_AVAILABLE error response.
 */
env.kafkaClient().prepareResponse(
offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap()));
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(
offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap()));
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(
offsetFetchResponse(Errors.NONE, Collections.emptyMap()));
final ListConsumerGroupOffsetsResult errorResult1 = env.adminClient().listConsumerGroupOffsets(GROUP_ID);
assertEquals(Collections.emptyMap(), errorResult1.partitionsToOffsetAndMetadata().get());
}
} |
public static void tryEnrichClusterEntryPointError(@Nullable Throwable root) {
tryEnrichOutOfMemoryError(
root,
JM_METASPACE_OOM_ERROR_MESSAGE,
JM_DIRECT_OOM_ERROR_MESSAGE,
JM_HEAP_SPACE_OOM_ERROR_MESSAGE);
} | @Test
public void testMetaspaceOOMHandling() {
OutOfMemoryError error = new OutOfMemoryError("Metaspace");
ClusterEntryPointExceptionUtils.tryEnrichClusterEntryPointError(error);
assertThat(
error.getMessage(),
is(ClusterEntryPointExceptionUtils.JM_METASPACE_OOM_ERROR_MESSAGE));
} |
@Override
public String getFailureMessage() {
return _failureMessage;
} | @Test
public void testSetFailureMessage() {
BasicAuthorizationResultImpl result = new BasicAuthorizationResultImpl(true, "New Failure Message");
assertEquals("New Failure Message", result.getFailureMessage());
} |
@Override
protected Logger newLogger(String name) {
return new JettyLoggerAdapter(name);
} | @Test
void testNewLogger() {
JettyLoggerAdapter loggerAdapter = new JettyLoggerAdapter();
org.eclipse.jetty.util.log.Logger logger =
loggerAdapter.newLogger(this.getClass().getName());
assertThat(logger.getClass().isAssignableFrom(JettyLoggerAdapter.class), is(true));
} |
public boolean isEphemeral() {
return ephemeral;
} | @Test
void testIsEphemeral() {
assertTrue(serviceMetadata.isEphemeral());
} |
@Udf
public Integer extractPort(
@UdfParameter(description = "a valid URL to extract a port from") final String input) {
final Integer port = UrlParser.extract(input, URI::getPort);
// check for LT 0 because URI::getPort returns -1 if the port
// does not exist, but UrlParser#extract will return null if
// the URI is invalid
return (port == null || port < 0) ? null : port;
} | @Test
public void shouldExtractPortIfPresent() {
assertThat(
extractUdf.extractPort("https://docs.confluent.io:8080/current/ksql/docs/syntax-reference.html#scalar-functions"),
equalTo(8080));
} |
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
BinaryColumnStatsData aggregateData = aggregateColStats.getStatsData().getBinaryStats();
BinaryColumnStatsData newData = newColStats.getStatsData().getBinaryStats();
aggregateData.setMaxColLen(mergeMaxColLen(aggregateData.getMaxColLen(), newData.getMaxColLen()));
aggregateData.setAvgColLen(mergeAvgColLen(aggregateData.getAvgColLen(), newData.getAvgColLen()));
aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
} | @Test
public void testMergeNonNullValues() {
ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(byte[].class)
.avgColLen(3)
.maxColLen(2)
.numNulls(2)
.build());
ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(byte[].class)
.avgColLen(2)
.maxColLen(3)
.numNulls(3)
.build());
MERGER.merge(aggrObj, newObj);
newObj = createColumnStatisticsObj(new ColStatsBuilder<>(byte[].class)
.avgColLen(3)
.maxColLen(3)
.numNulls(1)
.build());
MERGER.merge(aggrObj, newObj);
ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(byte[].class)
.avgColLen(3)
.maxColLen(3)
.numNulls(6)
.build();
assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
} |
public List<Date> parse(String language)
{
return parse(language, new Date());
} | @Test
public void testParseTimes()
{
List<Date> parse = new PrettyTimeParser().parse("let's get lunch at two pm");
Assert.assertFalse(parse.isEmpty());
Calendar calendar = Calendar.getInstance();
calendar.setTime(parse.get(0));
Assert.assertEquals(14, calendar.get(Calendar.HOUR_OF_DAY));
} |
@Override
public void run(AdminStatisticsJobRequest jobRequest) throws Exception {
var year = jobRequest.getYear();
var month = jobRequest.getMonth();
LOGGER.info(">> ADMIN REPORT STATS {} {}", year, month);
var stopwatch = new StopWatch();
stopwatch.start("repositories.countActiveExtensions");
var extensions = repositories.countActiveExtensions();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.downloadsUntil");
var downloadsTotal = repositories.downloadsTotal();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
var lastDate = LocalDateTime.of(year, month, 1, 0, 0).minusMonths(1);
var lastAdminStatistics = repositories.findAdminStatisticsByYearAndMonth(lastDate.getYear(), lastDate.getMonthValue());
var lastDownloadsTotal = lastAdminStatistics != null ? lastAdminStatistics.getDownloadsTotal() : 0;
var downloads = downloadsTotal - lastDownloadsTotal;
stopwatch.start("repositories.countActiveExtensionPublishers");
var publishers = repositories.countActiveExtensionPublishers();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.averageNumberOfActiveReviewsPerActiveExtension");
var averageReviewsPerExtension = repositories.averageNumberOfActiveReviewsPerActiveExtension();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.countPublishersThatClaimedNamespaceOwnership");
var namespaceOwners = repositories.countPublishersThatClaimedNamespaceOwnership();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.countActiveExtensionsGroupedByExtensionReviewRating");
var extensionsByRating = repositories.countActiveExtensionsGroupedByExtensionReviewRating();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.countActiveExtensionPublishersGroupedByExtensionsPublished");
var publishersByExtensionsPublished = repositories.countActiveExtensionPublishersGroupedByExtensionsPublished();
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
var limit = 10;
stopwatch.start("repositories.topMostActivePublishingUsers");
var topMostActivePublishingUsers = repositories.topMostActivePublishingUsers(limit);
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.topNamespaceExtensions");
var topNamespaceExtensions = repositories.topNamespaceExtensions(limit);
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.topNamespaceExtensionVersions");
var topNamespaceExtensionVersions = repositories.topNamespaceExtensionVersions(limit);
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
stopwatch.start("repositories.topMostDownloadedExtensions");
var topMostDownloadedExtensions = repositories.topMostDownloadedExtensions(limit);
stopwatch.stop();
LOGGER.info("{} took {} ms", stopwatch.getLastTaskName(), stopwatch.getLastTaskTimeMillis());
LOGGER.info("<< ADMIN REPORT STATS {} {}", year, month);
var statistics = new AdminStatistics();
statistics.setYear(year);
statistics.setMonth(month);
statistics.setExtensions(extensions);
statistics.setDownloads(downloads);
statistics.setDownloadsTotal(downloadsTotal);
statistics.setPublishers(publishers);
statistics.setAverageReviewsPerExtension(averageReviewsPerExtension);
statistics.setNamespaceOwners(namespaceOwners);
statistics.setExtensionsByRating(extensionsByRating);
statistics.setPublishersByExtensionsPublished(publishersByExtensionsPublished);
statistics.setTopMostActivePublishingUsers(topMostActivePublishingUsers);
statistics.setTopNamespaceExtensions(topNamespaceExtensions);
statistics.setTopNamespaceExtensionVersions(topNamespaceExtensionVersions);
statistics.setTopMostDownloadedExtensions(topMostDownloadedExtensions);
service.saveAdminStatistics(statistics);
} | @Test
public void testAdminStatisticsJobRequestHandlerWithPreviousStatistics() throws Exception {
var expectedStatistics = mockAdminStatistics();
expectedStatistics.setDownloads(678L);
var prevStatistics = new AdminStatistics();
prevStatistics.setDownloadsTotal(5000);
Mockito.when(repositories.findAdminStatisticsByYearAndMonth(2023, 10)).thenReturn(prevStatistics);
var request = new AdminStatisticsJobRequest(2023, 11);
handler.run(request);
Mockito.verify(service).saveAdminStatistics(expectedStatistics);
} |
public static String toJavaCode(
final String argName,
final Class<?> argType,
final String lambdaBody
) {
return toJavaCode(ImmutableList.of(new Pair<>(argName, argType)), lambdaBody);
} | @Test(expected= KsqlException.class)
public void shouldThrowOnNonSupportedArguments() {
// Given:
final Pair<String, Class<?>> argName1 = new Pair<>("fred", Long.class);
final Pair<String, Class<?>> argName2 = new Pair<>("bob", Long.class);
final Pair<String, Class<?>> argName3 = new Pair<>("tim", Long.class);
final Pair<String, Class<?>> argName4 = new Pair<>("hello", Long.class);
final List<Pair<String, Class<?>>> argList = ImmutableList.of(argName1, argName2, argName3, argName4);
// When:
LambdaUtil.toJavaCode(argList, "fred + bob + tim + hello + 1");
} |
public String sanitizeInput(String input) {
if (input != null) {
for (String unsafeTag : unsafeTags) {
String unsafeRegex = "<" + unsafeTag + ">(.*)</" + unsafeTag + ">";
Pattern pattern = Pattern.compile(unsafeRegex);
Matcher matcher = pattern.matcher(input);
if (matcher.find()) {
input = matcher.replaceAll("");
}
}
String onclickRegex = "onclick=[\"'](.*)[\"']";
Pattern pattern = Pattern.compile(onclickRegex);
Matcher matcher = pattern.matcher(input);
if (matcher.find()) {
input = matcher.replaceAll("");
}
}
return input;
} | @Test
void sanitizeInput() {
String input = "This is "
+ "<script>alert(1);</script> "
+ "<div onclick='alert(2)'>this is div</div> "
+ "text";
String output = md.sanitizeInput(input);
assertFalse(output.contains("<script>"));
assertFalse(output.contains("onclick"));
assertTrue(output.contains("this is div"));
} |
public static OperatorID fromUid(String uid) {
byte[] hash = Hashing.murmur3_128(0).newHasher().putString(uid, UTF_8).hash().asBytes();
return new OperatorID(hash);
} | @Test
public void testOperatorIdMatchesUid() {
OperatorID expectedId = getOperatorID();
OperatorID generatedId = OperatorIDGenerator.fromUid(UID);
Assert.assertEquals(expectedId, generatedId);
} |
static int determineOperatorReservoirSize(int operatorParallelism, int numPartitions) {
int coordinatorReservoirSize = determineCoordinatorReservoirSize(numPartitions);
int totalOperatorSamples = coordinatorReservoirSize * OPERATOR_OVER_SAMPLE_RATIO;
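// Round up so that the merged per-operator samples cover the coordinator reservoir size.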
return (int) Math.ceil((double) totalOperatorSamples / operatorParallelism);
} | @Test
public void testOperatorReservoirSize() {
assertThat(SketchUtil.determineOperatorReservoirSize(5, 3))
.isEqualTo((10_002 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 5);
assertThat(SketchUtil.determineOperatorReservoirSize(123, 123))
.isEqualTo((123_00 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 123);
assertThat(SketchUtil.determineOperatorReservoirSize(256, 123))
.isEqualTo(
(int) Math.ceil((double) (123_00 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 256));
assertThat(SketchUtil.determineOperatorReservoirSize(5_120, 10_123))
.isEqualTo(
(int) Math.ceil((double) (992_054 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 5_120));
} |
public final void tag(I input, ScopedSpan span) {
if (input == null) throw new NullPointerException("input == null");
if (span == null) throw new NullPointerException("span == null");
if (span.isNoop()) return;
tag(span, input, span.context());
} | @Test void tag_customizer_doesntParseNoop() {
tag.tag(input, context, NoopSpanCustomizer.INSTANCE);
verifyNoMoreInteractions(parseValue); // parsing is lazy
} |
@Description("Inverse of F cdf given numerator degrees of freedom (df1), denominator degrees of freedom (df2) parameters, and probability")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double inverseFCdf(
@SqlType(StandardTypes.DOUBLE) double df1,
@SqlType(StandardTypes.DOUBLE) double df2,
@SqlType(StandardTypes.DOUBLE) double p)
{
checkCondition(p >= 0 && p <= 1, INVALID_FUNCTION_ARGUMENT, "inverseFCdf Function: p must be in the interval [0, 1]");
checkCondition(df1 > 0, INVALID_FUNCTION_ARGUMENT, "inverseFCdf Function: numerator df must be greater than 0");
checkCondition(df2 > 0, INVALID_FUNCTION_ARGUMENT, "inverseFCdf Function: denominator df must be greater than 0");
FDistribution distribution = new FDistribution(null, df1, df2, FDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
return distribution.inverseCumulativeProbability(p);
} | @Test
public void testInverseFCdf()
{
assertFunction("inverse_f_cdf(2.0, 5.0, 0.0)", DOUBLE, 0.0);
assertFunction("round(inverse_f_cdf(2.0, 5.0, 0.5), 4)", DOUBLE, 0.7988);
assertFunction("round(inverse_f_cdf(2.0, 5.0, 0.9), 4)", DOUBLE, 3.7797);
assertInvalidFunction("inverse_f_cdf(0, 3, 0.5)", "inverseFCdf Function: numerator df must be greater than 0");
assertInvalidFunction("inverse_f_cdf(3, 0, 0.5)", "inverseFCdf Function: denominator df must be greater than 0");
assertInvalidFunction("inverse_f_cdf(3, 5, -0.1)", "inverseFCdf Function: p must be in the interval [0, 1]");
assertInvalidFunction("inverse_f_cdf(3, 5, 1.1)", "inverseFCdf Function: p must be in the interval [0, 1]");
} |
void retriggerSubpartitionRequest(Timer timer) {
synchronized (requestLock) {
checkState(subpartitionView == null, "already requested partition");
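// Schedule the retry after the current backoff; any failure is surfaced through setError.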
timer.schedule(
new TimerTask() {
@Override
public void run() {
try {
requestSubpartitions();
} catch (Throwable t) {
setError(t);
}
}
},
getCurrentBackoff());
}
} | @Test
void testChannelErrorWhileRetriggeringRequest() {
final SingleInputGate inputGate = createSingleInputGate(1);
final LocalInputChannel localChannel =
createLocalInputChannel(inputGate, new ResultPartitionManager());
final Timer timer =
new Timer(true) {
@Override
public void schedule(TimerTask task, long delay) {
task.run();
assertThatThrownBy(localChannel::checkError)
.isInstanceOfSatisfying(
PartitionNotFoundException.class,
notFound ->
assertThat(localChannel.partitionId)
.isEqualTo(notFound.getPartitionId()));
}
};
try {
localChannel.retriggerSubpartitionRequest(timer);
} finally {
timer.cancel();
}
} |
@Override
public Optional<Product> findProduct(int productId) {
return this.productRepository.findById(productId);
} | @Test
void findProduct_ProductDoesNotExist_ReturnsEmptyOptional() {
// given
var product = new Product(1, "Товар №1", "Описание товара №1");
// when
var result = this.service.findProduct(1);
// then
assertNotNull(result);
assertTrue(result.isEmpty());
verify(this.productRepository).findById(1);
verifyNoMoreInteractions(this.productRepository);
} |
@Override
public long nextDelayDuration(int reconsumeTimes) {
if (reconsumeTimes < 0) {
reconsumeTimes = 0;
}
if (reconsumeTimes > 32) {
reconsumeTimes = 32;
}
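// Exponential backoff: initial * multiplier^reconsumeTimes, capped at max.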
return Math.min(max, initial * (long) Math.pow(multiplier, reconsumeTimes));
} | @Test
public void testNextDelayDurationOutOfRange() {
ExponentialRetryPolicy exponentialRetryPolicy = new ExponentialRetryPolicy();
long actual = exponentialRetryPolicy.nextDelayDuration(-1);
assertThat(actual).isEqualTo(TimeUnit.SECONDS.toMillis(5));
actual = exponentialRetryPolicy.nextDelayDuration(100);
assertThat(actual).isEqualTo(TimeUnit.HOURS.toMillis(2));
} |
public static LocalUri parse(String path) {
if (path.startsWith(SCHEME)) {
URI parsed = URI.create(path);
path = parsed.getPath();
}
if (!path.startsWith(SLASH)) {
throw new IllegalArgumentException("Path must start at root /");
}
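// StringTokenizer skips empty tokens, so runs of slashes collapse; only non-absolute paths are rejected above.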
StringTokenizer tok = new StringTokenizer(path, SLASH);
LocalUri hpath = Root;
while (tok.hasMoreTokens()) {
hpath = hpath.append(tok.nextToken());
}
return hpath;
} | @Test
public void testParseMalformedRelative() {
String path = "example////some-id//instances/some-instance-id";
assertThrows(IllegalArgumentException.class, () -> LocalUri.parse(path));
} |
@Override
public Predicate negate() {
int size = predicates.length;
Predicate[] inners = new Predicate[size];
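// De Morgan: ~(p1 AND p2 ...) == ~p1 OR ~p2 ...; negate each inner predicate, wrapping non-negatable ones in NotPredicate.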
for (int i = 0; i < size; i++) {
Predicate original = predicates[i];
Predicate negated;
if (original instanceof NegatablePredicate predicate) {
negated = predicate.negate();
} else {
negated = new NotPredicate(original);
}
inners[i] = negated;
}
OrPredicate orPredicate = new OrPredicate(inners);
return orPredicate;
} | @Test
public void negate_whenContainsNonNegatablePredicate_thenReturnOrPredicateWithNotInside() {
// ~(foo and bar) --> (~foo or ~bar)
// this is testing the case where the inner predicate does NOT implement {@link Negatable}
Predicate nonNegatable = mock(Predicate.class);
AndPredicate and = (AndPredicate) and(nonNegatable);
OrPredicate result = (OrPredicate) and.negate();
Predicate[] inners = result.predicates;
assertThat(inners).hasSize(1);
NotPredicate notPredicate = (NotPredicate) inners[0];
assertThat(nonNegatable).isSameAs(notPredicate.predicate);
} |
@Override
public ApiResult<TopicPartition, ListOffsetsResultInfo> handleResponse(
Node broker,
Set<TopicPartition> keys,
AbstractResponse abstractResponse
) {
ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse;
Map<TopicPartition, ListOffsetsResultInfo> completed = new HashMap<>();
Map<TopicPartition, Throwable> failed = new HashMap<>();
List<TopicPartition> unmapped = new ArrayList<>();
Set<TopicPartition> retriable = new HashSet<>();
for (ListOffsetsTopicResponse topic : response.topics()) {
for (ListOffsetsPartitionResponse partition : topic.partitions()) {
TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
Errors error = Errors.forCode(partition.errorCode());
if (!offsetTimestampsByPartition.containsKey(topicPartition)) {
log.warn("ListOffsets response includes unknown topic partition {}", topicPartition);
} else if (error == Errors.NONE) {
Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
? Optional.empty()
: Optional.of(partition.leaderEpoch());
completed.put(
topicPartition,
new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch));
} else {
handlePartitionError(topicPartition, error, failed, unmapped, retriable);
}
}
}
// Sanity-check if the current leader for these partitions returned results for all of them
for (TopicPartition topicPartition : keys) {
if (unmapped.isEmpty()
&& !completed.containsKey(topicPartition)
&& !failed.containsKey(topicPartition)
&& !retriable.contains(topicPartition)
) {
ApiException sanityCheckException = new ApiException(
"The response from broker " + broker.id() +
" did not contain a result for topic partition " + topicPartition);
log.error(
"ListOffsets request for topic partition {} failed sanity check",
topicPartition,
sanityCheckException);
failed.put(topicPartition, sanityCheckException);
}
}
return new ApiResult<>(completed, failed, unmapped);
} | @Test
public void testHandleUnexpectedPartitionErrorResponse() {
TopicPartition errorPartition = t0p0;
Errors error = Errors.UNKNOWN_SERVER_ERROR;
Map<TopicPartition, Short> errorsByPartition = new HashMap<>();
errorsByPartition.put(errorPartition, error.code());
ApiResult<TopicPartition, ListOffsetsResultInfo> result =
handleResponse(createResponse(errorsByPartition));
Map<TopicPartition, Throwable> failed = new HashMap<>();
failed.put(errorPartition, error.exception());
Set<TopicPartition> completed = new HashSet<>(offsetTimestampsByPartition.keySet());
completed.removeAll(failed.keySet());
assertResult(result, completed, failed, emptyList(), emptySet());
} |
public String hash() {
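// Take the read lock so the hash is computed consistently with concurrent property updates.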
try (LockResource r = new LockResource(mLock.readLock())) {
return mHash.get();
}
} | @Test
public void hashEmpty() {
PathProperties emptyProperties = new PathProperties();
String hash = emptyProperties.hash();
Assert.assertNotNull(hash);
Assert.assertEquals(hash, emptyProperties.hash());
} |
@Override
public Option<FileSlice> getLatestFileSlice(String partitionPath, String fileId) {
return execute(partitionPath, fileId, preferredView::getLatestFileSlice, (path, fgId) -> getSecondaryView().getLatestFileSlice(path, fgId));
} | @Test
public void testGetLatestFileSlice() {
Option<FileSlice> actual;
Option<FileSlice> expected = Option.fromJavaOptional(testFileSliceStream.findFirst());
String partitionPath = "/table2";
String fileID = "file.123";
when(primary.getLatestFileSlice(partitionPath, fileID)).thenReturn(expected);
actual = fsView.getLatestFileSlice(partitionPath, fileID);
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getLatestFileSlice(partitionPath, fileID)).thenThrow(new RuntimeException());
when(secondary.getLatestFileSlice(partitionPath, fileID)).thenReturn(expected);
actual = fsView.getLatestFileSlice(partitionPath, fileID);
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestFileSlice(partitionPath, fileID)).thenReturn(expected);
actual = fsView.getLatestFileSlice(partitionPath, fileID);
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestFileSlice(partitionPath, fileID)).thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getLatestFileSlice(partitionPath, fileID);
});
} |
@Nonnull
public static <T> Sink<T> remoteReliableTopic(@Nonnull String reliableTopicName,
@Nonnull ClientConfig clientConfig) {
String clientXml = asXmlString(clientConfig); //conversion needed for serializability
return SinkBuilder
.sinkBuilder("reliableTopicSink(" + reliableTopicName + "))",
ctx -> {
HazelcastInstance client = newHazelcastClient(asClientConfig(clientXml));
ITopic<T> topic = client.getReliableTopic(reliableTopicName);
return tuple2(client, topic);
})
.<T>receiveFn((clientTopicTuple, message) -> clientTopicTuple.f1().publish(message))
.destroyFn(clientTopicTuple -> clientTopicTuple.f0().shutdown())
.build();
} | @Test
public void remoteReliableTopic() {
// Given
populateList(srcList);
List<Object> receivedList = new ArrayList<>();
remoteHz.getReliableTopic(sinkName).addMessageListener(message -> receivedList.add(message.getMessageObject()));
// When
Sink<Object> sink = Sinks.remoteReliableTopic(sinkName, clientConfig);
p.readFrom(Sources.list(srcName)).writeTo(sink);
execute();
// Then
assertTrueEventually(() -> assertEquals(itemCount, receivedList.size()));
} |
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final List<Path> containers = new ArrayList<>();
for(Path file : files.keySet()) {
if(containerService.isContainer(file)) {
containers.add(file);
}
else {
callback.delete(file);
final Path bucket = containerService.getContainer(file);
if(file.getType().contains(Path.Type.upload)) {
// In-progress multipart upload
try {
multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(NotfoundException ignored) {
log.warn(String.format("Ignore failure deleting multipart upload %s", file));
}
}
else {
try {
// Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
session.getClient().deleteVersionedObject(
file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file));
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
}
}
for(Path file : containers) {
callback.delete(file);
try {
final String bucket = containerService.getContainer(file).getName();
session.getClient().deleteBucket(bucket);
session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
} | @Test
public void testDeleteFile() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(test, new TransferStatus());
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
} |
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
throws IOException {
FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
List<HasMetadata> accompanyingResources = new ArrayList<>();
final List<KubernetesStepDecorator> stepDecorators =
new ArrayList<>(
Arrays.asList(
new InitJobManagerDecorator(kubernetesJobManagerParameters),
new EnvSecretsDecorator(kubernetesJobManagerParameters),
new MountSecretsDecorator(kubernetesJobManagerParameters),
new CmdJobManagerDecorator(kubernetesJobManagerParameters),
new InternalServiceDecorator(kubernetesJobManagerParameters),
new ExternalServiceDecorator(kubernetesJobManagerParameters)));
Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
}
if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
}
stepDecorators.addAll(
Arrays.asList(
new FlinkConfMountDecorator(kubernetesJobManagerParameters),
new PodTemplateMountDecorator(kubernetesJobManagerParameters)));
for (KubernetesStepDecorator stepDecorator : stepDecorators) {
flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
}
final Deployment deployment =
createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);
return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
} | @Test
void testSetJobManagerDeploymentReplicas() throws Exception {
flinkConfig.set(HighAvailabilityOptions.HA_MODE, HighAvailabilityMode.KUBERNETES.name());
flinkConfig.set(
KubernetesConfigOptions.KUBERNETES_JOBMANAGER_REPLICAS, JOBMANAGER_REPLICAS);
kubernetesJobManagerSpecification =
KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
flinkPod, kubernetesJobManagerParameters);
assertThat(kubernetesJobManagerSpecification.getDeployment().getSpec().getReplicas())
.isEqualTo(JOBMANAGER_REPLICAS);
} |
public void runPickle(Pickle pickle) {
try {
StepTypeRegistry stepTypeRegistry = createTypeRegistryForPickle(pickle);
snippetGenerators = createSnippetGeneratorsForPickle(stepTypeRegistry);
// Java8 step definitions will be added to the glue here
buildBackendWorlds();
glue.prepareGlue(stepTypeRegistry);
TestCase testCase = createTestCaseForPickle(pickle);
testCase.run(bus);
} finally {
glue.removeScenarioScopedGlue();
disposeBackendWorlds();
}
} | @Test
void aftersteps_are_executed_after_failed_step() {
StubStepDefinition stepDefinition = spy(new StubStepDefinition("some step") {
@Override
public void execute(Object[] args) {
super.execute(args);
throw new RuntimeException();
}
});
Pickle pickleMatchingStepDefinitions = createPickleMatchingStepDefinitions(stepDefinition);
final HookDefinition afterStepHook = createHook();
TestRunnerSupplier runnerSupplier = new TestRunnerSupplier(bus, runtimeOptions) {
@Override
public void loadGlue(Glue glue, List<URI> gluePaths) {
glue.addAfterHook(afterStepHook);
glue.addStepDefinition(stepDefinition);
}
};
runnerSupplier.get().runPickle(pickleMatchingStepDefinitions);
InOrder inOrder = inOrder(afterStepHook, stepDefinition);
inOrder.verify(stepDefinition).execute(any(Object[].class));
inOrder.verify(afterStepHook).execute(any(TestCaseState.class));
} |
public static RecordBuilder<Schema> record(String name) {
return builder().record(name);
} | @Test
void validateDefaultsEnabled() {
assertThrows(AvroRuntimeException.class, () -> {
try {
SchemaBuilder.record("ValidationRecord").fields().name("IntegerField").type("int").withDefault("Invalid")
.endRecord();
} catch (AvroRuntimeException e) {
assertEquals("Invalid default for field IntegerField: \"Invalid\" not a \"int\"", e.getMessage(),
"Default behavior is to raise an exception due to record having an invalid default");
throw e;
}
});
} |
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
final Fetch<K, V> fetch = Fetch.empty();
final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
int recordsRemaining = fetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final CompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null)
break;
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
// Remove a completedFetch upon a parse with exception if (1) it contains no records, and
// (2) there are no fetched records with actual content preceding this exception.
// The first condition ensures that the completedFetches queue is not stuck with the same
// completedFetch in cases such as the TopicAuthorizationException, and the second condition
// ensures no potential data loss due to an exception in a following record.
if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
fetchBuffer.poll();
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else if (subscriptions.isPaused(nextInLineFetch.partition)) {
// when the partition is paused we add the records back to the completedFetches queue instead of draining
// them so that they can be returned on a subsequent poll if the partition is resumed at that time
log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
pausedCompletedFetches.add(nextInLineFetch);
fetchBuffer.setNextInLineFetch(null);
} else {
final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
recordsRemaining -= nextFetch.numRecords();
fetch.add(nextFetch);
}
}
} catch (KafkaException e) {
if (fetch.isEmpty())
throw e;
} finally {
// add any polled completed fetches for paused partitions back to the completed fetches queue to be
// re-evaluated in the next poll
fetchBuffer.addAll(pausedCompletedFetches);
}
return fetch;
} | @Test
public void testCollectFetchInitializationWithUpdateLogStartOffsetOnNotAssignedPartition() {
final TopicPartition topicPartition0 = new TopicPartition("topic", 0);
final long fetchOffset = 42;
final long highWatermark = 1000;
final long logStartOffset = 10;
final SubscriptionState subscriptions = mock(SubscriptionState.class);
when(subscriptions.hasValidPosition(topicPartition0)).thenReturn(true);
when(subscriptions.positionOrNull(topicPartition0)).thenReturn(new SubscriptionState.FetchPosition(fetchOffset));
when(subscriptions.tryUpdatingHighWatermark(topicPartition0, highWatermark)).thenReturn(true);
when(subscriptions.tryUpdatingLogStartOffset(topicPartition0, logStartOffset)).thenReturn(false);
final FetchCollector<String, String> fetchCollector = createFetchCollector(subscriptions);
final Records records = createRecords();
FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
.setPartitionIndex(topicPartition0.partition())
.setHighWatermark(highWatermark)
.setRecords(records)
.setLogStartOffset(logStartOffset);
final CompletedFetch completedFetch = new CompletedFetchBuilder()
.partitionData(partitionData)
.partition(topicPartition0)
.fetchOffset(fetchOffset).build();
final FetchBuffer fetchBuffer = mock(FetchBuffer.class);
when(fetchBuffer.nextInLineFetch()).thenReturn(null);
when(fetchBuffer.peek()).thenReturn(completedFetch).thenReturn(null);
final Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
assertTrue(fetch.isEmpty());
verify(fetchBuffer).setNextInLineFetch(null);
} |
public static Block getBlockObject(Type type, Object object, ObjectInspector objectInspector)
{
return requireNonNull(serializeObject(type, null, object, objectInspector), "serialized result is null");
} | @Test
public void testReuse()
{
BytesWritable value = new BytesWritable();
byte[] first = "hello world".getBytes(UTF_8);
value.set(first, 0, first.length);
byte[] second = "bye".getBytes(UTF_8);
value.set(second, 0, second.length);
Type type = new TypeToken<Map<BytesWritable, Long>>() {}.getType();
ObjectInspector inspector = getInspector(type);
Block actual = getBlockObject(mapType(createUnboundedVarcharType(), BIGINT), ImmutableMap.of(value, 0L), inspector);
Block expected = mapBlockOf(createUnboundedVarcharType(), BIGINT, "bye", 0L);
assertBlockEquals(actual, expected);
} |
public static HgVersion parse(String hgOut) {
String[] lines = hgOut.split("\n");
String firstLine = lines[0];
Matcher m = HG_VERSION_PATTERN.matcher(firstLine);
if (m.matches()) {
try {
return new HgVersion(Version.create(
asInt(m, 1),
asInt(m, 2),
asInt(m, 3)));
} catch (Exception e) {
throw bomb("cannot parse Hg version : " + hgOut);
}
}
throw bomb("cannot parse Hg version : " + hgOut);
} | @Test
void shouldBombIfVersionCannotBeParsed() {
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> HgVersion.parse(WINDOWS_HG_TORTOISE))
.withMessage("cannot parse Hg version : " + WINDOWS_HG_TORTOISE);
} |
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
String highwayValue = way.getTag("highway");
if (skipEmergency && "service".equals(highwayValue) && "emergency_access".equals(way.getTag("service")))
return;
int firstIndex = way.getFirstIndex(restrictionKeys);
String firstValue = firstIndex < 0 ? "" : way.getTag(restrictionKeys.get(firstIndex), "");
if (restrictedValues.contains(firstValue) && !hasTemporalRestriction(way, firstIndex, restrictionKeys))
return;
if (way.hasTag("gh:barrier_edge") && way.hasTag("node_tags")) {
List<Map<String, Object>> nodeTags = way.getTag("node_tags", null);
Map<String, Object> firstNodeTags = nodeTags.get(0);
// a barrier edge has the restriction in both nodes and the tags are the same -> get(0)
firstValue = getFirstPriorityNodeTag(firstNodeTags, restrictionKeys);
String barrierValue = firstNodeTags.containsKey("barrier") ? (String) firstNodeTags.get("barrier") : "";
if (restrictedValues.contains(firstValue) || barriers.contains(barrierValue)
|| "yes".equals(firstNodeTags.get("locked")) && !INTENDED.contains(firstValue))
return;
}
if (FerrySpeedCalculator.isFerry(way)) {
boolean isCar = restrictionKeys.contains("motorcar");
if (INTENDED.contains(firstValue)
                    // implied default is allowed only if foot and bicycle are not specified:
|| isCar && firstValue.isEmpty() && !way.hasTag("foot") && !way.hasTag("bicycle")
// if hgv is allowed then smaller trucks and cars are allowed too even if not specified
|| isCar && way.hasTag("hgv", "yes")) {
accessEnc.setBool(false, edgeId, edgeIntAccess, true);
accessEnc.setBool(true, edgeId, edgeIntAccess, true);
}
} else {
boolean isRoundabout = roundaboutEnc.getBool(false, edgeId, edgeIntAccess);
boolean ignoreOneway = "no".equals(way.getFirstValue(ignoreOnewayKeys));
boolean isBwd = isBackwardOneway(way);
if (!ignoreOneway && (isBwd || isRoundabout || isForwardOneway(way))) {
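                // oneway or roundabout: open the edge only in the permitted direction of travel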
accessEnc.setBool(isBwd, edgeId, edgeIntAccess, true);
} else {
accessEnc.setBool(false, edgeId, edgeIntAccess, true);
accessEnc.setBool(true, edgeId, edgeIntAccess, true);
}
}
} | @Test
public void testPrivate() {
ReaderWay way = new ReaderWay(1);
way.setTag("highway", "primary");
way.setTag("access", "private");
EdgeIntAccess edgeIntAccess = ArrayEdgeIntAccess.createFromBytes(em.getBytesForFlags());
int edgeId = 0;
parser.handleWayTags(edgeId, edgeIntAccess, way, null);
assertTrue(busAccessEnc.getBool(false, edgeId, edgeIntAccess));
assertTrue(busAccessEnc.getBool(true, edgeId, edgeIntAccess));
} |
@Override
public byte[] toByteArray() {
return toByteArray(0);
} | @Test(expected = UnsupportedOperationException.class)
public void testToByteArray() {
out.toByteArray();
} |
@VisibleForTesting
static Number convert(Date date, TimeUnit timeUnit, FieldSpec.DataType dataType) {
long convertedTime = timeUnit.convert(date.getTime(), TimeUnit.MILLISECONDS);
if (dataType == FieldSpec.DataType.LONG || dataType == FieldSpec.DataType.TIMESTAMP) {
return convertedTime;
}
if (dataType == FieldSpec.DataType.INT) {
return (int) convertedTime;
}
throw new IllegalArgumentException("Time column can be only INT, LONG or TIMESTAMP: " + dataType);
} | @Test
public void testConvert() {
int oneHourInMillis = 60 * 60 * 1000;
Date date = new Date(oneHourInMillis + 1);
// seconds
Number convertedTime = TimeGenerator.convert(date, TimeUnit.SECONDS, FieldSpec.DataType.LONG);
assertTrue(convertedTime instanceof Long);
assertEquals(3600L, convertedTime);
// seconds (timestamp)
convertedTime = TimeGenerator.convert(date, TimeUnit.SECONDS, FieldSpec.DataType.TIMESTAMP);
assertTrue(convertedTime instanceof Long);
assertEquals(3600L, convertedTime);
// minutes
convertedTime = TimeGenerator.convert(date, TimeUnit.MINUTES, FieldSpec.DataType.INT);
assertTrue(convertedTime instanceof Integer);
assertEquals(60, convertedTime);
// check hours
convertedTime = TimeGenerator.convert(date, TimeUnit.HOURS, FieldSpec.DataType.INT);
assertTrue(convertedTime instanceof Integer);
assertEquals(1, convertedTime);
} |
@Override
public RouteContext route(final RouteContext routeContext, final BroadcastRule broadcastRule) {
RouteMapper dataSourceMapper = getDataSourceRouteMapper(broadcastRule.getDataSourceNames());
routeContext.getRouteUnits().add(new RouteUnit(dataSourceMapper, createTableRouteMappers()));
return routeContext;
} | @Test
void assertRouteWithCursorStatement() {
CreateViewStatementContext sqlStatementContext = mock(CreateViewStatementContext.class);
Collection<String> logicTables = Collections.singleton("t_address");
ConnectionContext connectionContext = mock(ConnectionContext.class);
BroadcastUnicastRoutingEngine engine = new BroadcastUnicastRoutingEngine(sqlStatementContext, logicTables, connectionContext);
RouteContext routeContext = engine.route(new RouteContext(), broadcastRule);
assertThat(routeContext.getRouteUnits().size(), is(1));
RouteMapper dataSourceRouteMapper = routeContext.getRouteUnits().iterator().next().getDataSourceMapper();
assertThat(dataSourceRouteMapper.getLogicName(), is("ds_0"));
assertTableRouteMapper(routeContext);
} |
public void enablePrivacy(double epsilon)
{
enablePrivacy(epsilon, getDefaultRandomizationStrategy());
} | @Test
public void testEnablePrivacy()
{
SfmSketch sketch = SfmSketch.create(4096, 24);
double epsilon = 4;
for (int i = 0; i < 100_000; i++) {
sketch.add(i);
}
long cardinalityBefore = sketch.cardinality();
sketch.enablePrivacy(epsilon, new TestingSeededRandomizationStrategy(1));
long cardinalityAfter = sketch.cardinality();
// Randomized response probability should reflect the new (private) epsilon
assertEquals(sketch.getRandomizedResponseProbability(), SfmSketch.getRandomizedResponseProbability(epsilon));
assertTrue(sketch.isPrivacyEnabled());
// Cardinality should remain approximately the same
assertEquals(cardinalityAfter, cardinalityBefore, cardinalityBefore * 0.1);
} |
@Override
@Nullable
public Object convert(@Nullable String value) {
if (isNullOrEmpty(value)) {
return null;
}
LOG.debug("Trying to parse date <{}> with pattern <{}>, locale <{}>, and timezone <{}>.", value, dateFormat, locale, timeZone);
final DateTimeFormatter formatter;
if (containsTimeZone) {
formatter = DateTimeFormat
.forPattern(dateFormat)
.withDefaultYear(YearMonth.now(timeZone).getYear())
.withLocale(locale);
} else {
formatter = DateTimeFormat
.forPattern(dateFormat)
.withDefaultYear(YearMonth.now(timeZone).getYear())
.withLocale(locale)
.withZone(timeZone);
}
return DateTime.parse(value, formatter);
} | @Test
public void convertObeysTimeZone() throws Exception {
final DateTimeZone timeZone = DateTimeZone.forOffsetHours(12);
final Converter c = new DateConverter(config("YYYY-MM-dd HH:mm:ss", timeZone.toString(), null));
final DateTime dateOnly = (DateTime) c.convert("2014-03-12 10:00:00");
assertThat(dateOnly).isEqualTo("2014-03-12T10:00:00.000+12:00");
final DateTime dateTime = (DateTime) c.convert("2014-03-12 12:34:00");
assertThat(dateTime).isEqualTo("2014-03-12T12:34:00.000+12:00");
} |
@Override
public List<String> detect(ClassLoader classLoader) {
List<File> classpathContents =
classGraph
.disableNestedJarScanning()
.addClassLoader(classLoader)
.scan(1)
.getClasspathFiles();
return classpathContents.stream().map(File::getAbsolutePath).collect(Collectors.toList());
} | @Test
public void shouldStillDetectResourcesEvenIfClassloaderIsUseless() {
ClassLoader uselessClassLoader = Mockito.mock(ClassLoader.class);
ClasspathScanningResourcesDetector detector =
new ClasspathScanningResourcesDetector(new ClassGraph());
List<String> detectedResources = detector.detect(uselessClassLoader);
assertFalse(detectedResources.isEmpty());
} |
@Subscribe
public synchronized void renew(final ClusterStateEvent event) {
contextManager.getStateContext().switchClusterState(event.getClusterState());
} | @Test
void assertRenewInstanceState() {
ComputeNodeInstanceStateChangedEvent event = new ComputeNodeInstanceStateChangedEvent(
contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(), InstanceState.OK.name());
subscriber.renew(event);
assertThat(contextManager.getComputeNodeInstanceContext().getInstance().getState().getCurrentState(), is(InstanceState.OK));
} |
@SuppressWarnings("unchecked")
public static <T extends Factory> T discoverFactory(
ClassLoader classLoader, Class<T> factoryClass, String factoryIdentifier) {
final List<Factory> factories = discoverFactories(classLoader);
final List<Factory> foundFactories =
factories.stream()
.filter(f -> factoryClass.isAssignableFrom(f.getClass()))
.collect(Collectors.toList());
if (foundFactories.isEmpty()) {
throw new ValidationException(
String.format(
"Could not find any factories that implement '%s' in the classpath.",
factoryClass.getName()));
}
final List<Factory> matchingFactories =
foundFactories.stream()
.filter(f -> f.factoryIdentifier().equals(factoryIdentifier))
.collect(Collectors.toList());
if (matchingFactories.isEmpty()) {
throw new ValidationException(
String.format(
"Could not find any factory for identifier '%s' that implements '%s' in the classpath.\n\n"
+ "Available factory identifiers are:\n\n"
+ "%s",
factoryIdentifier,
factoryClass.getName(),
foundFactories.stream()
.map(Factory::factoryIdentifier)
.filter(identifier -> !DEFAULT_IDENTIFIER.equals(identifier))
.distinct()
.sorted()
.collect(Collectors.joining("\n"))));
}
if (matchingFactories.size() > 1) {
throw new ValidationException(
String.format(
"Multiple factories for identifier '%s' that implement '%s' found in the classpath.\n\n"
+ "Ambiguous factory classes are:\n\n"
+ "%s",
factoryIdentifier,
factoryClass.getName(),
matchingFactories.stream()
.map(f -> f.getClass().getName())
.sorted()
.collect(Collectors.joining("\n"))));
}
return (T) matchingFactories.get(0);
} | @Test
void testCreateCatalogStore() {
final Map<String, String> options = new HashMap<>();
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
final FactoryUtil.DefaultCatalogStoreContext discoveryContext =
new FactoryUtil.DefaultCatalogStoreContext(options, null, classLoader);
final CatalogStoreFactory factory =
FactoryUtil.discoverFactory(
classLoader, CatalogStoreFactory.class, TestCatalogStoreFactory.IDENTIFIER);
factory.open(discoveryContext);
CatalogStore catalogStore = factory.createCatalogStore();
assertThat(catalogStore).isInstanceOf(TestCatalogStoreFactory.TestCatalogStore.class);
} |
@Override
public Response request(Request request, long timeouts) throws NacosException {
Payload grpcRequest = GrpcUtils.convert(request);
ListenableFuture<Payload> requestFuture = grpcFutureServiceStub.request(grpcRequest);
Payload grpcResponse;
try {
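            // a non-positive timeout means wait indefinitely for the gRPC response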
if (timeouts <= 0) {
grpcResponse = requestFuture.get();
} else {
grpcResponse = requestFuture.get(timeouts, TimeUnit.MILLISECONDS);
}
} catch (Exception e) {
throw new NacosException(NacosException.SERVER_ERROR, e);
}
return (Response) GrpcUtils.parse(grpcResponse);
} | @Test
void testRequestTimeout() throws InterruptedException, ExecutionException, TimeoutException, NacosException {
assertThrows(NacosException.class, () -> {
when(future.get(100L, TimeUnit.MILLISECONDS)).thenThrow(new TimeoutException("test"));
connection.request(new HealthCheckRequest(), 100);
});
} |
public int getVersion() {
return mVersion;
} | @Test
public void testProperties() {
PrefsRoot root = new PrefsRoot(3);
Assert.assertEquals(3, root.getVersion());
} |
public IZAddress resolve(boolean ipv6)
{
resolved = protocol.zresolve(address, ipv6);
return resolved;
} | @Test(expected = ZMQException.class)
public void testInvalid()
{
new Address("tcp", "ggglocalhostxxx.google.com:80").resolve(false);
} |
public static String explainPlanFor(String sqlRequest) throws SQLException, NamingException {
final Connection connection = getConnection();
if (connection != null) {
try {
final Database database = Database.getDatabaseForConnection(connection);
if (database == Database.ORACLE) {
                // For Oracle, request the execution plan using the default PLAN_TABLE
                // with "explain plan set statement_id = <statement_id> for ..."
                // (for MySQL we could run "explain ...",
                // except that bound parameters would not be accepted,
                // nor would update/insert/delete statements).
                // (for DB2, the syntax would be "explain plan for ...")
                // For MySQL it would suffice to read the ResultSet of executeQuery("explain ..."),
                // which could be displayed as a table from String[][],
                // but for Oracle the plan_table must be read
                // (http://www.java2s.com/Open-Source/Java-Document/Database-Client/squirrel-sql-2.6.5a/net/sourceforge/squirrel_sql/plugins/oracle/explainplan/ExplainPlanExecuter.java.htm)
                // the hashCode is a sufficiently unique key because few execution plans are
                // displayed simultaneously, and in any case CounterRequest.getId() is too long
                // for the default Oracle table (SYS.PLAN_TABLE$.STATEMENT_ID has a length of 30)
final String statementId = String.valueOf(sqlRequest.hashCode());
                // uses the default PLAN_TABLE
                // (this table must be created beforehand in Oracle
                // and it can be created by: @$ORACLE_HOME/rdbms/admin/catplan.sql
                // or by @$ORACLE_HOME/rdbms/admin/utlxplan.sql for Oracle 9g or earlier)
final String explainRequest = "explain plan set statement_id = '" + statementId
+ "' for " + normalizeRequestForExplain(sqlRequest, ':');
                // execute the explain request
try (final Statement statement = connection.createStatement()) {
statement.execute(explainRequest);
}
                // retrieve the result
                // default PLAN_TABLE and default format
final String planTableRequest = "select * from table(dbms_xplan.display(null,?,null))";
final String[][] planTableOutput = executeRequest(connection, planTableRequest,
Collections.singletonList(statementId));
final StringBuilder sb = new StringBuilder();
for (final String[] row : planTableOutput) {
for (final String value : row) {
sb.append(value);
}
sb.append('\n');
}
if (sb.indexOf("-") != -1) {
sb.delete(0, sb.indexOf("-"));
}
return sb.toString();
} else if (database == Database.POSTGRESQL && POSTGRESQL_DRIVER_AVAILABLE
&& connection.getMetaData().getDatabaseMajorVersion() >= 16) {
                // For PostgreSQL, request the execution plan with "explain (generic plan) ..."
final PgConnection pgConnection = connection.unwrap(PgConnection.class);
if (pgConnection != null) {
final PreferQueryMode preferQueryMode = pgConnection.getQueryExecutor()
.getPreferQueryMode();
try {
// given the parameters without values, explain (generic plan) should be executed as simple query
// and not as prepared query which is by default.
// (there is no other way than using postgresql "internal" api:
// not possible using jdbc api or postgresql "public" api)
pgConnection.getQueryExecutor()
.setPreferQueryMode(PreferQueryMode.SIMPLE);
                        // explain plan for PostgreSQL 16 or later
// https://www.cybertec-postgresql.com/en/explain-generic-plan-postgresql-16/
final String explainPlanRequest = "explain (generic_plan) "
+ normalizeRequestForExplain(sqlRequest, '$');
final StringBuilder sb = new StringBuilder();
try (final Statement statement = connection.createStatement()) {
try (final ResultSet resultSet = statement
.executeQuery(explainPlanRequest)) {
while (resultSet.next()) {
sb.append(resultSet.getString(1)).append('\n');
}
}
}
return sb.toString();
} finally {
// set back the connection preferQueryMode as before
pgConnection.getQueryExecutor().setPreferQueryMode(preferQueryMode);
}
}
}
} finally {
if (!connection.getAutoCommit()) {
connection.rollback();
}
connection.close();
}
}
return null;
} | @Test
public void testExplainPlanFor() throws SQLException, NamingException {
DatabaseInformations.explainPlanFor("select 1");
} |
@Override
public Set<ModelLocalUriId> getModelLocalUriIdsForFile() {
Set<ModelLocalUriId> localUriIds = localUriIdKeySet();
String matchingBase = SLASH + fileNameNoSuffix;
return localUriIds.stream().filter(modelLocalUriId -> modelLocalUriId.basePath().startsWith(matchingBase)).collect(Collectors.toSet());
} | @Test
void getModelLocalUriIdsForFile() {
String path = "/pmml/" + fileName + "/testmod";
LocalUri parsed = LocalUri.parse(path);
ModelLocalUriId modelLocalUriId = new ModelLocalUriId(parsed);
pmmlCompilationContext.addGeneratedClasses(modelLocalUriId, compiledClasses);
Set<ModelLocalUriId> retrieved = pmmlCompilationContext.getModelLocalUriIdsForFile();
assertThat(retrieved.size()).isEqualTo(1);
assertThat(retrieved.iterator().next()).isEqualTo(modelLocalUriId);
} |
private <T> RestResponse<T> get(final String path, final Class<T> type) {
return executeRequestSync(HttpMethod.GET,
path,
null,
r -> deserialize(r.getBody(), type),
Optional.empty());
} | @Test
public void shouldPostQueryRequest_chunkHandler() {
ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties, authHeader, HOST,
Collections.emptyMap(), RequestOptions.DEFAULT_TIMEOUT);
executor.submit(this::expectPostQueryRequestChunkHandler);
assertThatEventually(requestStarted::get, is(true));
handlerCaptor.getValue().handle(Buffer.buffer("{\"row\": {\"columns\": [1.0, 12.1]}},\n"));
handlerCaptor.getValue().handle(Buffer.buffer("{\"row\": {\"columns\": [5.0, 10.5]}},\n"));
endCaptor.getValue().handle(null);
assertThatEventually(response::get, notNullValue());
    assertThat(response.get().getResponse(), is(2));
    assertThat(rows.size(), is(2));
} |
public static List<Event> computeEventDiff(final Params params) {
final List<Event> events = new ArrayList<>();
emitPerNodeDiffEvents(createBaselineParams(params), events);
emitWholeClusterDiffEvent(createBaselineParams(params), events);
emitDerivedBucketSpaceStatesDiffEvents(params, events);
return events;
} | @Test
void cluster_event_is_tagged_with_given_time() {
final EventFixture fixture = EventFixture.createForNodes(3)
.clusterStateBefore("distributor:3 storage:3")
.clusterStateAfter("cluster:d distributor:3 storage:3")
.currentTimeMs(56789);
final List<Event> events = fixture.computeEventDiff();
assertThat(events.size(), equalTo(1));
assertThat(events, hasItem(eventTimeIs(56789)));
} |
public static byte[] hexStringToByteArray(final String s) {
if (s == null) {
return null;
}
final int len = s.length();
final byte[] bytes = new byte[len / 2];
for (int i = 0; i < len; i += 2) {
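            // combine two hex digits into one byte: high nibble from charAt(i), low nibble from charAt(i + 1)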
bytes[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4) + Character.digit(s.charAt(i + 1), 16));
}
return bytes;
} | @Test
public void testHexStringToByteArray() {
Assert.assertNull(BytesUtil.hexStringToByteArray(null));
Assert.assertArrayEquals(new byte[] { -17, -5 }, BytesUtil.hexStringToByteArray("foob"));
} |
@Override
protected void channelRead0(ChannelHandlerContext ctx, String msg) throws Exception {
if (StringUtils.isBlank(msg)) {
ctx.writeAndFlush(QosProcessHandler.PROMPT);
} else {
CommandContext commandContext = TelnetCommandDecoder.decode(msg);
commandContext.setQosConfiguration(qosConfiguration);
commandContext.setRemote(ctx.channel());
try {
String result = commandExecutor.execute(commandContext);
if (StringUtils.isEquals(QosConstants.CLOSE, result)) {
ctx.writeAndFlush(getByeLabel()).addListener(ChannelFutureListener.CLOSE);
} else {
ctx.writeAndFlush(result + QosConstants.BR_STR + QosProcessHandler.PROMPT);
}
} catch (NoSuchCommandException ex) {
ctx.writeAndFlush(msg + " :no such command");
ctx.writeAndFlush(QosConstants.BR_STR + QosProcessHandler.PROMPT);
log.error(QOS_COMMAND_NOT_FOUND, "", "", "can not found command " + commandContext, ex);
} catch (PermissionDenyException ex) {
ctx.writeAndFlush(msg + " :permission deny");
ctx.writeAndFlush(QosConstants.BR_STR + QosProcessHandler.PROMPT);
log.error(
QOS_PERMISSION_DENY_EXCEPTION,
"",
"",
"permission deny to access command " + commandContext,
ex);
} catch (Exception ex) {
ctx.writeAndFlush(msg + " :fail to execute commandContext by " + ex.getMessage());
ctx.writeAndFlush(QosConstants.BR_STR + QosProcessHandler.PROMPT);
log.error(
QOS_UNEXPECTED_EXCEPTION, "", "", "execute commandContext got exception " + commandContext, ex);
}
}
} | @Test
void testGreeting() throws Exception {
ChannelHandlerContext context = mock(ChannelHandlerContext.class);
TelnetProcessHandler handler = new TelnetProcessHandler(
FrameworkModel.defaultModel(),
QosConfiguration.builder()
.anonymousAccessPermissionLevel(PermissionLevel.NONE.name())
.build());
handler.channelRead0(context, "greeting");
ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
verify(context).writeAndFlush(captor.capture());
assertThat(captor.getValue(), containsString("greeting"));
assertThat(captor.getValue(), containsString("dubbo>"));
} |
@Override
public void importFrom(Import theImport, String sourceSystemId) {
this.namespace = theImport.getNamespace() == null ? "" : theImport.getNamespace() + ":";
this.importFrom(theImport.getLocation());
} | @Test
public void testImportInheritedElement() throws Exception {
URL url = ReflectUtil.getResource("org/flowable/engine/impl/webservice/inherited-elements-in-types.wsdl");
assertThat(url).isNotNull();
importer.importFrom(url.toString());
List<StructureDefinition> structures = sortStructures();
assertThat(structures).hasSize(1);
final Object structureTypeInst = ReflectUtil.instantiate("org.flowable.webservice.counter.StructureType");
final Class<? extends Object> structureType = structureTypeInst.getClass();
this.assertStructure(structures.get(0), "inheritedRequest", new String[] { "rootElt", "inheritedElt", "newSimpleElt",
"newStructuredElt" }, new Class<?>[] { Short.class, Integer.class, String.class, structureType });
List<Field> declaredFields = filterJacoco(structureType.getDeclaredFields());
assertThat(declaredFields).hasSize(2);
assertThat(structureType.getDeclaredField("booleanElt")).isNotNull();
assertThat(structureType.getDeclaredField("dateElt")).isNotNull();
assertThat(filterJacoco(structureType.getSuperclass().getDeclaredFields())).hasSize(1);
assertThat(structureType.getSuperclass().getDeclaredField("rootElt")).isNotNull();
} |
public static <T> List<T> notNullElements(List<T> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notNull(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
} | @Test(expected = IllegalArgumentException.class)
public void notNullElementsNullList() {
Check.notNullElements(null, "name");
} |
@VisibleForTesting
ZonedDateTime parseZoned(final String text, final ZoneId zoneId) {
final TemporalAccessor parsed = formatter.parse(text);
final ZoneId parsedZone = parsed.query(TemporalQueries.zone());
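    // prefer the zone parsed from the text itself; fall back to the caller-supplied zone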
ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply(
ObjectUtils.defaultIfNull(parsedZone, zoneId));
for (final TemporalField override : ChronoField.values()) {
if (parsed.isSupported(override)) {
if (!resolved.isSupported(override)) {
throw new KsqlException(
"Unsupported temporal field in timestamp: " + text + " (" + override + ")");
}
final long value = parsed.getLong(override);
if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) {
if (!parsed.isSupported(ChronoField.YEAR)) {
throw new KsqlException("Leap day cannot be parsed without supplying the year field");
}
// eagerly override year, to avoid mismatch with epoch year, which is not a leap year
resolved = resolved.withYear(parsed.get(ChronoField.YEAR));
}
resolved = resolved.with(override, value);
}
}
return resolved;
} | @Test
public void shouldResolveDefaultsForPartial() {
// Given
final String format = "yyyy";
final String timestamp = "2019";
// When
final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID);
// Then
assertThat(ts, is(sameInstant(EPOCH.withYear(2019).withZoneSameInstant(ZID))));
} |
@Override
public void stopTrackingAndReleaseJobPartitions(
Collection<ResultPartitionID> partitionsToRelease) {
LOG.debug("Releasing Job Partitions {}", partitionsToRelease);
if (partitionsToRelease.isEmpty()) {
return;
}
stopTrackingPartitions(partitionsToRelease);
shuffleEnvironment.releasePartitionsLocally(partitionsToRelease);
} | @Test
void testStopTrackingAndReleaseJobPartitions() throws Exception {
final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture =
new CompletableFuture<>();
testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;
final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
final ResultPartitionID resultPartitionId2 = new ResultPartitionID();
final TaskExecutorPartitionTracker partitionTracker =
new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
partitionTracker.startTrackingPartition(
new JobID(),
new TaskExecutorPartitionInfo(
new TestingShuffleDescriptor(resultPartitionId1),
new IntermediateDataSetID(),
1));
partitionTracker.startTrackingPartition(
new JobID(),
new TaskExecutorPartitionInfo(
new TestingShuffleDescriptor(resultPartitionId2),
new IntermediateDataSetID(),
1));
partitionTracker.stopTrackingAndReleaseJobPartitions(
Collections.singleton(resultPartitionId1));
assertThatFuture(shuffleReleaseFuture)
.eventuallySucceeds()
.satisfies(actual -> assertThat(actual).containsExactly(resultPartitionId1));
} |
private static VerificationResult verifyChecksums(String expectedDigest, String actualDigest, boolean caseSensitive) {
if (expectedDigest == null) {
return VerificationResult.NOT_PROVIDED;
}
if (actualDigest == null) {
return VerificationResult.NOT_COMPUTED;
}
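        // MessageDigest.isEqual compares in constant time, avoiding timing side channels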
if (caseSensitive) {
if (MessageDigest.isEqual(expectedDigest.getBytes(StandardCharsets.US_ASCII), actualDigest.getBytes(StandardCharsets.US_ASCII))) {
return VerificationResult.PASS;
}
} else {
if (MessageDigest.isEqual(expectedDigest.toLowerCase().getBytes(StandardCharsets.US_ASCII), actualDigest.toLowerCase().getBytes(StandardCharsets.US_ASCII))) {
return VerificationResult.PASS;
}
}
return VerificationResult.FAIL;
} | @Test
public void sha1DoesNotIgnoreCase() {
final Exception ex = assertThrows(Exception.class, () -> UpdateCenter.verifyChecksums(
new MockDownloadJob(EMPTY_SHA1, EMPTY_SHA256, EMPTY_SHA512),
buildEntryWithExpectedChecksums(EMPTY_SHA1.toUpperCase(Locale.US), null, null), new File("example")));
assertTrue(ex.getMessage().contains("does not match expected SHA-1, expected '2JMJ7L5RSW0YVB/VLWAYKK/YBWK=', actual '2jmj7l5rSw0yVb/vlWAYkK/YBwk='"));
} |
public void insert(int key, int value) {
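        // each value maps to the set of keys currently stored under it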
GHIntHashSet set = map.get(value);
if (set == null) {
map.put(value, set = new GHIntHashSet(slidingMeanValue));
}
// else
// slidingMeanValue = Math.max(5, (slidingMeanValue + set.size()) / 2);
if (!set.add(key)) {
throw new IllegalStateException("use update if you want to update " + key);
}
size++;
} | @Test
public void testInsert() {
GHSortedCollection instance = new GHSortedCollection();
assertTrue(instance.isEmpty());
instance.insert(0, 10);
assertEquals(1, instance.getSize());
assertEquals(10, instance.peekValue());
assertEquals(0, instance.peekKey());
instance.update(0, 10, 2);
assertEquals(2, instance.peekValue());
assertEquals(1, instance.getSize());
instance.insert(0, 11);
assertEquals(2, instance.peekValue());
assertEquals(2, instance.getSize());
instance.insert(1, 0);
assertEquals(0, instance.peekValue());
assertEquals(3, instance.getSize());
} |
@Override
public Charset detect(InputStream input, Metadata metadata) throws IOException {
input.mark(MAX_BYTES);
byte[] bytes = new byte[MAX_BYTES];
try {
int numRead = IOUtils.read(input, bytes);
if (numRead < MIN_BYTES) {
return null;
} else if (numRead < MAX_BYTES) {
                // shrink the buffer to the number of bytes actually read
byte[] tmpBytes = new byte[numRead];
System.arraycopy(bytes, 0, tmpBytes, 0, numRead);
bytes = tmpBytes;
}
} finally {
input.reset();
}
for (int i = 0; i < BOMS.length; i++) {
ByteOrderMark bom = BOMS[i];
if (startsWith(bom, bytes)) {
return CHARSETS[i];
}
}
return null;
} | @Test
public void testBasic() throws Exception {
EncodingDetector detector = new BOMDetector();
for (ByteOrderMark bom : new ByteOrderMark[]{
ByteOrderMark.UTF_8, ByteOrderMark.UTF_16BE,
ByteOrderMark.UTF_16LE, ByteOrderMark.UTF_32BE, ByteOrderMark.UTF_32LE
}) {
UnsynchronizedByteArrayOutputStream bos = createStream(bom);
try (BOMInputStream bomInputStream =
new BOMInputStream(UnsynchronizedByteArrayInputStream.builder().setByteArray(bos.toByteArray()).get(),
ByteOrderMark.UTF_8, ByteOrderMark.UTF_32BE, ByteOrderMark.UTF_32LE,
ByteOrderMark.UTF_16BE, ByteOrderMark.UTF_16LE)) {
assertEquals(bom, bomInputStream.getBOM());
}
try (UnsynchronizedByteArrayInputStream is =
UnsynchronizedByteArrayInputStream.builder().setByteArray(bos.toByteArray()).get()) {
assertEquals(Charset.forName(bom.getCharsetName()), detector.detect(is, new Metadata()));
int cnt = 0;
int c = is.read();
while (c > -1) {
cnt++;
c = is.read();
}
assertEquals(100 + bom.getBytes().length, cnt);
}
}
} |
public void addCacheConfig(CacheConfig cacheConfig) {
configs.add(cacheConfig);
} | @Test
public void test_cachePostJoinOperationFails_whenJCacheNotAvailable_withCacheConfigs() {
// JCache is not available in classpath
OnJoinCacheOperation onJoinCacheOperation = createTestOnJoinCacheOperation(false);
// some CacheConfigs are added in the OnJoinCacheOperation (so JCache is actually in use in the rest of the cluster)
onJoinCacheOperation.addCacheConfig(new CacheConfig("test"));
onJoinCacheOperation.setNodeEngine(nodeEngine);
assertThatThrownBy(onJoinCacheOperation::run)
.isInstanceOf(HazelcastException.class)
.hasMessage("Service with name 'hz:impl:cacheService' not found!");
verify(nodeEngine).getLogger(onJoinCacheOperation.getClass());
verifyNoMoreInteractions(nodeEngine);
verify(logger).severe(anyString());
} |
public static String getAddress(ECKeyPair ecKeyPair) {
return getAddress(ecKeyPair.getPublicKey());
} | @Test
public void testGetAddressZeroPadded() {
byte[] address =
Keys.getAddress(
Numeric.toBytesPadded(BigInteger.valueOf(0x1234), Keys.PUBLIC_KEY_SIZE));
String expected = Numeric.toHexStringNoPrefix(address);
String value = "1234";
assertEquals(
Keys.getAddress(
"0x"
+ Strings.zeros(Keys.PUBLIC_KEY_LENGTH_IN_HEX - value.length())
+ value),
(expected));
} |
public static <V, F extends Future<V>> F cascade(final F future, final Promise<? super V> promise) {
return cascade(true, future, promise);
} | @Test
public void testCancelPropagationWhenFusedFromFuture() {
Promise<Void> p1 = ImmediateEventExecutor.INSTANCE.newPromise();
Promise<Void> p2 = ImmediateEventExecutor.INSTANCE.newPromise();
Promise<Void> returned = PromiseNotifier.cascade(p1, p2);
assertSame(p1, returned);
assertTrue(returned.cancel(false));
assertTrue(returned.isCancelled());
assertTrue(p2.isCancelled());
} |
@Override
public Optional<String> getContentHash() {
return Optional.ofNullable(mContentHash);
} | @Test
public void writeByteArrayForLargeFile() throws Exception {
int partSize = (int) FormatUtils.parseSpaceSize(PARTITION_SIZE);
byte[] b = new byte[partSize + 1];
assertEquals(mStream.getPartNumber(), 1);
mStream.write(b, 0, b.length);
assertEquals(mStream.getPartNumber(), 2);
Mockito.verify(mMockObsClient)
.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
Mockito.verify(mMockOutputStream).write(b, 0, b.length - 1);
Mockito.verify(mMockOutputStream).write(b, b.length - 1, 1);
Mockito.verify(mMockExecutor).submit(any(Callable.class));
mStream.close();
assertEquals(mStream.getPartNumber(), 3);
Mockito.verify(mMockObsClient)
.completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
assertTrue(mStream.getContentHash().isPresent());
assertEquals("multiTag", mStream.getContentHash().get());
} |
@Udf
public String elt(
@UdfParameter(description = "the nth element to extract") final int n,
@UdfParameter(description = "the strings of which to extract the nth") final String... args
) {
if (args == null) {
return null;
}
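        // n is 1-based; out-of-range values return null instead of throwing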
if (n < 1 || n > args.length) {
return null;
}
return args[n - 1];
} | @Test
public void shouldHandleNoArgs() {
// When:
final String el = elt.elt(2);
// Then:
assertThat(el, is(nullValue()));
} |
@Override
public IndexRange get(String index) throws NotFoundException {
final DBQuery.Query query = DBQuery.and(
DBQuery.notExists("start"),
DBQuery.is(IndexRange.FIELD_INDEX_NAME, index));
final MongoIndexRange indexRange = collection.findOne(query);
if (indexRange == null) {
throw new NotFoundException("Index range for index <" + index + "> not found.");
}
return indexRange;
} | @Test(expected = NotFoundException.class)
@MongoDBFixtures("MongoIndexRangeServiceTest-LegacyIndexRanges.json")
public void getIgnoresLegacyIndexRange() throws Exception {
indexRangeService.get("graylog_0");
} |
@Override public Repository getRepository() {
IPentahoSession session = pentahoSessionSupplier.get();
if ( session == null ) {
LOGGER.debug( "No active Pentaho Session, attempting to load PDI repository unauthenticated." );
return null;
}
ICacheManager cacheManager = cacheManagerFunction.apply( session );
String sessionName = session.getName();
Repository repository = (Repository) cacheManager.getFromRegionCache( REGION, sessionName );
if ( repository == null ) {
LOGGER.debug( "Repository not cached for user: " + sessionName + "." );
return null;
}
return repository;
} | @Test
public void testGetRepositoryNullSession() {
    when( pentahoSessionSupplier.get() ).thenReturn( null );
assertNull( pentahoSessionHolderRepositoryProvider.getRepository() );
} |
synchronized boolean tryToMoveTo(State to) {
boolean res = false;
State currentState = state;
if (TRANSITIONS.get(currentState).contains(to)) {
this.state = to;
res = true;
listeners.forEach(listener -> listener.onProcessState(processId, to));
}
LOG.debug("{} tryToMoveTo {} from {} to {} => {}", Thread.currentThread().getName(), processId.getHumanReadableName(), currentState, to, res);
return res;
} | @Test
@UseDataProvider("allStates")
public void no_state_can_not_move_to_itself(State state) {
assertThat(newLifeCycle(state).tryToMoveTo(state)).isFalse();
} |
@Override
public SelJodaDateTime assignOps(SelOp op, SelType rhs) {
if (op == SelOp.ASSIGN) {
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
this.val = ((SelJodaDateTime) rhs).val;
return this;
}
throw new UnsupportedOperationException(type() + " DO NOT support assignment operation " + op);
} | @Test
public void assignOps() {
one.assignOps(SelOp.ASSIGN, another);
assertEquals("DATETIME: 2019-01-01T00:00:00.000Z", one.type() + ": " + one);
} |
public static <T> Comparator<T> comparingPinyin(Function<T, String> keyExtractor) {
return comparingPinyin(keyExtractor, false);
} | @Test
public void comparingPinyin() {
List<String> list = ListUtil.toList("成都", "北京", "上海", "深圳");
List<String> ascendingOrderResult = ListUtil.of("北京", "成都", "上海", "深圳");
List<String> descendingOrderResult = ListUtil.of("深圳", "上海", "成都", "北京");
        // ascending order
list.sort(CompareUtil.comparingPinyin(e -> e));
assertEquals(list, ascendingOrderResult);
        // descending order
list.sort(CompareUtil.comparingPinyin(e -> e, true));
assertEquals(list, descendingOrderResult);
} |
@SuppressFBWarnings("NP_NONNULL_PARAM_VIOLATION") // Not a bug
synchronized CompletableFuture<Void> getFutureForSequenceNumber(final long seqNum) {
if (seqNum <= lastCompletedSequenceNumber) {
return CompletableFuture.completedFuture(null);
}
return sequenceNumberFutures.computeIfAbsent(seqNum, k -> new CompletableFuture<>());
} | @Test
public void shouldReturnFutureForExistingSequenceNumber() {
// Given:
final CompletableFuture<Void> existingFuture = futureStore.getFutureForSequenceNumber(2);
// When:
final CompletableFuture<Void> newFuture = futureStore.getFutureForSequenceNumber(2);
// Then:
assertThat(newFuture, is(sameInstance(existingFuture)));
} |
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
URL url = invoker.getUrl();
String methodName = RpcUtils.getMethodName(invocation);
int max = invoker.getUrl().getMethodParameter(methodName, ACTIVES_KEY, 0);
final RpcStatus rpcStatus = RpcStatus.getStatus(invoker.getUrl(), RpcUtils.getMethodName(invocation));
if (!RpcStatus.beginCount(url, methodName, max)) {
long timeout = invoker.getUrl().getMethodParameter(RpcUtils.getMethodName(invocation), TIMEOUT_KEY, 0);
long start = System.currentTimeMillis();
long remain = timeout;
synchronized (rpcStatus) {
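                // loop so the active count is re-checked after every wakeup (guards against spurious wakeups)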
while (!RpcStatus.beginCount(url, methodName, max)) {
try {
rpcStatus.wait(remain);
} catch (InterruptedException e) {
// ignore
}
long elapsed = System.currentTimeMillis() - start;
remain = timeout - elapsed;
if (remain <= 0) {
throw new RpcException(
RpcException.LIMIT_EXCEEDED_EXCEPTION,
"Waiting concurrent invoke timeout in client-side for service: "
+ invoker.getInterface().getName()
+ ", method: " + RpcUtils.getMethodName(invocation) + ", elapsed: "
+ elapsed + ", timeout: " + timeout + ". concurrent invokes: "
+ rpcStatus.getActive()
+ ". max concurrent invoke limit: " + max);
}
}
}
}
invocation.put(ACTIVE_LIMIT_FILTER_START_TIME, System.currentTimeMillis());
return invoker.invoke(invocation);
} | @Test
void testInvokeLessActives() {
URL url = URL.valueOf("test://test:11/test?accesslog=true&group=dubbo&version=1.1&actives=10");
Invoker<ActiveLimitFilterTest> invoker = new MyInvoker<ActiveLimitFilterTest>(url);
Invocation invocation = new MockInvocation();
activeLimitFilter.invoke(invoker, invocation);
} |
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
        if (!boss.equals(renamedBoss))
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
} | @Test
public void testKreearra()
{
ChatMessage chatMessageEvent = new ChatMessage(null, GAMEMESSAGE, "", "Your Kree'arra kill count is: <col=ff0000>4</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessageEvent);
verify(configManager).setRSProfileConfiguration("killcount", "kree'arra", 4);
} |
@Override
public String toString() {
String feedBlockedStr = clusterFeedIsBlocked()
? String.format(", feed blocked: '%s'", feedBlock.description)
: "";
String distributionConfigStr = (distributionConfig != null)
? ", distribution config: %s".formatted(distributionConfig.highLevelDescription())
: "";
if (derivedBucketSpaceStates.isEmpty()) {
return String.format("ClusterStateBundle('%s'%s%s%s)", baselineState,
deferredActivation ? " (deferred activation)" : "",
feedBlockedStr, distributionConfigStr);
}
Map<String, AnnotatedClusterState> orderedStates = new TreeMap<>(derivedBucketSpaceStates);
return String.format("ClusterStateBundle('%s', %s%s%s%s)", baselineState, orderedStates.entrySet().stream()
.map(e -> String.format("%s '%s'", e.getKey(), e.getValue()))
.collect(Collectors.joining(", ")),
deferredActivation ? " (deferred activation)" : "",
feedBlockedStr, distributionConfigStr);
} | @Test
void toString_includes_all_bucket_space_states() {
ClusterStateBundle bundle = createTestBundle();
assertThat(bundle.toString(), equalTo("ClusterStateBundle('distributor:2 storage:2', " +
"default 'distributor:2 storage:2 .0.s:d', " +
"global 'distributor:2 storage:2', " +
"narnia 'distributor:2 .0.s:d storage:2')"));
} |
@Override
public Long createDictData(DictDataSaveReqVO createReqVO) {
        // validate that the dict type exists
        validateDictTypeExists(createReqVO.getDictType());
        // validate the uniqueness of the dict data value
        validateDictDataValueUnique(null, createReqVO.getDictType(), createReqVO.getValue());
        // insert the dict data
DictDataDO dictData = BeanUtils.toBean(createReqVO, DictDataDO.class);
dictDataMapper.insert(dictData);
return dictData.getId();
} | @Test
public void testCreateDictData_success() {
        // prepare parameters
        DictDataSaveReqVO reqVO = randomPojo(DictDataSaveReqVO.class,
                o -> o.setStatus(randomCommonStatus()))
                .setId(null); // prevent the id from being pre-assigned
        // mock methods
        when(dictTypeService.getDictType(eq(reqVO.getDictType()))).thenReturn(randomDictTypeDO(reqVO.getDictType()));
        // invoke
        Long dictDataId = dictDataService.createDictData(reqVO);
        // assert
        assertNotNull(dictDataId);
        // verify that the stored record's attributes are correct
DictDataDO dictData = dictDataMapper.selectById(dictDataId);
assertPojoEquals(reqVO, dictData, "id");
} |
public void validate(ExternalIssueReport report, Path reportPath) {
if (report.rules != null && report.issues != null) {
Set<String> ruleIds = validateRules(report.rules, reportPath);
validateIssuesCctFormat(report.issues, ruleIds, reportPath);
} else if (report.rules == null && report.issues != null) {
String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. " +
"Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
validateIssuesDeprecatedFormat(report.issues, reportPath);
} else {
throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
}
} | @Test
public void validate_whenMissingMessageFieldForPrimaryLocation_shouldThrowException() throws IOException {
ExternalIssueReport report = read(REPORTS_LOCATION);
report.issues[0].primaryLocation.message = null;
assertThatThrownBy(() -> validator.validate(report, reportPath))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Failed to parse report 'report-path': missing mandatory field 'message' in the primary location of the issue.");
} |
static SortKey[] rangeBounds(
int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) {
// sort the keys first
Arrays.sort(samples, comparator);
int numCandidates = numPartitions - 1;
SortKey[] candidates = new SortKey[numCandidates];
int step = (int) Math.ceil((double) samples.length / numPartitions);
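    // pick every step-th sample (indices step-1, 2*step-1, ...) as a range bound candidate, probing past duplicates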
int position = step - 1;
int numChosen = 0;
while (position < samples.length && numChosen < numCandidates) {
SortKey candidate = samples[position];
// skip duplicate values
if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) {
// linear probe for the next distinct value
position += 1;
} else {
candidates[numChosen] = candidate;
position += step;
numChosen += 1;
}
}
return candidates;
} | @Test
public void testRangeBoundsSkipDuplicates() {
// step is 3 = ceiling(11/4)
assertThat(
SketchUtil.rangeBounds(
4,
SORT_ORDER_COMPARTOR,
new SortKey[] {
CHAR_KEYS.get("a"),
CHAR_KEYS.get("b"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("g"),
CHAR_KEYS.get("h"),
CHAR_KEYS.get("i"),
CHAR_KEYS.get("j"),
CHAR_KEYS.get("k"),
}))
// skipped duplicate c's
.containsExactly(CHAR_KEYS.get("c"), CHAR_KEYS.get("g"), CHAR_KEYS.get("j"));
} |
public DoubleArrayAsIterable usingTolerance(double tolerance) {
return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject());
} | @Test
public void usingTolerance_contains_nullExpected() {
expectFailureWhenTestingThat(array(1.1, 2.2, 3.3))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(null);
assertFailureKeys(
"value of",
"expected to contain",
"testing whether",
"but was",
"additionally, one or more exceptions were thrown while comparing elements",
"first exception");
assertThatFailure()
.factValue("first exception")
.startsWith("compare(1.1, null) threw java.lang.NullPointerException");
} |
void createOutputValueMapping() throws KettleException {
data.outputRowMeta = getInputRowMeta().clone();
meta.getFields( getInputRowMeta(), getStepname(), null, null, this, repository, metaStore );
data.fieldIndex = getInputRowMeta().indexOfValue( meta.getFieldname() );
if ( data.fieldIndex < 0 ) {
throw new KettleException( BaseMessages.getString( PKG, "SwitchCase.Exception.UnableToFindFieldName", meta
.getFieldname() ) );
}
data.inputValueMeta = getInputRowMeta().getValueMeta( data.fieldIndex );
try {
StepIOMetaInterface ioMeta = meta.getStepIOMeta();
// There is one or many case target for each target stream.
// The ioMeta object has one more target stream for the default target though.
//
List<StreamInterface> targetStreams = ioMeta.getTargetStreams();
for ( int i = 0; i < targetStreams.size(); i++ ) {
SwitchCaseTarget target = (SwitchCaseTarget) targetStreams.get( i ).getSubject();
if ( target == null ) {
break; // Skip over default option
}
if ( target.caseTargetStep == null ) {
throw new KettleException( BaseMessages.getString(
PKG, "SwitchCase.Log.NoTargetStepSpecifiedForValue", target.caseValue ) );
}
RowSet rowSet = findOutputRowSet( target.caseTargetStep.getName() );
if ( rowSet == null ) {
throw new KettleException( BaseMessages.getString(
PKG, "SwitchCase.Log.UnableToFindTargetRowSetForStep", target.caseTargetStep ) );
}
try {
Object value =
data.valueMeta.convertDataFromString(
target.caseValue, data.stringValueMeta, null, null, ValueMetaInterface.TRIM_TYPE_NONE );
// If we have a value and a rowset, we can store the combination in the map
//
if ( data.valueMeta.isNull( value ) ) {
data.nullRowSetSet.add( rowSet );
} else {
// could not use byte[] as key in Maps, so we need to convert it to his specific hashCode for future
// comparisons
value = prepareObjectType( value );
data.outputMap.put( value, rowSet );
}
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "SwitchCase.Log.UnableToConvertValue", target.caseValue ), e );
}
}
if ( meta.getDefaultTargetStep() != null ) {
RowSet rowSet = findOutputRowSet( meta.getDefaultTargetStep().getName() );
if ( rowSet != null ) {
data.defaultRowSetSet.add( rowSet );
if ( data.nullRowSetSet.isEmpty() ) {
data.nullRowSetSet.add( rowSet );
}
}
}
} catch ( Exception e ) {
throw new KettleException( e );
}
} | @Test
public void testCreateOutputValueMappingWithBinaryType() throws KettleException, URISyntaxException,
ParserConfigurationException, SAXException, IOException {
SwitchCaseCustom krasavez = new SwitchCaseCustom( mockHelper );
// load step info value-case mapping from xml.
List<DatabaseMeta> emptyList = new ArrayList<DatabaseMeta>();
krasavez.meta.loadXML( loadStepXmlMetadata( "SwitchCaseBinaryTest.xml" ), emptyList, mock( IMetaStore.class ) );
KeyToRowSetMap expectedNN = new KeyToRowSetMap();
Set<RowSet> nulls = new HashSet<RowSet>();
// create real steps for all targets
List<SwitchCaseTarget> list = krasavez.meta.getCaseTargets();
for ( SwitchCaseTarget item : list ) {
StepMetaInterface smInt = new DummyTransMeta();
StepMeta stepMeta = new StepMeta( item.caseTargetStepname, smInt );
item.caseTargetStep = stepMeta;
// create and put row set for this
RowSet rw = new QueueRowSet();
krasavez.map.put( item.caseTargetStepname, rw );
      // null values go to the null rowset
if ( item.caseValue != null ) {
expectedNN.put( item.caseValue, rw );
} else {
nulls.add( rw );
}
}
// create default step
StepMetaInterface smInt = new DummyTransMeta();
StepMeta stepMeta = new StepMeta( krasavez.meta.getDefaultTargetStepname(), smInt );
krasavez.meta.setDefaultTargetStep( stepMeta );
RowSet rw = new QueueRowSet();
krasavez.map.put( krasavez.meta.getDefaultTargetStepname(), rw );
krasavez.createOutputValueMapping();
// inspect step output data:
Set<RowSet> ones = krasavez.data.outputMap.get( "1" );
assertEquals( "Output map for 1 values contains 2 row sets", 2, ones.size() );
Set<RowSet> zeros = krasavez.data.outputMap.get( "0" );
assertEquals( "Output map for 0 values contains 1 row sets", 1, zeros.size() );
assertEquals( "Null row set contains 0 items: ", 2, krasavez.data.nullRowSetSet.size() );
assertEquals( "We have at least one default rowset", 1, krasavez.data.defaultRowSetSet.size() );
// check that rowsets data is correct:
Set<RowSet> rowsets = expectedNN.get( "1" );
for ( RowSet rowset : rowsets ) {
assertTrue( "Output map for 1 values contains expected row set", ones.contains( rowset ) );
}
rowsets = expectedNN.get( "0" );
for ( RowSet rowset : rowsets ) {
assertTrue( "Output map for 0 values contains expected row set", zeros.contains( rowset ) );
}
for ( RowSet rowset : krasavez.data.nullRowSetSet ) {
assertTrue( "Output map for null values contains expected row set", nulls.contains( rowset ) );
}
    // we have already checked that there is only one item.
for ( RowSet rowset : krasavez.data.defaultRowSetSet ) {
assertTrue( "Output map for default case contains expected row set", rowset.equals( rw ) );
}
} |
public static Object[] realize(Object[] objs, Class<?>[] types) {
if (objs.length != types.length) {
throw new IllegalArgumentException("args.length != types.length");
}
Object[] dests = new Object[objs.length];
for (int i = 0; i < objs.length; i++) {
dests[i] = realize(objs[i], types[i]);
}
return dests;
} | @Test
void testRealize() throws Exception {
Map<String, String> map = new LinkedHashMap<String, String>();
map.put("key", "value");
Object obj = PojoUtils.generalize(map);
assertTrue(obj instanceof LinkedHashMap);
Object outputObject = PojoUtils.realize(map, LinkedHashMap.class);
assertTrue(outputObject instanceof LinkedHashMap);
Object[] objects = PojoUtils.realize(new Object[] {map}, new Class[] {LinkedHashMap.class});
assertTrue(objects[0] instanceof LinkedHashMap);
assertEquals(objects[0], outputObject);
} |
public static ActiveRuleKey parse(String s) {
Preconditions.checkArgument(s.split(":").length >= 3, "Bad format of activeRule key: " + s);
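    // the rule profile uuid precedes the first colon; the remainder ("repository:rule") is the rule key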
int semiColonPos = s.indexOf(':');
String ruleProfileUuid = s.substring(0, semiColonPos);
String ruleKey = s.substring(semiColonPos + 1);
return new ActiveRuleKey(ruleProfileUuid, RuleKey.parse(ruleKey));
} | @Test
void parse_fail_when_less_than_three_colons() {
try {
ActiveRuleKey.parse("P1:xoo");
Assertions.fail();
} catch (IllegalArgumentException e) {
assertThat(e).hasMessage("Bad format of activeRule key: P1:xoo");
}
} |