focal_method (string, 13 to 60.9k chars) | test_case (string, 25 to 109k chars)
---|---|
public PipesResult process(FetchEmitTuple t) throws IOException, InterruptedException {
boolean restart = false;
if (!ping()) {
restart = true;
} else if (pipesConfig.getMaxFilesProcessedPerProcess() > 0 &&
filesProcessed >= pipesConfig.getMaxFilesProcessedPerProcess()) {
LOG.info("pipesClientId={}: restarting server after hitting max files: {}",
pipesClientId, filesProcessed);
restart = true;
}
if (restart) {
boolean successfulRestart = false;
while (!successfulRestart) {
try {
restart();
successfulRestart = true;
} catch (TimeoutException e) {
LOG.warn("pipesClientId={}: couldn't restart within {} ms (startupTimeoutMillis)",
pipesClientId, pipesConfig.getStartupTimeoutMillis());
Thread.sleep(pipesConfig.getSleepOnStartupTimeoutMillis());
}
}
}
return actuallyProcess(t);
} | @Test
public void testBasic() throws IOException, InterruptedException {
PipesResult pipesResult = pipesClient.process(
new FetchEmitTuple(testPdfFile, new FetchKey(fetcherName, testPdfFile),
new EmitKey(), new Metadata(), new ParseContext(), FetchEmitTuple.ON_PARSE_EXCEPTION.SKIP));
Assertions.assertNotNull(pipesResult.getEmitData().getMetadataList());
Assertions.assertEquals(1, pipesResult.getEmitData().getMetadataList().size());
Metadata metadata = pipesResult.getEmitData().getMetadataList().get(0);
Assertions.assertEquals("testOverlappingText.pdf", metadata.get("resourceName"));
} |
public static void remove() {
final Map<String, RequestContext> contextMap = THREAD_LOCAL_CONTEXT_MAP.get();
if (contextMap != null) {
contextMap.clear();
THREAD_LOCAL_CONTEXT_MAP.remove();
}
} | @Test
public void remove() throws NoSuchFieldException, IllegalAccessException {
ChainContext.getThreadLocalContext("test");
ChainContext.remove();
final Field mapField = ChainContext.class.getDeclaredField("THREAD_LOCAL_CONTEXT_MAP");
mapField.setAccessible(true);
final Object local = mapField.get(null);
Assert.assertTrue(local instanceof ThreadLocal);
assertNull(((ThreadLocal<?>) local).get());
} |
public static Predicate<MetricDto> isOptimizedForBestValue() {
return m -> m != null && m.isOptimizedBestValue() && m.getBestValue() != null;
} | @Test
void isOptimizedForBestValue_is_false_when_is_not_optimized() {
metric = new MetricDto()
.setBestValue(42.0d)
.setOptimizedBestValue(false);
boolean result = MetricDtoFunctions.isOptimizedForBestValue().test(metric);
assertThat(result).isFalse();
} |
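A complementary sketch for the true branch of the predicate (not from the original suite; it mirrors the chained-setter style of the test above):

```java
@Test
void isOptimizedForBestValue_is_true_when_optimized_and_best_value_present() {
    // Both remaining conditions of the predicate hold: optimizedBestValue is true
    // and bestValue is non-null, so the predicate evaluates to true.
    MetricDto metric = new MetricDto()
            .setBestValue(42.0d)
            .setOptimizedBestValue(true);
    assertThat(MetricDtoFunctions.isOptimizedForBestValue().test(metric)).isTrue();
}
```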
int parseAndConvert(String[] args) throws Exception {
Options opts = createOptions();
int retVal = 0;
try {
if (args.length == 0) {
LOG.info("Missing command line arguments");
printHelp(opts);
return 0;
}
CommandLine cliParser = new GnuParser().parse(opts, args);
if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
printHelp(opts);
return 0;
}
FSConfigToCSConfigConverter converter =
prepareAndGetConverter(cliParser);
converter.convert(converterParams);
String outputDir = converterParams.getOutputDirectory();
boolean skipVerification =
cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
if (outputDir != null && !skipVerification) {
validator.validateConvertedConfig(
converterParams.getOutputDirectory());
}
} catch (ParseException e) {
String msg = "Options parsing failed: " + e.getMessage();
logAndStdErr(e, msg);
printHelp(opts);
retVal = -1;
} catch (PreconditionException e) {
String msg = "Cannot start FS config conversion due to the following"
+ " precondition error: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (UnsupportedPropertyException e) {
String msg = "Unsupported property/setting encountered during FS config "
+ "conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (ConversionException | IllegalArgumentException e) {
String msg = "Fatal error during FS config conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (VerificationException e) {
Throwable cause = e.getCause();
String msg = "Verification failed: " + e.getCause().getMessage();
conversionOptions.handleVerificationFailure(cause, msg);
retVal = -1;
}
conversionOptions.handleParsingFinished();
return retVal;
} | @Test
public void testConvertFSConfigurationWithConsoleParam()
throws Exception {
setupFSConfigConversionFiles(true);
ArgumentCaptor<FSConfigToCSConfigConverterParams> conversionParams =
ArgumentCaptor.forClass(FSConfigToCSConfigConverterParams.class);
FSConfigToCSConfigArgumentHandler argumentHandler =
createArgumentHandler();
String[] args = getArgumentsAsArrayWithDefaults("-f",
FSConfigConverterTestCommons.FS_ALLOC_FILE,
"-r", FSConfigConverterTestCommons.CONVERSION_RULES_FILE, "-p");
argumentHandler.parseAndConvert(args);
// validate params
verify(mockConverter).convert(conversionParams.capture());
FSConfigToCSConfigConverterParams params = conversionParams.getValue();
LOG.info("FS config converter parameters: " + params);
assertEquals("Yarn site config",
FSConfigConverterTestCommons.YARN_SITE_XML,
params.getYarnSiteXmlConfig());
assertEquals("FS xml", FSConfigConverterTestCommons.FS_ALLOC_FILE,
params.getFairSchedulerXmlConfig());
assertEquals("Conversion rules config",
FSConfigConverterTestCommons.CONVERSION_RULES_FILE,
params.getConversionRulesConfig());
assertTrue("Console mode", params.isConsole());
} |
public static <T> RetryTransformer<T> of(Retry retry) {
return new RetryTransformer<>(retry);
} | @Test
public void doNotRetryFromPredicateUsingFlowable() {
RetryConfig config = RetryConfig.custom()
.retryOnException(t -> t instanceof IOException)
.waitDuration(Duration.ofMillis(50))
.maxAttempts(3).build();
Retry retry = Retry.of("testName", config);
given(helloWorldService.returnHelloWorld())
.willThrow(new HelloWorldException());
Flowable.fromCallable(helloWorldService::returnHelloWorld)
.compose(RetryTransformer.of(retry))
.test()
.assertError(HelloWorldException.class)
.assertNotComplete()
.assertSubscribed();
then(helloWorldService).should().returnHelloWorld();
Retry.Metrics metrics = retry.getMetrics();
assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(1);
assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
} |
@Udf(description = "Returns a masked version of the input string. All characters except for the"
+ " last n will be replaced according to the default masking rules.")
@SuppressWarnings("MethodMayBeStatic") // Invoked via reflection
public String mask(
@UdfParameter("input STRING to be masked") final String input,
@UdfParameter("number of characters to keep unmasked at the end") final int numChars
) {
return doMask(new Masker(), input, numChars);
} | @Test
public void shouldNotMaskLastNChars() {
final String result = udf.mask("AbCd#$123xy Z", 5);
assertThat(result, is("XxXx--nn3xy Z"));
} |
public static CheckpointStorage load(
@Nullable CheckpointStorage fromApplication,
StateBackend configuredStateBackend,
Configuration jobConfig,
Configuration clusterConfig,
ClassLoader classLoader,
@Nullable Logger logger)
throws IllegalConfigurationException, DynamicCodeLoadingException {
Preconditions.checkNotNull(jobConfig, "jobConfig");
Preconditions.checkNotNull(clusterConfig, "clusterConfig");
Preconditions.checkNotNull(classLoader, "classLoader");
Preconditions.checkNotNull(configuredStateBackend, "statebackend");
// Job level config can override the cluster level config.
Configuration mergedConfig = new Configuration(clusterConfig);
mergedConfig.addAll(jobConfig);
// Legacy state backends always take precedence for backwards compatibility.
StateBackend rootStateBackend =
(configuredStateBackend instanceof DelegatingStateBackend)
? ((DelegatingStateBackend) configuredStateBackend)
.getDelegatedStateBackend()
: configuredStateBackend;
if (rootStateBackend instanceof CheckpointStorage) {
if (logger != null) {
logger.info(
"Using legacy state backend {} as Job checkpoint storage",
rootStateBackend);
if (fromApplication != null) {
logger.warn(
"Checkpoint storage passed via StreamExecutionEnvironment is ignored because legacy state backend '{}' is used. {}",
rootStateBackend.getClass().getName(),
LEGACY_PRECEDENCE_LOG_MESSAGE);
}
if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) {
logger.warn(
"Config option '{}' is ignored because legacy state backend '{}' is used. {}",
CheckpointingOptions.CHECKPOINT_STORAGE.key(),
rootStateBackend.getClass().getName(),
LEGACY_PRECEDENCE_LOG_MESSAGE);
}
}
return (CheckpointStorage) rootStateBackend;
}
// In Flink 2.0, checkpoint storage passed in from the application will no longer
// be supported.
if (fromApplication != null) {
if (fromApplication instanceof ConfigurableCheckpointStorage) {
if (logger != null) {
logger.info(
"Using job/cluster config to configure application-defined checkpoint storage: {}",
fromApplication);
if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) {
logger.warn(
"Config option '{}' is ignored because the checkpoint storage passed via StreamExecutionEnvironment takes precedence.",
CheckpointingOptions.CHECKPOINT_STORAGE.key());
}
}
return ((ConfigurableCheckpointStorage) fromApplication)
// Use cluster config for backwards compatibility.
.configure(clusterConfig, classLoader);
}
if (logger != null) {
logger.info("Using application defined checkpoint storage: {}", fromApplication);
}
return fromApplication;
}
return fromConfig(mergedConfig, classLoader, logger)
.orElseGet(() -> createDefaultCheckpointStorage(mergedConfig, classLoader, logger));
} | @Test
void testLegacyStateBackendTakesPrecedence() throws Exception {
StateBackend legacy = new LegacyStateBackend();
CheckpointStorage storage = new MockStorage();
CheckpointStorage configured =
CheckpointStorageLoader.load(
storage, legacy, new Configuration(), new Configuration(), cl, LOG);
assertThat(configured)
.withFailMessage("Legacy state backends should always take precedence")
.isEqualTo(legacy);
} |
static void addKieRuntimeServiceToFirstLevelCache(KieRuntimeService toAdd, EfestoClassKey firstLevelClassKey) {
List<KieRuntimeService> stored = firstLevelCache.get(firstLevelClassKey);
if (stored == null) {
stored = new ArrayList<>();
firstLevelCache.put(firstLevelClassKey, stored);
}
stored.add(toAdd);
} | @Test
void addKieRuntimeServiceToFirstLevelCache() {
List<KieRuntimeService> discoveredKieRuntimeServices = Collections.singletonList(kieRuntimeServiceA);
final Map<EfestoClassKey, List<KieRuntimeService>> toPopulate = new HashMap<>();
RuntimeManagerUtils.populateFirstLevelCache(discoveredKieRuntimeServices, toPopulate);
assertThat(toPopulate).hasSize(1);
assertThat(toPopulate).containsKeys(efestoClassKeyA);
List<KieRuntimeService> servicesA = toPopulate.get(efestoClassKeyA);
assertThat(servicesA).containsExactly(kieRuntimeServiceA);
RuntimeManagerUtils.firstLevelCache.putAll(toPopulate);
RuntimeManagerUtils.addKieRuntimeServiceToFirstLevelCache(kieRuntimeServiceA_cloned, efestoClassKeyA);
servicesA = RuntimeManagerUtils.firstLevelCache.get(efestoClassKeyA);
assertThat(servicesA).containsExactly(kieRuntimeServiceA, kieRuntimeServiceA_cloned);
} |
public static <K, V, S extends StateStore> Materialized<K, V, S> as(final DslStoreSuppliers storeSuppliers) {
Objects.requireNonNull(storeSuppliers, "store type can't be null");
return new Materialized<>(storeSuppliers);
} | @Test
public void shouldThrowNullPointerIfKeyValueBytesStoreSupplierIsNull() {
final NullPointerException e = assertThrows(NullPointerException.class,
() -> Materialized.as((KeyValueBytesStoreSupplier) null));
assertEquals(e.getMessage(), "supplier can't be null");
} |
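The test above exercises the `KeyValueBytesStoreSupplier` overload; for the `DslStoreSuppliers` overload shown as the focal method, a hypothetical null-check test would assert the message from its `requireNonNull` guard (a sketch, not part of the original suite):

```java
@Test
public void shouldThrowNullPointerIfDslStoreSuppliersIsNull() {
    // The focal method guards with requireNonNull(storeSuppliers, "store type can't be null").
    final NullPointerException e = assertThrows(NullPointerException.class,
        () -> Materialized.as((DslStoreSuppliers) null));
    assertEquals("store type can't be null", e.getMessage());
}
```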
protected boolean isWebJar(Dependency dependency, Dependency nextDependency) {
if (dependency == null || dependency.getFileName() == null
|| nextDependency == null || nextDependency.getFileName() == null
|| dependency.getSoftwareIdentifiers().isEmpty()
|| nextDependency.getSoftwareIdentifiers().isEmpty()) {
return false;
}
final String mainName = dependency.getFileName().toLowerCase();
final String nextName = nextDependency.getFileName().toLowerCase();
if (mainName.endsWith(".jar") && nextName.endsWith(".js") && nextName.startsWith(mainName)) {
return dependency.getSoftwareIdentifiers()
.stream().map(Identifier::getValue).collect(toSet())
.containsAll(nextDependency.getSoftwareIdentifiers().stream().map(this::identifierToWebJarForComparison).collect(toSet()));
} else if (nextName.endsWith(".jar") && mainName.endsWith(".js") && mainName.startsWith(nextName)) {
return nextDependency.getSoftwareIdentifiers()
.stream().map(Identifier::getValue).collect(toSet())
.containsAll(dependency.getSoftwareIdentifiers().stream().map(this::identifierToWebJarForComparison).collect(toSet()));
}
return false;
} | @Test
public void testIsWebJar() throws MalformedPackageURLException {
DependencyBundlingAnalyzer instance = new DependencyBundlingAnalyzer();
Dependency left = null;
Dependency right = null;
boolean expResult = false;
boolean result = instance.isWebJar(left, right);
assertEquals(expResult, result);
left = new Dependency();
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
left = new Dependency(new File("/path/jquery.jar"), true);
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
right = new Dependency();
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
right = new Dependency(new File("/path/jquery.js"), true);
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
right = new Dependency(new File("/path/jquery.js"), true);
right.setFileName("jquery.jar: jquery.js");
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
left.addSoftwareIdentifier(new PurlIdentifier("maven", "org.webjars", "jquery", "1.0", Confidence.HIGHEST));
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
right.addSoftwareIdentifier(new PurlIdentifier("javascript", "bootstrap", "1.0", Confidence.HIGHEST));
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
right = new Dependency(new File("/path/jquery.js"), true);
right.setFileName("jquery.jar: jquery.js");
right.addSoftwareIdentifier(new PurlIdentifier("javascript", "jquery", "1.0", Confidence.HIGHEST));
expResult = true;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
left = new Dependency(new File("/path/spring-core.jar"), true);
left.addSoftwareIdentifier(new PurlIdentifier("maven", "org.springframework", "spring-core", "3.0.0", Confidence.HIGHEST));
expResult = false;
result = instance.isWebJar(left, right);
assertEquals(expResult, result);
} |
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
} | @Test
public void shouldNotAllowNullValueJoinerWithKeyOnLeftJoinWithGlobalTableWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.leftJoin(
testGlobalTable,
MockMapper.selectValueMapper(),
(ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null,
Named.as("name")));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
} |
@Override
public void execute(final CommandLine commandLine, final Options options,
RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
defaultMQAdminExt.start();
TopicConfig topicConfig = new TopicConfig();
String topic;
if (commandLine.hasOption('t')) {
topic = commandLine.getOptionValue('t').trim();
} else {
System.out.printf("topic parameter value must be need.%n");
return;
}
TopicRouteData topicRouteData = defaultMQAdminExt.examineTopicRouteInfo(topic);
assert topicRouteData != null;
List<QueueData> queueDatas = topicRouteData.getQueueDatas();
assert queueDatas != null && queueDatas.size() > 0;
QueueData queueData = queueDatas.get(0);
topicConfig.setTopicName(topic);
topicConfig.setWriteQueueNums(queueData.getWriteQueueNums());
topicConfig.setReadQueueNums(queueData.getReadQueueNums());
topicConfig.setTopicSysFlag(queueData.getTopicSysFlag());
//new perm
int perm;
if (commandLine.hasOption('p')) {
perm = Integer.parseInt(commandLine.getOptionValue('p').trim());
} else {
System.out.printf("perm parameter value must be need.%n");
return;
}
topicConfig.setPerm(perm);
if (commandLine.hasOption('b')) {
String brokerAddr = commandLine.getOptionValue('b').trim();
List<BrokerData> brokerDatas = topicRouteData.getBrokerDatas();
String brokerName = null;
for (BrokerData data : brokerDatas) {
HashMap<Long, String> brokerAddrs = data.getBrokerAddrs();
if (brokerAddrs == null || brokerAddrs.size() == 0) {
continue;
}
for (Map.Entry<Long, String> entry : brokerAddrs.entrySet()) {
if (brokerAddr.equals(entry.getValue()) && MixAll.MASTER_ID == entry.getKey()) {
brokerName = data.getBrokerName();
break;
}
}
if (brokerName != null) {
break;
}
}
if (brokerName != null) {
List<QueueData> queueDataList = topicRouteData.getQueueDatas();
assert queueDataList != null && queueDataList.size() > 0;
int oldPerm = 0;
for (QueueData data : queueDataList) {
if (brokerName.equals(data.getBrokerName())) {
oldPerm = data.getPerm();
if (perm == oldPerm) {
System.out.printf("new perm equals to the old one!%n");
return;
}
break;
}
}
defaultMQAdminExt.createAndUpdateTopicConfig(brokerAddr, topicConfig);
System.out.printf("update topic perm from %s to %s in %s success.%n", oldPerm, perm, brokerAddr);
System.out.printf("%s.%n", topicConfig);
return;
} else {
System.out.printf("updateTopicPerm error broker not exit or broker is not master!.%n");
return;
}
} else if (commandLine.hasOption('c')) {
String clusterName = commandLine.getOptionValue('c').trim();
Set<String> masterSet =
CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName);
for (String addr : masterSet) {
defaultMQAdminExt.createAndUpdateTopicConfig(addr, topicConfig);
System.out.printf("update topic perm from %s to %s in %s success.%n", queueData.getPerm(), perm, addr);
}
return;
}
ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
} | @Test
public void testExecute() {
UpdateTopicPermSubCommand cmd = new UpdateTopicPermSubCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-b 127.0.0.1:10911", "-c default-cluster", "-t unit-test", "-p 6"};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
assertThat(commandLine.getOptionValue('b').trim()).isEqualTo("127.0.0.1:10911");
assertThat(commandLine.getOptionValue('c').trim()).isEqualTo("default-cluster");
assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
assertThat(commandLine.getOptionValue('p').trim()).isEqualTo("6");
} |
@Override
public String getPrefix() {
return String.format("%s.%s", SpectraProtocol.class.getPackage().getName(), "Spectra");
} | @Test
public void testPrefix() {
assertEquals("ch.cyberduck.core.spectra.Spectra", new SpectraProtocol().getPrefix());
} |
public static UserAgent parse(String userAgentString) {
return UserAgentParser.parse(userAgentString);
} | @Test
public void parseWxworkTest() {
final String uaString = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36 QBCore/4.0.1326.400 QQBrowser/9.0.2524.400 Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36 wxwork/3.1.10 (MicroMessenger/6.2) WindowsWechat";
final UserAgent ua = UserAgentUtil.parse(uaString);
assertEquals("wxwork", ua.getBrowser().toString());
assertEquals("3.1.10", ua.getVersion());
assertEquals("Webkit", ua.getEngine().toString());
assertEquals("537.36", ua.getEngineVersion());
assertEquals("Windows 10 or Windows Server 2016", ua.getOs().toString());
assertEquals("10.0", ua.getOsVersion());
assertEquals("Windows", ua.getPlatform().toString());
assertFalse(ua.isMobile());
} |
@Override
public int hashCode() {
return executionId.hashCode() + executionState.ordinal();
} | @Test
public void testSerialization() {
try {
final ExecutionAttemptID executionId = createExecutionAttemptId();
final ExecutionState state = ExecutionState.DEPLOYING;
final Throwable error = new IOException("fubar");
TaskExecutionState original1 = new TaskExecutionState(executionId, state, error);
TaskExecutionState original2 = new TaskExecutionState(executionId, state);
TaskExecutionState javaSerCopy1 = CommonTestUtils.createCopySerializable(original1);
TaskExecutionState javaSerCopy2 = CommonTestUtils.createCopySerializable(original2);
// equalities
assertEquals(original1, javaSerCopy1);
assertEquals(javaSerCopy1, original1);
assertEquals(original2, javaSerCopy2);
assertEquals(javaSerCopy2, original2);
// hash codes
assertEquals(original1.hashCode(), javaSerCopy1.hashCode());
assertEquals(original2.hashCode(), javaSerCopy2.hashCode());
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
} |
static void setPropertiesForRecipientList(
RecipientList recipientList, CamelContext camelContext, DynamicRouterConfiguration cfg) {
recipientList.setAggregationStrategy(createAggregationStrategy(camelContext, cfg));
recipientList.setParallelProcessing(cfg.isParallelProcessing());
recipientList.setParallelAggregate(cfg.isParallelAggregate());
recipientList.setSynchronous(cfg.isSynchronous());
recipientList.setStreaming(cfg.isStreaming());
recipientList.setShareUnitOfWork(cfg.isShareUnitOfWork());
recipientList.setStopOnException(cfg.isStopOnException());
recipientList.setIgnoreInvalidEndpoints(cfg.isIgnoreInvalidEndpoints());
recipientList.setCacheSize(cfg.getCacheSize());
if (cfg.getOnPrepare() != null) {
recipientList.setOnPrepare(mandatoryLookup(camelContext, cfg.getOnPrepare(), Processor.class));
}
if (cfg.getTimeout() > 0 && !cfg.isParallelProcessing()) {
throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled.");
}
recipientList.setTimeout(cfg.getTimeout());
} | @Test
void testSetPropertiesForRecipientList() {
// Set up mocking
when(mockConfig.isParallelProcessing()).thenReturn(true);
when(mockConfig.isParallelAggregate()).thenReturn(true);
when(mockConfig.isSynchronous()).thenReturn(true);
when(mockConfig.isStreaming()).thenReturn(true);
when(mockConfig.isShareUnitOfWork()).thenReturn(true);
when(mockConfig.isStopOnException()).thenReturn(true);
when(mockConfig.isIgnoreInvalidEndpoints()).thenReturn(true);
when(mockConfig.getCacheSize()).thenReturn(10);
// Invoke the method under test
DynamicRouterRecipientListHelper.setPropertiesForRecipientList(recipientList, camelContext, mockConfig);
// Verify results
verify(recipientList, times(1)).setParallelProcessing(true);
verify(recipientList, times(1)).setParallelAggregate(true);
verify(recipientList, times(1)).setSynchronous(true);
verify(recipientList, times(1)).setStreaming(true);
verify(recipientList, times(1)).setShareUnitOfWork(true);
verify(recipientList, times(1)).setStopOnException(true);
verify(recipientList, times(1)).setIgnoreInvalidEndpoints(true);
verify(recipientList, times(1)).setCacheSize(10);
} |
public static int stringHash(Client client) {
String s = buildUniqueString(client);
if (s == null) {
return 0;
}
return s.hashCode();
} | @Test
void performanceTestOfStringHash() {
long start = System.nanoTime();
for (int i = 0; i < N; i++) {
DistroUtils.stringHash(client1);
}
System.out.printf("Distro Verify Revision Performance: %.2f ivk/ns\n", ((double) System.nanoTime() - start) / N);
} |
@Override
public List<String> readMultiParam(String key) {
String[] values = source.getParameterValues(key);
return values == null ? emptyList() : ImmutableList.copyOf(values);
} | @Test
public void read_multi_param_from_source_with_one_value() {
when(source.getParameterValues("param")).thenReturn(new String[]{"firstValue"});
List<String> result = underTest.readMultiParam("param");
assertThat(result).containsExactly("firstValue");
} |
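A companion sketch for the null branch, where `getParameterValues` reports a missing parameter and the focal method falls back to `emptyList()` (assumed test, reusing the mock fixtures above):

```java
@Test
public void read_multi_param_returns_empty_list_when_parameter_is_absent() {
    // A servlet request returns null for an unknown parameter name;
    // readMultiParam maps that to an empty list instead of null.
    when(source.getParameterValues("param")).thenReturn(null);
    assertThat(underTest.readMultiParam("param")).isEmpty();
}
```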
@Override
public Page<ConfigInfoTagWrapper> findAllConfigInfoTagForDumpAll(final int pageNo, final int pageSize) {
final int startRow = (pageNo - 1) * pageSize;
ConfigInfoTagMapper configInfoTagMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_TAG);
String sqlCountRows = configInfoTagMapper.count(null);
MapperResult sqlFetchRows = configInfoTagMapper.findAllConfigInfoTagForDumpAllFetchRows(
new MapperContext(startRow, pageSize));
PaginationHelper<ConfigInfoTagWrapper> helper = createPaginationHelper();
try {
return helper.fetchPageLimit(sqlCountRows, sqlFetchRows.getSql(), sqlFetchRows.getParamList().toArray(),
pageNo, pageSize, CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER);
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
} | @Test
void testFindAllConfigInfoTagForDumpAll() {
//mock count
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenReturn(308);
List<ConfigInfoTagWrapper> mockTagList = new ArrayList<>();
mockTagList.add(new ConfigInfoTagWrapper());
mockTagList.add(new ConfigInfoTagWrapper());
mockTagList.add(new ConfigInfoTagWrapper());
mockTagList.get(0).setLastModified(System.currentTimeMillis());
mockTagList.get(1).setLastModified(System.currentTimeMillis());
mockTagList.get(2).setLastModified(System.currentTimeMillis());
//mock query list
Mockito.when(jdbcTemplate.query(anyString(), eq(new Object[] {}), eq(CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER))).thenReturn(mockTagList);
int pageNo = 3;
int pageSize = 100;
//execute & verify
Page<ConfigInfoTagWrapper> returnTagPage = externalConfigInfoTagPersistService.findAllConfigInfoTagForDumpAll(pageNo, pageSize);
assertEquals(308, returnTagPage.getTotalCount());
assertEquals(mockTagList, returnTagPage.getPageItems());
//mock count CannotGetJdbcConnectionException
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class)))
.thenThrow(new CannotGetJdbcConnectionException("conn error111"));
//execute & verify
try {
externalConfigInfoTagPersistService.findAllConfigInfoTagForDumpAll(pageNo, pageSize);
assertTrue(false);
} catch (Exception e) {
assertEquals("conn error111", e.getMessage());
}
} |
public HollowOrdinalIterator findKeysWithPrefix(String prefix) {
TST current;
HollowOrdinalIterator it;
do {
current = prefixIndexVolatile;
it = current.findKeysWithPrefix(prefix);
} while (current != this.prefixIndexVolatile);
return it;
} | @Test
public void testMovieActorReference() throws Exception {
List<Actor> actors = Arrays.asList(new Actor("Keanu Reeves"), new Actor("Laurence Fishburne"), new Actor("Carrie-Anne Moss"));
MovieActorReference movieSetReference = new MovieActorReference(1, 1999, "The Matrix", actors);
objectMapper.add(movieSetReference);
StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
HollowPrefixIndex prefixIndex = new HollowPrefixIndex(readStateEngine, "MovieActorReference", "actors.element");
Set<Integer> ordinals = toSet(prefixIndex.findKeysWithPrefix("kea"));
Assert.assertTrue(ordinals.size() == 1);
} |
@Override
public KeyValueIterator<Windowed<K>, V> fetchAll(final Instant timeFrom,
final Instant timeTo) throws IllegalArgumentException {
return new KeyValueIteratorFacade<>(inner.fetchAll(timeFrom, timeTo));
} | @Test
public void shouldReturnPlainKeyValuePairsOnFetchAllLongParameters() {
when(mockedKeyValueWindowTimestampIterator.next())
.thenReturn(KeyValue.pair(
new Windowed<>("key1", new TimeWindow(21L, 22L)),
ValueAndTimestamp.make("value1", 22L)))
.thenReturn(KeyValue.pair(
new Windowed<>("key2", new TimeWindow(42L, 43L)),
ValueAndTimestamp.make("value2", 100L)));
when(mockedWindowTimestampStore.fetchAll(Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L)))
.thenReturn(mockedKeyValueWindowTimestampIterator);
final KeyValueIterator<Windowed<String>, String> iterator =
readOnlyWindowStoreFacade.fetchAll(Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L));
assertThat(iterator.next(), is(KeyValue.pair(new Windowed<>("key1", new TimeWindow(21L, 22L)), "value1")));
assertThat(iterator.next(), is(KeyValue.pair(new Windowed<>("key2", new TimeWindow(42L, 43L)), "value2")));
} |
public synchronized ResultSet fetchResults(FetchOrientation orientation, int maxFetchSize) {
long token;
switch (orientation) {
case FETCH_NEXT:
token = currentToken;
break;
case FETCH_PRIOR:
token = currentToken - 1;
break;
default:
throw new UnsupportedOperationException(
String.format("Unknown fetch orientation: %s.", orientation));
}
if (orientation == FetchOrientation.FETCH_NEXT && bufferedResults.isEmpty()) {
// make sure data is available in the buffer
resultStore.waitUntilHasData();
}
return fetchResults(token, maxFetchSize);
} | @Test
void testFetchIllegalToken() {
ResultFetcher fetcher =
buildResultFetcher(Collections.singletonList(data.iterator()), data.size());
assertThatThrownBy(() -> fetcher.fetchResults(2, Integer.MAX_VALUE))
.satisfies(FlinkAssertions.anyCauseMatches("Expecting token to be 0, but found 2"));
} |
public static <InputT> GloballyDistinct<InputT> globally() {
return GloballyDistinct.<InputT>builder().build();
} | @Test
public void smallCardinality() {
final int smallCard = 1000;
final int p = 6;
final double expectedErr = 1.104 / Math.sqrt(p);
List<Integer> small = new ArrayList<>();
for (int i = 0; i < smallCard; i++) {
small.add(i);
}
PCollection<Long> cardinality =
tp.apply("small stream", Create.of(small))
.apply("small cardinality", ApproximateDistinct.<Integer>globally().withPrecision(p));
PAssert.that("Not Accurate Enough", cardinality)
.satisfies(new VerifyAccuracy(smallCard, expectedErr));
tp.run();
} |
public FEELFnResult<TemporalAmount> invoke(@ParameterName("from") Temporal from, @ParameterName("to") Temporal to) {
if ( from == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
}
if ( to == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "cannot be null"));
}
final LocalDate fromDate = getLocalDateFromTemporal(from);
if (fromDate == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "is of type not suitable for years and months function"));
}
final LocalDate toDate = getLocalDateFromTemporal(to);
if (toDate == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "is of type not suitable for years and months function"));
}
return FEELFnResult.ofResult(new ComparablePeriod(Period.between(fromDate, toDate).withDays(0)));
} | @Test
void invokeLocalDateLocalDate() {
FunctionTestUtil.assertResult(
yamFunction.invoke(
LocalDate.of(2017, 6, 12),
LocalDate.of(2020, 7, 13)),
ComparablePeriod.of(3, 1, 0));
} |
@Override
public void stop() {
try {
// Removing the worker UUIDs
getClusteredWorkerUUIDs().remove(hazelcastMember.getUuid());
} catch (HazelcastInstanceNotActiveException | RetryableHazelcastException e) {
LOGGER.debug("Hazelcast is not active anymore", e);
}
} | @Test
public void stop_whenThrowHazelcastInactiveException_shouldSilenceError() {
logTester.setLevel(Level.DEBUG);
when(hzClientWrapper.getReplicatedMap(any())).thenThrow(new HazelcastInstanceNotActiveException("Hazelcast is not active"));
CeDistributedInformationImpl ceDistributedInformation = new CeDistributedInformationImpl(hzClientWrapper, mock(CeWorkerFactory.class));
ceDistributedInformation.stop();
assertThat(logTester.logs(Level.DEBUG)).contains("Hazelcast is not active anymore");
assertThat(logTester.logs(Level.ERROR)).isEmpty();
} |
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide if to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
// results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
} | @Test
public void testConvertersThatReturnNullValue() throws Exception {
final Converter converter = new TestConverter.Builder()
.callback(new Function<Object, Object>() {
@Nullable
@Override
public Object apply(Object input) {
return null;
}
})
.build();
final TestExtractor extractor = new TestExtractor.Builder()
.converters(Lists.newArrayList(converter))
.callback(new Callable<Result[]>() {
@Override
public Result[] call() throws Exception {
return new Result[] {
new Result("1", -1, -1)
};
}
})
.build();
final Message msg = createMessage("message");
extractor.runExtractor(msg);
assertThat(msg.getField("target")).isNull();
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitStringStringFalseDoubledSplitChar() throws Exception {
assertThat(JOrphanUtils.split("a;;b;;;;;;d;;e;;;;f", ";;", false),
CoreMatchers.equalTo(new String[]{"a", "b", "", "", "d", "e", "", "f"}));
} |
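With `truncate=true`, the focal method first collapses doubled splitters and strips leading/trailing ones, so no empty elements survive. A hedged companion example, derived by tracing the method rather than taken from the suite:

```java
@Test
public void testSplitStringStringTrueDoubledSplitChar() throws Exception {
    // ";;a;;;;b;;" -> collapse ";;;;" to ";;", drop the leading and trailing ";;",
    // then split: only the non-empty elements remain.
    assertThat(JOrphanUtils.split(";;a;;;;b;;", ";;", true),
            CoreMatchers.equalTo(new String[]{"a", "b"}));
}
```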
public Json removePadding(String padding) {
String text = getFirstSourceText();
XTokenQueue tokenQueue = new XTokenQueue(text);
tokenQueue.consumeWhitespace();
tokenQueue.consume(padding);
tokenQueue.consumeWhitespace();
String chompBalanced = tokenQueue.chompBalancedNotInQuotes('(', ')');
return new Json(chompBalanced);
} | @Test
public void testRemovePadding() throws Exception {
String name = new Json(text).removePadding("callback").jsonPath("$.name").get();
assertThat(name).isEqualTo("json");
} |
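The `text` fixture is not shown above; a self-contained sketch with a hypothetical JSONP payload illustrates the mechanics of `removePadding` (consume the padding token, then keep the balanced parentheses content):

```java
@Test
public void testRemovePaddingInlineFixture() throws Exception {
    // Hypothetical input: "callback" is consumed as padding, the balanced (...) body remains.
    String jsonp = "callback({\"name\":\"json\"})";
    String name = new Json(jsonp).removePadding("callback").jsonPath("$.name").get();
    assertThat(name).isEqualTo("json");
}
```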
@Override
public O apply(final E e) {
if (o != null) {
return o;
}
return init(e);
} | @Test
public void testApply() {
Function<String, String> function = Function.identity();
FreshBeanHolder<String, String> freshBeanHolder = new FreshBeanHolder<>(function);
assertEquals("hello", freshBeanHolder.apply("hello"));
} |
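The null-check in `apply` suggests `init(e)` memoizes its result into the field `o`; under that assumption (flagged again in the comment below), a second call would return the cached value and ignore its argument:

```java
@Test
public void testApplyReturnsCachedValue() {
    // Assumption: init(e) stores the computed bean in `o`, so the second call
    // short-circuits on the null-check instead of recomputing.
    FreshBeanHolder<String, String> holder = new FreshBeanHolder<>(Function.identity());
    assertEquals("first", holder.apply("first"));
    assertEquals("first", holder.apply("second"));
}
```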
public Config setProperty(@Nonnull String name, @Nonnull String value) {
if (isNullOrEmptyAfterTrim(name)) {
throw new IllegalArgumentException("argument 'name' can't be null or empty");
}
isNotNull(value, "value");
properties.setProperty(name, value);
return this;
} | @Test(expected = IllegalArgumentException.class)
public void testSetConfigPropertyNameEmpty() {
config.setProperty(" ", "test");
} |
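For contrast, a minimal happy-path sketch (assuming the usual Hazelcast `Config#getProperties` accessor; not from the original suite):

```java
@Test
public void testSetConfigProperty() {
    // setProperty returns the Config itself, so calls can be chained.
    config.setProperty("custom.key", "custom.value")
          .setProperty("another.key", "42");
    assertEquals("custom.value", config.getProperties().getProperty("custom.key"));
}
```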
@Override
protected JobConfigInfo handleRequest(
HandlerRequest<EmptyRequestBody> request, AccessExecutionGraph executionGraph) {
return createJobConfigInfo(executionGraph);
} | @Test
void handleRequest_executionConfigWithSecretValues_excludesSecretValuesFromResponse()
throws HandlerRequestException {
final JobConfigHandler jobConfigHandler =
new JobConfigHandler(
() -> null,
TestingUtils.TIMEOUT,
Collections.emptyMap(),
JobConfigHeaders.getInstance(),
new DefaultExecutionGraphCache(TestingUtils.TIMEOUT, TestingUtils.TIMEOUT),
Executors.directExecutor());
final Map<String, String> globalJobParameters = new HashMap<>();
globalJobParameters.put("foobar", "barfoo");
globalJobParameters.put("bar.secret.foo", "my secret");
globalJobParameters.put("password.to.my.safe", "12345");
final ArchivedExecutionConfig archivedExecutionConfig =
new ArchivedExecutionConfigBuilder()
.setGlobalJobParameters(globalJobParameters)
.build();
final AccessExecutionGraph archivedExecutionGraph =
new ArchivedExecutionGraphBuilder()
.setArchivedExecutionConfig(archivedExecutionConfig)
.build();
final HandlerRequest<EmptyRequestBody> handlerRequest =
createRequest(archivedExecutionGraph.getJobID());
final JobConfigInfo jobConfigInfoResponse =
jobConfigHandler.handleRequest(handlerRequest, archivedExecutionGraph);
final Map<String, String> filteredGlobalJobParameters =
filterSecretValues(globalJobParameters);
assertThat(jobConfigInfoResponse.getExecutionConfigInfo().getGlobalJobParameters())
.isEqualTo(filteredGlobalJobParameters);
} |
public Result parse(final String string) throws DateNotParsableException {
return this.parse(string, new Date());
} | @Test
public void testAntarcticaTZ() throws Exception {
NaturalDateParser.Result today = naturalDateParserAntarctica.parse("today");
assertThat(today.getFrom()).as("From should not be null").isNotNull();
assertThat(today.getTo()).as("To should not be null").isNotNull();
assertThat(today.getDateTimeZone().getID()).as("should have the Antarctica/Palmer as Timezone").isEqualTo("Antarctica/Palmer");
} |
public int getWorkerId() {
return instance.getWorkerId();
} | @Test
void assertGetWorkerId() {
ComputeNodeInstance computeNodeInstance = mock(ComputeNodeInstance.class);
when(computeNodeInstance.getWorkerId()).thenReturn(0);
ComputeNodeInstanceContext context = new ComputeNodeInstanceContext(computeNodeInstance, mock(WorkerIdGenerator.class), modeConfig, lockContext, eventBusContext);
assertThat(context.getWorkerId(), is(0));
} |
@Override
public void run() {
try {
Date now = new Date();
LOG.info("SubClusterCleaner at {}", now);
Map<SubClusterId, SubClusterInfo> infoMap =
this.gpgContext.getStateStoreFacade().getSubClusters(false, true);
// Iterate over each sub cluster and check last heartbeat
for (Map.Entry<SubClusterId, SubClusterInfo> entry : infoMap.entrySet()) {
SubClusterInfo subClusterInfo = entry.getValue();
Date lastHeartBeat = new Date(subClusterInfo.getLastHeartBeat());
if (LOG.isDebugEnabled()) {
LOG.debug("Checking subcluster {} in state {}, last heartbeat at {}",
subClusterInfo.getSubClusterId(), subClusterInfo.getState(),
lastHeartBeat);
}
if (subClusterInfo.getState().isUsable()) {
long timeUntilDeregister = this.heartbeatExpirationMillis
- (now.getTime() - lastHeartBeat.getTime());
// Deregister sub-cluster as SC_LOST if last heartbeat too old
if (timeUntilDeregister < 0) {
LOG.warn(
"Deregistering subcluster {} in state {} last heartbeat at {}",
subClusterInfo.getSubClusterId(), subClusterInfo.getState(),
new Date(subClusterInfo.getLastHeartBeat()));
try {
this.gpgContext.getStateStoreFacade().deregisterSubCluster(
subClusterInfo.getSubClusterId(), SubClusterState.SC_LOST);
} catch (Exception e) {
LOG.error("deregisterSubCluster failed on subcluster "
+ subClusterInfo.getSubClusterId(), e);
}
} else if (LOG.isDebugEnabled()) {
LOG.debug("Time until deregister for subcluster {}: {}",
entry.getKey(),
DurationFormatUtils.formatDurationISO(timeUntilDeregister));
}
}
}
} catch (Throwable e) {
LOG.error("Subcluster cleaner fails: ", e);
}
} | @Test
public void testSubClusterRegisterHeartBeatTime() throws YarnException {
cleaner.run();
Assert.assertEquals(3, facade.getSubClusters(true, true).size());
} |
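A tiny worked example of the deregistration arithmetic in the focal method, using hypothetical numbers:

```java
public class DeregisterMathExample {
    public static void main(String[] args) {
        // Hypothetical values: a 30-minute expiration window, last heartbeat 31 minutes ago.
        long now = System.currentTimeMillis();
        long heartbeatExpirationMillis = 30 * 60 * 1000L;
        long lastHeartBeat = now - 31 * 60 * 1000L;
        long timeUntilDeregister = heartbeatExpirationMillis - (now - lastHeartBeat);
        // Prints -60000: one minute past the window, so the subcluster would be
        // deregistered as SC_LOST by the cleaner.
        System.out.println(timeUntilDeregister);
    }
}
```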
public static YarnApplicationAttemptState convertRmAppAttemptStateToYarnApplicationAttemptState(
RMAppAttemptState currentState,
RMAppAttemptState previousState
) {
return createApplicationAttemptState(
currentState == RMAppAttemptState.FINAL_SAVING
? previousState
: currentState
);
} | @Test
public void testConvertRmAppAttemptStateToYarnApplicationAttemptState() {
Assert.assertEquals(
YarnApplicationAttemptState.FAILED,
RMServerUtils.convertRmAppAttemptStateToYarnApplicationAttemptState(
RMAppAttemptState.FINAL_SAVING,
RMAppAttemptState.FAILED
)
);
Assert.assertEquals(
YarnApplicationAttemptState.SCHEDULED,
RMServerUtils.convertRmAppAttemptStateToYarnApplicationAttemptState(
RMAppAttemptState.FINAL_SAVING,
RMAppAttemptState.SCHEDULED
)
);
Assert.assertEquals(
YarnApplicationAttemptState.NEW,
RMServerUtils.convertRmAppAttemptStateToYarnApplicationAttemptState(
RMAppAttemptState.NEW,
null
)
);
} |
@Override
public Database getDb(String name) {
try {
return new Database(ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt(), name);
} catch (StarRocksConnectorException e) {
e.printStackTrace();
return null;
}
} | @Test
public void testGetDb() {
Database database = odpsMetadata.getDb("project");
Assert.assertNotNull(database);
Assert.assertEquals(database.getFullName(), "project");
} |
@Override
public void start() {
this.all = registry.meter(name(getName(), "all"));
this.trace = registry.meter(name(getName(), "trace"));
this.debug = registry.meter(name(getName(), "debug"));
this.info = registry.meter(name(getName(), "info"));
this.warn = registry.meter(name(getName(), "warn"));
this.error = registry.meter(name(getName(), "error"));
super.start();
} | @Test
public void usesSharedRegistries() {
String registryName = "registry";
SharedMetricRegistries.add(registryName, registry);
final InstrumentedAppender shared = new InstrumentedAppender(registryName);
shared.start();
when(event.getLevel()).thenReturn(Level.INFO);
shared.doAppend(event);
assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount())
.isEqualTo(1);
} |
public static Autoscaling empty() {
return empty("");
} | @Test
public void test_autoscaling_in_dev_with_required_resources_preprovisioned() {
var requiredCapacity =
Capacity.from(new ClusterResources(2, 1,
new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any)),
new ClusterResources(20, 1,
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)),
IntRange.empty(),
true,
true,
Optional.empty(),
ClusterInfo.empty());
var fixture = DynamicProvisioningTester.fixture()
.hostCount(5)
.capacity(requiredCapacity)
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0, 0, 0), 200);
fixture.tester().assertResources("We scale even in dev because resources are 'required'",
3, 1, 1.0, 13.4, 62.5,
fixture.autoscale());
} |
public MastershipInfo mastershipInfo() {
return mastershipInfo;
} | @Test
public void checkConstruction() {
assertThat(event1.type(), is(MastershipEvent.Type.BACKUPS_CHANGED));
assertThat(event1.subject(), is(deviceId1));
assertThat(event1.mastershipInfo(), is(mastershipInfo1));
assertThat(event4.time(), is(time));
assertThat(event4.type(), is(MastershipEvent.Type.MASTER_CHANGED));
assertThat(event4.subject(), is(deviceId1));
assertThat(event4.mastershipInfo(), is(mastershipInfo2));
} |
public SerializableFunction<Row, T> getFromRowFunction() {
return fromRowFunction;
} | @Test
public void testNullMapRowToProto() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(MapPrimitive.getDescriptor());
SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction();
MapPrimitive proto =
parseFrom(fromRow.apply(NULL_MAP_PRIMITIVE_ROW).toString(), MapPrimitive.newBuilder())
.build();
assertEquals(NULL_MAP_PRIMITIVE_PROTO, proto);
} |
public static synchronized void w(final String tag, String text, Object... args) {
if (msLogger.supportsW()) {
String msg = getFormattedString(text, args);
msLogger.w(tag, msg);
addLog(LVL_W, tag, msg);
}
} | @Test
public void testW() throws Exception {
Logger.w("mTag", "Text with %d digits", 0);
Mockito.verify(mMockLog).w("mTag", "Text with 0 digits");
Logger.w("mTag", "Text with no digits");
Mockito.verify(mMockLog).w("mTag", "Text with no digits");
} |
@DeleteMapping("/token")
@PermitAll
@Operation(summary = "删除访问令牌")
@Parameter(name = "token", required = true, description = "访问令牌", example = "biu")
public CommonResult<Boolean> revokeToken(HttpServletRequest request,
@RequestParam("token") String token) {
// Validate the client
String[] clientIdAndSecret = obtainBasicAuthorization(request);
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientIdAndSecret[0], clientIdAndSecret[1],
null, null, null);
// Revoke the access token
return success(oauth2GrantService.revokeToken(client.getClientId(), token));
} | @Test
public void testRevokeToken() {
// Prepare parameters
HttpServletRequest request = mockRequest("demo_client_id", "demo_client_secret");
String token = randomString();
// Mock the client lookup
OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("demo_client_id");
when(oauth2ClientService.validOAuthClientFromCache(eq("demo_client_id"),
eq("demo_client_secret"), isNull(), isNull(), isNull())).thenReturn(client);
// Mock the revocation
when(oauth2GrantService.revokeToken(eq("demo_client_id"), eq(token))).thenReturn(true);
// Invoke
CommonResult<Boolean> result = oauth2OpenController.revokeToken(request, token);
// Assert
assertEquals(0, result.getCode());
assertTrue(result.getData());
} |
@Override
public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc,
boolean addFieldName, boolean addCr ){
return fallback.getFieldDefinition( v, tk, pk, useAutoinc, addFieldName, addCr );
} | @Test
public void testStringFieldDef() throws Exception {
AthenaDatabaseMeta dbricks = new AthenaDatabaseMeta();
String fieldDef = dbricks.getFieldDefinition( new ValueMetaString( "name" ), null, null, false, false, false );
assertEquals( "VARCHAR()", fieldDef );
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void filterExpression() {
String inputExpression = "[ {x:1, y:2}, {x:2, y:3} ][ x=1 ]";
BaseNode filterBase = parse( inputExpression );
assertThat( filterBase).isInstanceOf(FilterExpressionNode.class);
assertThat( filterBase.getText()).isEqualTo(inputExpression);
FilterExpressionNode filter = (FilterExpressionNode) filterBase;
assertThat( filter.getExpression()).isInstanceOf(ListNode.class);
assertThat( filter.getExpression().getText()).isEqualTo( "{x:1, y:2}, {x:2, y:3}");
assertThat( filter.getFilter()).isInstanceOf(InfixOpNode.class);
assertThat( filter.getFilter().getText()).isEqualTo( "x=1");
} |
public static boolean isBeanPropertyReadMethod(Method method) {
return method != null
&& Modifier.isPublic(method.getModifiers())
&& !Modifier.isStatic(method.getModifiers())
&& method.getReturnType() != void.class
&& method.getDeclaringClass() != Object.class
&& method.getParameterTypes().length == 0
&& ((method.getName().startsWith("get") && method.getName().length() > 3)
|| (method.getName().startsWith("is")
&& method.getName().length() > 2));
} | @Test
void testIsBeanPropertyReadMethod() throws Exception {
Method method = EmptyClass.class.getMethod("getProperty");
assertTrue(ReflectUtils.isBeanPropertyReadMethod(method));
method = EmptyClass.class.getMethod("getProperties");
assertFalse(ReflectUtils.isBeanPropertyReadMethod(method));
method = EmptyClass.class.getMethod("isProperty");
assertFalse(ReflectUtils.isBeanPropertyReadMethod(method));
method = EmptyClass.class.getMethod("getPropertyIndex", int.class);
assertFalse(ReflectUtils.isBeanPropertyReadMethod(method));
} |
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
final Fetch<K, V> fetch = Fetch.empty();
final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
int recordsRemaining = fetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final CompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null)
break;
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
// Remove a completedFetch upon a parse with exception if (1) it contains no records, and
// (2) there are no fetched records with actual content preceding this exception.
// The first condition ensures that the completedFetches is not stuck with the same completedFetch
// in cases such as the TopicAuthorizationException, and the second condition ensures that there is
// no potential data loss due to an exception in a following record.
if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
fetchBuffer.poll();
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else if (subscriptions.isPaused(nextInLineFetch.partition)) {
// when the partition is paused we add the records back to the completedFetches queue instead of draining
// them so that they can be returned on a subsequent poll if the partition is resumed at that time
log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
pausedCompletedFetches.add(nextInLineFetch);
fetchBuffer.setNextInLineFetch(null);
} else {
final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
recordsRemaining -= nextFetch.numRecords();
fetch.add(nextFetch);
}
}
} catch (KafkaException e) {
if (fetch.isEmpty())
throw e;
} finally {
// add any polled completed fetches for paused partitions back to the completed fetches queue to be
// re-evaluated in the next poll
fetchBuffer.addAll(pausedCompletedFetches);
}
return fetch;
} | @Test
public void testFetchNormal() {
int recordCount = DEFAULT_MAX_POLL_RECORDS;
buildDependencies();
assignAndSeek(topicAPartition0);
CompletedFetch completedFetch = completedFetchBuilder
.recordCount(recordCount)
.build();
// Validate that the buffer is empty until after we add the fetch data.
assertTrue(fetchBuffer.isEmpty());
fetchBuffer.add(completedFetch);
assertFalse(fetchBuffer.isEmpty());
// Validate that the completed fetch isn't initialized just because we add it to the buffer.
assertFalse(completedFetch.isInitialized());
// Fetch the data and validate that we get all the records we want back.
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
assertFalse(fetch.isEmpty());
assertEquals(recordCount, fetch.numRecords());
// When we collected the data from the buffer, this will cause the completed fetch to get initialized.
assertTrue(completedFetch.isInitialized());
// However, even though we've collected the data, it isn't (completely) consumed yet.
assertFalse(completedFetch.isConsumed());
// The buffer is now considered "empty" because our queue is empty.
assertTrue(fetchBuffer.isEmpty());
assertNull(fetchBuffer.peek());
assertNull(fetchBuffer.poll());
// However, while the queue is "empty", the next-in-line fetch is actually still in the buffer.
assertNotNull(fetchBuffer.nextInLineFetch());
// Validate that the next fetch position has been updated to point to the record after our last fetched
// record.
SubscriptionState.FetchPosition position = subscriptions.position(topicAPartition0);
assertEquals(recordCount, position.offset);
// Now attempt to collect more records from the fetch buffer.
fetch = fetchCollector.collectFetch(fetchBuffer);
// The Fetch object is non-null, but it's empty.
assertEquals(0, fetch.numRecords());
assertTrue(fetch.isEmpty());
// However, once we read *past* the end of the records in the CompletedFetch, then we will call
// drain on it, and it will be considered all consumed.
assertTrue(completedFetch.isConsumed());
} |
@Override
public boolean registry(ServiceInstance serviceInstance) {
checkDiscoveryState();
if (getZkClient().isStateOk()) {
return registrySync(serviceInstance);
}
return registryAsync(serviceInstance);
} | @Test
public void registry() throws Exception {
final DefaultServiceInstance instance = new DefaultServiceInstance("localhost", "127.0.0.1", 8080,
Collections.emptyMap(), serviceName);
final boolean result = zkDiscoveryClient.registry(instance);
Assert.assertTrue(result);
Mockito.verify(serviceDiscovery, Mockito.times(1)).registerService(Mockito.any());
} |
public static boolean checkRegexResourceField(AbstractRule rule) {
if (!rule.isRegex()) {
return true;
}
String resourceName = rule.getResource();
try {
Pattern.compile(resourceName);
return true;
} catch (Exception e) {
return false;
}
} | @Test
public void testValidRegexRule() {
// Setup
FlowRule flowRule = new FlowRule();
flowRule.setRegex(true);
flowRule.setResource("{}");
// Run the test and verify
Assert.assertFalse(RuleManager.checkRegexResourceField(flowRule));
flowRule.setResource(".*");
// Run the test and verify
Assert.assertTrue(RuleManager.checkRegexResourceField(flowRule));
} |
public PutMessageResult putMessage(MessageExtBrokerInner messageExt) {
BrokerController masterBroker = this.brokerController.peekMasterBroker();
if (masterBroker != null) {
return masterBroker.getMessageStore().putMessage(messageExt);
} else if (this.brokerController.getBrokerConfig().isEnableSlaveActingMaster()
&& this.brokerController.getBrokerConfig().isEnableRemoteEscape()) {
try {
messageExt.setWaitStoreMsgOK(false);
final SendResult sendResult = putMessageToRemoteBroker(messageExt, null);
return transformSendResult2PutResult(sendResult);
} catch (Exception e) {
LOG.error("sendMessageInFailover to remote failed", e);
return new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true);
}
} else {
LOG.warn("Put message failed, enableSlaveActingMaster={}, enableRemoteEscape={}.",
this.brokerController.getBrokerConfig().isEnableSlaveActingMaster(), this.brokerController.getBrokerConfig().isEnableRemoteEscape());
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
}
} | @Test
public void putMessageTest() {
messageExtBrokerInner.setTopic(TEST_TOPIC);
messageExtBrokerInner.setQueueId(DEFAULT_QUEUE_ID);
messageExtBrokerInner.setBody("Hello World".getBytes(StandardCharsets.UTF_8));
// masterBroker is null
final PutMessageResult result1 = escapeBridge.putMessage(messageExtBrokerInner);
assert result1 != null;
assert PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL.equals(result1.getPutMessageStatus());
// masterBroker is not null
messageExtBrokerInner.setBody("Hello World2".getBytes(StandardCharsets.UTF_8));
when(brokerController.peekMasterBroker()).thenReturn(brokerController);
Assertions.assertThatCode(() -> escapeBridge.putMessage(messageExtBrokerInner)).doesNotThrowAnyException();
when(brokerController.peekMasterBroker()).thenReturn(null);
final PutMessageResult result3 = escapeBridge.putMessage(messageExtBrokerInner);
assert result3 != null;
assert PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL.equals(result3.getPutMessageStatus());
} |
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
checkMaybeCompatible(source, target);
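// An optional source projected onto a required target can only succeed if the target supplies a default value to stand in for null records.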
if (source.isOptional() && !target.isOptional()) {
if (target.defaultValue() != null) {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return target.defaultValue();
}
} else {
throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
}
} else {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return null;
}
}
} | @Test
public void testNestedSchemaProjection() {
Schema sourceFlatSchema = SchemaBuilder.struct()
.field("field", Schema.INT32_SCHEMA)
.build();
Schema targetFlatSchema = SchemaBuilder.struct()
.field("field", Schema.INT32_SCHEMA)
.field("field2", SchemaBuilder.int32().defaultValue(123).build())
.build();
Schema sourceNestedSchema = SchemaBuilder.struct()
.field("first", Schema.INT32_SCHEMA)
.field("second", Schema.STRING_SCHEMA)
.field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build())
.field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build())
.field("nested", sourceFlatSchema)
.build();
Schema targetNestedSchema = SchemaBuilder.struct()
.field("first", Schema.INT32_SCHEMA)
.field("second", Schema.STRING_SCHEMA)
.field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build())
.field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build())
.field("nested", targetFlatSchema)
.build();
Struct sourceFlatStruct = new Struct(sourceFlatSchema);
sourceFlatStruct.put("field", 113);
Struct sourceNestedStruct = new Struct(sourceNestedSchema);
sourceNestedStruct.put("first", 1);
sourceNestedStruct.put("second", "abc");
sourceNestedStruct.put("array", Arrays.asList(1, 2));
sourceNestedStruct.put("map", Collections.singletonMap(5, "def"));
sourceNestedStruct.put("nested", sourceFlatStruct);
Struct targetNestedStruct = (Struct) SchemaProjector.project(sourceNestedSchema, sourceNestedStruct,
targetNestedSchema);
assertEquals(1, targetNestedStruct.get("first"));
assertEquals("abc", targetNestedStruct.get("second"));
assertEquals(Arrays.asList(1, 2), targetNestedStruct.get("array"));
assertEquals(Collections.singletonMap(5, "def"), targetNestedStruct.get("map"));
Struct projectedStruct = (Struct) targetNestedStruct.get("nested");
assertEquals(113, projectedStruct.get("field"));
assertEquals(123, projectedStruct.get("field2"));
} |
public SerializableFunction<T, Row> getToRowFunction() {
return toRowFunction;
} | @Test
public void testOneOfProtoToRow() throws InvalidProtocolBufferException {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(OneOf.getDescriptor());
SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
// Equality doesn't hold between dynamic messages and generated messages,
// so we compare string representations instead
assertEquals(ONEOF_ROW_INT32.toString(), toRow.apply(toDynamic(ONEOF_PROTO_INT32)).toString());
assertEquals(ONEOF_ROW_BOOL.toString(), toRow.apply(toDynamic(ONEOF_PROTO_BOOL)).toString());
assertEquals(
ONEOF_ROW_STRING.toString(), toRow.apply(toDynamic(ONEOF_PROTO_STRING)).toString());
assertEquals(
ONEOF_ROW_PRIMITIVE.toString(), toRow.apply(toDynamic(ONEOF_PROTO_PRIMITIVE)).toString());
} |
@Override
public PageResult<ProductSpuDO> getSpuPage(ProductSpuPageReqVO pageReqVO) {
return productSpuMapper.selectPage(pageReqVO);
} | @Test
void getSpuPage_alarmStock() {
// Prepare parameters
ArrayList<ProductSpuDO> createReqVOs = Lists.newArrayList(randomPojo(ProductSpuDO.class,o->{
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setDeliveryTemplateId(generateId());
o.setSort(RandomUtil.randomInt(1,100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
o.setPrice(generaInt()); // constrain to a positive integer
o.setMarketPrice(generaInt()); // constrain to a positive integer
o.setCostPrice(generaInt()); // constrain to a positive integer
o.setStock(5); // constrain to a positive integer
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setSalesCount(generaInt()); // constrain to a positive integer
o.setBrowseCount(generaInt()); // constrain to a positive integer
}), randomPojo(ProductSpuDO.class,o->{
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setDeliveryTemplateId(generateId());
o.setSort(RandomUtil.randomInt(1,100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
o.setPrice(generaInt()); // constrain to a positive integer
o.setMarketPrice(generaInt()); // constrain to a positive integer
o.setCostPrice(generaInt()); // constrain to a positive integer
o.setStock(9); // constrain to a positive integer
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setSalesCount(generaInt()); // constrain to a positive integer
o.setBrowseCount(generaInt()); // constrain to a positive integer
}));
productSpuMapper.insertBatch(createReqVOs);
// Invoke
ProductSpuPageReqVO productSpuPageReqVO = new ProductSpuPageReqVO();
productSpuPageReqVO.setTabType(ProductSpuPageReqVO.ALERT_STOCK);
PageResult<ProductSpuDO> spuPage = productSpuService.getSpuPage(productSpuPageReqVO);
assertEquals(createReqVOs.size(), spuPage.getTotal());
} |
@DELETE
@Consumes(MediaType.APPLICATION_JSON)
@Path("{component}")
public Response unsetConfigs(@PathParam("component") String component,
InputStream request) throws IOException {
ComponentConfigService service = get(ComponentConfigService.class);
ObjectNode props = readTreeFromStream(mapper(), request);
props.fieldNames().forEachRemaining(k -> service.unsetProperty(component, k));
return Response.noContent().build();
} | @Test
public void unsetConfigs() {
WebTarget wt = target();
try {
// TODO: this needs to be revised later. Do we really need to
// include a body in the DELETE request? Why not just use PUT then?
wt.path("configuration/foo").request().delete();
} catch (BadRequestException e) {
assertEquals("incorrect key", "foo", service.component);
assertEquals("incorrect key", "k", service.name);
assertEquals("incorrect value", null, service.value);
}
} |
static AllManifestsTable.ManifestListReadTask fromJson(JsonNode jsonNode) {
Preconditions.checkArgument(jsonNode != null, "Invalid JSON node for manifest task: null");
Preconditions.checkArgument(
jsonNode.isObject(), "Invalid JSON node for manifest task: non-object (%s)", jsonNode);
Schema dataTableSchema = SchemaParser.fromJson(JsonUtil.get(DATA_TABLE_SCHEMA, jsonNode));
FileIO fileIO = FileIOParser.fromJson(JsonUtil.get(FILE_IO, jsonNode), null);
Schema schema = SchemaParser.fromJson(JsonUtil.get(SCHEMA, jsonNode));
JsonNode specsArray = JsonUtil.get(SPECS, jsonNode);
Preconditions.checkArgument(
specsArray.isArray(), "Invalid JSON node for partition specs: non-array (%s)", specsArray);
ImmutableList.Builder<PartitionSpec> specsBuilder = ImmutableList.builder();
for (JsonNode specNode : specsArray) {
PartitionSpec spec = PartitionSpecParser.fromJson(dataTableSchema, specNode);
specsBuilder.add(spec);
}
Map<Integer, PartitionSpec> specsById = PartitionUtil.indexSpecs(specsBuilder.build());
String manifestListLocation = JsonUtil.getString(MANIFEST_LIST_LOCATION, jsonNode);
Expression residualFilter = ExpressionParser.fromJson(JsonUtil.get(RESIDUAL, jsonNode));
long referenceSnapshotId = JsonUtil.getLong(REFERENCE_SNAPSHOT_ID, jsonNode);
return new AllManifestsTable.ManifestListReadTask(
dataTableSchema,
fileIO,
schema,
specsById,
manifestListLocation,
residualFilter,
referenceSnapshotId);
} | @Test
public void invalidJsonNode() throws Exception {
String jsonStr = "{\"str\":\"1\", \"arr\":[]}";
ObjectMapper mapper = new ObjectMapper();
JsonNode rootNode = mapper.reader().readTree(jsonStr);
assertThatThrownBy(() -> AllManifestsTableTaskParser.fromJson(rootNode.get("str")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Invalid JSON node for manifest task: non-object ");
assertThatThrownBy(() -> AllManifestsTableTaskParser.fromJson(rootNode.get("arr")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Invalid JSON node for manifest task: non-object ");
} |
@Override
public void configure(final Map<String, ?> config) {
configure(
config,
new Options(),
org.rocksdb.LRUCache::new,
org.rocksdb.WriteBufferManager::new
);
} | @Test
public void shouldSetNumThreads() {
// When:
KsqlBoundedMemoryRocksDBConfigSetter.configure(
CONFIG_PROPS,
rocksOptions,
cacheFactory,
bufferManagerFactory
);
// Then:
verify(env).setBackgroundThreads(NUM_BACKGROUND_THREADS);
} |
@Override
public void clear() {
repo.clear();
} | @Test
public void testClear() throws Exception {
// ADD key to remove
assertTrue(repo.add(key01));
assertEquals(1, cache.size());
// remove key
assertTrue(repo.remove(key01));
assertEquals(0, cache.size());
// try to remove a key that isn't there
assertFalse(repo.remove(key02));
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
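// Collapse runs of adjacent delimiters into a single delimiter before splitting.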
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitSSSWithEmptyDelimiter() {
final String in = "a,;bc,;,";
assertThat(JOrphanUtils.split(in, "", "x"), CoreMatchers.equalTo(new String[]{in}));
} |
@ShellMethod(key = "show logfile records", value = "Read records from log files")
public String showLogFileRecords(
@ShellOption(value = {"--limit"}, help = "Limit commits",
defaultValue = "10") final Integer limit,
@ShellOption(value = "--logFilePathPattern",
help = "Fully qualified paths for the log files") final String logFilePathPattern,
@ShellOption(value = "--mergeRecords", help = "If the records in the log files should be merged",
defaultValue = "false") final Boolean shouldMerge)
throws IOException {
System.out.println("===============> Showing only " + limit + " records <===============");
HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
HoodieStorage storage = client.getStorage();
List<String> logFilePaths = FSUtils.getGlobStatusExcludingMetaFolder(
storage, new StoragePath(logFilePathPattern)).stream()
.map(status -> status.getPath().toString()).sorted(Comparator.reverseOrder())
.collect(Collectors.toList());
// logFilePaths must not be empty
checkArgument(logFilePaths.size() > 0, "There is no log file");
// TODO : readerSchema can change across blocks/log files, fix this inside Scanner
Schema readerSchema = null;
// get schema from last log file
for (int i = logFilePaths.size() - 1; i >= 0; i--) {
Schema schema = TableSchemaResolver.readSchemaFromLogFile(
storage, new StoragePath(logFilePaths.get(i)));
if (schema != null) {
readerSchema = schema;
break;
}
}
Objects.requireNonNull(readerSchema);
List<IndexedRecord> allRecords = new ArrayList<>();
if (shouldMerge) {
System.out.println("===========================> MERGING RECORDS <===================");
HoodieMergedLogRecordScanner scanner =
HoodieMergedLogRecordScanner.newBuilder()
.withStorage(storage)
.withBasePath(client.getBasePath())
.withLogFilePaths(logFilePaths)
.withReaderSchema(readerSchema)
.withLatestInstantTime(
client.getActiveTimeline()
.getCommitAndReplaceTimeline().lastInstant().get().getTimestamp())
.withReverseReader(
Boolean.parseBoolean(
HoodieReaderConfig.COMPACTION_REVERSE_LOG_READ_ENABLE.defaultValue()))
.withBufferSize(HoodieMemoryConfig.MAX_DFS_STREAM_BUFFER_SIZE.defaultValue())
.withMaxMemorySizeInBytes(
HoodieMemoryConfig.DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES)
.withSpillableMapBasePath(FileIOUtils.getDefaultSpillableMapBasePath())
.withDiskMapType(HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.defaultValue())
.withBitCaskDiskMapCompressionEnabled(HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue())
.withOptimizedLogBlocksScan(Boolean.parseBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.defaultValue()))
.build();
for (HoodieRecord hoodieRecord : scanner) {
Option<HoodieAvroIndexedRecord> record = hoodieRecord.toIndexedRecord(readerSchema, new Properties());
if (allRecords.size() < limit) {
allRecords.add(record.get().getData());
}
}
} else {
for (String logFile : logFilePaths) {
Schema writerSchema = TableSchemaResolver.readSchemaFromLogFile(
client.getStorage(), new StoragePath(logFile));
try (HoodieLogFormat.Reader reader =
HoodieLogFormat.newReader(storage, new HoodieLogFile(new StoragePath(logFile)), writerSchema)) {
// read the avro blocks
while (reader.hasNext()) {
HoodieLogBlock n = reader.next();
if (n instanceof HoodieDataBlock) {
HoodieDataBlock blk = (HoodieDataBlock) n;
try (ClosableIterator<HoodieRecord<IndexedRecord>> recordItr = blk.getRecordIterator(HoodieRecordType.AVRO)) {
recordItr.forEachRemaining(record -> {
if (allRecords.size() < limit) {
allRecords.add(record.getData());
}
});
}
}
}
}
if (allRecords.size() >= limit) {
break;
}
}
}
String[][] rows = new String[allRecords.size()][];
int i = 0;
for (IndexedRecord record : allRecords) {
String[] data = new String[1];
data[0] = record.toString();
rows[i] = data;
i++;
}
return HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_RECORDS}, rows);
} | @Test
public void testShowLogFileRecords() throws IOException, URISyntaxException {
Object result = shell.evaluate(() -> "show logfile records --logFilePathPattern " + partitionPath + "/*");
assertTrue(ShellEvaluationResultUtil.isSuccess(result));
// Construct the expected result from 10 generated records.
List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, 10);
String[][] rows = records.stream().map(r -> new String[] {r.toString()}).toArray(String[][]::new);
String expected = HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_RECORDS}, rows);
expected = removeNonWordAndStripSpace(expected);
String got = removeNonWordAndStripSpace(result.toString());
assertEquals(expected, got);
} |
@SuppressWarnings("unchecked")
public static <R> R getStaticField(Field field) {
try {
field.setAccessible(true);
return (R) field.get(null);
} catch (Exception e) {
throw new RuntimeException(e);
}
} | @Test
public void getStaticFieldReflectively_withField_getsStaticField() throws Exception {
Field field = ExampleDescendant.class.getDeclaredField("DESCENDANT");
int result = ReflectionHelpers.getStaticField(field);
assertThat(result).isEqualTo(6);
} |
public void reschedule(Node<K, V> node) {
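// A non-null next pointer means the node is currently linked into a bucket; unlink it and re-insert it at the bucket for its updated expiration time.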
if (node.getNextInVariableOrder() != null) {
unlink(node);
schedule(node);
}
} | @Test(dataProvider = "clock")
public void reschedule(long clock) {
when(cache.evictEntry(captor.capture(), any(), anyLong())).thenReturn(true);
timerWheel.nanos = clock;
var timer = new Timer(clock + TimeUnit.MINUTES.toNanos(15));
timerWheel.schedule(timer);
var startBucket = timer.getNextInVariableOrder();
timer.setVariableTime(clock + TimeUnit.HOURS.toNanos(2));
timerWheel.reschedule(timer);
assertThat(timer.getNextInVariableOrder()).isNotSameInstanceAs(startBucket);
timerWheel.advance(cache, clock + TimeUnit.DAYS.toNanos(1));
checkEmpty();
} |
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if (!(obj instanceof Dimension)) {
return false;
}
Dimension other = (Dimension) obj;
if (this.width != other.width) {
return false;
} else if (this.height != other.height) {
return false;
}
return true;
} | @Test
public void equalsTest() {
Dimension dimension1 = new Dimension(1, 2);
Dimension dimension2 = new Dimension(1, 2);
Dimension dimension3 = new Dimension(1, 1);
Dimension dimension4 = new Dimension(2, 2);
TestUtils.equalsTest(dimension1, dimension2);
TestUtils.notEqualsTest(dimension1, dimension3);
TestUtils.notEqualsTest(dimension1, dimension4);
TestUtils.notEqualsTest(dimension1, new Object());
TestUtils.notEqualsTest(dimension1, null);
} |
private void removeDevice(final DeviceId deviceId) {
discoverers.computeIfPresent(deviceId, (did, ld) -> {
ld.stop();
return null;
});
} | @Test
public void testRemoveDevice() {
assertThat(provider.discoverers.entrySet(), hasSize(2));
deviceListener.event(new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, dev3));
assertThat(provider.discoverers.entrySet(), hasSize(3));
deviceListener.event(new DeviceEvent(DeviceEvent.Type.DEVICE_REMOVED, dev3));
assertThat(provider.discoverers.entrySet(), hasSize(2));
} |
public static void main(String[] args) {
var service = ServiceLocator.getService(JNDI_SERVICE_A);
service.execute();
service = ServiceLocator.getService(JNDI_SERVICE_B);
service.execute();
service = ServiceLocator.getService(JNDI_SERVICE_A);
service.execute();
service = ServiceLocator.getService(JNDI_SERVICE_A);
service.execute();
} | @Test
void shouldExecuteWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@POST
@Path(RMWSConsts.SCHEDULER_LOGS)
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public String dumpSchedulerLogs(@FormParam(RMWSConsts.TIME) String time,
@Context HttpServletRequest hsr) throws IOException {
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
initForWritableEndpoints(callerUGI, true);
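// Writable endpoint: when ACLs are enabled, only admin users may trigger a log dump.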
ResourceScheduler rs = rm.getResourceScheduler();
int period = Integer.parseInt(time);
if (period <= 0) {
throw new BadRequestException("Period must be greater than 0");
}
final String logHierarchy =
"org.apache.hadoop.yarn.server.resourcemanager.scheduler";
String logfile = "yarn-scheduler-debug.log";
if (rs instanceof CapacityScheduler) {
logfile = "yarn-capacity-scheduler-debug.log";
} else if (rs instanceof FairScheduler) {
logfile = "yarn-fair-scheduler-debug.log";
}
AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logfile);
// time period is sent to us in seconds
dumper.dumpLogs("DEBUG", period * 1000);
return "Capacity scheduler logs are being created.";
} | @Test
public void testDumpingSchedulerLogs() throws Exception {
ResourceManager mockRM = mock(ResourceManager.class);
Configuration conf = new YarnConfiguration();
HttpServletRequest mockHsr = mockHttpServletRequestByUserName("non-admin");
ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
when(mockRM.getApplicationACLsManager()).thenReturn(aclsManager);
RMWebServices webSvc =
new RMWebServices(mockRM, conf, mock(HttpServletResponse.class));
// nothing should happen
webSvc.dumpSchedulerLogs("1", mockHsr);
waitforLogDump(50);
checkSchedulerLogFileAndCleanup();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.setStrings(YarnConfiguration.YARN_ADMIN_ACL, "admin");
aclsManager = new ApplicationACLsManager(conf);
when(mockRM.getApplicationACLsManager()).thenReturn(aclsManager);
webSvc = new RMWebServices(mockRM, conf, mock(HttpServletResponse.class));
boolean exceptionThrown = false;
try {
webSvc.dumpSchedulerLogs("1", mockHsr);
fail("Dumping logs should fail");
} catch (ForbiddenException ae) {
exceptionThrown = true;
}
assertTrue("ForbiddenException expected", exceptionThrown);
exceptionThrown = false;
when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
@Override
public String getName() {
return "testuser";
}
});
try {
webSvc.dumpSchedulerLogs("1", mockHsr);
fail("Dumping logs should fail");
} catch (ForbiddenException ae) {
exceptionThrown = true;
}
assertTrue("ForbiddenException expected", exceptionThrown);
when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
@Override
public String getName() {
return "admin";
}
});
webSvc.dumpSchedulerLogs("1", mockHsr);
waitforLogDump(50);
checkSchedulerLogFileAndCleanup();
} |
@Override
public void add(long key, String value) {
// fix https://github.com/crossoverJie/cim/issues/79
sortArrayMap.clear();
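// Spread the node across VIRTUAL_NODE_SIZE virtual points on the ring for a more even key distribution.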
for (int i = 0; i < VIRTUAL_NODE_SIZE; i++) {
Long hash = super.hash("vir" + key + i);
sortArrayMap.add(hash,value);
}
sortArrayMap.add(key, value);
} | @Test
public void getFirstNodeValue5() {
AbstractConsistentHash map = new SortArrayMapConsistentHash() ;
List<String> strings = new ArrayList<String>();
strings.add("45.78.28.220:9000:8081") ;
strings.add("45.78.28.220:9100:9081") ;
strings.add("45.78.28.220:9100:10081") ;
String process = map.process(strings,"1551253899106");
System.out.println(process);
Assert.assertEquals("45.78.28.220:9000:8081",process);
} |
public MeasureDto toMeasureDto(Measure measure, Metric metric, Component component) {
MeasureDto out = new MeasureDto();
out.setMetricUuid(metric.getUuid());
out.setComponentUuid(component.getUuid());
out.setAnalysisUuid(analysisMetadataHolder.getUuid());
if (measure.hasQualityGateStatus()) {
setAlert(out, measure.getQualityGateStatus());
}
out.setValue(valueAsDouble(measure));
out.setData(data(measure));
return out;
} | @Test
public void toMeasureDto_returns_Dto_with_alertStatus_and_alertText_if_Measure_has_QualityGateStatus() {
String alertText = "some error";
MeasureDto measureDto = underTest.toMeasureDto(Measure.newMeasureBuilder().setQualityGateStatus(new QualityGateStatus(Measure.Level.ERROR, alertText)).create(SOME_STRING),
SOME_STRING_METRIC, SOME_COMPONENT);
assertThat(measureDto.getAlertStatus()).isEqualTo(Measure.Level.ERROR.name());
assertThat(measureDto.getAlertText()).isEqualTo(alertText);
} |
@SuppressWarnings("checkstyle:magicnumber")
void writeLong(long value) {
if (value == Long.MIN_VALUE) {
write(STR_LONG_MIN_VALUE);
return;
}
if (value < 0) {
write('-');
value = -value;
}
int digitsWithoutComma = 0;
tmpSb.setLength(0);
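// Append the digits least-significant first, inserting a ',' after every third digit; the buffer is emitted in reverse below.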
do {
digitsWithoutComma++;
if (digitsWithoutComma == 4) {
tmpSb.append(',');
digitsWithoutComma = 1;
}
int mod = (int) (value % 10);
tmpSb.append(DIGITS[mod]);
value = value / 10;
} while (value > 0);
for (int k = tmpSb.length() - 1; k >= 0; k--) {
char c = tmpSb.charAt(k);
write(c);
}
} | @Test
public void writeLong() {
assertLongValue(0);
assertLongValue(10);
assertLongValue(100);
assertLongValue(1000);
assertLongValue(10000);
assertLongValue(100000);
assertLongValue(1000000);
assertLongValue(10000000);
assertLongValue(100000000);
assertLongValue(1000000000);
assertLongValue(10000000000L);
assertLongValue(100000000000L);
assertLongValue(1000000000000L);
assertLongValue(10000000000000L);
assertLongValue(100000000000000L);
assertLongValue(1000000000000000L);
assertLongValue(10000000000000000L);
assertLongValue(100000000000000000L);
assertLongValue(1000000000000000000L);
assertLongValue(-10);
assertLongValue(-100);
assertLongValue(-1000);
assertLongValue(-10000);
assertLongValue(-100000);
assertLongValue(-1000000);
assertLongValue(-10000000);
assertLongValue(-100000000);
assertLongValue(-1000000000);
assertLongValue(-10000000000L);
assertLongValue(-100000000000L);
assertLongValue(-1000000000000L);
assertLongValue(-10000000000000L);
assertLongValue(-100000000000000L);
assertLongValue(-1000000000000000L);
assertLongValue(-10000000000000000L);
assertLongValue(-100000000000000000L);
assertLongValue(-1000000000000000000L);
assertLongValue(1);
assertLongValue(345);
assertLongValue(83883);
assertLongValue(1222333);
assertLongValue(11122233);
assertLongValue(111222334);
assertLongValue(1112223344);
assertLongValue(-1);
assertLongValue(-345);
assertLongValue(-83883);
assertLongValue(-1222333);
assertLongValue(-11122233);
assertLongValue(-111222334);
assertLongValue(-1112223344);
assertLongValue(Integer.MIN_VALUE);
assertLongValue(Integer.MAX_VALUE);
assertLongValue(Long.MIN_VALUE);
assertLongValue(Long.MAX_VALUE);
} |
@Modified
public void modified(ComponentContext context) {
if (context == null) {
log.info("No component configuration");
return;
}
Dictionary<?, ?> properties = context.getProperties();
int newPollFrequency = getNewPollFrequency(properties, alarmPollFrequencySeconds);
if (newPollFrequency != alarmPollFrequencySeconds) {
alarmPollFrequencySeconds = newPollFrequency;
//stops the old scheduled task
scheduledTask.cancel(true);
//schedules new task at the new polling rate
scheduledTask = schedulePolling();
}
} | @Test
public void modified() throws Exception {
provider.modified(null);
assertEquals("Incorrect polling frequency", 1, provider.alarmPollFrequencySeconds);
provider.activate(null);
provider.modified(context);
assertEquals("Incorrect polling frequency", 1, provider.alarmPollFrequencySeconds);
} |
@Override
public RandomAccessReadView createView(long startPosition, long streamLength) throws IOException
{
throw new IOException(getClass().getName() + ".createView isn't supported.");
} | @Test
void testView() throws IOException
{
byte[] inputValues = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
ByteArrayInputStream bais = new ByteArrayInputStream(inputValues);
try (NonSeekableRandomAccessReadInputStream randomAccessSource = new NonSeekableRandomAccessReadInputStream(
bais))
{
Assertions.assertThrows(IOException.class, () -> randomAccessSource.createView(3, 5),
"createView should have thrown an IOException");
}
} |
void readEntries(ReadHandle lh, long firstEntry, long lastEntry, boolean shouldCacheEntry,
final AsyncCallbacks.ReadEntriesCallback callback, Object ctx) {
final PendingReadKey key = new PendingReadKey(firstEntry, lastEntry);
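// Deduplicate concurrent reads: attach to any in-flight read whose range overlaps this key, issuing extra storage reads only for the uncovered edges.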
Map<PendingReadKey, PendingRead> pendingReadsForLedger =
cachedPendingReads.computeIfAbsent(lh.getId(), (l) -> new ConcurrentHashMap<>());
boolean listenerAdded = false;
while (!listenerAdded) {
AtomicBoolean createdByThisThread = new AtomicBoolean();
FindPendingReadOutcome findBestCandidateOutcome = findPendingRead(key,
pendingReadsForLedger, createdByThisThread);
PendingRead pendingRead = findBestCandidateOutcome.pendingRead;
if (findBestCandidateOutcome.needsAdditionalReads()) {
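// The matched pending read covers only part of the requested range; read the missing left and/or right segments and stitch the results together in order.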
AsyncCallbacks.ReadEntriesCallback wrappedCallback = new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
PendingReadKey missingOnLeft = findBestCandidateOutcome.missingOnLeft;
PendingReadKey missingOnRight = findBestCandidateOutcome.missingOnRight;
if (missingOnRight != null && missingOnLeft != null) {
AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromLeft, Object dummyCtx1) {
AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromRight,
Object dummyCtx2) {
List<Entry> finalResult =
new ArrayList<>(entriesFromLeft.size()
+ entries.size() + entriesFromRight.size());
finalResult.addAll(entriesFromLeft);
finalResult.addAll(entries);
finalResult.addAll(entriesFromRight);
callback.readEntriesComplete(finalResult, ctx);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception,
Object dummyCtx3) {
entries.forEach(Entry::release);
entriesFromLeft.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh,
missingOnRight.startEntry, missingOnRight.endEntry,
shouldCacheEntry, readFromRightCallback, null);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx4) {
entries.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry, missingOnLeft.endEntry,
shouldCacheEntry, readFromLeftCallback, null);
} else if (missingOnLeft != null) {
AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromLeft,
Object dummyCtx5) {
List<Entry> finalResult =
new ArrayList<>(entriesFromLeft.size() + entries.size());
finalResult.addAll(entriesFromLeft);
finalResult.addAll(entries);
callback.readEntriesComplete(finalResult, ctx);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception,
Object dummyCtx6) {
entries.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry, missingOnLeft.endEntry,
shouldCacheEntry, readFromLeftCallback, null);
} else if (missingOnRight != null) {
AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromRight,
Object dummyCtx7) {
List<Entry> finalResult =
new ArrayList<>(entriesFromRight.size() + entries.size());
finalResult.addAll(entries);
finalResult.addAll(entriesFromRight);
callback.readEntriesComplete(finalResult, ctx);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception,
Object dummyCtx8) {
entries.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh, missingOnRight.startEntry, missingOnRight.endEntry,
shouldCacheEntry, readFromRightCallback, null);
}
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
callback.readEntriesFailed(exception, ctx);
}
};
listenerAdded = pendingRead.addListener(wrappedCallback, ctx, key.startEntry, key.endEntry);
} else {
listenerAdded = pendingRead.addListener(callback, ctx, key.startEntry, key.endEntry);
}
if (createdByThisThread.get()) {
CompletableFuture<List<EntryImpl>> readResult = rangeEntryCache.readFromStorage(lh, firstEntry,
lastEntry, shouldCacheEntry);
pendingRead.attach(readResult);
}
}
} | @Test
public void simpleRead() throws Exception {
long firstEntry = 100;
long endEntry = 199;
boolean shouldCacheEntry = false;
PreparedReadFromStorage read1
= prepareReadFromStorage(lh, rangeEntryCache, firstEntry, endEntry, shouldCacheEntry);
CapturingReadEntriesCallback callback = new CapturingReadEntriesCallback();
pendingReadsManager.readEntries(lh, firstEntry, endEntry, shouldCacheEntry, callback, CTX);
// complete the read
read1.storageReadCompleted();
// wait for the callback to complete
callback.get();
assertSame(callback.getCtx(), CTX);
// verify
verifyRange(callback.entries, firstEntry, endEntry);
} |
public Node node() { return node; } | @Test
void testNode() {
int index = 0;
Node node = new Node(index);
assertEquals(index, node.index());
} |
static void run(String resolverPath, String rootPath, String targetDirectoryPath, String[] sources)
throws IOException
{
final DataSchemaResolver schemaResolver = MultiFormatDataSchemaResolver.withBuiltinFormats(resolverPath);
VelocityEngine velocityEngine = initVelocityEngine();
final File targetDirectory = new File(targetDirectoryPath);
final StringBuilder message = new StringBuilder();
final RestSpecParser parser = new RestSpecParser();
final RestSpecParser.ParseResult parseResult = parser.parseSources(sources);
for (CodeUtil.Pair<ResourceSchema, File> pair : parseResult.getSchemaAndFiles())
{
generateFluentClientByResource(
pair.first,
schemaResolver,
velocityEngine,
targetDirectory,
pair.second.getPath(),
new ArrayList<>(2),
message
);
}
if (message.length() > 0)
{
throw new IOException(message.toString());
}
} | @Test()
public void testBasic() throws Exception
{
final String pegasusDir = moduleDir + FS + RESOURCES_DIR + FS + "pegasus";
final String outPath = outdir.getPath();
FluentApiGenerator.run(pegasusDir,
moduleDir,
outPath,
new String[] { moduleDir + FS + RESOURCES_DIR + FS + "idls" + FS + "testCollection.restspec.json" });
final File apiFile = new File(outPath + FS + "com" + FS + "linkedin" + FS + "restli" + FS + "swift" + FS + "integration" + FS + "TestCollection.java");
Assert.assertTrue(apiFile.exists());
} |
public void project() {
srcPotentialIndex = 0;
trgPotentialIndex = 0;
recurse(0, 0);
BayesAbsorption.normalize(trgPotentials);
} | @Test
public void testProjection2() {
// Projects from node1 into sep. A, B and C are in node1. A and B are in the sep.
// This tests a non-separator variable positioned after the separator variables.
BayesVariable a = new BayesVariable<String>( "A", 0, new String[] {"A1", "A2"}, new double[][] {{0.1, 0.2}});
BayesVariable b = new BayesVariable<String>( "B", 1, new String[] {"B1", "B2"}, new double[][] {{0.1, 0.2}});
BayesVariable c = new BayesVariable<String>( "C", 2, new String[] {"C1", "C2"}, new double[][] {{0.1, 0.2}});
Graph<BayesVariable> graph = new BayesNetwork();
GraphNode x0 = addNode(graph);
GraphNode x1 = addNode(graph);
GraphNode x2 = addNode(graph);
x0.setContent( a );
x1.setContent( b );
x2.setContent( c );
JunctionTreeClique node1 = new JunctionTreeClique(0, graph, bitSet("0111") );
JunctionTreeClique node2 = new JunctionTreeClique(1, graph, bitSet("0011") );
SeparatorState sep = new JunctionTreeSeparator(0, node1, node2, bitSet("0011"), graph).createState();
double v = 0.1;
for ( int i = 0; i < node1.getPotentials().length; i++ ) {
node1.getPotentials()[i] = v;
v = scaleDouble(3, v + 0.1 );
}
BayesVariable[] vars = new BayesVariable[] {a, b, c};
BayesVariable[] sepVars = new BayesVariable[] { a, b };
int[] sepVarPos = PotentialMultiplier.createSubsetVarPos(vars, sepVars);
int sepVarNumberOfStates = PotentialMultiplier.createNumberOfStates(sepVars);
int[] sepVarMultipliers = PotentialMultiplier.createIndexMultipliers(sepVars, sepVarNumberOfStates);
double[] projectedSepPotentials = new double[ sep.getPotentials().length];
BayesProjection p = new BayesProjection(vars, node1.getPotentials(), sepVarPos, sepVarMultipliers, projectedSepPotentials);
p.project();
// Remember the potentials have been normalized from 0.3, 0.7, 1.1, 1.5.
assertArray(new double[]{0.083, 0.194, 0.306, 0.417}, scaleDouble(3, projectedSepPotentials));
} |
@VisibleForTesting
void validateRoleDuplicate(String name, String code, Long id) {
// 0. Super admin roles are not allowed to be created
if (RoleCodeEnum.isSuperAdmin(code)) {
throw exception(ROLE_ADMIN_CODE_ERROR, code);
}
// 1. Check whether the name is already used by another role
RoleDO role = roleMapper.selectByName(name);
if (role != null && !role.getId().equals(id)) {
throw exception(ROLE_NAME_DUPLICATE, name);
}
// 2. Check whether a role with the same code already exists
if (!StringUtils.hasText(code)) {
return;
}
// The code is already used by another role
role = roleMapper.selectByCode(code);
if (role != null && !role.getId().equals(id)) {
throw exception(ROLE_CODE_DUPLICATE, code);
}
} | @Test
public void testValidateRoleDuplicate_nameDuplicate() {
// Mock data
RoleDO roleDO = randomPojo(RoleDO.class, o -> o.setName("role_name"));
roleMapper.insert(roleDO);
// Prepare parameters
String name = "role_name";
// Invoke and assert the exception
assertServiceException(() -> roleService.validateRoleDuplicate(name, randomString(), null),
ROLE_NAME_DUPLICATE, name);
} |
@CanIgnoreReturnValue
public final Ordered containsExactly() {
return containsExactlyEntriesIn(ImmutableMap.of());
} | @Test
public void containsExactlyWrongValue_sameToStringForValues() {
expectFailureWhenTestingThat(ImmutableMap.of("jan", 1L, "feb", 2L))
.containsExactly("jan", 1, "feb", 2);
assertFailureKeys(
"keys with wrong values",
"for key",
"expected value",
"but got value",
"for key",
"expected value",
"but got value",
"---",
"expected",
"but was");
assertFailureValueIndexed("for key", 0, "jan");
assertFailureValueIndexed("expected value", 0, "1 (java.lang.Integer)");
assertFailureValueIndexed("but got value", 0, "1 (java.lang.Long)");
assertFailureValueIndexed("for key", 1, "feb");
assertFailureValueIndexed("expected value", 1, "2 (java.lang.Integer)");
assertFailureValueIndexed("but got value", 1, "2 (java.lang.Long)");
} |
@Override
public void start() throws Exception {
if (!state.compareAndSet(State.LATENT, State.STARTED)) {
throw new IllegalStateException();
}
try {
client.create().creatingParentContainersIfNeeded().forPath(queuePath);
} catch (KeeperException.NodeExistsException ignore) {
// this is OK
}
if (lockPath != null) {
try {
client.create().creatingParentContainersIfNeeded().forPath(lockPath);
} catch (KeeperException.NodeExistsException ignore) {
// this is OK
}
}
if (!isProducerOnly || (maxItems != QueueBuilder.NOT_SET)) {
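// Consumers, and producers with a bounded queue, need the children cache to watch the queue path.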
childrenCache.start();
}
if (!isProducerOnly) {
service.submit(new Callable<Object>() {
@Override
public Object call() {
runLoop();
return null;
}
});
}
} | @Test
public void testSafetyBasic() throws Exception {
final int itemQty = 10;
DistributedQueue<TestQueueItem> queue = null;
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
client.start();
try {
final BlockingQueueConsumer<TestQueueItem> consumer =
new BlockingQueueConsumer<>(new DummyConnectionStateListener());
queue = QueueBuilder.builder(client, consumer, serializer, QUEUE_PATH)
.lockPath("/a/locks")
.buildQueue();
queue.start();
QueueTestProducer producer = new QueueTestProducer(queue, itemQty, 0);
ExecutorService service = Executors.newCachedThreadPool();
service.submit(producer);
final CountDownLatch latch = new CountDownLatch(1);
service.submit(new Callable<Object>() {
@Override
public Object call() throws Exception {
for (int i = 0; i < itemQty; ++i) {
TestQueueItem item = consumer.take();
assertEquals(item.str, Integer.toString(i));
}
latch.countDown();
return null;
}
});
assertTrue(latch.await(10, TimeUnit.SECONDS));
} finally {
CloseableUtils.closeQuietly(queue);
CloseableUtils.closeQuietly(client);
}
} |
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testRofOne()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHECK_RING_OF_FORGING_ONE, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_RING_OF_FORGING, 1);
} |
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain filterChain) {
RedirectionRequest wsRequest = new RedirectionRequest(request);
ServletResponse wsResponse = new ServletResponse(response);
webServiceEngine.execute(wsRequest, wsResponse);
} | @Test
public void redirect_components_update_key() {
when(request.getServletPath()).thenReturn("/api/components/update_key");
when(request.getMethod()).thenReturn("POST");
underTest.doFilter(new JavaxHttpRequest(request), new JavaxHttpResponse(response), chain);
assertRedirection("/api/projects/update_key", "POST");
} |
@SuppressWarnings({"PMD.AvoidInstantiatingObjectsInLoops"})
public void validate(Workflow workflow, User caller) {
try {
RunProperties runProperties = new RunProperties();
runProperties.setOwner(caller);
Map<String, ParamDefinition> workflowParams = workflow.getParams();
Map<String, ParamDefinition> defaultDryRunParams =
defaultParamManager.getDefaultDryRunParams();
// add run params to override params with known invalid defaults
Map<String, ParamDefinition> filteredParams =
defaultDryRunParams.entrySet().stream()
.filter(
entry ->
workflowParams != null
&& workflowParams.containsKey(entry.getKey())
&& workflowParams.get(entry.getKey()).getType()
== entry.getValue().getType())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
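// Run parameter merging with a synthetic validation initiator so it behaves exactly as it would for a real fresh run.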
Initiator initiator = new ValidationInitiator();
initiator.setCaller(caller);
RunRequest runRequest =
RunRequest.builder()
.initiator(initiator)
.currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
.runParams(filteredParams)
.build();
WorkflowInstance workflowInstance =
workflowHelper.createWorkflowInstance(workflow, 1L, 1L, runProperties, runRequest);
WorkflowSummary workflowSummary =
workflowHelper.createWorkflowSummaryFromInstance(workflowInstance);
// todo: improve to traverse in DAG order to validate steps and their params
for (Step step : workflow.getSteps()) {
StepRuntimeSummary runtimeSummary =
StepRuntimeSummary.builder()
.stepId(step.getId())
.stepAttemptId(1L)
.stepInstanceId(1L)
.stepInstanceUuid(UUID.randomUUID().toString())
.stepName(StepHelper.getStepNameOrDefault(step))
.tags(step.getTags())
.type(step.getType())
.subType(step.getSubType())
.params(new LinkedHashMap<>())
.transition(StepInstanceTransition.from(step))
.synced(true)
.dependencies(Collections.emptyMap())
.build();
paramsManager.generateMergedStepParams(
workflowSummary, step, stepRuntimeMap.get(step.getType()), runtimeSummary);
}
} catch (Exception e) {
throw new MaestroDryRunException(
e,
"Exception during dry run validation for workflow %s Error=[%s] Type=[%s] StackTrace=[%s]",
workflow.getId(),
e.getMessage(),
e.getClass(),
ExceptionHelper.getStackTrace(e, MAX_STACKTRACE_LINES));
}
} | @Test
public void testValidateFailStepMerge() {
when(paramsManager.generateMergedWorkflowParams(any(), any()))
.thenReturn(new LinkedHashMap<>());
when(paramsManager.generateMergedStepParams(any(), any(), any(), any()))
.thenThrow(new MaestroValidationException("Error validating"));
AssertHelper.assertThrows(
"validation error",
MaestroDryRunException.class,
"Exception during dry run validation",
() -> dryRunValidator.validate(definition.getWorkflow(), user));
} |
public Mono<CosmosItemResponse<Object>> deleteItem(
final String itemId, final PartitionKey partitionKey, final CosmosItemRequestOptions itemRequestOptions) {
CosmosDbUtils.validateIfParameterIsNotEmpty(itemId, PARAM_ITEM_ID);
CosmosDbUtils.validateIfParameterIsNotEmpty(partitionKey, PARAM_PARTITION_KEY);
return applyToContainer(container -> container.deleteItem(itemId, partitionKey, itemRequestOptions));
} | @Test
void deleteItem() {
final CosmosDbContainerOperations operations
= new CosmosDbContainerOperations(Mono.just(mock(CosmosAsyncContainer.class)));
CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.deleteItem(null, null, null));
CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.deleteItem("", null, null));
CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.deleteItem("tes", null, null));
} |
public Meter meter(String name) {
return getOrAdd(name, MetricBuilder.METERS);
} | @Test
public void accessingACustomMeterRegistersAndReusesIt() {
final MetricRegistry.MetricSupplier<Meter> supplier = () -> meter;
final Meter meter1 = registry.meter("thing", supplier);
final Meter meter2 = registry.meter("thing", supplier);
assertThat(meter1)
.isSameAs(meter2);
verify(listener).onMeterAdded("thing", meter1);
} |
public static ImmutableList<String> glob(final String glob) {
Path path = getGlobPath(glob);
int globIndex = getGlobIndex(path);
if (globIndex < 0) {
return of(glob);
}
return doGlob(path, searchPath(path, globIndex));
} | @Test
public void should_glob_absolute_files_with_glob(@TempDir final Path folder) throws IOException {
Path tempFile = folder.resolve("glob.json");
java.nio.file.Files.createFile(tempFile);
File file = tempFile.toFile();
String glob = Files.join(folder.toFile().getAbsolutePath(), "*.json");
ImmutableList<String> files = Globs.glob(glob);
assertThat(files.contains(file.getAbsolutePath()), is(true));
} |
public static int getOcuranceString( String string, String searchFor ) {
if ( string == null || string.length() == 0 ) {
return 0;
}
Pattern p = Pattern.compile( searchFor );
Matcher m = p.matcher( string );
int count = 0;
while ( m.find() ) {
++count;
}
return count;
} | @Test
public void testGetOcuranceString() {
assertEquals( 0, Const.getOcuranceString( "", "" ) );
assertEquals( 0, Const.getOcuranceString( "foo bar bazfoo", "cat" ) );
assertEquals( 2, Const.getOcuranceString( "foo bar bazfoo", "foo" ) );
} |
@Override
public Mono<UserDetails> findByUsername(String username) {
return userService.getUser(username)
.onErrorMap(UserNotFoundException.class,
e -> new BadCredentialsException("Invalid Credentials"))
.flatMap(user -> {
var name = user.getMetadata().getName();
var userBuilder = User.withUsername(name)
.password(user.getSpec().getPassword())
.disabled(requireNonNullElse(user.getSpec().getDisabled(), false));
var setAuthorities = roleService.getRolesByUsername(name)
// every authenticated user should have authenticated and anonymous roles.
.concatWithValues(AUTHENTICATED_ROLE_NAME, ANONYMOUS_ROLE_NAME)
.map(roleName -> new SimpleGrantedAuthority(ROLE_PREFIX + roleName))
.distinct()
.collectList()
.doOnNext(userBuilder::authorities);
return setAuthorities.then(Mono.fromSupplier(() -> {
var twoFactorAuthSettings = TwoFactorUtils.getTwoFactorAuthSettings(user);
return new HaloUser.Builder(userBuilder.build())
.twoFactorAuthEnabled(
(!twoFactorAuthDisabled) && twoFactorAuthSettings.isAvailable()
)
.totpEncryptedSecret(user.getSpec().getTotpEncryptedSecret())
.build();
}));
});
} | @Test
void shouldFindHaloUserDetailsWith2faDisabledWhen2faEnabledButNoTotpConfigured() {
var fakeUser = createFakeUser();
fakeUser.getSpec().setTwoFactorAuthEnabled(true);
when(userService.getUser("faker")).thenReturn(Mono.just(fakeUser));
when(roleService.getRolesByUsername("faker")).thenReturn(Flux.empty());
userDetailService.findByUsername("faker")
.as(StepVerifier::create)
.assertNext(userDetails -> {
assertInstanceOf(HaloUserDetails.class, userDetails);
assertFalse(((HaloUserDetails) userDetails).isTwoFactorAuthEnabled());
})
.verifyComplete();
} |
@Override
public void process(Exchange exchange) throws Exception {
final SchematronProcessor schematronProcessor = SchematronProcessorFactory.newSchematronEngine(endpoint.getRules());
final Object payload = exchange.getIn().getBody();
final String report;
if (payload instanceof Source) {
LOG.debug("Applying schematron validation on payload: {}", payload);
report = schematronProcessor.validate((Source) payload);
} else if (payload instanceof String) {
LOG.debug("Applying schematron validation on payload: {}", payload);
report = schematronProcessor.validate((String) payload);
} else {
String stringPayload = exchange.getIn().getBody(String.class);
LOG.debug("Applying schematron validation on payload: {}", stringPayload);
report = schematronProcessor.validate(stringPayload);
}
LOG.debug("Schematron validation report \n {}", report);
String status = getValidationStatus(report);
LOG.info("Schematron validation status : {}", status);
setValidationReport(exchange, report, status);
} | @Test
public void testProcessInValidXMLAsSource() throws Exception {
Exchange exc = new DefaultExchange(context, ExchangePattern.InOut);
exc.getIn().setBody(
new SAXSource(getXMLReader(), new InputSource(ClassLoader.getSystemResourceAsStream("xml/article-2.xml"))));
// process xml payload
producer.process(exc);
// assert
assertEquals(Constants.FAILED, exc.getMessage().getHeader(Constants.VALIDATION_STATUS));
} |
@Override
public synchronized void write(int b) throws IOException {
mUfsOutStream.write(b);
mBytesWritten++;
} | @Test
public void singleByteWrite() throws IOException, AlluxioException {
byte byteToWrite = 5;
AlluxioURI ufsPath = getUfsPath();
try (FileOutStream outStream = mFileSystem.createFile(ufsPath)) {
outStream.write(byteToWrite);
}
try (InputStream inputStream = mFileSystem.openFile(ufsPath)) {
assertEquals(byteToWrite, inputStream.read());
}
} |
@Override
public List<SimpleColumn> toColumns(
final ParsedSchema schema,
final SerdeFeatures serdeFeatures,
final boolean isKey) {
SerdeUtils.throwOnUnsupportedFeatures(serdeFeatures, format.supportedFeatures());
Schema connectSchema = connectSrTranslator.toConnectSchema(schema);
if (serdeFeatures.enabled(SerdeFeature.UNWRAP_SINGLES)) {
connectSchema = SerdeUtils.wrapSingle(connectSchema, isKey);
}
if (connectSchema.type() != Type.STRUCT) {
if (isKey) {
throw new IllegalStateException("Key schemas are always unwrapped.");
}
throw new KsqlException("Schema returned from schema registry is anonymous type. "
+ "To use this schema with ksqlDB, set '" + CommonCreateConfigs.WRAP_SINGLE_VALUE
+ "=false' in the WITH clause properties.");
}
final Schema rowSchema = connectKsqlTranslator.toKsqlSchema(connectSchema);
return rowSchema.fields().stream()
.map(ConnectFormatSchemaTranslator::toColumn)
.collect(Collectors.toList());
} | @Test
public void shouldSupportBuildingColumnsFromPrimitiveKeySchema() {
// Given:
when(format.supportedFeatures()).thenReturn(Collections.singleton(SerdeFeature.UNWRAP_SINGLES));
// When:
translator.toColumns(parsedSchema, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES), true);
// Then:
verify(connectKsqlTranslator).toKsqlSchema(SchemaBuilder.struct()
.field("ROWKEY", connectSchema)
.build());
} |
public Optional<Measure> toMeasure(@Nullable LiveMeasureDto measureDto, Metric metric) {
requireNonNull(metric);
if (measureDto == null) {
return Optional.empty();
}
Double value = measureDto.getValue();
String data = measureDto.getDataAsString();
switch (metric.getType().getValueType()) {
case INT:
return toIntegerMeasure(value, data);
case LONG:
return toLongMeasure(value, data);
case DOUBLE:
return toDoubleMeasure(value, data);
case BOOLEAN:
return toBooleanMeasure(value, data);
case STRING:
return toStringMeasure(data);
case LEVEL:
return toLevelMeasure(data);
case NO_VALUE:
return toNoValueMeasure();
default:
throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
}
} | @Test
public void toMeasure_returns_no_value_if_dto_has_no_data_for_Level_Metric() {
Optional<Measure> measure = underTest.toMeasure(EMPTY_MEASURE_DTO, SOME_LEVEL_METRIC);
assertThat(measure).isPresent();
assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.NO_VALUE);
} |
@Deprecated
public void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
synchronized (stateLock) {
if (state.hasNotStarted()) {
oldHandler = true;
processStreamThread(thread -> thread.setUncaughtExceptionHandler(uncaughtExceptionHandler));
if (globalStreamThread != null) {
globalStreamThread.setUncaughtExceptionHandler(uncaughtExceptionHandler);
}
} else {
throw new IllegalStateException("Can only set UncaughtExceptionHandler before calling start(). " +
"Current state is: " + state);
}
}
} | @Test
public void shouldThrowNullPointerExceptionSettingStreamsUncaughtExceptionHandlerIfNull() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(NullPointerException.class, () -> streams.setUncaughtExceptionHandler((StreamsUncaughtExceptionHandler) null));
}
} |
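The state check above implies a strict call order for this deprecated setter. A minimal lifecycle sketch, assuming a properly configured Properties instance (application.id, bootstrap.servers, ...):

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;

class HandlerOrderSketch {
    public static void main(String[] args) {
        Properties props = new Properties(); // assumed: application.id, bootstrap.servers, ...
        KafkaStreams streams = new KafkaStreams(new StreamsBuilder().build(), props);
        // Must be set while the instance has not yet started.
        streams.setUncaughtExceptionHandler(
                (thread, error) -> System.err.println(thread.getName() + ": " + error));
        streams.start(); // calling the setter after this point throws IllegalStateException
    }
}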
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetJobReportRequest request =
recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
JobReport report = ((GetJobReportResponse) invoke("getJobReport",
GetJobReportRequest.class, request)).getJobReport();
JobStatus jobStatus = null;
if (report != null) {
if (StringUtils.isEmpty(report.getJobFile())) {
String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
report.setJobFile(jobFile);
}
String historyTrackingUrl = report.getTrackingUrl();
String url = StringUtils.isNotEmpty(historyTrackingUrl)
? historyTrackingUrl : trackingUrl;
jobStatus = TypeConverter.fromYarn(report, url);
}
return jobStatus;
} | @Test
public void testRMDownRestoreForJobStatusBeforeGetAMReport()
throws IOException {
Configuration conf = new YarnConfiguration();
conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 3);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,
!isAMReachableFromClient);
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(any(GetJobReportRequest.class)))
.thenReturn(getJobReportResponse());
ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
try {
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced1"))).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced2")))
.thenReturn(getFinishedApplicationReport());
ClientServiceDelegate clientServiceDelegate = new ClientServiceDelegate(
conf, rmDelegate, oldJobId, historyServerProxy);
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
verify(rmDelegate, times(3)).getApplicationReport(
any(ApplicationId.class));
Assert.assertNotNull(jobStatus);
} catch (YarnException e) {
throw new IOException(e);
}
} |
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
try {
try {
if(!new DriveAttributesFinderFeature(session, fileid).find(file).isHidden()) {
throw new ConflictException(file.getAbsolute());
}
}
catch(NotfoundException e) {
// Ignore
}
final Drive.Files.Create insert = session.getClient().files().create(new File()
.setName(file.getName())
.setMimeType(status.getMime())
.setParents(Collections.singletonList(fileid.getFileId(file.getParent()))));
final File execute = insert
.setFields(DriveAttributesFinderFeature.DEFAULT_FIELDS)
.setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
fileid.cache(file, execute.getId());
return file.withAttributes(new DriveAttributesFinderFeature(session, fileid).toAttributes(execute));
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Cannot create {0}", e, file);
}
} | @Test
public void testTouch() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path folder = new DriveDirectoryFeature(session, fileid).mkdir(
new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new DriveTouchFeature(session, fileid).touch(
new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withMime("x-application/cyberduck"));
final String id = test.attributes().getFileId();
assertNotNull(id);
assertNull(test.attributes().getVersionId());
assertEquals(test.attributes().getFileId(), new DriveAttributesFinderFeature(session, fileid).find(test).getFileId());
assertThrows(ConflictException.class, () -> new DriveTouchFeature(session, fileid).touch(test, new TransferStatus()));
assertThrows(ConflictException.class, () -> new DriveDirectoryFeature(session, fileid).mkdir(test, new TransferStatus()));
new DriveTrashFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertNull(test.attributes().getFileId());
// Trashed
assertFalse(new DriveFindFeature(session, fileid).find(test));
assertFalse(new DefaultFindFeature(session).find(test));
test.attributes().setFileId(id);
assertTrue(new DriveFindFeature(session, fileid).find(test));
assertTrue(new DefaultFindFeature(session).find(test));
new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void process(Exchange exchange) throws Exception {
try {
plc4XEndpoint.reconnectIfNeeded();
} catch (PlcConnectionException e) {
if (log.isTraceEnabled()) {
log.warn("Unable to reconnect, skipping request", e);
} else {
log.warn("Unable to reconnect, skipping request");
}
return;
}
Message in = exchange.getIn();
Object body = in.getBody();
PlcWriteRequest plcWriteRequest;
if (body instanceof Map) { //Check if we have a Map
Map<String, Map<String, Object>> tags = (Map<String, Map<String, Object>>) body;
plcWriteRequest = plc4XEndpoint.buildPlcWriteRequest(tags);
} else {
            throw new PlcInvalidTagException("The body must contain a Map<String,Map<String,Object>>");
}
CompletableFuture<? extends PlcWriteResponse> completableFuture = plcWriteRequest.execute();
int currentlyOpenRequests = openRequests.incrementAndGet();
try {
log.debug("Currently open requests including {}:{}", exchange, currentlyOpenRequests);
Object plcWriteResponse = completableFuture.get(5000, TimeUnit.MILLISECONDS);
if (exchange.getPattern().isOutCapable()) {
Message out = exchange.getMessage();
out.copyFrom(exchange.getIn());
out.setBody(plcWriteResponse);
} else {
in.setBody(plcWriteResponse);
}
} finally {
int openRequestsAfterFinish = openRequests.decrementAndGet();
log.trace("Open Requests after {}:{}", exchange, openRequestsAfterFinish);
}
} | @Test
public void processAsync() {
sut.process(testExchange, doneSync -> {
});
when(testExchange.getPattern()).thenReturn(ExchangePattern.InOnly);
sut.process(testExchange, doneSync -> {
});
when(testExchange.getPattern()).thenReturn(ExchangePattern.InOut);
sut.process(testExchange, doneSync -> {
});
} |
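The increment/try/finally-decrement bracket around the bounded get is a generic pattern. A minimal sketch using only java.util.concurrent; the 5000 ms bound mirrors the focal method:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

class BoundedWaitSketch {
    private static final AtomicInteger openRequests = new AtomicInteger();

    // Tracks in-flight requests and waits at most 5 seconds for completion;
    // the counter is decremented even when get(...) times out or fails.
    static <T> T await(CompletableFuture<T> future) throws Exception {
        int inFlight = openRequests.incrementAndGet();
        try {
            System.out.println("in flight: " + inFlight);
            return future.get(5000, TimeUnit.MILLISECONDS);
        } finally {
            openRequests.decrementAndGet();
        }
    }
}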
@Operation(summary = "list", description = "List hosts")
@GetMapping
public ResponseEntity<List<HostVO>> list(@PathVariable Long clusterId) {
return ResponseEntity.success(hostService.list(clusterId));
} | @Test
void listReturnsAllHosts() {
Long clusterId = 1L;
List<HostVO> hosts = Arrays.asList(new HostVO(), new HostVO());
when(hostService.list(clusterId)).thenReturn(hosts);
ResponseEntity<List<HostVO>> response = hostController.list(clusterId);
assertTrue(response.isSuccess());
assertEquals(hosts, response.getData());
} |
@Override
public GZIPCompressionOutputStream createOutputStream( OutputStream out ) throws IOException {
return new GZIPCompressionOutputStream( out, this );
} | @Test
public void testCreateOutputStream() throws IOException {
GZIPCompressionProvider provider = (GZIPCompressionProvider) factory.getCompressionProviderByName( PROVIDER_NAME );
ByteArrayOutputStream out = new ByteArrayOutputStream();
GZIPOutputStream gos = new GZIPOutputStream( out );
GZIPCompressionOutputStream outStream = new GZIPCompressionOutputStream( out, provider );
assertNotNull( outStream );
out = new ByteArrayOutputStream();
GZIPCompressionOutputStream ncis = provider.createOutputStream( out );
assertNotNull( ncis );
GZIPCompressionOutputStream ncis2 = provider.createOutputStream( gos );
assertNotNull( ncis2 );
} |
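The provider's stream is a thin wrapper over java.util.zip. A self-contained round trip showing the underlying compress/decompress cycle:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

class GzipRoundTrip {
    public static void main(String[] args) throws Exception {
        byte[] original = "hello gzip".getBytes(StandardCharsets.UTF_8);
        // Compress into an in-memory buffer.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (GZIPOutputStream gzip = new GZIPOutputStream(buffer)) {
            gzip.write(original);
        }
        // Decompress and verify the payload survives the round trip.
        try (GZIPInputStream gunzip =
                 new GZIPInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
            byte[] restored = gunzip.readAllBytes();
            System.out.println(new String(restored, StandardCharsets.UTF_8)); // hello gzip
        }
    }
}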
static <P> Matcher[] toArray(Iterable<? extends Matcher<P>> matchers) {
if (matchers == null) throw new NullPointerException("matchers == null");
if (matchers instanceof Collection) {
return (Matcher[]) ((Collection) matchers).toArray(new Matcher[0]);
}
List<Matcher<P>> result = new ArrayList<Matcher<P>>();
for (Matcher<P> matcher : matchers) result.add(matcher);
return result.toArray(new Matcher[0]);
} | @Test void toArray_iterable() {
Matcher<Void> one = b -> true;
Matcher<Void> two = b -> false;
Matcher<Void> three = b -> true;
assertThat(Matchers.toArray(() -> asList(one, two, three).iterator()))
.containsExactly(one, two, three);
} |
public static UpdateRequirement fromJson(String json) {
return JsonUtil.parse(json, UpdateRequirementParser::fromJson);
} | @Test
public void testAssertViewUUIDFromJson() {
String requirementType = UpdateRequirementParser.ASSERT_VIEW_UUID;
String uuid = "2cc52516-5e73-41f2-b139-545d41a4e151";
String json = String.format("{\"type\":\"assert-view-uuid\",\"uuid\":\"%s\"}", uuid);
UpdateRequirement expected = new UpdateRequirement.AssertViewUUID(uuid);
assertEquals(requirementType, expected, UpdateRequirementParser.fromJson(json));
} |
@NonNull
public static List<VideoStream> getSortedStreamVideosList(
@NonNull final Context context,
@Nullable final List<VideoStream> videoStreams,
@Nullable final List<VideoStream> videoOnlyStreams,
final boolean ascendingOrder,
final boolean preferVideoOnlyStreams) {
final SharedPreferences preferences =
PreferenceManager.getDefaultSharedPreferences(context);
final boolean showHigherResolutions = preferences.getBoolean(
context.getString(R.string.show_higher_resolutions_key), false);
final MediaFormat defaultFormat = getDefaultFormat(context,
R.string.default_video_format_key, R.string.default_video_format_value);
return getSortedStreamVideosList(defaultFormat, showHigherResolutions, videoStreams,
videoOnlyStreams, ascendingOrder, preferVideoOnlyStreams);
} | @Test
public void getSortedStreamVideosListTest() {
List<VideoStream> result = ListHelper.getSortedStreamVideosList(MediaFormat.MPEG_4, true,
VIDEO_STREAMS_TEST_LIST, VIDEO_ONLY_STREAMS_TEST_LIST, true, false);
List<String> expected = List.of("144p", "240p", "360p", "480p", "720p", "720p60",
"1080p", "1080p60", "1440p60", "2160p", "2160p60");
assertEquals(expected.size(), result.size());
for (int i = 0; i < result.size(); i++) {
            assertEquals(expected.get(i), result.get(i).getResolution());
}
////////////////////
// Reverse Order //
//////////////////
result = ListHelper.getSortedStreamVideosList(MediaFormat.MPEG_4, true,
VIDEO_STREAMS_TEST_LIST, VIDEO_ONLY_STREAMS_TEST_LIST, false, false);
expected = List.of("2160p60", "2160p", "1440p60", "1080p60", "1080p", "720p60",
"720p", "480p", "360p", "240p", "144p");
assertEquals(expected.size(), result.size());
for (int i = 0; i < result.size(); i++) {
assertEquals(expected.get(i), result.get(i).getResolution());
}
} |
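The expected orderings in the test are consistent with comparing by pixel height, then frame rate. A sketch of such a comparator; the parsing of labels like "720p60" is an assumption for illustration, not NewPipe's actual implementation:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ResolutionOrderSketch {
    // "720p60" -> height 720; "144p" -> height 144.
    static int height(String label) {
        return Integer.parseInt(label.substring(0, label.indexOf('p')));
    }

    // "720p60" -> fps 60; "1080p" -> fps 0 (unspecified).
    static int fps(String label) {
        String tail = label.substring(label.indexOf('p') + 1);
        return tail.isEmpty() ? 0 : Integer.parseInt(tail);
    }

    public static void main(String[] args) {
        List<String> sorted = Stream.of("1080p", "144p", "720p60", "2160p60")
                .sorted(Comparator.comparingInt(ResolutionOrderSketch::height)
                        .thenComparingInt(ResolutionOrderSketch::fps))
                .collect(Collectors.toList());
        System.out.println(sorted); // [144p, 720p60, 1080p, 2160p60]
    }
}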
@Override
public String getPolicyName() {
return LoadBalancerStrategy.RANDOM.getStrategy();
} | @Test
public void testGetPolicyName() {
assertEquals(randomLoadBalancerProvider.getPolicyName(), LoadBalancerStrategy.RANDOM.getStrategy());
} |
public static IndexIterationPointer create(
@Nullable Comparable<?> from,
boolean fromInclusive,
@Nullable Comparable<?> to,
boolean toInclusive,
boolean descending,
@Nullable Data lastEntryKey
) {
return new IndexIterationPointer(
(byte) ((descending ? FLAG_DESCENDING : 0)
| (fromInclusive ? FLAG_FROM_INCLUSIVE : 0)
| (toInclusive ? FLAG_TO_INCLUSIVE : 0)
| (from == to ? FLAG_POINT_LOOKUP : 0)),
from,
to,
lastEntryKey
);
} | @Test
void createBadSingleton() {
assertThatThrownBy(() -> create(5, true, 5, false, false, null))
.isInstanceOf(AssertionError.class).hasMessageContaining("Point lookup limits must be all inclusive");
assertThatThrownBy(() -> create(5, false, 5, true, false, null))
.isInstanceOf(AssertionError.class).hasMessageContaining("Point lookup limits must be all inclusive");
assertThatThrownBy(() -> create(5, false, 5, false, false, null))
.isInstanceOf(AssertionError.class).hasMessageContaining("Point lookup limits must be all inclusive");
} |
public static DataMap parseDataMapKeys(Map<String, List<String>> queryParameters) throws PathSegmentSyntaxException
{
// The parameters are parsed into an intermediary structure comprised of
// HashMap<String,Object> and HashMap<Integer,Object>, defined respectively
// as MapMap and ListMap for convenience. This is done for two reasons:
// - first, indexed keys representing lists are parsed into ListMaps keyed on
// index values, since the indices may come in any order in the query parameter,
// while we want to preserve the order.
// - second, DataMap only accepts Data objects as values, so ListMaps cannot
// be stored there, so using an intermediary structure even for maps.
MapMap dataMap = new MapMap();
for (Map.Entry<String, List<String>> entry : queryParameters.entrySet())
{
// As per the notation above, we no longer support multiple occurrences of
// a parameter (considering its full multi-part and indexed name), i.e
// there should be only a single entry in each list. For backward compatibility
// as well as ease of use, repeated parameters are still allowed if they
// are "simple", i.e. they are not multi-part or indexed.
List<String> valueList = entry.getValue();
if (valueList.size() == 1)
{
String[] key = SEGMENT_DELIMITER_PATTERN.split(entry.getKey());
parseParameter(key, valueList.get(0), dataMap);
}
else
{
String parameterName = entry.getKey();
// In case of multiple parameters ensure they are not delimited or
// indexed and then simulate the index for each one.
if(parameterName.indexOf('.') != -1)
throw new PathSegmentSyntaxException("Multiple values of complex query parameter are not supported");
if(parameterName.charAt(parameterName.length()-1) == ']')
throw new PathSegmentSyntaxException("Multiple values of indexed query parameter are not supported");
if(dataMap.containsKey(parameterName))
throw new PathSegmentSyntaxException("Conflicting references to key " + parameterName + "[0]");
else
{
dataMap.put(parameterName, new DataList(valueList));
}
}
}
return (DataMap)convertToDataCollection(dataMap);
} | @Test
public void testParseDataMapKeys() throws Exception {
String testQS = "ids[0].params.versionTag=tag1&ids[0].params.authToken=tok1&ids[0].memberID=1&ids[0].groupID=2&" +
"ids[1].params.versionTag=tag2&ids[1].params.authToken=tok2&ids[1].memberID=2&ids[1].groupID=2&" +
"q=someFinder";
/*
* Resulting DataMap:
*
* { q=someFinder,
* ids=[
* {groupID=2,
* params={
* versionTag=tag2,
* authToken=tok2
* },
* memberID=2
* },
*
* {groupID=2,
* params={
* versionTag=tag1,
* authToken=tok1
* },
* memberID=1
* }
* ]
* }
*/
DataMap queryParamDataMap = queryParamsDataMap(testQS);
Assert.assertNotNull(queryParamDataMap);
Assert.assertEquals("someFinder", queryParamDataMap.get("q"));
DataList ids = queryParamDataMap.getDataList("ids");
DataMap ids0 = ids.getDataMap(0);
Assert.assertEquals(ids0.get("memberID"), "1");
Assert.assertEquals(ids0.get("groupID"), "2");
DataMap params = ids0.getDataMap("params");
Assert.assertEquals(params.get("versionTag"), "tag1");
Assert.assertEquals(params.get("authToken"), "tok1");
ids0 = ids.getDataMap(1);
Assert.assertEquals(ids0.get("memberID"), "2");
Assert.assertEquals(ids0.get("groupID"), "2");
params = ids0.getDataMap("params");
Assert.assertEquals(params.get("versionTag"), "tag2");
Assert.assertEquals(params.get("authToken"), "tok2");
} |
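The multi-part key handling that the comments describe reduces, for the non-indexed case, to expanding dotted keys into nested maps. A minimal sketch, omitting index segments like "ids[0]" and the MapMap/ListMap intermediaries:

import java.util.LinkedHashMap;
import java.util.Map;

class DottedKeySketch {
    // "a.b.c=v" becomes {a={b={c=v}}}: walk the segments, creating maps as needed.
    @SuppressWarnings("unchecked")
    static void put(Map<String, Object> root, String dottedKey, String value) {
        String[] parts = dottedKey.split("\\.");
        Map<String, Object> current = root;
        for (int i = 0; i < parts.length - 1; i++) {
            current = (Map<String, Object>) current
                    .computeIfAbsent(parts[i], k -> new LinkedHashMap<String, Object>());
        }
        current.put(parts[parts.length - 1], value);
    }

    public static void main(String[] args) {
        Map<String, Object> root = new LinkedHashMap<>();
        put(root, "params.versionTag", "tag1");
        put(root, "params.authToken", "tok1");
        System.out.println(root); // {params={versionTag=tag1, authToken=tok1}}
    }
}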