focal_method | test_case
---|---
static String headerLine(CSVFormat csvFormat) {
return String.join(String.valueOf(csvFormat.getDelimiter()), csvFormat.getHeader());
} | @Test
public void givenIgnoreEmptyLines_shouldSkip() {
CSVFormat csvFormat = csvFormat().withIgnoreEmptyLines(true);
PCollection<String> input =
pipeline.apply(Create.of(headerLine(csvFormat), "a,1,1.1", "", "b,2,2.2", "", "c,3,3.3"));
CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
CsvIOParseResult<List<String>> result = input.apply(underTest);
PAssert.that(result.getOutput())
.containsInAnyOrder(
Arrays.asList(
Arrays.asList("a", "1", "1.1"),
Arrays.asList("b", "2", "2.2"),
Arrays.asList("c", "3", "3.3")));
PAssert.that(result.getErrors()).empty();
pipeline.run();
} |
public static String substVars(String val, PropertyContainer pc1) {
return substVars(val, pc1, null);
} | @Test
public void detectCircularReferences5() {
context.putProperty("A", "${B} and ${C}");
context.putProperty("B", "${B1}");
context.putProperty("B1", "B1-value");
context.putProperty("C", "${C1}");
context.putProperty("C1", "here's the loop: ${A}");
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Circular variable reference detected while parsing input [${A} --> ${C} --> ${C1} --> ${A}]");
String result = OptionHelper.substVars("${A}", context);
System.err.println(result);
} |
public static long parseDuration(final String propertyName, final String propertyValue)
{
final char lastCharacter = propertyValue.charAt(propertyValue.length() - 1);
if (Character.isDigit(lastCharacter))
{
return Long.parseLong(propertyValue);
}
if (lastCharacter != 's' && lastCharacter != 'S')
{
throw new NumberFormatException(
propertyName + ": " + propertyValue + " should end with: s, ms, us, or ns.");
}
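// value ends with 's'/'S': a digit before it means plain seconds; otherwise the preceding character selects a sub-second unit (n, u, m) below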
final char secondLastCharacter = propertyValue.charAt(propertyValue.length() - 2);
if (Character.isDigit(secondLastCharacter))
{
final long value = AsciiEncoding.parseLongAscii(propertyValue, 0, propertyValue.length() - 1);
return TimeUnit.SECONDS.toNanos(value);
}
final long value = AsciiEncoding.parseLongAscii(propertyValue, 0, propertyValue.length() - 2);
switch (secondLastCharacter)
{
case 'n':
case 'N':
return value;
case 'u':
case 'U':
return TimeUnit.MICROSECONDS.toNanos(value);
case 'm':
case 'M':
return TimeUnit.MILLISECONDS.toNanos(value);
default:
throw new NumberFormatException(
propertyName + ": " + propertyValue + " should end with: s, ms, us, or ns.");
}
} | @Test
void shouldParseTimesWithSuffix()
{
assertEquals(1L, parseDuration("", "1"));
assertEquals(1L, parseDuration("", "1ns"));
assertEquals(1L, parseDuration("", "1NS"));
assertEquals(1000L, parseDuration("", "1us"));
assertEquals(1000L, parseDuration("", "1US"));
assertEquals(1000L * 1000, parseDuration("", "1ms"));
assertEquals(1000L * 1000, parseDuration("", "1MS"));
assertEquals(1000L * 1000 * 1000, parseDuration("", "1s"));
assertEquals(1000L * 1000 * 1000, parseDuration("", "1S"));
assertEquals(12L * 1000 * 1000 * 1000, parseDuration("", "12S"));
} |
public static Map<String, String> parseUriPattern(String pattern, String url) {
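// strip any query string so only the path segments are compared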
int qpos = url.indexOf('?');
if (qpos != -1) {
url = url.substring(0, qpos);
}
List<String> leftList = StringUtils.split(pattern, '/', false);
List<String> rightList = StringUtils.split(url, '/', false);
int leftSize = leftList.size();
int rightSize = rightList.size();
if (rightSize != leftSize) {
return null;
}
Map<String, String> map = new LinkedHashMap<>(leftSize);
for (int i = 0; i < leftSize; i++) {
String left = leftList.get(i);
String right = rightList.get(i);
if (left.equals(right)) {
continue;
}
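// a "{name}" segment is a path variable: capture the concrete segment value under that name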
if (left.startsWith("{") && left.endsWith("}")) {
left = left.substring(1, left.length() - 1);
map.put(left, right);
} else {
return null; // match failed
}
}
return map;
} | @Test
void testParseUriPathPatterns() {
Map<String, String> map = HttpUtils.parseUriPattern("/cats/{id}", "/cats/1");
match(map, "{ id: '1' }");
map = HttpUtils.parseUriPattern("/cats/{id}/", "/cats/1"); // trailing slash
match(map, "{ id: '1' }");
map = HttpUtils.parseUriPattern("/cats/{id}", "/cats/1/"); // trailing slash
match(map, "{ id: '1' }");
map = HttpUtils.parseUriPattern("/cats/{id}", "/foo/bar");
match(map, null);
map = HttpUtils.parseUriPattern("/cats", "/cats/1"); // exact match
match(map, null);
map = HttpUtils.parseUriPattern("/{path}/{id}", "/cats/1");
match(map, "{ path: 'cats', id: '1' }");
map = HttpUtils.parseUriPattern("/cats/{id}/foo", "/cats/1/foo");
match(map, "{ id: '1' }");
map = HttpUtils.parseUriPattern("/api/{img}", "/api/billie.jpg");
match(map, "{ img: 'billie.jpg' }");
map = HttpUtils.parseUriPattern("/hello/{raw}", "/hello/�Ill~Formed@RequiredString!");
match(map, "{ raw: '�Ill~Formed@RequiredString!' }");
} |
@Override
public int ndoc() {
return docs.size();
} | @Test
public void testGetNumDocuments() {
System.out.println("getNumDocuments");
assertEquals(5000, corpus.ndoc());
} |
@Override
public void pluginJarRemoved(BundleOrPluginFileDetails bundleOrPluginFileDetails) {
GoPluginDescriptor existingDescriptor = registry.getPluginByIdOrFileName(null, bundleOrPluginFileDetails.file().getName());
if (existingDescriptor == null) {
return;
}
try {
LOGGER.info("Plugin removal starting: {}", bundleOrPluginFileDetails.file());
boolean externalPlugin = !bundleOrPluginFileDetails.isBundledPlugin();
boolean bundledPlugin = existingDescriptor.isBundledPlugin();
boolean externalPluginWithSameIdAsBundledPlugin = bundledPlugin && externalPlugin;
if (externalPluginWithSameIdAsBundledPlugin) {
LOGGER.info("External Plugin file '{}' having same name as bundled plugin file has been removed. Refusing to unload bundled plugin with id: '{}'", bundleOrPluginFileDetails.file(), existingDescriptor.id());
return;
}
removePlugin(existingDescriptor.bundleDescriptor());
} finally {
LOGGER.info("Plugin removal finished: {}", bundleOrPluginFileDetails.file());
}
} | @Test
void shouldRemovePluginFromBundlePathAndInformRegistryWhenAPluginIsRemoved() throws Exception {
File pluginJarFile = new File(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
File removedBundleDirectory = new File(bundleDir, PLUGIN_JAR_FILE_NAME);
String pluginJarFileLocation = pluginJarFile.getAbsolutePath();
final GoPluginDescriptor descriptorOfThePluginWhichWillBeRemoved = GoPluginDescriptor.builder()
.id("testplugin.descriptorValidator")
.bundleLocation(removedBundleDirectory)
.pluginJarFileLocation(pluginJarFileLocation)
.isBundledPlugin(true)
.build();
GoPluginBundleDescriptor descriptorOfThePluginBundleWhichWillBeRemoved = new GoPluginBundleDescriptor(descriptorOfThePluginWhichWillBeRemoved);
when(registry.getPluginByIdOrFileName(null, descriptorOfThePluginWhichWillBeRemoved.fileName())).thenReturn(descriptorOfThePluginWhichWillBeRemoved);
when(registry.unloadPlugin(descriptorOfThePluginBundleWhichWillBeRemoved)).thenReturn(descriptorOfThePluginBundleWhichWillBeRemoved);
copyPluginToTheDirectory(bundleDir, PLUGIN_JAR_FILE_NAME);
listener.pluginJarRemoved(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir));
verify(registry).unloadPlugin(descriptorOfThePluginBundleWhichWillBeRemoved);
verify(pluginLoader).unloadPlugin(descriptorOfThePluginBundleWhichWillBeRemoved);
assertThat(removedBundleDirectory).doesNotExist();
} |
static List<String> parseEtcResolverSearchDomains() throws IOException {
return parseEtcResolverSearchDomains(new File(ETC_RESOLV_CONF_FILE));
} | @Test
public void searchDomainsPrecedence(@TempDir Path tempDir) throws IOException {
File f = buildFile(tempDir, "domain linecorp.local\n" +
"search squarecorp.local\n" +
"nameserver 127.0.0.2\n");
List<String> domains = UnixResolverDnsServerAddressStreamProvider.parseEtcResolverSearchDomains(f);
assertEquals(Collections.singletonList("squarecorp.local"), domains);
} |
@Override
public Iterator<E> iterator() {
return new ElementIterator();
} | @Test
public void testIteratorHasNextReturnsTrueIfNotIteratedOverAll() {
final OAHashSet<Integer> set = new OAHashSet<>(8);
populateSet(set, 2);
final Iterator<Integer> iterator = set.iterator();
iterator.next();
assertTrue(iterator.hasNext());
} |
public static Set<Metric> mapFromDataProvider(TelemetryDataProvider<?> provider) {
switch (provider.getDimension()) {
case INSTALLATION -> {
return mapInstallationMetric(provider);
} case PROJECT -> {
return mapProjectMetric(provider);
} case USER -> {
return mapUserMetric(provider);
} case LANGUAGE -> {
return mapLanguageMetric(provider);
} default -> throw new IllegalArgumentException("Dimension: " + provider.getDimension() + " not yet implemented.");
}
} | @Test
void mapFromDataProvider_whenLanguageProvider() {
TelemetryDataProvider<String> provider = new TestTelemetryBean(Dimension.LANGUAGE);
Set<Metric> metrics = TelemetryMetricsMapper.mapFromDataProvider(provider);
List<LanguageMetric> list = retrieveList(metrics);
assertThat(list)
.extracting(LanguageMetric::getKey, LanguageMetric::getType, LanguageMetric::getLanguage, LanguageMetric::getValue, LanguageMetric::getGranularity)
.containsExactlyInAnyOrder(
expected()
);
} |
@Override
public FileObject[] findJarFiles() throws KettleFileException {
return findJarFiles( searchLibDir );
} | @Test
public void testFindJarFiles_DirWithKettleIgnoreFileIgnored() throws IOException, KettleFileException {
Files.createDirectories( PATH_TO_TEST_DIR_NAME );
Files.createFile( PATH_TO_JAR_FILE2 );
Files.createFile( PATH_TO_KETTLE_IGNORE_FILE );
FileObject[] findJarFiles = plFolder.findJarFiles();
assertNotNull( findJarFiles );
assertEquals( 0, findJarFiles.length );
} |
public static UForAll create(List<UTypeVar> typeVars, UType quantifiedType) {
return new AutoValue_UForAll(ImmutableList.copyOf(typeVars), quantifiedType);
} | @Test
public void equality() {
UType objectType = UClassType.create("java.lang.Object", ImmutableList.<UType>of());
UTypeVar eType = UTypeVar.create("E", objectType);
UTypeVar tType = UTypeVar.create("T", objectType);
UType listOfEType = UClassType.create("java.util.List", ImmutableList.<UType>of(eType));
new EqualsTester()
.addEqualityGroup(UForAll.create(ImmutableList.of(eType), eType)) // <E> E
.addEqualityGroup(UForAll.create(ImmutableList.of(eType), listOfEType)) // <E> List<E>
.addEqualityGroup(UForAll.create(ImmutableList.of(tType), tType)) // <T> T
.testEquals();
} |
public Map<String, HivePartitionStats> getPartitionStatistics(Table table, List<String> partitionNames) {
String catalogName = ((HiveMetaStoreTable) table).getCatalogName();
String dbName = ((HiveMetaStoreTable) table).getDbName();
String tblName = ((HiveMetaStoreTable) table).getTableName();
List<HivePartitionName> hivePartitionNames = partitionNames.stream()
.map(partitionName -> HivePartitionName.of(dbName, tblName, partitionName))
.peek(hivePartitionName -> checkState(hivePartitionName.getPartitionNames().isPresent(),
"partition name is missing"))
.collect(Collectors.toList());
Map<String, HivePartitionStats> partitionStats;
if (enableCatalogLevelCache) {
partitionStats = metastore.getPresentPartitionsStatistics(hivePartitionNames);
if (partitionStats.size() == partitionNames.size()) {
return partitionStats;
}
String backgroundThreadName = String.format(BACKGROUND_THREAD_NAME_PREFIX + "%s-%s-%s",
catalogName, dbName, tblName);
executeInNewThread(backgroundThreadName, () -> metastore.getPartitionStatistics(table, partitionNames));
} else {
partitionStats = metastore.getPartitionStatistics(table, partitionNames);
}
return partitionStats;
} | @Test
public void testGetPartitionStatistics() {
com.starrocks.catalog.Table hiveTable = hmsOps.getTable("db1", "table1");
Map<String, HivePartitionStats> statistics = hmsOps.getPartitionStatistics(
hiveTable, Lists.newArrayList("col1=1", "col1=2"));
Assert.assertEquals(0, statistics.size());
cachingHiveMetastore.getPartitionStatistics(hiveTable, Lists.newArrayList("col1=1", "col1=2"));
statistics = hmsOps.getPartitionStatistics(
hiveTable, Lists.newArrayList("col1=1", "col1=2"));
HivePartitionStats stats1 = statistics.get("col1=1");
HiveCommonStats commonStats1 = stats1.getCommonStats();
Assert.assertEquals(50, commonStats1.getRowNums());
Assert.assertEquals(100, commonStats1.getTotalFileBytes());
HiveColumnStats columnStatistics1 = stats1.getColumnStats().get("col2");
Assert.assertEquals(0, columnStatistics1.getTotalSizeBytes());
Assert.assertEquals(1, columnStatistics1.getNumNulls());
Assert.assertEquals(2, columnStatistics1.getNdv());
HivePartitionStats stats2 = statistics.get("col1=2");
HiveCommonStats commonStats2 = stats2.getCommonStats();
Assert.assertEquals(50, commonStats2.getRowNums());
Assert.assertEquals(100, commonStats2.getTotalFileBytes());
HiveColumnStats columnStatistics2 = stats2.getColumnStats().get("col2");
Assert.assertEquals(0, columnStatistics2.getTotalSizeBytes());
Assert.assertEquals(2, columnStatistics2.getNumNulls());
Assert.assertEquals(5, columnStatistics2.getNdv());
} |
@Override
protected Release findLatestActiveRelease(String appId, String clusterName, String namespaceName,
ApolloNotificationMessages clientMessages) {
String messageKey = ReleaseMessageKeyGenerator.generate(appId, clusterName, namespaceName);
String cacheKey = messageKey;
if (bizConfig.isConfigServiceCacheKeyIgnoreCase()) {
cacheKey = cacheKey.toLowerCase();
}
Tracer.logEvent(TRACER_EVENT_CACHE_GET, cacheKey);
ConfigCacheEntry cacheEntry = configCache.getUnchecked(cacheKey);
//cache is out-dated
if (clientMessages != null && clientMessages.has(messageKey) &&
clientMessages.get(messageKey) > cacheEntry.getNotificationId()) {
//invalidate the cache and try to load from db again
invalidate(cacheKey);
cacheEntry = configCache.getUnchecked(cacheKey);
}
return cacheEntry.getRelease();
} | @Test
public void testFindLatestActiveReleaseWithReleaseNotFound() throws Exception {
when(releaseMessageService.findLatestReleaseMessageForMessages(Lists.newArrayList(someKey))).thenReturn(null);
when(releaseService.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName)).thenReturn(null);
Release release = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName,
someNotificationMessages);
Release anotherRelease = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName,
someNamespaceName, someNotificationMessages);
int retryTimes = 100;
for (int i = 0; i < retryTimes; i++) {
configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName,
someNamespaceName, someNotificationMessages);
}
assertNull(release);
assertNull(anotherRelease);
verify(releaseMessageService, times(1)).findLatestReleaseMessageForMessages(Lists.newArrayList(someKey));
verify(releaseService, times(1)).findLatestActiveRelease(someAppId, someClusterName, someNamespaceName);
} |
public boolean updateQuota(String tenant, Integer quota) {
return updateTenantCapacity(tenant, quota, null, null, null);
} | @Test
void testUpdateQuota() {
List<Object> argList = CollectionUtils.list();
Integer quota = 2;
argList.add(quota);
String tenant = "test2";
argList.add(tenant);
when(jdbcTemplate.update(anyString(), any(Object.class))).thenAnswer((Answer<Integer>) invocationOnMock -> {
if (invocationOnMock.getArgument(1).equals(quota) && invocationOnMock.getArgument(3).equals(tenant)) {
return 1;
}
return 0;
});
assertTrue(service.updateQuota(tenant, quota));
//mock get connection fail
when(jdbcTemplate.update(anyString(), any(Object.class))).thenThrow(new CannotGetJdbcConnectionException("conn fail"));
try {
service.updateQuota(tenant, quota);
assertTrue(false);
} catch (Exception e) {
assertEquals("conn fail", e.getMessage());
}
} |
public void addCwe(String cwe) {
if (cwe != null) {
this.cwes.add(cwe);
}
} | @Test
public void testAddCwe() {
System.out.println("addCwe");
String cwe = "CWE-89";
CweSet instance = new CweSet();
instance.addCwe(cwe);
assertFalse(instance.getEntries().isEmpty());
} |
public AppResponse startFlow(String flowName, Action action, AppRequest request) throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException, SharedServiceClientException {
Flow flow = flowFactoryFactory.getFactory(ActivationFlowFactory.TYPE).getFlow(flowName);
AbstractFlowStep flowStep = flowFactoryFactory.getFactory(ActivationFlowFactory.TYPE).getStep(action);
AppResponse appResponse = flow.processState(flowStep, request);
if (appResponse instanceof NokResponse || !flowStep.isValid()) {
return appResponse;
}
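// the step succeeded: advance the session to the flow's next state and persist it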
AppSession appSession = flowStep.getAppSession();
appSession.setState(getStateName(flow.getNextState(State.valueOf(appSession.getState().toUpperCase()), action)));
appSessionService.save(appSession);
return appResponse;
} | @Test
void startFlowNOKTest() throws FlowNotDefinedException, FlowStateNotDefinedException, IOException, NoSuchAlgorithmException, SharedServiceClientException {
AppSession appSession = new AppSession();
appSession.setState(State.INITIALIZED.name());
appSession.setActivationMethod(ActivationMethod.SMS);
AbstractFlowStep flowStep = mock(AbstractFlowStep.class);
Flow flow = mock(Flow.class);
when(flowFactoryFactory.getFactory(any())).thenReturn(flowFactory);
when(flowFactory.getFlow(any())).thenReturn(flow);
when(flowFactory.getStep(any())).thenReturn(flowStep);
when(flow.processState(any(), any())).thenReturn(new NokResponse());
AppResponse result = flowService.startFlow(UndefinedFlow.NAME, Action.CONFIRM_PASSWORD, new ActivationUsernamePasswordRequest());
assertEquals(State.INITIALIZED.name(), appSession.getState());
assertNull(appSession.getFlow());
assertTrue(result instanceof NokResponse);
} |
public static Expression convert(Predicate[] predicates) {
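// fold all predicates into a single conjunction, starting from the identity alwaysTrue()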
Expression expression = Expressions.alwaysTrue();
for (Predicate predicate : predicates) {
Expression converted = convert(predicate);
Preconditions.checkArgument(
converted != null, "Cannot convert Spark predicate to Iceberg expression: %s", predicate);
expression = Expressions.and(expression, converted);
}
return expression;
} | @Test
public void testNotIn() {
NamedReference namedReference = FieldReference.apply("col");
LiteralValue v1 = new LiteralValue(1, DataTypes.IntegerType);
LiteralValue v2 = new LiteralValue(2, DataTypes.IntegerType);
org.apache.spark.sql.connector.expressions.Expression[] attrAndValue =
new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, v1, v2};
Predicate in = new Predicate("IN", attrAndValue);
Not not = new Not(in);
Expression actual = SparkV2Filters.convert(not);
Expression expected =
Expressions.and(Expressions.notNull("col"), Expressions.notIn("col", 1, 2));
Assert.assertEquals("Expressions should match", expected.toString(), actual.toString());
} |
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
} | @Test
public void shouldNotCastIntTooBig() {
// When:
final Exception e = assertThrows(
ArithmeticException.class,
() -> cast(10, 2, 1)
);
// Then:
assertThat(e.getMessage(), containsString("Numeric field overflow"));
} |
@Override
public void setConfig(RedisClusterNode node, String param, String value) {
RedisClient entry = getEntry(node);
RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
syncFuture(f);
} | @Test
public void testSetConfig() {
RedisClusterNode master = getFirstMaster();
connection.setConfig(master, "timeout", "10");
} |
public Object resolve(final Expression expression) {
return new Visitor().process(expression, null);
} | @Test
public void shouldResolveArbitraryExpressions() {
// Given:
final SqlType type = SqlTypes.struct().field("FOO", SqlTypes.STRING).build();
final Expression exp = new CreateStructExpression(ImmutableList.of(
new Field("FOO", new FunctionCall(
FunctionName.of("CONCAT"),
ImmutableList.of(
new StringLiteral("bar"),
new StringLiteral("baz"))
))
));
// When:
final Object o = new GenericExpressionResolver(type, FIELD_NAME, registry, config,
"insert value", false).resolve(exp);
// Then:
assertThat(o, is(new Struct(
SchemaBuilder.struct().field("FOO", Schema.OPTIONAL_STRING_SCHEMA).optional().build()
).put("FOO", "barbaz")));
} |
@Override
protected void runTask() {
LOGGER.debug("Updating currently processed jobs... ");
convertAndProcessJobs(new ArrayList<>(backgroundJobServer.getJobSteward().getJobsInProgress()), this::updateCurrentlyProcessingJob);
} | @Test
void jobsThatAreBeingProcessedButHaveBeenDeletedViaDashboardWillBeInterrupted() {
// GIVEN
final Job job = anEnqueuedJob().withId().build();
doThrow(new ConcurrentJobModificationException(job)).when(storageProvider).save(singletonList(job));
when(storageProvider.getJobById(job.getId())).thenReturn(aCopyOf(job).withDeletedState().build());
final Thread threadMock = startProcessingJobAndReturnThread(job);
// WHEN
runTask(task);
// THEN
assertThat(logger).hasNoWarnLogMessages();
assertThat(job).hasState(DELETED);
verify(storageProvider).save(singletonList(job));
verify(threadMock).interrupt();
} |
public Schema getSchema() {
return context.getSchema();
} | @Test
public void testRepeatedSchema() {
ProtoDynamicMessageSchema schemaProvider =
schemaFromDescriptor(RepeatPrimitive.getDescriptor());
Schema schema = schemaProvider.getSchema();
assertEquals(REPEATED_SCHEMA, schema);
} |
@Override
public void createEvents(EventFactory eventFactory, EventProcessorParameters processorParameters, EventConsumer<List<EventWithContext>> eventsConsumer) throws EventProcessorException {
final AggregationEventProcessorParameters parameters = (AggregationEventProcessorParameters) processorParameters;
// TODO: We have to take the Elasticsearch index.refresh_interval into account here!
if (!dependencyCheck.hasMessagesIndexedUpTo(parameters.timerange())) {
final String msg = String.format(Locale.ROOT, "Couldn't run aggregation <%s/%s> for timerange <%s to %s> because required messages haven't been indexed, yet.",
eventDefinition.title(), eventDefinition.id(), parameters.timerange().getFrom(), parameters.timerange().getTo());
throw new EventProcessorPreconditionException(msg, eventDefinition);
}
LOG.debug("Creating events for config={} parameters={}", config, parameters);
// The absence of a series indicates that the user doesn't want to do an aggregation but create events from
// a simple search query. (one message -> one event)
try {
if (config.series().isEmpty()) {
filterSearch(eventFactory, parameters, eventsConsumer);
} else {
aggregatedSearch(eventFactory, parameters, eventsConsumer);
}
} catch (SearchException e) {
if (e.error() instanceof ParameterExpansionError) {
final String msg = String.format(Locale.ROOT, "Couldn't run aggregation <%s/%s> because parameters failed to expand: %s",
eventDefinition.title(), eventDefinition.id(), e.error().description());
LOG.error(msg);
throw new EventProcessorPreconditionException(msg, eventDefinition, e);
}
} catch (ElasticsearchException e) {
final String msg = String.format(Locale.ROOT, "Couldn't run aggregation <%s/%s> because of search error: %s",
eventDefinition.title(), eventDefinition.id(), e.getMessage());
LOG.error(msg);
throw new EventProcessorPreconditionException(msg, eventDefinition, e);
}
// Update the state for this processor! This state will be used for dependency checks between event processors.
stateService.setState(eventDefinition.id(), parameters.timerange().getFrom(), parameters.timerange().getTo());
} | @Test
public void createEventsWithFilter() throws Exception {
when(eventProcessorDependencyCheck.hasMessagesIndexedUpTo(any(TimeRange.class))).thenReturn(true);
final DateTime now = DateTime.now(DateTimeZone.UTC);
final AbsoluteRange timerange = AbsoluteRange.create(now.minusHours(1), now.minusHours(1).plusMillis(SEARCH_WINDOW_MS));
final AggregationEventProcessorConfig config = AggregationEventProcessorConfig.builder()
.query(QUERY_STRING)
.streams(ImmutableSet.of())
.groupBy(ImmutableList.of())
.series(ImmutableList.of())
.conditions(null)
.searchWithinMs(SEARCH_WINDOW_MS)
.executeEveryMs(SEARCH_WINDOW_MS)
.build();
final EventDefinitionDto eventDefinitionDto = buildEventDefinitionDto(ImmutableSet.of(), ImmutableList.of(), null, emptyList());
final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder()
.timerange(timerange)
.build();
final AggregationEventProcessor eventProcessor = new AggregationEventProcessor(eventDefinitionDto, searchFactory,
eventProcessorDependencyCheck, stateService, moreSearch, eventStreamService, messages, notificationService, permittedStreams, Set.of(), messageFactory);
assertThatCode(() -> eventProcessor.createEvents(eventFactory, parameters, (events) -> {})).doesNotThrowAnyException();
verify(moreSearch, times(1)).scrollQuery(
eq(config.query()),
eq(ImmutableSet.of("stream-3", "stream-2", "stream-1", "000000000000000000000001")),
eq(emptyList()),
eq(config.queryParameters()),
eq(parameters.timerange()),
eq(parameters.batchSize()),
any(MoreSearch.ScrollCallback.class)
);
verify(searchFactory, never()).create(eq(config), eq(parameters), any(AggregationSearch.User.class), eq(eventDefinitionDto), eq(List.of()));
} |
public static double log2(double x) {
return Math.log(x) / LOG2;
} | @Test
public void testLog2() {
System.out.println("log2");
assertEquals(0, MathEx.log2(1), 1E-6);
assertEquals(1, MathEx.log2(2), 1E-6);
assertEquals(1.584963, MathEx.log2(3), 1E-6);
assertEquals(2, MathEx.log2(4), 1E-6);
} |
@Override
public CompletableFuture<T> getFuture() {
final CompletableFuture<T> currentGatewayFuture = atomicGatewayFuture.get();
if (currentGatewayFuture.isCompletedExceptionally()) {
try {
currentGatewayFuture.get();
} catch (ExecutionException | InterruptedException executionException) {
String leaderAddress;
try {
Tuple2<String, UUID> leaderAddressSessionId =
getLeaderNow()
.orElse(
Tuple2.of(
"unknown address",
HighAvailabilityServices.DEFAULT_LEADER_ID));
leaderAddress = leaderAddressSessionId.f0;
} catch (Exception e) {
log.warn("Could not obtain the current leader.", e);
leaderAddress = "unknown leader address";
}
if (log.isDebugEnabled() || log.isTraceEnabled()) {
// only log exceptions on debug or trace level
log.warn(
"Error while retrieving the leader gateway. Retrying to connect to {}.",
leaderAddress,
ExceptionUtils.stripExecutionException(executionException));
} else {
log.warn(
"Error while retrieving the leader gateway. Retrying to connect to {}.",
leaderAddress);
}
}
// we couldn't resolve the gateway --> let's try again
final CompletableFuture<T> newGatewayFuture = createGateway(getLeaderFuture());
// let's check if there was a concurrent createNewFuture call
if (atomicGatewayFuture.compareAndSet(currentGatewayFuture, newGatewayFuture)) {
return newGatewayFuture;
} else {
return atomicGatewayFuture.get();
}
} else {
return atomicGatewayFuture.get();
}
} | @Test
void testGatewayRetrievalFailures() throws Exception {
final String address = "localhost";
final UUID leaderId = UUID.randomUUID();
RpcGateway rpcGateway = TestingRpcGateway.newBuilder().build();
TestingLeaderGatewayRetriever leaderGatewayRetriever =
new TestingLeaderGatewayRetriever(rpcGateway);
SettableLeaderRetrievalService settableLeaderRetrievalService =
new SettableLeaderRetrievalService();
settableLeaderRetrievalService.start(leaderGatewayRetriever);
CompletableFuture<RpcGateway> gatewayFuture = leaderGatewayRetriever.getFuture();
// this triggers the first gateway retrieval attempt
settableLeaderRetrievalService.notifyListener(address, leaderId);
FlinkAssertions.assertThatFuture(gatewayFuture)
.as("The first future should have been failed.")
.eventuallyFailsWith(ExecutionException.class);
// the second attempt should fail as well
assertThat((leaderGatewayRetriever.getNow())).isNotPresent();
// the third attempt should succeed
assertThat(leaderGatewayRetriever.getNow()).hasValue(rpcGateway);
} |
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
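// first pass over the existing ACL: entries also present in the spec are replacements; mask entries are tracked separately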
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
} | @Test(expected=AclException.class)
public void testMergeAclEntriesResultTooLarge() throws AclException {
ImmutableList.Builder<AclEntry> aclBuilder =
new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL));
for (int i = 1; i <= 28; ++i) {
aclBuilder.add(aclEntry(ACCESS, USER, "user" + i, READ));
}
aclBuilder
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, NONE));
List<AclEntry> existing = aclBuilder.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
mergeAclEntries(existing, aclSpec);
} |
@Override
protected void handleRemove(final String listenTo)
{
ClusterInfoItem clusterInfoRemoved = _simpleLoadBalancerState.getClusterInfo().remove(listenTo);
_simpleLoadBalancerState.notifyListenersOnClusterInfoRemovals(clusterInfoRemoved);
_simpleLoadBalancerState.notifyClusterListenersOnRemove(listenTo);
} | @Test
public void testHandleRemove()
{
String clusterName = "mock-cluster-foo";
ClusterLoadBalancerSubscriberFixture fixture = new ClusterLoadBalancerSubscriberFixture();
ClusterInfoItem clusterInfoItemToRemove =
new ClusterInfoItem(fixture._simpleLoadBalancerState, new ClusterProperties(clusterName),
new PartitionAccessor() {
@Override
public int getMaxPartitionId() {
return 0;
}
@Override
public int getPartitionId(URI uri) {
return 0;
}
}, CanaryDistributionProvider.Distribution.CANARY);
fixture._simpleLoadBalancerState.getClusterInfo().put(clusterName, clusterInfoItemToRemove);
fixture.getMockSubscriber(false).handleRemove(clusterName);
Assert.assertFalse(fixture._simpleLoadBalancerState.getClusterInfo().containsKey(clusterName));
verify(fixture._simpleLoadBalancerState, times(1)).notifyListenersOnClusterInfoRemovals(
clusterInfoItemToRemove
);
verify(fixture._simpleLoadBalancerState, times(1)).notifyClusterListenersOnRemove(
clusterName
);
} |
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
} | @Test
public void shouldThrowOnAmbiguousImplicitCastWithoutGenerics() {
// Given:
givenFunctions(
function(FIRST_FUNC, -1, LONG, LONG),
function(SECOND_FUNC, -1, DOUBLE, DOUBLE)
);
// When:
final KsqlException e = assertThrows(KsqlException.class,
() -> udfIndex
.getFunction(ImmutableList.of(SqlArgument.of(INTEGER), SqlArgument.of(BIGINT))));
// Then:
assertThat(e.getMessage(), containsString("Function 'name' cannot be resolved due " +
"to ambiguous method parameters "
+ "(INTEGER, BIGINT)"));
} |
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = MinionConstants.UpsertCompactionTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
if (!validate(tableConfig)) {
LOGGER.warn("Validation failed for table {}. Skipping..", tableConfig.getTableName());
continue;
}
String tableNameWithType = tableConfig.getTableName();
LOGGER.info("Start generating task configs for table: {}", tableNameWithType);
if (tableConfig.getTaskConfig() == null) {
LOGGER.warn("Task config is null for table: {}", tableNameWithType);
continue;
}
Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
List<SegmentZKMetadata> allSegments = _clusterInfoAccessor.getSegmentsZKMetadata(tableNameWithType);
// Get completed segments and filter out the segments based on the buffer time configuration
List<SegmentZKMetadata> completedSegments =
getCompletedSegments(taskConfigs, allSegments, System.currentTimeMillis());
if (completedSegments.isEmpty()) {
LOGGER.info("No completed segments were eligible for compaction for table: {}", tableNameWithType);
continue;
}
// Only schedule 1 task of this type, per table
Map<String, TaskState> incompleteTasks =
TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType, _clusterInfoAccessor);
if (!incompleteTasks.isEmpty()) {
LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.",
incompleteTasks.keySet(), tableNameWithType, taskType);
continue;
}
// get server to segment mappings
PinotHelixResourceManager pinotHelixResourceManager = _clusterInfoAccessor.getPinotHelixResourceManager();
Map<String, List<String>> serverToSegments = pinotHelixResourceManager.getServerToSegmentsMap(tableNameWithType);
BiMap<String, String> serverToEndpoints;
try {
serverToEndpoints = pinotHelixResourceManager.getDataInstanceAdminEndpoints(serverToSegments.keySet());
} catch (InvalidConfigException e) {
throw new RuntimeException(e);
}
ServerSegmentMetadataReader serverSegmentMetadataReader =
new ServerSegmentMetadataReader(_clusterInfoAccessor.getExecutor(),
_clusterInfoAccessor.getConnectionManager());
// By default, we use 'snapshot' for validDocIdsType. This means that we will use the validDocIds bitmap from
// the snapshot from Pinot segment. This will require 'enableSnapshot' from UpsertConfig to be set to true.
String validDocIdsTypeStr =
taskConfigs.getOrDefault(UpsertCompactionTask.VALID_DOC_IDS_TYPE, ValidDocIdsType.SNAPSHOT.toString());
ValidDocIdsType validDocIdsType = ValidDocIdsType.valueOf(validDocIdsTypeStr.toUpperCase());
// Number of segments to query per server request. If a table has a lot of segments, then we might send a
// huge payload to pinot-server in request. Batching the requests will help in reducing the payload size.
int numSegmentsBatchPerServerRequest = Integer.parseInt(
taskConfigs.getOrDefault(UpsertCompactionTask.NUM_SEGMENTS_BATCH_PER_SERVER_REQUEST,
String.valueOf(DEFAULT_NUM_SEGMENTS_BATCH_PER_SERVER_REQUEST)));
// Validate that the snapshot is enabled if validDocIdsType is validDocIdsSnapshot
if (validDocIdsType == ValidDocIdsType.SNAPSHOT) {
UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask");
Preconditions.checkState(upsertConfig.isEnableSnapshot(), String.format(
"'enableSnapshot' from UpsertConfig must be enabled for UpsertCompactionTask with validDocIdsType = %s",
validDocIdsType));
} else if (validDocIdsType == ValidDocIdsType.IN_MEMORY_WITH_DELETE) {
UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask");
Preconditions.checkNotNull(upsertConfig.getDeleteRecordColumn(),
String.format("deleteRecordColumn must be provided for " + "UpsertCompactionTask with validDocIdsType = %s",
validDocIdsType));
}
Map<String, List<ValidDocIdsMetadataInfo>> validDocIdsMetadataList =
serverSegmentMetadataReader.getSegmentToValidDocIdsMetadataFromServer(tableNameWithType, serverToSegments,
serverToEndpoints, null, 60_000, validDocIdsType.toString(), numSegmentsBatchPerServerRequest);
Map<String, SegmentZKMetadata> completedSegmentsMap =
completedSegments.stream().collect(Collectors.toMap(SegmentZKMetadata::getSegmentName, Function.identity()));
SegmentSelectionResult segmentSelectionResult =
processValidDocIdsMetadata(taskConfigs, completedSegmentsMap, validDocIdsMetadataList);
if (!segmentSelectionResult.getSegmentsForDeletion().isEmpty()) {
pinotHelixResourceManager.deleteSegments(tableNameWithType, segmentSelectionResult.getSegmentsForDeletion(),
"0d");
LOGGER.info(
"Deleted segments containing only invalid records for table: {}, number of segments to be deleted: {}",
tableNameWithType, segmentSelectionResult.getSegmentsForDeletion());
}
int numTasks = 0;
int maxTasks = getMaxTasks(taskType, tableNameWithType, taskConfigs);
for (SegmentZKMetadata segment : segmentSelectionResult.getSegmentsForCompaction()) {
if (numTasks == maxTasks) {
break;
}
if (StringUtils.isBlank(segment.getDownloadUrl())) {
LOGGER.warn("Skipping segment {} for task {} as download url is empty", segment.getSegmentName(), taskType);
continue;
}
Map<String, String> configs = new HashMap<>(getBaseTaskConfigs(tableConfig, List.of(segment.getSegmentName())));
configs.put(MinionConstants.DOWNLOAD_URL_KEY, segment.getDownloadUrl());
configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments");
configs.put(MinionConstants.ORIGINAL_SEGMENT_CRC_KEY, String.valueOf(segment.getCrc()));
configs.put(UpsertCompactionTask.VALID_DOC_IDS_TYPE, validDocIdsType.toString());
pinotTaskConfigs.add(new PinotTaskConfig(UpsertCompactionTask.TASK_TYPE, configs));
numTasks++;
}
LOGGER.info("Finished generating {} tasks configs for table: {}", numTasks, tableNameWithType);
}
return pinotTaskConfigs;
} | @Test
public void testGenerateTasksWithNoSegments() {
when(_mockClusterInfoAccessor.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(
Lists.newArrayList(Collections.emptyList()));
when(_mockClusterInfoAccessor.getIdealState(REALTIME_TABLE_NAME)).thenReturn(
getIdealState(REALTIME_TABLE_NAME, Lists.newArrayList(Collections.emptyList())));
_taskGenerator.init(_mockClusterInfoAccessor);
List<PinotTaskConfig> pinotTaskConfigs = _taskGenerator.generateTasks(Lists.newArrayList(_tableConfig));
assertEquals(pinotTaskConfigs.size(), 0);
} |
public boolean isFreshRun() {
return currentPolicy == null || currentPolicy.isFreshRun();
} | @Test
public void testIsFreshRun() {
RunRequest runRequest =
RunRequest.builder()
.initiator(new ManualInitiator())
.currentPolicy(RestartPolicy.RESTART_FROM_BEGINNING)
.build();
Assert.assertFalse(runRequest.isFreshRun());
runRequest =
RunRequest.builder()
.initiator(new ManualInitiator())
.currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
.build();
Assert.assertTrue(runRequest.isFreshRun());
} |
@Override
public void close() throws UnavailableException {
// JournalContext is closed before block deletion context so that file system master changes
// are written before block master changes. If a failure occurs between deleting an inode and
// remove its blocks, it's better to have an orphaned block than an inode with a missing block.
closeQuietly(mJournalContext);
closeQuietly(mBlockDeletionContext);
if (mThrown != null) {
Throwables.propagateIfPossible(mThrown, UnavailableException.class);
throw new RuntimeException(mThrown);
}
} | @Test
public void order() throws Throwable {
List<Object> order = new ArrayList<>();
doAnswer(unused -> order.add(mMockJC)).when(mMockJC).close();
doAnswer(unused -> order.add(mMockBDC)).when(mMockBDC).close();
mRpcContext.close();
assertEquals(Arrays.asList(mMockJC, mMockBDC), order);
} |
public synchronized LogAction record(double... values) {
return record(DEFAULT_RECORDER_NAME, timer.monotonicNow(), values);
} | @Test
public void testInfrequentPrimaryAndDependentLoggers() {
helper = new LogThrottlingHelper(LOG_PERIOD, "foo", timer);
assertTrue(helper.record("foo", 0).shouldLog());
assertTrue(helper.record("bar", 0).shouldLog());
// Both should log once the period has elapsed
assertTrue(helper.record("foo", LOG_PERIOD).shouldLog());
assertTrue(helper.record("bar", LOG_PERIOD).shouldLog());
} |
public static <T> T[] reverse(T[] array, final int startIndexInclusive, final int endIndexExclusive) {
if (isEmpty(array)) {
return array;
}
int i = Math.max(startIndexInclusive, 0);
int j = Math.min(array.length, endIndexExclusive) - 1;
T tmp;
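// swap elements from both ends, moving inward until the indices meet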
while (j > i) {
tmp = array[j];
array[j] = array[i];
array[i] = tmp;
j--;
i++;
}
return array;
} | @Test
public void reverseTest2s() {
Object[] a = {"1", '2', "3", 4};
final Object[] reverse = ArrayUtil.reverse(a);
assertArrayEquals(new Object[]{4, "3", '2', "1"}, reverse);
} |
@Override
public int[] computeMatchingLines(Component component) {
List<String> database = getDBLines(component);
List<String> report = getReportLines(component);
return new SourceLinesDiffFinder().findMatchingLines(database, report);
} | @Test
void all_file_is_modified_if_no_source_in_db() {
periodHolder.setPeriod(null);
Component component = fileComponent(FILE_REF);
setLineHashesInReport(component, CONTENT);
assertThat(underTest.computeMatchingLines(component)).containsExactly(0, 0, 0, 0, 0, 0, 0);
} |
@Override
@Cacheable(cacheNames = RedisKeyConstants.SMS_TEMPLATE, key = "#code",
unless = "#result == null")
public SmsTemplateDO getSmsTemplateByCodeFromCache(String code) {
return smsTemplateMapper.selectByCode(code);
} | @Test
public void testGetSmsTemplateByCodeFromCache() {
// mock data
SmsTemplateDO dbSmsTemplate = randomSmsTemplateDO();
smsTemplateMapper.insert(dbSmsTemplate);// @Sql: insert an existing record first
// prepare parameters
String code = dbSmsTemplate.getCode();
// invoke
SmsTemplateDO smsTemplate = smsTemplateService.getSmsTemplateByCodeFromCache(code);
// verify
assertPojoEquals(dbSmsTemplate, smsTemplate);
} |
static CharSource asCharSource(ZipFile file, ZipEntry entry, Charset charset) {
return asByteSource(file, entry).asCharSource(charset);
} | @Test
public void testAsCharSource() throws Exception {
File zipDir = new File(tmpDir, "zip");
assertTrue(zipDir.mkdirs());
createFileWithContents(zipDir, "myTextFile.txt", "Simple Text");
ZipFiles.zipDirectory(tmpDir, zipFile);
try (ZipFile zip = new ZipFile(zipFile)) {
ZipEntry entry = zip.getEntry("zip/myTextFile.txt");
CharSource charSource = ZipFiles.asCharSource(zip, entry, StandardCharsets.UTF_8);
assertEquals("Simple Text", charSource.read());
}
} |
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
} | @Test
public void testMultipleFinishBundleMethods() throws Exception {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("Found multiple methods annotated with @FinishBundle");
thrown.expectMessage("bar(FinishBundleContext)");
thrown.expectMessage("baz(FinishBundleContext)");
thrown.expectMessage(getClass().getName() + "$");
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@ProcessElement
public void foo(ProcessContext context) {}
@FinishBundle
public void bar(FinishBundleContext context) {}
@FinishBundle
public void baz(FinishBundleContext context) {}
}.getClass());
} |
public static String replaceByCodePoint(CharSequence str, int startInclude, int endExclude, char replacedChar) {
if (isEmpty(str)) {
return str(str);
}
final String originalStr = str(str);
int[] strCodePoints = originalStr.codePoints().toArray();
final int strLength = strCodePoints.length;
if (startInclude > strLength) {
return originalStr;
}
if (endExclude > strLength) {
endExclude = strLength;
}
if (startInclude > endExclude) {
// if the start position is greater than the end position, do not replace
return originalStr;
}
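// rebuild the string one code point at a time, masking indices in [startInclude, endExclude)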
final StringBuilder stringBuilder = new StringBuilder();
for (int i = 0; i < strLength; i++) {
if (i >= startInclude && i < endExclude) {
stringBuilder.append(replacedChar);
} else {
stringBuilder.append(new String(strCodePoints, i, 1));
}
}
return stringBuilder.toString();
} | @Test
public void replaceByStrTest() {
String replace = "SSM15930297701BeryAllen";
String result = CharSequenceUtil.replaceByCodePoint(replace, 5, 12, "***");
assertEquals("SSM15***01BeryAllen", result);
} |
@SuppressWarnings("unchecked")
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir,
Configuration conf, FileSystem fs, List<InputSplit> splits)
throws IOException, InterruptedException {
T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);
createSplitFiles(jobSubmitDir, conf, fs, array);
} | @Test
public void testMaxBlockLocationsOldSplits() throws Exception {
TEST_DIR.mkdirs();
try {
Configuration conf = new Configuration();
conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
Path submitDir = new Path(TEST_DIR.getAbsolutePath());
FileSystem fs = FileSystem.getLocal(conf);
org.apache.hadoop.mapred.FileSplit split =
new org.apache.hadoop.mapred.FileSplit(new Path("/some/path"), 0, 1,
new String[] { "loc1", "loc2", "loc3", "loc4", "loc5" });
JobSplitWriter.createSplitFiles(submitDir, conf, fs,
new org.apache.hadoop.mapred.InputSplit[] { split });
JobSplit.TaskSplitMetaInfo[] infos =
SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf,
submitDir);
assertEquals("unexpected number of splits", 1, infos.length);
assertEquals("unexpected number of split locations",
4, infos[0].getLocations().length);
} finally {
FileUtil.fullyDelete(TEST_DIR);
}
} |
@Override
public String toString() {
return String.format(
"IcebergStagedScan(table=%s, type=%s, taskSetID=%s, caseSensitive=%s)",
table(), expectedSchema().asStruct(), taskSetId, caseSensitive());
} | @TestTemplate
public void testTaskSetPlanning() throws NoSuchTableException, IOException {
sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
List<SimpleRecord> records =
ImmutableList.of(new SimpleRecord(1, "a"), new SimpleRecord(2, "b"));
Dataset<Row> df = spark.createDataFrame(records, SimpleRecord.class);
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
Table table = validationCatalog.loadTable(tableIdent);
assertThat(table.snapshots()).as("Should produce 2 snapshot").hasSize(2);
try (CloseableIterable<FileScanTask> fileScanTasks = table.newScan().planFiles()) {
ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
String setID = UUID.randomUUID().toString();
List<FileScanTask> tasks = ImmutableList.copyOf(fileScanTasks);
taskSetManager.stageTasks(table, setID, tasks);
// load the staged file set and make sure each file is in a separate split
Dataset<Row> scanDF =
spark
.read()
.format("iceberg")
.option(SparkReadOptions.SCAN_TASK_SET_ID, setID)
.option(SparkReadOptions.SPLIT_SIZE, tasks.get(0).file().fileSizeInBytes())
.load(tableName);
assertThat(scanDF.javaRDD().getNumPartitions())
.as("Num partitions should match")
.isEqualTo(2);
// load the staged file set and make sure we combine both files into a single split
scanDF =
spark
.read()
.format("iceberg")
.option(SparkReadOptions.SCAN_TASK_SET_ID, setID)
.option(SparkReadOptions.SPLIT_SIZE, Long.MAX_VALUE)
.load(tableName);
assertThat(scanDF.javaRDD().getNumPartitions())
.as("Num partitions should match")
.isEqualTo(1);
}
} |
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
throw new UnsupportedOperationException("MultiMessageCodec " + getClass() + " does not support decode()");
} | @Test
public void decodeThrowsUnsupportedOperationException() throws Exception {
assertThatExceptionOfType(UnsupportedOperationException.class)
.isThrownBy(() -> codec.decode(new RawMessage(new byte[0])))
.withMessage("MultiMessageCodec " + NetFlowCodec.class + " does not support decode()");
} |
public static PointList sample(PointList input, double maxDistance, DistanceCalc distCalc, ElevationProvider elevation) {
PointList output = new PointList(input.size() * 2, input.is3D());
if (input.isEmpty()) return output;
int nodes = input.size();
double lastLat = input.getLat(0), lastLon = input.getLon(0), lastEle = input.getEle(0),
thisLat, thisLon, thisEle;
for (int i = 0; i < nodes; i++) {
thisLat = input.getLat(i);
thisLon = input.getLon(i);
thisEle = input.getEle(i);
if (i > 0) {
double segmentLength = distCalc.calcDist3D(lastLat, lastLon, lastEle, thisLat, thisLon, thisEle);
int segments = (int) Math.round(segmentLength / maxDistance);
// for small distances, we use a simple and fast approximation to interpolate between points
// for longer distances (or when crossing international date line) we use great circle interpolation
boolean exact = segmentLength > GREAT_CIRCLE_SEGMENT_LENGTH || distCalc.isCrossBoundary(lastLon, thisLon);
for (int segment = 1; segment < segments; segment++) {
double ratio = (double) segment / segments;
double lat, lon;
if (exact) {
GHPoint point = distCalc.intermediatePoint(ratio, lastLat, lastLon, thisLat, thisLon);
lat = point.getLat();
lon = point.getLon();
} else {
lat = lastLat + (thisLat - lastLat) * ratio;
lon = lastLon + (thisLon - lastLon) * ratio;
}
double ele = elevation.getEle(lat, lon);
if (!Double.isNaN(ele)) {
output.add(lat, lon, ele);
}
}
}
output.add(thisLat, thisLon, thisEle);
lastLat = thisLat;
lastLon = thisLon;
lastEle = thisEle;
}
return output;
} | @Test
public void addsExtraPointBelowSecondThreshold() {
PointList in = new PointList(2, true);
in.add(0, 0, 0);
in.add(0.8, 0, 0);
PointList out = EdgeSampling.sample(
in,
DistanceCalcEarth.METERS_PER_DEGREE / 3,
new DistanceCalcEarth(),
elevation
);
assertEquals("(0.0,0.0,0.0), (0.4,0.0,10.0), (0.8,0.0,0.0)", round(out).toString());
} |
@Override
protected void transmitRpcContext(final Map<String, String> rpcContext) {
RpcContext.getClientAttachment().setAttachments(rpcContext);
} | @Test
public void testTransmitRpcContext() {
Map<String, String> stringStringMap = Maps.newHashMapWithExpectedSize(1);
stringStringMap.put("test", "test");
apacheDubboPlugin.transmitRpcContext(stringStringMap);
assertEquals(RpcContext.getContext().getAttachment("test"), "test");
} |
public static void checkDrivingLicenceMrz(String mrz) {
if (mrz.charAt(0) != 'D') {
throw new VerificationException("MRZ should start with D");
}
if (mrz.charAt(1) != '1') {
throw new VerificationException("Only BAP configuration is supported (1)");
}
if (!mrz.substring(2, 5).equals("NLD")) {
throw new VerificationException("Only Dutch driving licence supported");
}
if (mrz.length() != 30) {
throw new VerificationException("Dutch MRZ should have length of 30");
}
checkMrzCheckDigit(mrz);
} | @Test
public void checkDrivingLicenceMrzPositive() {
MrzUtils.checkDrivingLicenceMrz("PPPPPPPPPPPPPPPPPPPPPPPPPPPPPP");
} |
@Override
public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData,
final ServiceConfiguration conf) {
selectedBundlesCache.clear();
final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;
final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);
if (avgUsage == 0) {
log.warn("average max resource usage is 0");
return selectedBundlesCache;
}
loadData.getBrokerData().forEach((broker, brokerData) -> {
final LocalBrokerData localData = brokerData.getLocalData();
final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);
if (currentUsage < avgUsage + threshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker,
localData.printResourceUsage());
}
return;
}
double percentOfTrafficToOffload =
currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
if (minimumThroughputToOffload < minThroughputThreshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] broker is planning to shed throughput {} MByte/s less than "
+ "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})",
broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB,
localData.printResourceUsage());
}
return;
}
log.info(
"Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
+ " > {}% + {}% -- Offloading at least {} MByte/s of traffic,"
+ " left throughput {} MByte/s ({})",
broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold, minimumThroughputToOffload / MB,
(brokerCurrentThroughput - minimumThroughputToOffload) / MB, localData.printResourceUsage());
if (localData.getBundles().size() > 1) {
filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData, minimumThroughputToOffload);
} else if (localData.getBundles().size() == 1) {
log.warn(
"HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
+ "No Load Shedding will be done on this broker",
localData.getBundles().iterator().next(), broker);
} else {
log.warn("Broker {} is overloaded despite having no bundles", broker);
}
});
if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) {
tryLowerBoundaryShedding(loadData, conf);
}
return selectedBundlesCache;
} | @Test
public void testLowerBoundaryShedding() {
int numBundles = 10;
int brokerNum = 11;
int lowLoadNode = 10;
LoadData loadData = new LoadData();
double throughput = 100 * 1024 * 1024;
//There are 11 Brokers, of which 10 are loaded at 80% and 1 is loaded at 0%.
//At this time, the average load is 80*10/11 = 72.73, and the threshold for rebalancing is 72.73 + 10 = 82.73.
//Since 80 < 82.73, rebalancing will not be trigger, and there is one Broker with load of 0.
for (int i = 0; i < brokerNum; i++) {
LocalBrokerData broker = new LocalBrokerData();
for (int j = 0; j < numBundles; j++) {
broker.getBundles().add("bundle-" + j);
BundleData bundle = new BundleData();
TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
timeAverageMessageData.setMsgThroughputIn(i == lowLoadNode ? 0 : throughput);
timeAverageMessageData.setMsgThroughputOut(i == lowLoadNode ? 0 : throughput);
bundle.setShortTermData(timeAverageMessageData);
String broker2BundleName = "broker-" + i + "-bundle-" + j;
loadData.getBundleData().put(broker2BundleName, bundle);
broker.getBundles().add(broker2BundleName);
}
broker.setBandwidthIn(new ResourceUsage(i == lowLoadNode ? 0 : 80, 100));
broker.setBandwidthOut(new ResourceUsage(i == lowLoadNode ? 0 : 80, 100));
broker.setMsgThroughputIn(i == lowLoadNode ? 0 : throughput);
broker.setMsgThroughputOut(i == lowLoadNode ? 0 : throughput);
loadData.getBrokerData().put("broker-" + i, new BrokerData(broker));
}
ThresholdShedder shedder = new ThresholdShedder();
Multimap<String, String> bundlesToUnload = shedder.findBundlesForUnloading(loadData, conf);
assertTrue(bundlesToUnload.isEmpty());
conf.setLowerBoundarySheddingEnabled(true);
bundlesToUnload = thresholdShedder.findBundlesForUnloading(loadData, conf);
assertFalse(bundlesToUnload.isEmpty());
} |
public static org.apache.pinot.common.utils.regex.Pattern compile(String regex) {
// un-initialized factory will use java.util.regex to avoid requiring initialization in tests
if (_regexClass == null) {
return new JavaUtilPattern(regex);
}
switch (_regexClass) {
case RE2J:
return new Re2jPattern(regex);
case JAVA_UTIL:
default:
return new JavaUtilPattern(regex);
}
} | @Test
public void testNonInitializedPatternFactory() {
Pattern pattern = PatternFactory.compile("pattern");
Assert.assertTrue(pattern instanceof JavaUtilPattern);
} |
public static double cov(int[] x, int[] y) {
if (x.length != y.length) {
throw new IllegalArgumentException("Arrays have different length.");
}
if (x.length < 3) {
throw new IllegalArgumentException("array length has to be at least 3.");
}
double mx = mean(x);
double my = mean(y);
double Sxy = 0.0;
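// accumulate the sum of products of deviations; dividing by (n - 1) yields the sample covariance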
for (int i = 0; i < x.length; i++) {
double dx = x[i] - mx;
double dy = y[i] - my;
Sxy += dx * dy;
}
return Sxy / (x.length - 1);
} | @Test
public void testCov_doubleArr_doubleArr() {
System.out.println("cov");
double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
double[] y = {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300};
assertEquals(0.5894983, MathEx.cov(x, y), 1E-7);
} |
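A quick hand check of the Sxy / (n - 1) formula above; a standalone sketch, not part of MathEx:

public class CovSketch {
    public static void main(String[] args) {
        int[] x = {1, 2, 3, 4};
        int[] y = {2, 4, 6, 8};
        double mx = 2.5, my = 5.0;                // means of x and y
        double sxy = 0.0;
        for (int i = 0; i < x.length; i++) {
            sxy += (x[i] - mx) * (y[i] - my);     // 4.5 + 0.5 + 0.5 + 4.5 = 10
        }
        System.out.println(sxy / (x.length - 1)); // sample covariance: 10 / 3 = 3.333...
    }
}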
public ArtifactResolveRequest startArtifactResolveProcess(HttpServletRequest httpServletRequest) throws SamlParseException {
try {
final var artifactResolveRequest = validateRequest(httpServletRequest);
final var samlSession = updateArtifactResolveRequestWithSamlSession(artifactResolveRequest);
validateArtifactResolve(artifactResolveRequest);
dcMetadataService.resolveDcMetadata(artifactResolveRequest);
signatureService.validateSamlRequest(artifactResolveRequest, artifactResolveRequest.getArtifactResolve().getSignature());
createAdAuthentication(samlSession, artifactResolveRequest);
samlSessionService.updateSamlSession(artifactResolveRequest);
return artifactResolveRequest;
} catch (MessageDecodingException e) {
throw new SamlParseException("ArtifactResolveRequest soap11 decode exception", e);
} catch (ComponentInitializationException e) {
throw new SamlParseException("ArtifactResolveRequest initialization exception", e);
} catch (SamlSessionException e) {
throw new SamlParseException("Failed to load saml session", e);
} catch (AdException e) {
throw new SamlParseException("Failed to create an authentication", e);
} catch (DienstencatalogusException e) {
throw new SamlParseException("Failed to retrieve metadata from DienstenCatalogus", e);
} catch (SamlValidationException e) {
throw new SamlParseException("ArtifactResolve not valid", e);
} catch (ValidationException e) {
throw new SamlParseException("Failed to validate", e);
} catch (SharedServiceClientException e) {
throw new SamlParseException("Failed to retrieve data from sharedServiceClient.getSSConfigLong", e);
}
} | @Test
void parseArtifactResolveSuccessfulBVDRequest() throws Exception {
samlSession.setProtocolType(ProtocolType.SAML_ROUTERINGSDIENST);
samlSession.setTransactionId("transactionId");
when(samlSessionServiceMock.loadSession(anyString())).thenReturn(samlSession);
ArtifactResolveRequest artifactResolveRequest = artifactResolveService.startArtifactResolveProcess(prepareSoapRequest(artifactResolveValidBVD));
assertEquals("PPPPPPPP", artifactResolveRequest.getAdAuthentication().getBsn());
} |
public static <T> Serde<Windowed<T>> sessionWindowedSerdeFrom(final Class<T> type) {
return new SessionWindowedSerde<>(Serdes.serdeFrom(type));
} | @Test
public void shouldWrapForSessionWindowedSerde() {
final Serde<Windowed<String>> serde = WindowedSerdes.sessionWindowedSerdeFrom(String.class);
assertInstanceOf(SessionWindowedSerializer.class, serde.serializer());
assertInstanceOf(SessionWindowedDeserializer.class, serde.deserializer());
assertInstanceOf(StringSerializer.class, ((SessionWindowedSerializer) serde.serializer()).innerSerializer());
assertInstanceOf(StringDeserializer.class, ((SessionWindowedDeserializer) serde.deserializer()).innerDeserializer());
} |
public static void mkdir(
final HybridFile parentFile,
@NonNull final HybridFile file,
final Context context,
final boolean rootMode,
@NonNull final ErrorCallBack errorCallBack) {
new AsyncTask<Void, Void, Void>() {
private DataUtils dataUtils = DataUtils.getInstance();
private Function<DocumentFile, Void> safCreateDirectory =
input -> {
if (input != null && input.isDirectory()) {
boolean result = false;
try {
result = input.createDirectory(file.getName(context)) != null;
} catch (Exception e) {
LOG.warn("Failed to make directory", e);
}
errorCallBack.done(file, result);
} else errorCallBack.done(file, false);
return null;
};
@Override
protected Void doInBackground(Void... params) {
        // check whether the file name is valid or a recursive call is possible
if (!Operations.isFileNameValid(file.getName(context))) {
errorCallBack.invalidName(file);
return null;
}
if (file.exists()) {
errorCallBack.exists(file);
return null;
}
        // Android data directory: prohibit creating directories here
if (file.isAndroidDataDir()) {
errorCallBack.done(file, false);
return null;
}
if (file.isSftp() || file.isFtp()) {
file.mkdir(context);
/*
          FIXME: have HybridFile.mkdir() throw exceptions so errorCallBack can propagate them here
*/
errorCallBack.done(file, true);
return null;
}
if (file.isSmb()) {
try {
file.getSmbFile(2000).mkdirs();
} catch (SmbException e) {
LOG.warn("failed to make smb directories", e);
errorCallBack.done(file, false);
return null;
}
errorCallBack.done(file, file.exists());
return null;
}
if (file.isOtgFile()) {
if (checkOtgNewFileExists(file, context)) {
errorCallBack.exists(file);
return null;
}
safCreateDirectory.apply(OTGUtil.getDocumentFile(parentFile.getPath(), context, false));
return null;
}
if (file.isDocumentFile()) {
if (checkDocumentFileNewFileExists(file, context)) {
errorCallBack.exists(file);
return null;
}
safCreateDirectory.apply(
OTGUtil.getDocumentFile(
parentFile.getPath(),
SafRootHolder.getUriRoot(),
context,
OpenMode.DOCUMENT_FILE,
false));
return null;
} else if (file.isDropBoxFile()) {
CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX);
try {
cloudStorageDropbox.createFolder(CloudUtil.stripPath(OpenMode.DROPBOX, file.getPath()));
errorCallBack.done(file, true);
} catch (Exception e) {
LOG.warn("failed to make directory in cloud connection", e);
errorCallBack.done(file, false);
}
} else if (file.isBoxFile()) {
CloudStorage cloudStorageBox = dataUtils.getAccount(OpenMode.BOX);
try {
cloudStorageBox.createFolder(CloudUtil.stripPath(OpenMode.BOX, file.getPath()));
errorCallBack.done(file, true);
} catch (Exception e) {
LOG.warn("failed to make directory in cloud connection", e);
errorCallBack.done(file, false);
}
} else if (file.isOneDriveFile()) {
CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE);
try {
cloudStorageOneDrive.createFolder(
CloudUtil.stripPath(OpenMode.ONEDRIVE, file.getPath()));
errorCallBack.done(file, true);
} catch (Exception e) {
LOG.warn("failed to make directory in cloud connection", e);
errorCallBack.done(file, false);
}
} else if (file.isGoogleDriveFile()) {
CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE);
try {
cloudStorageGdrive.createFolder(CloudUtil.stripPath(OpenMode.GDRIVE, file.getPath()));
errorCallBack.done(file, true);
} catch (Exception e) {
LOG.warn("failed to make directory in cloud connection", e);
errorCallBack.done(file, false);
}
} else {
if (file.isLocal() || file.isRoot()) {
int mode = checkFolder(new File(file.getParent(context)), context);
if (mode == 2) {
errorCallBack.launchSAF(file);
return null;
}
if (mode == 1 || mode == 0) MakeDirectoryOperation.mkdir(file.getFile(), context);
if (!file.exists() && rootMode) {
file.setMode(OpenMode.ROOT);
if (file.exists()) errorCallBack.exists(file);
try {
MakeDirectoryCommand.INSTANCE.makeDirectory(
file.getParent(context), file.getName(context));
} catch (ShellNotRunningException e) {
LOG.warn("failed to make directory in local filesystem", e);
}
errorCallBack.done(file, file.exists());
return null;
}
errorCallBack.done(file, file.exists());
return null;
}
errorCallBack.done(file, file.exists());
}
return null;
}
}.executeOnExecutor(executor);
} | @Test
public void testMkdirDuplicate() throws InterruptedException {
File newFolder = new File(storageRoot, "test");
HybridFile newFolderHF = new HybridFile(OpenMode.FILE, newFolder.getAbsolutePath());
CountDownLatch waiter1 = new CountDownLatch(1);
Operations.mkdir(
newFolderHF,
newFolderHF,
ApplicationProvider.getApplicationContext(),
false,
new AbstractErrorCallback() {
@Override
public void done(HybridFile hFile, boolean b) {
waiter1.countDown();
}
});
waiter1.await();
assertTrue(newFolder.exists());
CountDownLatch waiter2 = new CountDownLatch(1);
AtomicBoolean assertFlag = new AtomicBoolean(false);
Operations.mkdir(
newFolderHF,
newFolderHF,
ApplicationProvider.getApplicationContext(),
false,
new AbstractErrorCallback() {
@Override
public void exists(HybridFile file) {
assertFlag.set(true);
waiter2.countDown();
}
});
waiter2.await();
assertTrue(assertFlag.get());
} |
@Override
public void start() {
DatabaseVersion.Status status = version.getStatus();
checkState(status == UP_TO_DATE || status == FRESH_INSTALL, "Compute Engine can't start unless Database is up to date");
} | @Test
public void start_has_no_effect_if_status_is_UP_TO_DATE() {
when(databaseVersion.getStatus()).thenReturn(UP_TO_DATE);
underTest.start();
verify(databaseVersion).getStatus();
verifyNoMoreInteractions(databaseVersion);
} |
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
if (!authConfigs.isAuthEnabled()) {
chain.doFilter(request, response);
return;
}
HttpServletRequest req = (HttpServletRequest) request;
HttpServletResponse resp = (HttpServletResponse) response;
if (authConfigs.isEnableUserAgentAuthWhite()) {
String userAgent = WebUtils.getUserAgent(req);
if (StringUtils.startsWith(userAgent, Constants.NACOS_SERVER_HEADER)) {
chain.doFilter(request, response);
return;
}
} else if (StringUtils.isNotBlank(authConfigs.getServerIdentityKey()) && StringUtils.isNotBlank(
authConfigs.getServerIdentityValue())) {
String serverIdentity = req.getHeader(authConfigs.getServerIdentityKey());
if (StringUtils.isNotBlank(serverIdentity)) {
if (authConfigs.getServerIdentityValue().equals(serverIdentity)) {
chain.doFilter(request, response);
return;
}
Loggers.AUTH.warn("Invalid server identity value for {} from {}", authConfigs.getServerIdentityKey(),
req.getRemoteHost());
}
} else {
resp.sendError(HttpServletResponse.SC_FORBIDDEN,
"Invalid server identity key or value, Please make sure set `nacos.core.auth.server.identity.key`"
+ " and `nacos.core.auth.server.identity.value`, or open `nacos.core.auth.enable.userAgentAuthWhite`");
return;
}
try {
Method method = methodsCache.getMethod(req);
if (method == null) {
chain.doFilter(request, response);
return;
}
if (method.isAnnotationPresent(Secured.class) && authConfigs.isAuthEnabled()) {
if (Loggers.AUTH.isDebugEnabled()) {
Loggers.AUTH.debug("auth start, request: {} {}", req.getMethod(), req.getRequestURI());
}
Secured secured = method.getAnnotation(Secured.class);
if (!protocolAuthService.enableAuth(secured)) {
chain.doFilter(request, response);
return;
}
Resource resource = protocolAuthService.parseResource(req, secured);
IdentityContext identityContext = protocolAuthService.parseIdentity(req);
boolean result = protocolAuthService.validateIdentity(identityContext, resource);
RequestContext requestContext = RequestContextHolder.getContext();
requestContext.getAuthContext().setIdentityContext(identityContext);
requestContext.getAuthContext().setResource(resource);
if (null == requestContext.getAuthContext().getAuthResult()) {
requestContext.getAuthContext().setAuthResult(result);
}
if (!result) {
// TODO Get reason of failure
throw new AccessException("Validate Identity failed.");
}
String action = secured.action().toString();
result = protocolAuthService.validateAuthority(identityContext, new Permission(resource, action));
if (!result) {
// TODO Get reason of failure
throw new AccessException("Validate Authority failed.");
}
}
chain.doFilter(request, response);
} catch (AccessException e) {
if (Loggers.AUTH.isDebugEnabled()) {
Loggers.AUTH.debug("access denied, request: {} {}, reason: {}", req.getMethod(), req.getRequestURI(),
e.getErrMsg());
}
resp.sendError(HttpServletResponse.SC_FORBIDDEN, e.getErrMsg());
} catch (IllegalArgumentException e) {
resp.sendError(HttpServletResponse.SC_BAD_REQUEST, ExceptionUtil.getAllExceptionMsg(e));
} catch (Exception e) {
Loggers.AUTH.warn("[AUTH-FILTER] Server failed: ", e);
resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Server failed, " + e.getMessage());
}
} | @Test
void testDoFilter() {
try {
FilterChain filterChain = new MockFilterChain();
Mockito.when(authConfigs.isAuthEnabled()).thenReturn(true);
MockHttpServletRequest request = new MockHttpServletRequest();
HttpServletResponse response = new MockHttpServletResponse();
authFilter.doFilter(request, response, filterChain);
Mockito.when(authConfigs.isEnableUserAgentAuthWhite()).thenReturn(true);
request.addHeader(HttpHeaderConsts.USER_AGENT_HEADER, Constants.NACOS_SERVER_HEADER);
authFilter.doFilter(request, response, filterChain);
Mockito.when(authConfigs.isEnableUserAgentAuthWhite()).thenReturn(false);
Mockito.when(authConfigs.getServerIdentityKey()).thenReturn("1");
Mockito.when(authConfigs.getServerIdentityValue()).thenReturn("2");
request.addHeader("1", "2");
authFilter.doFilter(request, response, filterChain);
Mockito.when(authConfigs.getServerIdentityValue()).thenReturn("3");
authFilter.doFilter(request, response, filterChain);
Mockito.when(methodsCache.getMethod(Mockito.any())).thenReturn(filterChain.getClass().getMethod("testSecured"));
authFilter.doFilter(request, response, filterChain);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
} |
private void failoverServiceCntMetrics() {
try {
for (Map.Entry<String, ServiceInfo> entry : serviceMap.entrySet()) {
String serviceName = entry.getKey();
List<Tag> tags = new ArrayList<>();
tags.add(new ImmutableTag("service_name", serviceName));
if (Metrics.globalRegistry.find("nacos_naming_client_failover_instances").tags(tags).gauge() == null) {
Gauge.builder("nacos_naming_client_failover_instances", () -> serviceMap.get(serviceName).ipCount())
.tags(tags).register(Metrics.globalRegistry);
}
}
} catch (Exception e) {
NAMING_LOGGER.info("[NA] registerFailoverServiceCnt fail.", e);
}
} | @Test
void testFailoverServiceCntMetrics()
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
Method method = FailoverReactor.class.getDeclaredMethod("failoverServiceCntMetrics");
method.setAccessible(true);
method.invoke(failoverReactor);
// No exception
} |
@Override
public boolean putRowWait( RowMetaInterface rowMeta, Object[] rowData, long time, TimeUnit tu ) {
return putRow( rowMeta, rowData );
} | @Test
public void testPutRowWait() throws Exception {
rowSet.putRowWait( new RowMeta(), row, 1, TimeUnit.SECONDS );
assertSame( row, rowSet.getRowWait( 1, TimeUnit.SECONDS ) );
} |
@ApiOperation(value = "Delete widget type (deleteWidgetType)",
notes = "Deletes the Widget Type. Referencing non-existing Widget Type Id will cause an error." + SYSTEM_OR_TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN')")
@RequestMapping(value = "/widgetType/{widgetTypeId}", method = RequestMethod.DELETE)
@ResponseStatus(value = HttpStatus.OK)
public void deleteWidgetType(
@Parameter(description = WIDGET_TYPE_ID_PARAM_DESCRIPTION, required = true)
@PathVariable("widgetTypeId") String strWidgetTypeId) throws Exception {
checkParameter("widgetTypeId", strWidgetTypeId);
WidgetTypeId widgetTypeId = new WidgetTypeId(toUUID(strWidgetTypeId));
WidgetTypeDetails wtd = checkWidgetTypeId(widgetTypeId, Operation.DELETE);
tbWidgetTypeService.delete(wtd, getCurrentUser());
} | @Test
public void testDeleteWidgetType() throws Exception {
WidgetTypeDetails widgetType = new WidgetTypeDetails();
widgetType.setName("Widget Type");
widgetType.setDescriptor(JacksonUtil.fromString("{ \"someKey\": \"someValue\" }", JsonNode.class));
WidgetTypeDetails savedWidgetType = doPost("/api/widgetType", widgetType, WidgetTypeDetails.class);
doDelete("/api/widgetType/" + savedWidgetType.getId().getId().toString())
.andExpect(status().isOk());
doGet("/api/widgetType/" + savedWidgetType.getId().getId().toString())
.andExpect(status().isNotFound());
} |
@Override
@Deprecated
public void setFullName(final String fullname) {
fields.put(FULL_NAME, fullname);
} | @Test
public void testSetFullName() {
user = createUserImpl(null, null, null);
user.setFullName("Full Name");
assertEquals("Full Name", user.getFullName());
assertFalse(user.getFirstName().isPresent());
assertFalse(user.getLastName().isPresent());
} |
public String opensslDn() {
StringBuilder bldr = new StringBuilder();
if (organizationName != null) {
bldr.append(String.format("/O=%s", organizationName));
}
if (commonName != null) {
bldr.append(String.format("/CN=%s", commonName));
}
return bldr.toString();
} | @Test
public void testSubjectOpensslDn() {
Subject.Builder subject = new Subject.Builder()
.withCommonName("joe");
assertThat(subject.build().opensslDn(), is("/CN=joe"));
subject = new Subject.Builder()
.withOrganizationName("MyOrg");
assertThat(subject.build().opensslDn(), is("/O=MyOrg"));
subject = new Subject.Builder()
.withCommonName("joe")
.withOrganizationName("MyOrg");
assertThat(subject.build().opensslDn(), is("/O=MyOrg/CN=joe"));
} |
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
} | @Test
public void testSpdySynReplyFrame() throws Exception {
short type = 2;
byte flags = 0;
int length = 4;
int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
encodeControlFrameHeader(buf, type, flags, length);
buf.writeInt(streamId);
decoder.decode(buf);
verify(delegate).readSynReplyFrame(streamId, false);
verify(delegate).readHeaderBlockEnd();
assertFalse(buf.isReadable());
buf.release();
} |
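The common header packs the control bit, a 15-bit version, and a 16-bit type into its first 32 bits; a sketch of that decoding on a raw int, assuming the SPDY/3 layout the decoder above reads:

public class SpdyHeaderSketch {
    public static void main(String[] args) {
        int firstWord = 0x80030002;                      // control=1, version=3, type=2 (SYN_REPLY)
        boolean control = (firstWord & 0x80000000) != 0; // top bit: control vs data frame
        int version = (firstWord >>> 16) & 0x7FFF;       // 15-bit version -> 3
        int type = firstWord & 0xFFFF;                   // 16-bit type -> 2
        System.out.println("control=" + control + " v" + version + " type=" + type);
    }
}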
@SuppressWarnings("DataFlowIssue")
public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket, final ConnectionSession connectionSession) throws SQLException {
if (commandPacket instanceof SQLReceivedPacket) {
log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
} else {
log.debug("Execute packet type: {}", commandPacketType);
}
switch (commandPacketType) {
case COM_QUIT:
return new MySQLComQuitExecutor();
case COM_INIT_DB:
return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession);
case COM_FIELD_LIST:
return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession);
case COM_QUERY:
return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession);
case COM_PING:
return new MySQLComPingExecutor(connectionSession);
case COM_STMT_PREPARE:
return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession);
case COM_STMT_EXECUTE:
return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession);
case COM_STMT_SEND_LONG_DATA:
return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession);
case COM_STMT_RESET:
return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession);
case COM_STMT_CLOSE:
return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession);
case COM_SET_OPTION:
return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession);
case COM_RESET_CONNECTION:
return new MySQLComResetConnectionExecutor(connectionSession);
default:
return new MySQLUnsupportedCommandExecutor(commandPacketType);
}
} | @Test
void assertNewInstanceWithComStmtClose() throws SQLException {
assertThat(MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_STMT_CLOSE,
mock(MySQLComStmtClosePacket.class), connectionSession), instanceOf(MySQLComStmtCloseExecutor.class));
} |
public static boolean getIncludeNullsProperty() {
return "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_JSON_INPUT_INCLUDE_NULLS, "N" ) );
} | @Test
public void testGetIncludeNullsProperty_Y() {
System.setProperty( Const.KETTLE_JSON_INPUT_INCLUDE_NULLS, "Y" );
assertTrue( JsonInputMeta.getIncludeNullsProperty() );
} |
public static void notEmpty(Collection<?> collection, String message) {
if (CollectionUtil.isEmpty(collection)) {
throw new IllegalArgumentException(message);
}
} | @Test(expected = IllegalArgumentException.class)
public void assertNotEmptyByListAndMessageIsNull() {
Assert.notEmpty(Collections.emptyList());
} |
@Override
public boolean equals( Object o )
{
return o instanceof COSFloat &&
Float.floatToIntBits(((COSFloat)o).value) == Float.floatToIntBits(value);
} | @Test
void testEquals()
{
new BaseTester()
{
@Override
@SuppressWarnings({"java:S5863"}) // don't flag tests for reflexivity
void runTest(float num)
{
COSFloat test1 = new COSFloat(num);
COSFloat test2 = new COSFloat(num);
COSFloat test3 = new COSFloat(num);
// Reflexive (x == x)
assertEquals(test1, test1);
                // Symmetric (if x==y then y==x)
assertEquals(test2, test3);
assertEquals(test3, test2);
// Transitive (if x==y && y==z then x==z)
assertEquals(test1, test2);
assertEquals(test2, test3);
assertEquals(test1, test3);
float nf = Float.intBitsToFloat(Float.floatToIntBits(num) + 1);
COSFloat test4 = new COSFloat(nf);
assertNotEquals(test4, test1);
}
}.runTests();
} |
@Override
public Consumer createConsumer(Processor processor) throws Exception {
Plc4XConsumer consumer = new Plc4XConsumer(this, processor);
configureConsumer(consumer);
return consumer;
} | @Test
public void createConsumer() throws Exception {
assertThat(sut.createConsumer(mock(Processor.class)), notNullValue());
} |
public static int damerau(String a, String b) {
// switch parameters to use the shorter one as b to save space.
if (a.length() < b.length()) {
String swap = a;
a = b;
b = swap;
}
int[][] d = new int[3][b.length() + 1];
for (int j = 0; j <= b.length(); j++) {
d[1][j] = j;
}
for (int i = 1; i <= a.length(); i++) {
d[2][0] = i;
for (int j = 1; j <= b.length(); j++) {
int cost = a.charAt(i-1) == b.charAt(j-1) ? 0 : 1;
d[2][j] = MathEx.min(
d[1][j] + 1, // deletion
d[2][j-1] + 1, // insertion
d[1][j-1] + cost); // substitution
if (i > 1 && j > 1) {
if (a.charAt(i-1) == b.charAt(j-2) && a.charAt(i-2) == b.charAt(j-1))
d[2][j] = Math.min(d[2][j], d[0][j-2] + cost); // damerau
}
}
int[] swap = d[0];
d[0] = d[1];
d[1] = d[2];
d[2] = swap;
}
return d[1][b.length()];
} | @Test
public void testPlainDamerauSpeedTest() {
System.out.println("Plain Damerau speed test");
for (int i = 0; i < 100; i++) {
EditDistance.damerau(H1N1, H1N5);
}
} |
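The three rolling rows d[0..2] keep just enough history for the adjacent-transposition check, so memory stays O(min(|a|, |b|)). A hedged usage sketch; the expected values assume the optimal-string-alignment variant implemented above, with EditDistance on the classpath:

public class DamerauSketch {
    public static void main(String[] args) {
        System.out.println(EditDistance.damerau("abcd", "acbd"));     // 1: one adjacent swap
        System.out.println(EditDistance.damerau("kitten", "mitten")); // 1: one substitution
        System.out.println(EditDistance.damerau("CA", "ABC"));        // 3 under OSA (unrestricted Damerau would give 2)
    }
}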
@Override
public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) {
String accessKey = context.getAccessKey();
String secretKey = context.getSecretKey();
        // STS temporary-credential authentication takes precedence over AK/SK authentication
if (StsConfig.getInstance().isStsOn()) {
StsCredential stsCredential = StsCredentialHolder.getInstance().getStsCredential();
accessKey = stsCredential.getAccessKeyId();
secretKey = stsCredential.getAccessKeySecret();
result.setParameter(IdentifyConstants.SECURITY_TOKEN_HEADER, stsCredential.getSecurityToken());
}
if (StringUtils.isNotEmpty(accessKey) && StringUtils.isNotBlank(secretKey)) {
result.setParameter(ACCESS_KEY_HEADER, accessKey);
}
String signatureKey = secretKey;
if (StringUtils.isNotEmpty(context.getRegionId())) {
signatureKey = CalculateV4SigningKeyUtil
.finalSigningKeyStringWithDefaultInfo(secretKey, context.getRegionId());
result.setParameter(RamConstants.SIGNATURE_VERSION, RamConstants.V4);
}
Map<String, String> signHeaders = SpasAdapter
.getSignHeaders(getResource(resource.getNamespace(), resource.getGroup()), signatureKey);
result.setParameters(signHeaders);
} | @Test
void testDoInjectForV4Sign() {
LoginIdentityContext actual = new LoginIdentityContext();
ramContext.setRegionId("cn-hangzhou");
configResourceInjector.doInject(resource, ramContext, actual);
assertEquals(4, actual.getAllKey().size());
assertEquals(PropertyKeyConst.ACCESS_KEY, actual.getParameter("Spas-AccessKey"));
assertEquals(RamConstants.V4, actual.getParameter(RamConstants.SIGNATURE_VERSION));
assertTrue(actual.getAllKey().contains("Timestamp"));
assertTrue(actual.getAllKey().contains("Spas-Signature"));
} |
public void retain(IndexSet indexSet, IndexLifetimeConfig config, RetentionExecutor.RetentionAction action, String actionName) {
final Map<String, Set<String>> deflectorIndices = indexSet.getAllIndexAliases();
// Account for DST and time zones in determining age
final DateTime now = clock.nowUTC();
final long cutoffSoft = now.minus(config.indexLifetimeMin()).getMillis();
final long cutoffHard = now.minus(config.indexLifetimeMax()).getMillis();
final int removeCount = (int) deflectorIndices.keySet()
.stream()
.filter(indexName -> !indices.isReopened(indexName))
.filter(indexName -> !hasCurrentWriteAlias(indexSet, deflectorIndices, indexName))
.filter(indexName -> exceedsAgeLimit(indexName, cutoffSoft, cutoffHard))
.count();
if (LOG.isDebugEnabled()) {
var debug = deflectorIndices.keySet().stream()
.collect(Collectors.toMap(k -> k, k -> Map.of(
"isReopened", indices.isReopened(k),
"hasCurrentWriteAlias", hasCurrentWriteAlias(indexSet, deflectorIndices, k),
"exceedsAgeLimit", exceedsAgeLimit(k, cutoffSoft, cutoffHard),
"closingDate", indices.indexClosingDate(k),
"creationDate", indices.indexCreationDate(k)
)));
Joiner.MapJoiner mapJoiner = Joiner.on("\n").withKeyValueSeparator("=");
LOG.debug("Debug info retain for indexSet <{}>: (min {}, max {}) removeCount: {} details: <{}>",
indexSet.getIndexPrefix(), config.indexLifetimeMin(), config.indexLifetimeMax(),
removeCount, mapJoiner.join(debug));
}
if (removeCount > 0) {
final String msg = "Running retention for " + removeCount + " aged-out indices.";
LOG.info(msg);
activityWriter.write(new Activity(msg, TimeBasedRetentionExecutor.class));
retentionExecutor.runRetention(indexSet, removeCount, action, actionName);
}
} | @Test
public void timeBasedNoDates() {
when(indices.indexClosingDate("test_1")).thenReturn(Optional.empty());
when(indices.indexCreationDate("test_1")).thenReturn(Optional.empty());
underTest.retain(indexSet, getIndexLifetimeConfig(14, 16), action, "action");
verify(action, times(1)).retain(retainedIndexName.capture(), eq(indexSet));
assertThat(retainedIndexName.getValue()).containsExactly("test_1");
} |
public static CallRoutingTable fromTsv(final Reader inputReader) throws IOException {
try (final BufferedReader reader = new BufferedReader(inputReader)) {
// use maps to silently dedupe CidrBlocks
Map<CidrBlock.IpV4CidrBlock, List<String>> ipv4Map = new HashMap<>();
Map<CidrBlock.IpV6CidrBlock, List<String>> ipv6Map = new HashMap<>();
Map<CallRoutingTable.GeoKey, List<String>> ipGeoTable = new HashMap<>();
String line;
while((line = reader.readLine()) != null) {
if(line.isBlank()) {
continue;
}
List<String> splits = Arrays.stream(line.split(WHITESPACE_REGEX)).filter(s -> !s.isBlank()).toList();
if (splits.size() < 2) {
throw new IllegalStateException("Invalid row, expected some key and list of values");
}
List<String> datacenters = splits.subList(1, splits.size());
switch (guessLineType(splits)) {
case v4 -> {
CidrBlock cidrBlock = CidrBlock.parseCidrBlock(splits.getFirst());
if(!(cidrBlock instanceof CidrBlock.IpV4CidrBlock)) {
throw new IllegalArgumentException("Expected an ipv4 cidr block");
}
ipv4Map.put((CidrBlock.IpV4CidrBlock) cidrBlock, datacenters);
}
case v6 -> {
CidrBlock cidrBlock = CidrBlock.parseCidrBlock(splits.getFirst());
if(!(cidrBlock instanceof CidrBlock.IpV6CidrBlock)) {
throw new IllegalArgumentException("Expected an ipv6 cidr block");
}
ipv6Map.put((CidrBlock.IpV6CidrBlock) cidrBlock, datacenters);
}
case Geo -> {
String[] geo = splits.getFirst().split("-");
if(geo.length < 3) {
throw new IllegalStateException("Geo row key invalid, expected atleast continent, country, and protocol");
}
String continent = geo[0];
String country = geo[1];
Optional<String> subdivision = geo.length > 3 ? Optional.of(geo[2]) : Optional.empty();
CallRoutingTable.Protocol protocol = CallRoutingTable.Protocol.valueOf(geo[geo.length - 1].toLowerCase());
CallRoutingTable.GeoKey tableKey = new CallRoutingTable.GeoKey(
continent,
country,
subdivision,
protocol
);
ipGeoTable.put(tableKey, datacenters);
}
}
}
return new CallRoutingTable(
ipv4Map,
ipv6Map,
ipGeoTable
);
}
} | @Test
public void testParserMissingSection() throws IOException {
var input =
"""
192.1.12.0/24\t \tdatacenter-1\t\t datacenter-2 datacenter-3
193.123.123.0/24\tdatacenter-1\tdatacenter-2
1.123.123.0/24\t datacenter-1
SA-SR-v4 datacenter-3
SA-UY-v4\tdatacenter-3\tdatacenter-1\tdatacenter-2
NA-US-VA-v6 datacenter-2 \tdatacenter-1
""";
var actual = CallRoutingTableParser.fromTsv(new StringReader(input));
var expected = new CallRoutingTable(
Map.of(
(CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("192.1.12.0/24"), List.of("datacenter-1", "datacenter-2", "datacenter-3"),
(CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("193.123.123.0/24"), List.of("datacenter-1", "datacenter-2"),
(CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("1.123.123.0/24"), List.of("datacenter-1")
),
Map.of(),
Map.of(
new CallRoutingTable.GeoKey("SA", "SR", Optional.empty(), CallRoutingTable.Protocol.v4), List.of("datacenter-3"),
new CallRoutingTable.GeoKey("SA", "UY", Optional.empty(), CallRoutingTable.Protocol.v4), List.of("datacenter-3", "datacenter-1", "datacenter-2"),
new CallRoutingTable.GeoKey("NA", "US", Optional.of("VA"), CallRoutingTable.Protocol.v6), List.of("datacenter-2", "datacenter-1")
)
);
assertThat(actual).isEqualTo(expected);
} |
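A geo row key has the shape CONTINENT-COUNTRY[-SUBDIVISION]-PROTOCOL, split on '-'; with four parts the third is a subdivision. A minimal sketch mirroring (not calling) the parser above:

import java.util.Optional;

public class GeoKeySketch {
    public static void main(String[] args) {
        String[] geo = "NA-US-VA-v6".split("-");
        String continent = geo[0];                                       // "NA"
        String country = geo[1];                                         // "US"
        Optional<String> subdivision =
                geo.length > 3 ? Optional.of(geo[2]) : Optional.empty(); // "VA"
        String protocol = geo[geo.length - 1];                           // "v6"
        System.out.println(continent + "/" + country + "/" + subdivision.orElse("-") + "/" + protocol);
    }
}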
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
        // push the annotation onto the context stack
DataPermission dataPermission = this.findAnnotation(methodInvocation);
if (dataPermission != null) {
DataPermissionContextHolder.add(dataPermission);
}
try {
            // proceed with the target invocation
return methodInvocation.proceed();
} finally {
            // pop from the context stack
if (dataPermission != null) {
DataPermissionContextHolder.remove();
}
}
@Test // @DataPermission annotation is present on the class
public void testInvoke_class() throws Throwable {
        // prepare arguments
mockMethodInvocation(TestClass.class);
        // invoke
Object result = interceptor.invoke(methodInvocation);
        // assert
assertEquals("class", result);
assertEquals(1, interceptor.getDataPermissionCache().size());
assertFalse(CollUtil.getFirst(interceptor.getDataPermissionCache().values()).enable());
} |
@Override
public boolean contains(CharSequence name, CharSequence value) {
return contains(name, value, false);
} | @Test
public void testContainsNameAndValue() {
Http2Headers headers = newHeaders();
assertTrue(headers.contains("name1", "value2"));
assertFalse(headers.contains("name1", "Value2"));
assertTrue(headers.contains("2name", "Value3", true));
assertFalse(headers.contains("2name", "Value3", false));
} |
public EntityShareResponse prepareShare(GRN ownedEntity,
EntityShareRequest request,
User sharingUser,
Subject sharingSubject) {
requireNonNull(ownedEntity, "ownedEntity cannot be null");
requireNonNull(request, "request cannot be null");
requireNonNull(sharingUser, "sharingUser cannot be null");
requireNonNull(sharingSubject, "sharingSubject cannot be null");
final GRN sharingUserGRN = grnRegistry.ofUser(sharingUser);
final Set<Grantee> modifiableGrantees = getModifiableGrantees(sharingUser, sharingUserGRN, ownedEntity);
final Set<GRN> modifiableGranteeGRNs = modifiableGrantees.stream().map(Grantee::grn).collect(Collectors.toSet());
final ImmutableSet<ActiveShare> modifiableActiveShares = getActiveShares(ownedEntity, sharingUser, modifiableGranteeGRNs);
return EntityShareResponse.builder()
.entity(ownedEntity.toString())
.sharingUser(sharingUserGRN)
.availableGrantees(modifiableGrantees)
.availableCapabilities(getAvailableCapabilities())
.activeShares(modifiableActiveShares)
.selectedGranteeCapabilities(getSelectedGranteeCapabilities(modifiableActiveShares, request))
.missingPermissionsOnDependencies(checkMissingPermissionsOnDependencies(ownedEntity, sharingUserGRN, modifiableActiveShares, request))
.validationResult(validateRequest(ownedEntity, request, sharingUser, modifiableGranteeGRNs))
.build();
} | @DisplayName("Don't run validation on initial empty request")
@Test
void noValidationOnEmptyRequest() {
final GRN entity = grnRegistry.newGRN(GRNTypes.DASHBOARD, "54e3deadbeefdeadbeefaffe");
final EntityShareRequest shareRequest = EntityShareRequest.create(null);
final User user = createMockUser("hans");
final Subject subject = mock(Subject.class);
final EntityShareResponse entityShareResponse = entitySharesService.prepareShare(entity, shareRequest, user, subject);
assertThat(entityShareResponse.validationResult()).satisfies(validationResult -> {
assertThat(validationResult.failed()).isFalse();
assertThat(validationResult.getErrors()).isEmpty();
});
} |
@Override
public String pluginNamed() {
return PluginEnum.SENTINEL.getName();
} | @Test
public void pluginNamedTest() {
assertEquals(PluginEnum.SENTINEL.getName(), sentinelRuleHandle.pluginNamed());
} |
@Override
public IndexRange calculateRange(String index) {
checkIfHealthy(indices.waitForRecovery(index),
(status) -> new RuntimeException("Unable to calculate range for index <" + index + ">, index is unhealthy: " + status));
final DateTime now = DateTime.now(DateTimeZone.UTC);
final Stopwatch sw = Stopwatch.createStarted();
final IndexRangeStats stats = indices.indexRangeStatsOfIndex(index);
final int duration = Ints.saturatedCast(sw.stop().elapsed(TimeUnit.MILLISECONDS));
LOG.info("Calculated range of [{}] in [{}ms].", index, duration);
return MongoIndexRange.create(index, stats.min(), stats.max(), now, duration, stats.streamIds());
} | @Test
@MongoDBFixtures("MongoIndexRangeServiceTest-EmptyCollection.json")
public void testCalculateRangeWithEmptyIndex() throws Exception {
final String index = "graylog";
when(indices.indexRangeStatsOfIndex(index)).thenReturn(IndexRangeStats.EMPTY);
when(indices.waitForRecovery(index)).thenReturn(HealthStatus.Green);
final IndexRange range = indexRangeService.calculateRange(index);
assertThat(range).isNotNull();
assertThat(range.indexName()).isEqualTo(index);
assertThat(range.begin()).isEqualTo(new DateTime(0L, DateTimeZone.UTC));
assertThat(range.end()).isEqualTo(new DateTime(0L, DateTimeZone.UTC));
} |
@Override
public boolean match(Message msg, StreamRule rule) {
if(msg.getField(Message.FIELD_GL2_SOURCE_INPUT) == null) {
return rule.getInverted();
}
final String value = msg.getField(Message.FIELD_GL2_SOURCE_INPUT).toString();
return rule.getInverted() ^ value.trim().equalsIgnoreCase(rule.getValue());
} | @Test
public void testSuccessfulMatch() {
StreamRule rule = getSampleRule();
rule.setValue("input-id-beef");
Message msg = getSampleMessage();
msg.addField(Message.FIELD_GL2_SOURCE_INPUT, "input-id-beef");
StreamRuleMatcher matcher = getMatcher(rule);
assertTrue(matcher.match(msg, rule));
} |
@Override
public Future<RestResponse> restRequest(RestRequest request)
{
return restRequest(request, new RequestContext());
} | @Test
public void testRestRetryOverLimit() throws Exception
{
SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/retry2"),
HttpClientFactory.UNLIMITED_CLIENT_REQUEST_RETRY_RATIO);
DynamicClient dynamicClient = new DynamicClient(balancer, null);
RetryClient client = new RetryClient(
dynamicClient,
balancer,
1,
RetryClient.DEFAULT_UPDATE_INTERVAL_MS,
RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM,
SystemClock.instance(),
true,
false);
URI uri = URI.create("d2://retryService?arg1=empty&arg2=empty");
RestRequest restRequest = new RestRequestBuilder(uri).build();
DegraderTrackerClientTest.TestCallback<RestResponse> restCallback = new DegraderTrackerClientTest.TestCallback<>();
client.restRequest(restRequest, restCallback);
assertNull(restCallback.t);
assertNotNull(restCallback.e);
assertTrue(restCallback.e.getMessage().contains("Data not available"));
} |
static void parseTargetAddress(HttpHost target, Span span) {
if (span.isNoop()) return;
if (target == null) return;
InetAddress address = target.getAddress();
if (address != null) {
if (span.remoteIpAndPort(address.getHostAddress(), target.getPort())) return;
}
span.remoteIpAndPort(target.getHostName(), target.getPort());
} | @Test void parseTargetAddress_IpAndPortFromHost() {
when(span.isNoop()).thenReturn(false);
when(span.remoteIpAndPort("1.2.3.4", 9999)).thenReturn(true);
HttpHost host = new HttpHost("1.2.3.4", 9999);
TracingHttpAsyncClientBuilder.parseTargetAddress(host, span);
verify(span).isNoop();
verify(span).remoteIpAndPort("1.2.3.4", 9999);
verifyNoMoreInteractions(span);
} |
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
throws IOException, ServletException {
if (bizConfig.isAdminServiceAccessControlEnabled()) {
HttpServletRequest request = (HttpServletRequest) req;
HttpServletResponse response = (HttpServletResponse) resp;
String token = request.getHeader(HttpHeaders.AUTHORIZATION);
if (!checkAccessToken(token)) {
logger.warn("Invalid access token: {} for uri: {}", token, request.getRequestURI());
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
return;
}
}
chain.doFilter(req, resp);
} | @Test
public void testWithAccessControlEnabledWithTokenSpecifiedWithValidTokenPassed()
throws Exception {
String someValidToken = "someToken";
when(bizConfig.isAdminServiceAccessControlEnabled()).thenReturn(true);
when(bizConfig.getAdminServiceAccessTokens()).thenReturn(someValidToken);
when(servletRequest.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(someValidToken);
authenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
verify(bizConfig, times(1)).isAdminServiceAccessControlEnabled();
verify(bizConfig, times(1)).getAdminServiceAccessTokens();
verify(filterChain, times(1)).doFilter(servletRequest, servletResponse);
verify(servletResponse, never()).sendError(anyInt(), anyString());
} |
@Override
public void putAll(Map<String, ?> vars) {
throw new UnsupportedOperationException();
} | @Test
public void testPutAllMapOfStringQ() {
assertThrowsUnsupportedOperation(
() -> unmodifiables.putAll(Collections.emptyMap()));
} |
public Timer add(long interval, Handler handler, Object... args)
{
if (handler == null) {
return null;
}
Utils.checkArgument(interval > 0, "Delay of a timer has to be strictly greater than 0");
final Timer timer = new Timer(this, interval, handler, args);
final boolean rc = insert(timer);
assert (rc);
return timer;
} | @Test
public void testAddFaultyHandler()
{
Timers.Timer timer = timers.add(10, null);
assertThat(timer, nullValue());
} |
@Override
public String resolveStaticUri(Exchange exchange, DynamicAwareEntry entry) {
String optimizedUri = null;
String uri = entry.getUri();
if (DynamicRouterControlConstants.SHOULD_OPTIMIZE.test(uri)) {
optimizedUri = URISupport.stripQuery(uri);
}
return optimizedUri;
} | @Test
void resolveStaticUri() throws Exception {
String originalUri = "dynamic-router-control:subscribe?subscriptionId=testSub1";
String uri = "dynamic-router-control://subscribe?subscriptionId=testSub1";
try (DynamicRouterControlChannelSendDynamicAware testSubject = new DynamicRouterControlChannelSendDynamicAware()) {
SendDynamicAware.DynamicAwareEntry entry = testSubject.prepare(exchange, uri, originalUri);
String result = testSubject.resolveStaticUri(exchange, entry);
assertEquals("dynamic-router-control://subscribe", result);
}
} |
public ImmutableSet<GrantDTO> getForGranteeWithCapability(GRN grantee, Capability capability) {
return streamQuery(DBQuery.and(
DBQuery.is(GrantDTO.FIELD_GRANTEE, grantee),
DBQuery.is(GrantDTO.FIELD_CAPABILITY, capability)
)).collect(ImmutableSet.toImmutableSet());
} | @Test
@MongoDBFixtures("grants.json")
public void getForGranteeWithCapability() {
final GRN jane = grnRegistry.newGRN("user", "jane");
final GRN john = grnRegistry.newGRN("user", "john");
assertThat(dbService.getForGranteeWithCapability(jane, Capability.MANAGE)).hasSize(1);
assertThat(dbService.getForGranteeWithCapability(jane, Capability.OWN)).hasSize(1);
assertThat(dbService.getForGranteeWithCapability(john, Capability.VIEW)).hasSize(1);
} |
public static DateTime dateTimeFromDouble(double x) {
return new DateTime(Math.round(x * 1000), DateTimeZone.UTC);
} | @Test
public void testTimeFromDouble() {
assertTrue(Tools.dateTimeFromDouble(1381076986.306509).toString().startsWith("2013-10-06T"));
assertTrue(Tools.dateTimeFromDouble(1381076986).toString().startsWith("2013-10-06T"));
assertTrue(Tools.dateTimeFromDouble(1381079085.6).toString().startsWith("2013-10-06T"));
assertTrue(Tools.dateTimeFromDouble(1381079085.06).toString().startsWith("2013-10-06T"));
} |
@Override
protected void doRefresh(final List<PluginData> dataList) {
pluginDataSubscriber.refreshPluginDataSelf(dataList);
dataList.forEach(pluginDataSubscriber::onSubscribe);
} | @Test
public void testDoRefresh() {
List<PluginData> pluginDataList = createFakePluginDataObjects(3);
pluginDataHandler.doRefresh(pluginDataList);
verify(subscriber).refreshPluginDataSelf(pluginDataList);
pluginDataList.forEach(verify(subscriber)::onSubscribe);
} |
public void update(Map<String, NamespaceBundleStats> bundleStats, int topk) {
arr.clear();
try {
var isLoadBalancerSheddingBundlesWithPoliciesEnabled =
pulsar.getConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled();
for (var etr : bundleStats.entrySet()) {
String bundle = etr.getKey();
// TODO: do not filter system topic while shedding
if (NamespaceService.isSystemServiceNamespace(NamespaceBundle.getBundleNamespace(bundle))) {
continue;
}
if (!isLoadBalancerSheddingBundlesWithPoliciesEnabled && hasPolicies(bundle)) {
continue;
}
arr.add(etr);
}
var topKBundlesLoadData = loadData.getTopBundlesLoadData();
topKBundlesLoadData.clear();
if (arr.isEmpty()) {
return;
}
topk = Math.min(topk, arr.size());
partitionSort(arr, topk);
for (int i = topk - 1; i >= 0; i--) {
var etr = arr.get(i);
topKBundlesLoadData.add(
new TopBundlesLoadData.BundleLoadData(etr.getKey(), (NamespaceBundleStats) etr.getValue()));
}
} finally {
arr.clear();
}
} | @Test
public void testLoadBalancerSheddingBundlesWithPoliciesEnabledConfig() throws MetadataStoreException {
setIsolationPolicy();
setAntiAffinityGroup();
configuration.setLoadBalancerSheddingBundlesWithPoliciesEnabled(true);
Map<String, NamespaceBundleStats> bundleStats = new HashMap<>();
var topKBundles = new TopKBundles(pulsar);
NamespaceBundleStats stats1 = new NamespaceBundleStats();
stats1.msgRateIn = 500;
bundleStats.put(bundle1, stats1);
NamespaceBundleStats stats2 = new NamespaceBundleStats();
stats2.msgRateIn = 10000;
bundleStats.put(bundle2, stats2);
topKBundles.update(bundleStats, 2);
assertEquals(topKBundles.getLoadData().getTopBundlesLoadData().size(), 2);
var top0 = topKBundles.getLoadData().getTopBundlesLoadData().get(0);
var top1 = topKBundles.getLoadData().getTopBundlesLoadData().get(1);
assertEquals(top0.bundleName(), bundle1);
assertEquals(top1.bundleName(), bundle2);
configuration.setLoadBalancerSheddingBundlesWithPoliciesEnabled(false);
topKBundles.update(bundleStats, 2);
assertEquals(topKBundles.getLoadData().getTopBundlesLoadData().size(), 0);
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
                // This is already logged as an error, but propagated here to perform any cleanup.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testReceiveFailedBatchTwiceWithTransactions() throws Exception {
ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3));
TransactionManager txnManager = new TransactionManager(logContext, "testFailTwice", 60000, 100, apiVersions);
setupWithTransactionState(txnManager);
doInitTransactions(txnManager, producerIdAndEpoch);
txnManager.beginTransaction();
txnManager.maybeAddPartition(tp0);
client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp0, Errors.NONE)));
sender.runOnce();
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce(); // send request
Node node = metadata.fetch().nodes().get(0);
time.sleep(2000L);
client.disconnect(node.idString(), true);
client.backoff(node, 10);
sender.runOnce(); // now expire the batch.
assertFutureFailure(request1, TimeoutException.class);
time.sleep(20);
sendIdempotentProducerResponse(0, tp0, Errors.INVALID_TXN_STATE, -1);
sender.runOnce(); // receive late response
// Loop once and confirm that the transaction manager does not enter a fatal error state
sender.runOnce();
assertTrue(txnManager.hasAbortableError());
TransactionalRequestResult result = txnManager.beginAbort();
sender.runOnce();
respondToEndTxn(Errors.NONE);
sender.runOnce();
assertTrue(txnManager::isInitializing);
prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
sender.runOnce();
assertTrue(txnManager::isReady);
assertTrue(result.isSuccessful());
result.await();
txnManager.beginTransaction();
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatResumeQuery() {
// Given:
final ResumeQuery query = ResumeQuery.query(Optional.empty(), new QueryId(
"FOO"));
// When:
final String formatted = SqlFormatter.formatSql(query);
// Then:
assertThat(formatted, is("RESUME FOO"));
} |
T call() throws IOException, RegistryException {
String apiRouteBase = "https://" + registryEndpointRequestProperties.getServerUrl() + "/v2/";
URL initialRequestUrl = registryEndpointProvider.getApiRoute(apiRouteBase);
return call(initialRequestUrl);
} | @Test
public void testCall_credentialsForcedOverHttp() throws IOException, RegistryException {
ResponseException unauthorizedException =
mockResponseException(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED);
setUpRegistryResponse(unauthorizedException);
System.setProperty(JibSystemProperties.SEND_CREDENTIALS_OVER_HTTP, "true");
try {
endpointCaller.call();
Assert.fail("Call should have failed");
} catch (RegistryCredentialsNotSentException ex) {
throw new AssertionError("should have sent credentials", ex);
} catch (RegistryUnauthorizedException ex) {
Assert.assertEquals("Unauthorized for serverUrl/imageName", ex.getMessage());
}
} |
static String resolveLocalRepoPath(String localRepoPath) {
// todo decouple home folder resolution
// find homedir
String home = System.getenv("ZEPPELIN_HOME");
if (home == null) {
home = System.getProperty("zeppelin.home");
}
if (home == null) {
home = "..";
}
return Paths.get(home).resolve(localRepoPath).toAbsolutePath().toString();
} | @Test
void should_throw_exception_for_null() {
assertThrows(NullPointerException.class, () -> {
Booter.resolveLocalRepoPath(null);
});
} |
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
} | @Test
public void poolReSubscribesWhenRecoverableErrorOccurs() throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis
.stubSubscribeToShard("shard-000", eventWithRecords(3))
.failWith(new ReadTimeoutException());
kinesis.stubSubscribeToShard("shard-000", eventWithRecords(3, 7));
kinesis.stubSubscribeToShard("shard-000", eventsWithoutRecords(10, 10));
kinesis.stubSubscribeToShard("shard-001", eventWithRecords(3));
kinesis
.stubSubscribeToShard("shard-001", eventWithRecords(3, 5))
.failWith(SdkClientException.create("this is recoverable", new ReadTimeoutException()));
kinesis.stubSubscribeToShard("shard-001", eventsWithoutRecords(8, 8));
KinesisReaderCheckpoint initialCheckpoint =
initialLatestCheckpoint(ImmutableList.of("shard-000", "shard-001"));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis, 1);
pool.start(initialCheckpoint);
PoolAssertion.assertPool(pool)
.givesCheckPointedRecords(
ShardAssertion.shard("shard-000")
.gives(KinesisRecordView.generate("shard-000", 0, 10))
.withLastCheckpointSequenceNumber(19),
ShardAssertion.shard("shard-001")
.gives(KinesisRecordView.generate("shard-001", 0, 8))
.withLastCheckpointSequenceNumber(15));
assertThat(kinesis.subscribeRequestsSeen())
.containsExactlyInAnyOrder(
subscribeLatest("shard-000"),
subscribeLatest("shard-001"),
subscribeAfterSeqNumber("shard-000", "2"),
subscribeAfterSeqNumber("shard-001", "2"),
subscribeAfterSeqNumber("shard-000", "9"),
subscribeAfterSeqNumber("shard-001", "7"),
subscribeAfterSeqNumber("shard-000", "19"),
subscribeAfterSeqNumber("shard-001", "15"));
} |
public static void main(String[] args) {
var queriesOr = new String[]{"many", "Annabel"};
var finder = Finders.expandedFinder(queriesOr);
var res = finder.find(text());
LOGGER.info("the result of expanded(or) query[{}] is {}", queriesOr, res);
var queriesAnd = new String[]{"Annabel", "my"};
finder = Finders.specializedFinder(queriesAnd);
res = finder.find(text());
LOGGER.info("the result of specialized(and) query[{}] is {}", queriesAnd, res);
finder = Finders.advancedFinder("it was", "kingdom", "sea");
res = finder.find(text());
LOGGER.info("the result of advanced query is {}", res);
res = Finders.filteredFinder(" was ", "many", "child").find(text());
LOGGER.info("the result of filtered query is {}", res);
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> CombinatorApp.main(new String[]{}));
} |
public void parse(DataByteArrayInputStream input, int readSize) throws Exception {
if (currentParser == null) {
currentParser = initializeHeaderParser();
}
// Parser stack will run until current incoming data has all been consumed.
currentParser.parse(input, readSize);
} | @Test
public void testProcessInChunks() throws Exception {
CONNECT connect = new CONNECT();
connect.cleanSession(false);
connect.clientId(new UTF8Buffer("test"));
connect.userName(new UTF8Buffer("user"));
connect.password(new UTF8Buffer("pass"));
DataByteArrayOutputStream output = new DataByteArrayOutputStream();
wireFormat.marshal(connect.encode(), output);
Buffer marshalled = output.toBuffer();
DataByteArrayInputStream input = new DataByteArrayInputStream(marshalled);
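    // Feed the marshalled frame to the codec in two chunks to exercise incremental parsing.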
int first = marshalled.length() / 2;
int second = marshalled.length() - first;
codec.parse(input, first);
codec.parse(input, second);
    assertFalse(frames.isEmpty());
assertEquals(1, frames.size());
connect = new CONNECT().decode(frames.get(0));
LOG.info("Unmarshalled: {}", connect);
assertFalse(connect.cleanSession());
assertEquals("user", connect.userName().toString());
assertEquals("pass", connect.password().toString());
assertEquals("test", connect.clientId().toString());
} |
@Override
public Collection<Integer> getOutboundPorts(EndpointQualifier endpointQualifier) {
final AdvancedNetworkConfig advancedNetworkConfig = node.getConfig().getAdvancedNetworkConfig();
if (advancedNetworkConfig.isEnabled()) {
EndpointConfig endpointConfig = advancedNetworkConfig.getEndpointConfigs().get(endpointQualifier);
final Collection<Integer> outboundPorts = endpointConfig != null
? endpointConfig.getOutboundPorts() : Collections.emptyList();
final Collection<String> outboundPortDefinitions = endpointConfig != null
? endpointConfig.getOutboundPortDefinitions() : Collections.emptyList();
return AddressUtil.getOutboundPorts(outboundPorts, outboundPortDefinitions);
}
final NetworkConfig networkConfig = node.getConfig().getNetworkConfig();
final Collection<Integer> outboundPorts = networkConfig.getOutboundPorts();
final Collection<String> outboundPortDefinitions = networkConfig.getOutboundPortDefinitions();
return AddressUtil.getOutboundPorts(outboundPorts, outboundPortDefinitions);
} | @Test
public void testGetOutboundPorts_zeroTakesPrecedenceInRange() {
networkConfig.addOutboundPortDefinition("0-100");
Collection<Integer> outboundPorts = serverContext.getOutboundPorts(MEMBER);
assertEquals(0, outboundPorts.size());
} |
public boolean addItem(Item item) {
if (items.size() < inventorySize) {
lock.lock();
try {
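      // Re-check capacity while holding the lock: another thread may have added an item between the unsynchronized check and lock acquisition.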
if (items.size() < inventorySize) {
items.add(item);
var thread = Thread.currentThread();
LOGGER.info("{}: items.size()={}, inventorySize={}", thread, items.size(), inventorySize);
return true;
}
} finally {
lock.unlock();
}
}
return false;
} | @Test
void testAddItem() {
assertTimeout(ofMillis(10000), () -> {
      // Create a new inventory with a limit of INVENTORY_SIZE items and put some load on the add method
final var inventory = new Inventory(INVENTORY_SIZE);
final var executorService = Executors.newFixedThreadPool(THREAD_COUNT);
IntStream.range(0, THREAD_COUNT).<Runnable>mapToObj(i -> () -> {
        while (inventory.addItem(new Item())) {
          // keep adding items until the inventory reports it is full
        }
}).forEach(executorService::execute);
// Wait until all threads have finished
executorService.shutdown();
executorService.awaitTermination(5, TimeUnit.SECONDS);
// Check the number of items in the inventory. It should not have exceeded the allowed maximum
final var items = inventory.getItems();
assertNotNull(items);
assertEquals(INVENTORY_SIZE, items.size());
assertEquals(INVENTORY_SIZE, appender.getLogSize());
      // ... and check that the logged inventory size increased one item at a time
IntStream.range(0, items.size())
.mapToObj(i -> appender.log.get(i).getFormattedMessage()
.contains("items.size()=" + (i + 1)))
.forEach(Assertions::assertTrue);
});
} |
@Override
public String toString() {
return toString(false);
} | @Test
public void testToStringNoQuota() {
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(1234).build();
String expected = " none inf none"
+ " inf ";
assertEquals(expected, quotaUsage.toString());
} |
public static <T> WithTimestamps<T> of(SerializableFunction<T, Instant> fn) {
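    // The second argument is the allowed timestamp skew; Duration.ZERO forbids shifting element timestamps backward.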
return new WithTimestamps<>(fn, Duration.ZERO);
} | @Test
@Category(ValidatesRunner.class)
public void withTimestampsWithNullFnShouldThrowOnConstruction() {
SerializableFunction<String, Instant> timestampFn = null;
thrown.expect(NullPointerException.class);
thrown.expectMessage("WithTimestamps fn cannot be null");
p.apply(Create.of("1234", "0", Integer.toString(Integer.MAX_VALUE)))
.apply(WithTimestamps.of(timestampFn));
p.run();
} |
@Override
public CRParseResult responseMessageForParseDirectory(String responseBody) {
ErrorCollection errors = new ErrorCollection();
try {
ResponseScratch responseMap = parseResponseForMigration(responseBody);
ParseDirectoryResponseMessage parseDirectoryResponseMessage;
if (responseMap.target_version == null) {
errors.addError("Plugin response message", "missing 'target_version' field");
return new CRParseResult(errors);
} else if (responseMap.target_version > CURRENT_CONTRACT_VERSION) {
String message = String.format("'target_version' is %s but the GoCD Server supports %s",
responseMap.target_version, CURRENT_CONTRACT_VERSION);
errors.addError("Plugin response message", message);
return new CRParseResult(errors);
} else {
int version = responseMap.target_version;
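                // Migrate the response payload one contract version at a time until it reaches the current version.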
while (version < CURRENT_CONTRACT_VERSION) {
version++;
responseBody = migrate(responseBody, version);
}
                // After migration, the JSON should match the current contract version.
parseDirectoryResponseMessage = codec.getGson().fromJson(responseBody, ParseDirectoryResponseMessage.class);
parseDirectoryResponseMessage.validateResponse(errors);
errors.addErrors(parseDirectoryResponseMessage.getPluginErrors());
return new CRParseResult(parseDirectoryResponseMessage.getEnvironments(), parseDirectoryResponseMessage.getPipelines(), errors);
}
} catch (Exception ex) {
StringBuilder builder = new StringBuilder();
builder.append("Unexpected error when handling plugin response").append('\n');
builder.append(ex);
// "location" of error is runtime. This is what user will see in config repo errors list.
errors.addError("runtime", builder.toString());
LOGGER.error(builder.toString(), ex);
return new CRParseResult(errors);
}
} | @Test
public void shouldErrorWhenTargetVersionOfPluginIsHigher() {
int targetVersion = JsonMessageHandler1_0.CURRENT_CONTRACT_VERSION + 1;
String json = "{\n" +
" \"target_version\" : " + targetVersion + ",\n" +
" \"pipelines\" : [],\n" +
" \"errors\" : []\n" +
"}";
CRParseResult result = handler.responseMessageForParseDirectory(json);
String errorMessage = String.format("'target_version' is %s but the GoCD Server supports %s", targetVersion, JsonMessageHandler1_0.CURRENT_CONTRACT_VERSION);
assertThat(result.getErrors().getErrorsAsText()).contains(errorMessage);
} |
@GetMapping(
path = "/api/{namespace}/{extension}",
produces = MediaType.APPLICATION_JSON_VALUE
)
@CrossOrigin
@Operation(summary = "Provides metadata of the latest version of an extension")
@ApiResponses({
@ApiResponse(
responseCode = "200",
description = "The extension metadata are returned in JSON format"
),
@ApiResponse(
responseCode = "404",
description = "The specified extension could not be found",
content = @Content()
),
@ApiResponse(
responseCode = "429",
description = "A client has sent too many requests in a given amount of time",
content = @Content(),
headers = {
@Header(
name = "X-Rate-Limit-Retry-After-Seconds",
description = "Number of seconds to wait after receiving a 429 response",
schema = @Schema(type = "integer", format = "int32")
),
@Header(
name = "X-Rate-Limit-Remaining",
description = "Remaining number of requests left",
schema = @Schema(type = "integer", format = "int32")
)
}
)
})
public ResponseEntity<ExtensionJson> getExtension(
@PathVariable @Parameter(description = "Extension namespace", example = "redhat")
String namespace,
@PathVariable @Parameter(description = "Extension name", example = "java")
String extension
) {
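        // Query each registry in order and return the first one that knows the extension.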
for (var registry : getRegistries()) {
try {
return ResponseEntity.ok()
.cacheControl(CacheControl.noCache().cachePublic())
.body(registry.getExtension(namespace, extension, null));
} catch (NotFoundException exc) {
// Try the next registry
}
}
var json = ExtensionJson.error("Extension not found: " + NamingUtil.toExtensionId(namespace, extension));
return new ResponseEntity<>(json, HttpStatus.NOT_FOUND);
} | @Test
public void testPreReleaseExtensionVersionNonDefaultTarget() throws Exception {
var extVersion = mockExtension("web");
extVersion.setPreRelease(true);
extVersion.setDisplayName("Foo Bar (web)");
Mockito.when(repositories.findExtensionVersion("foo", "bar", null, VersionAlias.PRE_RELEASE)).thenReturn(extVersion);
Mockito.when(repositories.findLatestVersionForAllUrls(extVersion.getExtension(), null, false, true)).thenReturn(extVersion);
Mockito.when(repositories.findLatestVersionForAllUrls(extVersion.getExtension(), null, true, true)).thenReturn(extVersion);
mockMvc.perform(get("/api/{namespace}/{extension}/{version}", "foo", "bar", "pre-release"))
.andExpect(status().isOk())
.andExpect(content().json(extensionJson(e -> {
e.namespace = "foo";
e.name = "bar";
e.version = "1.0.0";
e.verified = false;
e.timestamp = "2000-01-01T10:00Z";
e.displayName = "Foo Bar (web)";
e.versionAlias = List.of("pre-release", "latest");
e.preRelease = true;
})));
} |
@Override
public MastershipTerm getTermFor(NetworkId networkId, DeviceId deviceId) {
Map<DeviceId, NodeId> masterMap = getMasterMap(networkId);
Map<DeviceId, AtomicInteger> termMap = getTermMap(networkId);
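        // No term has been recorded for this device yet, so fall back to the sentinel term NOTHING.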
        if (termMap.get(deviceId) == null) {
return MastershipTerm.of(masterMap.get(deviceId), NOTHING);
}
return MastershipTerm.of(
masterMap.get(deviceId), termMap.get(deviceId).get());
} | @Test
public void getTermFor() {
put(VNID1, VDID1, N1, true, true);
assertEquals("wrong term", MastershipTerm.of(N1, 0),
sms.getTermFor(VNID1, VDID1));
//switch to N2 and back - 2 term switches
sms.setMaster(VNID1, N2, VDID1);
sms.setMaster(VNID1, N1, VDID1);
assertEquals("wrong term", MastershipTerm.of(N1, 2),
sms.getTermFor(VNID1, VDID1));
} |
public CompiledPipeline.CompiledExecution buildExecution() {
return buildExecution(false);
} | @SuppressWarnings({"unchecked"})
@Test
public void equalityCheckOnCompositeField() throws Exception {
final ConfigVariableExpander cve = ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider());
final PipelineIR pipelineIR = ConfigCompiler.configToPipelineIR(
IRHelpers.toSourceWithMetadata("input {mockinput{}} filter { if 4 == [list] { mockaddfilter {} } if 5 == [map] { mockaddfilter {} } } output {mockoutput{} }"),
false,
cve);
final Collection<String> s = new ArrayList<>();
s.add("foo");
final Map<String, Object> m = new HashMap<>();
m.put("foo", "bar");
final JrubyEventExtLibrary.RubyEvent testEvent =
JrubyEventExtLibrary.RubyEvent.newRubyEvent(RubyUtil.RUBY, new Event());
testEvent.getEvent().setField("list", ConvertedList.newFromList(s));
testEvent.getEvent().setField("map", ConvertedMap.newFromMap(m));
final Map<String, Supplier<IRubyObject>> filters = new HashMap<>();
filters.put("mockaddfilter", () -> ADD_FIELD_FILTER);
new CompiledPipeline(
pipelineIR,
new CompiledPipelineTest.MockPluginFactory(
Collections.singletonMap("mockinput", () -> null),
filters,
Collections.singletonMap("mockoutput", mockOutputSupplier())
)
).buildExecution().compute(RubyUtil.RUBY.newArray(testEvent), false, false);
final Collection<JrubyEventExtLibrary.RubyEvent> outputEvents = EVENT_SINKS.get(runId);
MatcherAssert.assertThat(outputEvents.size(), CoreMatchers.is(1));
MatcherAssert.assertThat(outputEvents.contains(testEvent), CoreMatchers.is(true));
MatcherAssert.assertThat(testEvent.getEvent().getField("foo"), CoreMatchers.nullValue());
} |
@SuppressWarnings({"unchecked", "rawtypes"})
public static Collection<ShardingSphereRule> build(final String databaseName, final DatabaseType protocolType, final DatabaseConfiguration databaseConfig,
final ComputeNodeInstanceContext computeNodeInstanceContext, final ResourceMetaData resourceMetaData) {
Collection<ShardingSphereRule> result = new LinkedList<>();
for (Entry<RuleConfiguration, DatabaseRuleBuilder> entry : getRuleBuilderMap(databaseConfig).entrySet()) {
RuleConfigurationChecker configChecker = OrderedSPILoader.getServicesByClass(
RuleConfigurationChecker.class, Collections.singleton(entry.getKey().getClass())).get(entry.getKey().getClass());
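            // A checker is optional: validate the configuration only when one is registered for this rule type.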
if (null != configChecker) {
configChecker.check(databaseName, entry.getKey(), resourceMetaData.getDataSourceMap(), result);
}
result.add(entry.getValue().build(entry.getKey(), databaseName, protocolType, resourceMetaData, result, computeNodeInstanceContext));
}
return result;
} | @Test
void assertBuild() {
Iterator<ShardingSphereRule> actual = DatabaseRulesBuilder.build("foo_db", new MySQLDatabaseType(),
new DataSourceProvidedDatabaseConfiguration(Collections.emptyMap(), Collections.singleton(new FixtureRuleConfiguration())), mock(ComputeNodeInstanceContext.class),
mock(ResourceMetaData.class)).iterator();
assertThat(actual.next(), instanceOf(FixtureRule.class));
assertFalse(actual.hasNext());
} |
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
String returnCommand = null;
String subCommand = safeReadLine(reader, false);
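        // The first line of the payload selects the sub-command: field get or field set.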
if (subCommand.equals(FIELD_GET_SUB_COMMAND_NAME)) {
returnCommand = getField(reader);
} else if (subCommand.equals(FIELD_SET_SUB_COMMAND_NAME)) {
returnCommand = setField(reader);
} else {
returnCommand = Protocol.getOutputErrorCommand("Unknown Field SubCommand Name: " + subCommand);
}
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
writer.flush();
} | @Test
public void testPrivateMember() {
String inputCommand = "g\n" + target + "\nfield1\ne\n";
try {
command.execute("f", new BufferedReader(new StringReader(inputCommand)), writer);
assertEquals("!yo\n", sWriter.toString());
} catch (Exception e) {
e.printStackTrace();
fail();
}
} |