focal_method | test_case |
---|---|
public Object getCell(final int columnIndex) {
Preconditions.checkArgument(columnIndex > 0 && columnIndex < data.size() + 1);
return data.get(columnIndex - 1);
} | @Test
void assertGetCellWithOutOfIndex() {
assertThrows(IllegalArgumentException.class, () -> new LocalDataQueryResultRow().getCell(1));
} |
@Override
public void upload(UploadTask uploadTask) throws IOException {
Throwable error = getErrorSafe();
if (error != null) {
LOG.debug("don't persist {} changesets, already failed", uploadTask.changeSets.size());
uploadTask.fail(error);
return;
}
LOG.debug("persist {} changeSets", uploadTask.changeSets.size());
try {
long size = uploadTask.getSize();
synchronized (lock) {
while (!uploadThrottle.hasCapacity()) {
lock.wait();
}
uploadThrottle.seizeCapacity(size);
if (!uploadThrottle.hasCapacity()) {
availabilityHelper.resetUnavailable();
}
scheduledBytesCounter += size;
scheduled.add(wrapWithSizeUpdate(uploadTask, size));
scheduleUploadIfNeeded();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
uploadTask.fail(e);
throw new IOException(e);
} catch (Exception e) {
uploadTask.fail(e);
throw e;
}
} | @Test
void testSizeThreshold() throws Exception {
int numChanges = 7;
int changeSize = 11;
int threshold = changeSize * numChanges;
withStore(
Integer.MAX_VALUE,
threshold,
MAX_BYTES_IN_FLIGHT,
(store, probe) -> {
List<StateChangeSet> expected = new ArrayList<>();
int runningSize = 0;
for (int i = 0; i < numChanges; i++) {
List<StateChangeSet> changes = getChanges(changeSize);
runningSize += changes.stream().mapToLong(StateChangeSet::getSize).sum();
upload(store, changes);
expected.addAll(changes);
if (runningSize >= threshold) {
assertSaved(probe, expected);
} else {
assertThat(probe.getUploaded()).isEmpty();
}
}
});
} |
@Udf(description = "Returns a masked version of the input string. All characters of the input"
+ " will be replaced according to the default masking rules.")
@SuppressWarnings("MethodMayBeStatic") // Invoked via reflection
public String mask(
@UdfParameter("input STRING to be masked") final String input
) {
return doMask(new Masker(), input);
} | @Test
public void shouldReturnNullForNullInput() {
final String result = udf.mask(null);
assertThat(result, is(nullValue()));
} |
@Override
public String toString() {
return instance.toString();
} | @Test
public void testToString() {
MapWritable map = new MapWritable();
final IntWritable key = new IntWritable(5);
final Text value = new Text("value");
map.put(key, value);
assertEquals("{5=value}", map.toString());
} |
@VisibleForTesting
JarRunHandler getJarRunHandler() {
return jarRunHandler;
} | @Test
void applicationsRunInSeparateThreads(@TempDir Path tempDir) throws Exception {
final Path uploadDir = Files.createDirectories(tempDir.resolve("uploadDir"));
// create a copy because the upload handler moves uploaded jars (it assumes they are
// temporary files)
final Path jarFile =
Files.copy(
Paths.get(System.getProperty("targetDir")).resolve(JAR_NAME),
tempDir.resolve("app.jar"));
final DispatcherGateway dispatcherGateway = TestingDispatcherGateway.newBuilder().build();
final ThreadCapturingApplicationRunner threadCapturingApplicationRunner =
new ThreadCapturingApplicationRunner();
final WebSubmissionExtension webSubmissionExtension =
new WebSubmissionExtension(
new Configuration(),
() -> CompletableFuture.completedFuture(dispatcherGateway),
Collections.emptyMap(),
new CompletableFuture<>(),
uploadDir,
Executors.directExecutor(),
Time.of(5, TimeUnit.SECONDS),
() -> threadCapturingApplicationRunner);
final String jarId = uploadJar(webSubmissionExtension, jarFile, dispatcherGateway);
final JarRunHandler jarRunHandler = webSubmissionExtension.getJarRunHandler();
final JarRunMessageParameters parameters = new JarRunMessageParameters();
parameters.jarIdPathParameter.resolve(jarId);
final HandlerRequest<JarRunRequestBody> runRequest =
HandlerRequest.create(new JarRunRequestBody(), parameters);
// run several applications in sequence, and verify that each thread is unique
int numApplications = 20;
for (int i = 0; i < numApplications; i++) {
jarRunHandler.handleRequest(runRequest, dispatcherGateway).get();
}
assertThat(threadCapturingApplicationRunner.getThreads().size()).isEqualTo(numApplications);
} |
public static Sensor pollRatioSensor(final String threadId,
final StreamsMetricsImpl streamsMetrics) {
final Sensor sensor =
streamsMetrics.threadLevelSensor(threadId, POLL + RATIO_SUFFIX, Sensor.RecordingLevel.INFO);
final Map<String, String> tagMap = streamsMetrics.threadLevelTagMap(threadId);
addValueMetricToSensor(
sensor,
THREAD_LEVEL_GROUP,
tagMap,
POLL + RATIO_SUFFIX,
POLL_RATIO_DESCRIPTION
);
return sensor;
} | @Test
public void shouldGetPollRatioSensor() {
final String operation = "poll-ratio";
final String ratioDescription = "The fraction of time the thread spent on polling records from consumer";
when(streamsMetrics.threadLevelSensor(THREAD_ID, operation, RecordingLevel.INFO)).thenReturn(expectedSensor);
when(streamsMetrics.threadLevelTagMap(THREAD_ID)).thenReturn(tagMap);
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = ThreadMetrics.pollRatioSensor(THREAD_ID, streamsMetrics);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addValueMetricToSensor(
expectedSensor,
THREAD_LEVEL_GROUP,
tagMap,
operation,
ratioDescription
)
);
assertThat(sensor, is(expectedSensor));
}
} |
@Override
public boolean matchToken(TokenQueue tokenQueue, List<Token> matchedTokenList) {
matchedTokenList.add(tokenQueue.poll());
return true;
} | @Test
public void shouldMatch() {
Token t1 = new Token("a", 1, 1);
Token t2 = new Token("b", 2, 1);
TokenQueue tokenQueue = spy(new TokenQueue(Arrays.asList(t1, t2)));
List<Token> output = mock(List.class);
AnyTokenMatcher matcher = new AnyTokenMatcher();
assertThat(matcher.matchToken(tokenQueue, output), is(true));
verify(tokenQueue).poll();
verifyNoMoreInteractions(tokenQueue);
verify(output).add(t1);
verifyNoMoreInteractions(output);
} |
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
assert shenyuContext != null;
DivideRuleHandle ruleHandle = buildRuleHandle(rule);
if (ruleHandle.getHeaderMaxSize() > 0) {
long headerSize = exchange.getRequest().getHeaders().values()
.stream()
.flatMap(Collection::stream)
.mapToLong(header -> header.getBytes(StandardCharsets.UTF_8).length)
.sum();
if (headerSize > ruleHandle.getHeaderMaxSize()) {
LOG.error("request header is too large");
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.REQUEST_HEADER_TOO_LARGE);
return WebFluxResultUtils.result(exchange, error);
}
}
if (ruleHandle.getRequestMaxSize() > 0) {
if (exchange.getRequest().getHeaders().getContentLength() > ruleHandle.getRequestMaxSize()) {
LOG.error("request entity is too large");
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.REQUEST_ENTITY_TOO_LARGE);
return WebFluxResultUtils.result(exchange, error);
}
}
List<Upstream> upstreamList = UpstreamCacheManager.getInstance().findUpstreamListBySelectorId(selector.getId());
if (CollectionUtils.isEmpty(upstreamList)) {
LOG.error("divide upstream configuration error: {}", selector);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.CANNOT_FIND_HEALTHY_UPSTREAM_URL);
return WebFluxResultUtils.result(exchange, error);
}
String ip = Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress();
Upstream upstream = LoadBalancerFactory.selector(upstreamList, ruleHandle.getLoadBalance(), ip);
if (Objects.isNull(upstream)) {
LOG.error("divide has no upstream");
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.CANNOT_FIND_HEALTHY_UPSTREAM_URL);
return WebFluxResultUtils.result(exchange, error);
}
// set the http url
if (CollectionUtils.isNotEmpty(exchange.getRequest().getHeaders().get(Constants.SPECIFY_DOMAIN))) {
upstream.setUrl(exchange.getRequest().getHeaders().get(Constants.SPECIFY_DOMAIN).get(0));
}
// set domain
String domain = upstream.buildDomain();
exchange.getAttributes().put(Constants.HTTP_DOMAIN, domain);
// set the http timeout
exchange.getAttributes().put(Constants.HTTP_TIME_OUT, ruleHandle.getTimeout());
exchange.getAttributes().put(Constants.HTTP_RETRY, ruleHandle.getRetry());
// set the retry strategy
exchange.getAttributes().put(Constants.RETRY_STRATEGY, StringUtils.defaultString(ruleHandle.getRetryStrategy(), RetryEnum.CURRENT.getName()));
exchange.getAttributes().put(Constants.LOAD_BALANCE, StringUtils.defaultString(ruleHandle.getLoadBalance(), LoadBalanceEnum.RANDOM.getName()));
exchange.getAttributes().put(Constants.DIVIDE_SELECTOR_ID, selector.getId());
if (ruleHandle.getLoadBalance().equals(P2C)) {
return chain.execute(exchange).doOnSuccess(e -> responseTrigger(upstream))
.doOnError(throwable -> responseTrigger(upstream));
} else if (ruleHandle.getLoadBalance().equals(SHORTEST_RESPONSE)) {
beginTime = System.currentTimeMillis();
return chain.execute(exchange).doOnSuccess(e -> successResponseTrigger(upstream));
}
return chain.execute(exchange);
} | @Test
public void doExecuteTest() {
when(chain.execute(exchange)).thenReturn(Mono.empty());
Mono<Void> result = dividePlugin.doExecute(exchange, chain, selectorData, ruleData);
StepVerifier.create(result).expectSubscription().verifyComplete();
DivideRuleHandle divideRuleHandle = DividePluginDataHandler.CACHED_HANDLE.get()
.obtainHandle(CacheKeyUtils.INST.getKey(ruleData));
divideRuleHandle.setHeaderMaxSize(1);
// hit `ruleHandle.getHeaderMaxSize() > 0`
dividePlugin.doExecute(exchange, chain, selectorData, ruleData);
// hit `ruleHandle.getRequestMaxSize() > 0`
divideRuleHandle.setHeaderMaxSize(0);
divideRuleHandle.setRequestMaxSize(1);
dividePlugin.doExecute(exchange, chain, selectorData, ruleData);
// hit `CollectionUtils.isEmpty(upstreamList)`
divideRuleHandle.setRequestMaxSize(0);
UpstreamCacheManager.getInstance().removeByKey(selectorData.getId());
when(selectorData.getHandle()).thenReturn(null);
dividePlugin.doExecute(exchange, chain, selectorData, ruleData);
// hit `Objects.isNull(upstream)`
MockedStatic<LoadBalancerFactory> loadBalancerFactoryMockedStatic = mockStatic(LoadBalancerFactory.class);
loadBalancerFactoryMockedStatic.when(() -> LoadBalancerFactory.selector(any(), any(), any()))
.thenReturn(null);
dividePlugin.doExecute(exchange, chain, selectorData, ruleData);
// hit `assert shenyuContext != null`
exchange.getAttributes().remove(Constants.CONTEXT);
assertThrows(AssertionError.class, () -> dividePlugin.doExecute(exchange, chain, selectorData, ruleData));
} |
public static boolean isBlank(CharSequence str) {
if (str == null) {
return true;
}
int length = str.length();
if (length == 0) {
return true;
}
for (int i = 0; i < length; i++) {
char c = str.charAt(i);
boolean charIsBlank = Character.isWhitespace(c) || Character.isSpaceChar(c) || c == '\ufeff' || c == '\u202a';
if (!charIsBlank) {
return false;
}
}
return true;
} | @Test
public void isBlank() {
String string = "";
Assert.assertTrue(StringUtil.isBlank(string));
} |
@Override
public boolean canManageResource(EfestoResource toProcess) {
return toProcess instanceof EfestoFileResource && ((EfestoFileResource) toProcess).getModelType().equalsIgnoreCase(PMML_STRING);
} | @Test
void canManageResource() throws IOException {
String fileName = "LinearRegressionSample.pmml";
File pmmlFile = getFileFromFileName(fileName).orElseThrow(() -> new RuntimeException("Failed to get pmmlFile"));
EfestoFileResource toProcess = new EfestoFileResource(pmmlFile);
assertThat(kieCompilerService.canManageResource(toProcess)).isTrue();
EfestoInputStreamResource notToProcess = new EfestoInputStreamResource(Files.newInputStream(pmmlFile.toPath()), fileName);
assertThat(kieCompilerService.canManageResource(notToProcess)).isFalse();
} |
public static double getDoubleValue(ConstantOperator constantOperator) {
OptionalDouble optionalDouble = doubleValueFromConstant(constantOperator);
if (optionalDouble.isPresent()) {
return optionalDouble.getAsDouble();
} else {
return Double.NaN;
}
} | @Test
public void getDoubleValue() {
ConstantOperator constant0 = ConstantOperator.createTinyInt((byte) 1);
ConstantOperator constant1 = ConstantOperator.createInt(1000);
ConstantOperator constant2 = ConstantOperator.createSmallInt((short) 12);
ConstantOperator constant3 = ConstantOperator.createBigint(1000000);
ConstantOperator constant4 = ConstantOperator.createFloat(1.5);
ConstantOperator constant5 = ConstantOperator.createDouble(6.789);
ConstantOperator constant6 = ConstantOperator.createBoolean(true);
ConstantOperator constant7 = ConstantOperator.createBoolean(false);
ConstantOperator constant8 = ConstantOperator.createDate(LocalDateTime.of(2003, 10, 11, 23, 56, 25));
ConstantOperator constant9 = ConstantOperator.createDatetime(LocalDateTime.of(2003, 10, 11, 23, 56, 25));
ConstantOperator constant10 = ConstantOperator.createTime(124578990d);
ConstantOperator constant11 = ConstantOperator.createVarchar("123");
assertEquals(ConstantOperatorUtils.getDoubleValue(constant0), 1, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant1), 1000, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant2), 12, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant3), 1000000, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant4), 1.5, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant5), 6.789, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant6), 1, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant7), 0, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant8), 1065887785, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant9), 1065887785, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant10), 124578990, 0.1);
assertEquals(ConstantOperatorUtils.getDoubleValue(constant11), Double.NaN, 0.0);
} |
public AmazonInfo build() {
return new AmazonInfo(Name.Amazon.name(), metadata);
} | @Test
public void payloadWithOtherStuffBeforeAndAfterMetadata() throws IOException {
String json = "{"
+ " \"@class\": \"com.netflix.appinfo.AmazonInfo\","
+ " \"foo\": \"bar\","
+ " \"metadata\": {"
+ " \"instance-id\": \"i-12345\""
+ " },"
+ " \"bar\": \"baz\","
+ " \"name\": \"Amazon\""
+ "}";
AmazonInfo info = newMapper().readValue(json, AmazonInfo.class);
AmazonInfo expected = AmazonInfo.Builder.newBuilder()
.addMetadata(AmazonInfo.MetaDataKey.instanceId, "i-12345")
.build();
Assert.assertEquals(expected, nonCompact(info));
} |
public void parse(InputStream file) throws IOException, TikaException {
UnsynchronizedByteArrayOutputStream xmpraw = UnsynchronizedByteArrayOutputStream.builder().get();
if (!scanner.parse(file, xmpraw)) {
return;
}
XMPMetadata xmp = null;
try (InputStream decoded = xmpraw.toInputStream()) {
Document dom = XMLReaderUtils.buildDOM(decoded, EMPTY_PARSE_CONTEXT);
if (dom != null) {
xmp = new XMPMetadata(dom);
}
} catch (IOException | SAXException e) {
// ignore malformed XMP; continue with xmp left as null
}
extractDublinCore(xmp, metadata);
extractXMPMM(xmp, metadata);
} | @Test
public void testParseJpeg() throws IOException, TikaException {
Metadata metadata = new Metadata();
try (InputStream stream = getResourceAsStream("/test-documents/testJPEG_commented.jpg")) {
// set some values before extraction to see that they are overridden
metadata.set(TikaCoreProperties.TITLE, "old title");
metadata.set(TikaCoreProperties.DESCRIPTION, "old description");
metadata.set(TikaCoreProperties.CREATOR, "previous author");
// ... or kept in case the field is multi-value
metadata.add(TikaCoreProperties.SUBJECT, "oldkeyword");
JempboxExtractor extractor = new JempboxExtractor(metadata);
extractor.parse(stream);
// DublinCore fields
assertEquals("Tosteberga \u00C4ngar", metadata.get(TikaCoreProperties.TITLE));
assertEquals("Bird site in north eastern Sk\u00E5ne, Sweden.\n(new line)",
metadata.get(TikaCoreProperties.DESCRIPTION));
assertEquals("Some Tourist", metadata.get(TikaCoreProperties.CREATOR));
Collection<String> keywords =
Arrays.asList(metadata.getValues(TikaCoreProperties.SUBJECT));
assertTrue(keywords.contains("oldkeyword"));
assertTrue(keywords.contains("grazelands"));
assertTrue(keywords.contains("nature reserve"));
assertTrue(keywords.contains("bird watching"));
assertTrue(keywords.contains("coast"));
}
} |
public static int toSeconds(TimeRange timeRange) {
if (timeRange.getFrom() == null || timeRange.getTo() == null) {
return 0;
}
try {
return Seconds.secondsBetween(timeRange.getFrom(), timeRange.getTo()).getSeconds();
} catch (IllegalArgumentException e) {
return 0;
}
} | @Test
public void toSecondsReturnsCorrectNumberOfSeconds() throws Exception {
DateTime from = DateTime.now(DateTimeZone.UTC);
DateTime to = from.plusMinutes(5);
assertThat(TimeRanges.toSeconds(AbsoluteRange.create(from, to))).isEqualTo(300);
assertThat(TimeRanges.toSeconds(RelativeRange.create(300))).isEqualTo(300);
assertThat(TimeRanges.toSeconds(KeywordRange.create("last 5 minutes", "Etc/UTC"))).isEqualTo(300);
} |
@PostConstruct
public void start()
{
List<SourceQuery> sourceQueries = sourceQuerySupplier.get();
log.info("Total Queries: %s", sourceQueries.size());
sourceQueries = applyOverrides(sourceQueries);
sourceQueries = applyWhitelist(sourceQueries);
sourceQueries = applyBlacklist(sourceQueries);
sourceQueries = filterQueryType(sourceQueries);
sourceQueries = applyCustomFilters(sourceQueries);
submit(sourceQueries);
reportProgressUntilFinished();
} | @Test
public void testVerifierError()
{
VerificationManager manager = getVerificationManager(ImmutableList.of(SOURCE_QUERY), new MockPrestoAction(new RuntimeException()), VERIFIER_CONFIG);
manager.start();
List<VerifierQueryEvent> events = eventClient.getEvents();
assertEquals(events.size(), 1);
assertEquals(events.get(0).getStatus(), SKIPPED.name());
assertEquals(events.get(0).getSkippedReason(), VERIFIER_INTERNAL_ERROR.name());
assertEquals(events.get(0).getErrorCode(), "VERIFIER_INTERNAL_ERROR");
} |
@Override
public PrimitiveIterator.OfInt iterator() {
return new SingleIntIterator();
} | @Test
public void testIterator() throws Exception {
IntSet sis = new SingletonIntSet(3);
PrimitiveIterator.OfInt iterator = sis.iterator();
assertEquals(3, iterator.nextInt());
assertFalse(iterator.hasNext());
} |
@Override
public int hashCode() {
int result = 1;
result = 31 * result + Objects.hashCode(username);
result = 31 * result + Objects.hashCode(getPasswordValue());
result = 31 * result + Objects.hashCode(getSocketAddress().get());
result = 31 * result + Boolean.hashCode(getNonProxyHostsValue());
result = 31 * result + Objects.hashCode(httpHeaders.get());
result = 31 * result + Objects.hashCode(getType());
result = 31 * result + Long.hashCode(connectTimeoutMillis);
return result;
} | @Test
void equalProxyProviders() {
assertThat(createProxy(ADDRESS_1, PASSWORD_1)).isEqualTo(createProxy(ADDRESS_1, PASSWORD_1));
assertThat(createProxy(ADDRESS_1, PASSWORD_1).hashCode()).isEqualTo(createProxy(ADDRESS_1, PASSWORD_1).hashCode());
} |
@Override
public PluginDescriptor find(Path pluginPath) {
Plugin plugin = yamlPluginFinder.find(pluginPath);
return convert(plugin);
} | @Test
void find() throws JSONException {
PluginDescriptor pluginDescriptor = yamlPluginDescriptorFinder.find(testFile.toPath());
String actual = JsonUtils.objectToJson(pluginDescriptor);
JSONAssert.assertEquals("""
{
"pluginId": "fake-plugin",
"pluginDescription": "Fake description",
"pluginClass": "run.halo.app.plugin.BasePlugin",
"version": "0.0.2",
"requires": ">=2.0.0",
"provider": "johnniang",
"dependencies": [],
"license": "GPLv3"
}
""",
actual,
false);
} |
public static DateTime convertToDateTime(@Nonnull Object value) {
if (value instanceof DateTime) {
return (DateTime) value;
}
if (value instanceof Date) {
return new DateTime(value, DateTimeZone.UTC);
} else if (value instanceof ZonedDateTime) {
final DateTimeZone dateTimeZone = DateTimeZone.forTimeZone(TimeZone.getTimeZone(((ZonedDateTime) value).getZone()));
return new DateTime(Date.from(((ZonedDateTime) value).toInstant()), dateTimeZone);
} else if (value instanceof OffsetDateTime) {
return new DateTime(Date.from(((OffsetDateTime) value).toInstant()), DateTimeZone.UTC);
} else if (value instanceof LocalDateTime) {
final LocalDateTime localDateTime = (LocalDateTime) value;
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof LocalDate) {
final LocalDate localDate = (LocalDate) value;
final LocalDateTime localDateTime = localDate.atStartOfDay();
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof Instant) {
return new DateTime(Date.from((Instant) value), DateTimeZone.UTC);
} else if (value instanceof String) {
return ES_DATE_FORMAT_FORMATTER.parseDateTime((String) value);
} else {
throw new IllegalArgumentException("Value of invalid type <" + value.getClass().getSimpleName() + "> provided");
}
} | @Test
@SuppressForbidden("Comparing twice with default timezone is okay in tests")
void convertFromLocalDateTime() {
final LocalDateTime input = LocalDateTime.of(2021, Month.AUGUST, 19, 12, 0);
final DateTime output = DateTimeConverter.convertToDateTime(input);
// both input and output are represented with local timezone.
final DateTime expectedOutput = new DateTime(2021, 8, 19, 12, 0);
assertThat(output).isEqualTo(expectedOutput);
} |
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
} | @Test
void testReadFieldsNestedPojo() {
String[] readFields = {"pojo1.int2; string1; pojo1.string1"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, null, null, readFields, nestedPojoType, intType);
FieldSet fs = sp.getReadFields(0);
assertThat(fs).containsExactly(2, 4, 5);
readFields[0] = "pojo1.*";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, null, null, readFields, nestedPojoType, intType);
fs = sp.getReadFields(0);
assertThat(fs).containsExactly(1, 2, 3, 4);
readFields[0] = "pojo1";
sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, null, null, readFields, nestedPojoType, intType);
fs = sp.getReadFields(0);
assertThat(fs).containsExactly(1, 2, 3, 4);
} |
String addPath(String path) {
// Make sure the paths always start and end with a slash.
// This simplifies later comparison with the request path.
if (!path.startsWith("/")) {
path = "/" + path;
}
if (!path.endsWith("/")) {
path = path + "/";
}
resourcePrefixes.add(path);
return path;
} | @Test
public void test_addPath() {
ResourceCacheControl resourceCacheControl = new ResourceCacheControl();
Assert.assertEquals("/a/b/", resourceCacheControl.addPath("a/b"));
Assert.assertEquals("/a/b/", resourceCacheControl.addPath("/a/b"));
Assert.assertEquals("/a/b/", resourceCacheControl.addPath("a/b/"));
Assert.assertEquals("/a/b/", resourceCacheControl.addPath("/a/b/"));
} |
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
String authorization = StringUtils.defaultString(exchange.getRequest().getHeaders().getFirst(HttpHeaders.AUTHORIZATION), exchange.getRequest().getURI().getUserInfo());
BasicAuthRuleHandle basicAuthRuleHandle = BasicAuthPluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(rule));
BasicAuthAuthenticationStrategy authenticationStrategy = Optional.ofNullable(basicAuthRuleHandle).map(BasicAuthRuleHandle::getBasicAuthAuthenticationStrategy).orElse(null);
if (authenticationStrategy != null && authenticationStrategy.authenticate(basicAuthRuleHandle, authorization)) {
return chain.execute(exchange);
}
return WebFluxResultUtils.result(exchange, ShenyuResultWrap.error(exchange, ShenyuResultEnum.ERROR_TOKEN));
} | @Test
public void testDoExecute() {
ruleData.setHandle("{\"authorization\":\"test:test123\"}");
basicAuthPluginDataHandler.handlerRule(ruleData);
when(this.chain.execute(any())).thenReturn(Mono.empty());
StepVerifier.create(basicAuthPlugin.doExecute(exchange, chain, selectorData, ruleData)).expectSubscription().verifyComplete();
verify(chain).execute(exchange);
} |
public OpenConfigConfigOfAssignmentHandler addAllocation(BigDecimal allocation) {
modelObject.allocation(allocation);
return this;
} | @Test
public void testAddAllocation() {
// test Handler
OpenConfigConfigOfAssignmentHandler config = new OpenConfigConfigOfAssignmentHandler(parent);
// call addAllocation
config.addAllocation(BigDecimal.valueOf(4));
// expected ModelObject
DefaultConfig modelObject = new DefaultConfig();
modelObject.allocation(BigDecimal.valueOf(4));
assertEquals("[NG]addAllocation:ModelObject(Allocation added) is not an expected one.\n",
modelObject, config.getModelObject());
} |
@Override
public void pluginUnLoaded(GoPluginDescriptor pluginDescriptor) {
if (scmExtension.canHandlePlugin(pluginDescriptor.id())) {
scmMetadataStore.removeMetadata(pluginDescriptor.id());
}
} | @Test
public void shouldRemoveMetadataOnPluginUnLoadedCallback() throws Exception {
SCMMetadataStore.getInstance().addMetadataFor(pluginDescriptor.id(), new SCMConfigurations(), createSCMView(null, null));
when(scmExtension.canHandlePlugin(pluginDescriptor.id())).thenReturn(true);
metadataLoader.pluginUnLoaded(pluginDescriptor);
assertThat(SCMMetadataStore.getInstance().getConfigurationMetadata(pluginDescriptor.id())).isNull();
assertThat(SCMMetadataStore.getInstance().getViewMetadata(pluginDescriptor.id())).isNull();
} |
public static CopyFilter getCopyFilter(Configuration conf) {
String filtersClassName = conf
.get(DistCpConstants.CONF_LABEL_FILTERS_CLASS);
if (filtersClassName != null) {
try {
Class<? extends CopyFilter> filtersClass = conf
.getClassByName(filtersClassName)
.asSubclass(CopyFilter.class);
filtersClassName = filtersClass.getName();
Constructor<? extends CopyFilter> constructor = filtersClass
.getDeclaredConstructor(Configuration.class);
return constructor.newInstance(conf);
} catch (Exception e) {
LOG.error(DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG +
filtersClassName, e);
throw new RuntimeException(
DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG +
filtersClassName, e);
}
} else {
return getDefaultCopyFilter(conf);
}
} | @Test
public void testGetCopyFilterEmptyString() throws Exception {
final String filterName = "";
Configuration configuration = new Configuration(false);
configuration.set(DistCpConstants.CONF_LABEL_FILTERS_CLASS, filterName);
intercept(RuntimeException.class,
DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filterName,
() -> CopyFilter.getCopyFilter(configuration));
} |
public static void main(String[] args) {
demonstrateTreasureChestIteratorForType(RING);
demonstrateTreasureChestIteratorForType(POTION);
demonstrateTreasureChestIteratorForType(WEAPON);
demonstrateTreasureChestIteratorForType(ANY);
demonstrateBstIterator();
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public static Object eval(String expression, Map<String, Object> context) {
return eval(expression, context, ListUtil.empty());
} | @Test
public void evalTest(){
final Dict dict = Dict.create()
.set("a", 100.3)
.set("b", 45)
.set("c", -199.100);
final Object eval = ExpressionUtil.eval("a-(b-c)", dict);
assertEquals(-143.8, (double)eval, 0);
} |
@Override
protected InputStream decorate(final InputStream in, final MessageDigest digest) throws IOException {
if(null == digest) {
log.warn("MD5 calculation disabled");
return super.decorate(in, null);
}
else {
return new DigestInputStream(in, digest);
}
} | @Test
public void testDecorate() throws Exception {
final NullInputStream n = new NullInputStream(1L);
assertSame(NullInputStream.class, new SwiftSmallObjectUploadFeature(session, new SwiftWriteFeature(
session, new SwiftRegionService(session))).decorate(n, null).getClass());
} |
public void removeMembership(String groupMembershipUuid) {
try (DbSession dbSession = dbClient.openSession(false)) {
UserGroupDto userGroupDto = findMembershipOrThrow(groupMembershipUuid, dbSession);
removeMembership(userGroupDto.getGroupUuid(), userGroupDto.getUserUuid());
}
} | @Test
public void removeMemberByMembershipUuid_ifFound_shouldRemoveMemberFromGroup() {
mockAdminInGroup(GROUP_A, USER_1);
GroupDto groupDto = mockGroupDto();
UserDto userDto = mockUserDto();
UserGroupDto userGroupDto = new UserGroupDto().setUuid(UUID).setUserUuid(USER_1).setGroupUuid(GROUP_A);
when(userGroupDao.selectByQuery(any(), any(), anyInt(), anyInt())).thenReturn(List.of(userGroupDto));
groupMembershipService.removeMembership(UUID);
verify(userGroupDao).selectByQuery(dbSession, new UserGroupQuery(UUID, null, null), 1, 1);
verify(userGroupDao).delete(dbSession, groupDto, userDto);
verify(dbSession).commit();
} |
public void delete(DbSession dbSession, GroupDto group) {
checkGroupIsNotDefault(dbSession, group);
checkNotTryingToDeleteLastAdminGroup(dbSession, group);
removeGroupPermissions(dbSession, group);
removeGroupFromPermissionTemplates(dbSession, group);
removeGroupMembers(dbSession, group);
removeGroupFromQualityProfileEdit(dbSession, group);
removeGroupFromQualityGateEdit(dbSession, group);
removeGroupScimLink(dbSession, group);
removeExternalGroupMapping(dbSession, group);
removeGithubOrganizationGroup(dbSession, group);
removeGroup(dbSession, group);
} | @Test
public void delete_whenLastAdminGroup_throwAndDontDeleteGroup() {
GroupDto groupDto = mockGroupDto();
when(dbClient.groupDao().selectByName(dbSession, DefaultGroups.USERS))
.thenReturn(Optional.of(new GroupDto().setUuid("another_group_uuid"))); // We must pass the default group check
when(dbClient.authorizationDao().countUsersWithGlobalPermissionExcludingGroup(dbSession, GlobalPermission.ADMINISTER.getKey(), groupDto.getUuid()))
.thenReturn(0);
assertThatThrownBy(() -> groupService.delete(dbSession, groupDto))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("The last system admin group cannot be deleted");
verifyNoGroupDelete(dbSession, groupDto);
} |
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
return newGetter(object, parent, modifier, field.getType(), field::get,
(t, et) -> new FieldGetter(parent, field, modifier, t, et));
} | @Test
public void newFieldGetter_whenExtractingFromNonEmpty_Collection_AndReducerSuffixInNotEmpty_thenInferTypeFromCollectionItem()
throws Exception {
OuterObject object = new OuterObject("name", new InnerObject("inner"));
Getter getter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]");
Class<?> returnType = getter.getReturnType();
assertEquals(InnerObject.class, returnType);
} |
public static Integer parsePort(Configuration flinkConfig, ConfigOption<String> port) {
checkNotNull(flinkConfig.get(port), port.key() + " should not be null.");
try {
return Integer.parseInt(flinkConfig.get(port));
} catch (NumberFormatException ex) {
throw new FlinkRuntimeException(
port.key()
+ " should be specified to a fixed port. Do not support a range of ports.",
ex);
}
} | @Test
void testParsePortNull() {
final Configuration cfg = new Configuration();
ConfigOption<String> testingPort =
ConfigOptions.key("test.port").stringType().noDefaultValue();
assertThatThrownBy(
() -> KubernetesUtils.parsePort(cfg, testingPort),
"Should fail with an exception.")
.satisfies(
cause ->
assertThat(cause)
.isInstanceOf(NullPointerException.class)
.hasMessageContaining(
testingPort.key() + " should not be null."));
} |
@Override
public Num calculate(BarSeries series, Position position) {
if (position.isClosed()) {
Num profit = excludeCosts ? position.getGrossProfit() : position.getProfit();
return profit.isPositive() ? profit : series.zero();
}
return series.zero();
} | @Test
public void calculateOnlyWithProfitPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
Trade.buyAt(3, series), Trade.sellAt(5, series));
AnalysisCriterion profit = getCriterion(false);
assertNumEquals(15, profit.calculate(series, tradingRecord));
} |
@Override
public boolean isValid(final int timeout) throws SQLException {
return databaseConnectionManager.isValid(timeout);
} | @Test
void assertIsInvalid() throws SQLException {
try (ShardingSphereConnection connection = new ShardingSphereConnection(DefaultDatabase.LOGIC_NAME, mockContextManager())) {
connection.getDatabaseConnectionManager().getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY);
assertFalse(connection.isValid(0));
}
} |
@Override
public String getString(String path) {
return fileConfig.getString(path);
} | @Test
void getString() {
SimpleFileConfig config = new SimpleFileConfig();
Assertions.assertEquals(File.pathSeparator, config.getString("path.separator"));
config = new SimpleFileConfig(new File("file.conf"), "");
Assertions.assertEquals("default", config.getString("service.vgroupMapping.default_tx_group"));
config = new SimpleFileConfig(new File("src/test/resources/file"), "file:");
Assertions.assertEquals("default", config.getString("service.vgroupMapping.default_tx_group"));
} |
@Udf(description = "Returns a new string encoded using the outputEncoding ")
public String encode(
@UdfParameter(
description = "The source string. If null, then function returns null.") final String str,
@UdfParameter(
description = "The input encoding."
+ " If null, then function returns null.") final String inputEncoding,
@UdfParameter(
description = "The output encoding."
+ " If null, then function returns null.") final String outputEncoding) {
if (str == null || inputEncoding == null || outputEncoding == null) {
return null;
}
final String encodedString = inputEncoding.toLowerCase() + outputEncoding.toLowerCase();
final Encode.Encoder encoder = ENCODER_MAP.get(encodedString);
if (encoder == null) {
throw new KsqlFunctionException("Supported input and output encodings are: "
+ "hex, utf8, ascii and base64");
}
return encoder.apply(str);
} | @Test
public void shouldEncodeHexToAscii() {
assertThat(udf.encode("4578616d706C6521", "hex", "ascii"), is("Example!"));
assertThat(udf.encode("506C616E74207472656573", "hex", "ascii"), is("Plant trees"));
assertThat(udf.encode("31202b2031203d2031", "hex", "ascii"), is("1 + 1 = 1"));
assertThat(udf.encode("ce95cebbcebbceacceb4ceb1", "hex", "ascii"), is("������������"));
assertThat(udf.encode("c39c6265726d656e736368", "hex", "ascii"), is("��bermensch"));
assertThat(udf.encode("0x48656c6c6f20576f726c6421", "hex", "ascii"), is("Hello World!"));
assertThat(udf.encode("0x9", "hex", "ascii"), is("\t"));
assertThat(udf.encode("0x", "hex", "ascii"), is(""));
assertThat(udf.encode("X'436c6f7564792a7e2a3f'", "hex", "ascii"), is("Cloudy*~*?"));
assertThat(udf.encode("x'4578616d706C6521'", "hex", "ascii"), is("Example!"));
assertThat(udf.encode("X''", "hex", "ascii"), is(""));
assertThat(udf.encode("x''", "hex", "ascii"), is(""));
assertThat(udf.encode("0x578616d706C6521", "hex", "ascii"), is("\u0005xample!"));
Assert.assertThrows(KsqlFunctionException.class, () -> udf.encode("578616d706C6521", "hex", "ascii"));
Assert.assertThrows(KsqlFunctionException.class, () -> udf.encode("X'578616d706C6521'", "hex", "ascii"));
Assert.assertThrows(KsqlFunctionException.class, () -> udf.encode("x'578616d706C6521'", "hex", "ascii"));
} |
@Override
public int addListener(ObjectListener listener) {
if (listener instanceof ScoredSortedSetAddListener) {
return addListener("__keyevent@*:zadd", (ScoredSortedSetAddListener) listener, ScoredSortedSetAddListener::onAdd);
}
if (listener instanceof ScoredSortedSetRemoveListener) {
return addListener("__keyevent@*:zrem", (ScoredSortedSetRemoveListener) listener, ScoredSortedSetRemoveListener::onRemove);
}
if (listener instanceof TrackingListener) {
return addTrackingListener((TrackingListener) listener);
}
return super.addListener(listener);
} | @Test
public void testAddListener() {
testWithParams(redisson -> {
RScoredSortedSet<Integer> ss = redisson.getScoredSortedSet("test");
AtomicInteger latch = new AtomicInteger();
int id = ss.addListener(new ScoredSortedSetAddListener() {
@Override
public void onAdd(String name) {
latch.incrementAndGet();
}
});
ss.add(1, 1);
Awaitility.await().atMost(Duration.ofSeconds(1)).untilAsserted(() -> {
assertThat(latch.get()).isEqualTo(1);
});
ss.removeListener(id);
ss.add(1, 1);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
assertThat(latch.get()).isEqualTo(1);
}, NOTIFY_KEYSPACE_EVENTS, "Ez");
} |
@Override
public Collection<SQLToken> generateSQLTokens(final InsertStatementContext insertStatementContext) {
Collection<SQLToken> result = new LinkedList<>();
EncryptTable encryptTable = encryptRule.getEncryptTable(insertStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue());
for (ColumnSegment each : insertStatementContext.getSqlStatement().getColumns()) {
List<String> derivedColumnNames = getDerivedColumnNames(encryptTable, each);
if (!derivedColumnNames.isEmpty()) {
result.add(new InsertColumnsToken(each.getStopIndex() + 1, derivedColumnNames));
}
}
return result;
} | @Test
void assertGenerateSQLTokensExistColumns() {
EncryptInsertDerivedColumnsTokenGenerator tokenGenerator = new EncryptInsertDerivedColumnsTokenGenerator(mockEncryptRule());
Collection<SQLToken> actual = tokenGenerator.generateSQLTokens(mockInsertStatementContext());
assertThat(actual.size(), is(1));
assertThat(actual.iterator().next().getStartIndex(), is(1));
} |
public TenantCapacity getTenantCapacity(String tenant) {
return tenantCapacityPersistService.getTenantCapacity(tenant);
} | @Test
void testGetTenantCapacity() {
TenantCapacity tenantCapacity = new TenantCapacity();
tenantCapacity.setId(1L);
tenantCapacity.setTenant("testTenant");
when(tenantCapacityPersistService.getTenantCapacity(eq("testTenant"))).thenReturn(tenantCapacity);
TenantCapacity resTenantCapacity = service.getTenantCapacity("testTenant");
assertEquals(tenantCapacity.getId(), resTenantCapacity.getId());
assertEquals(tenantCapacity.getTenant(), resTenantCapacity.getTenant());
} |
@Override
public Collection<JobManagerRunner> getJobManagerRunners() {
return new ArrayList<>(this.jobManagerRunners.values());
} | @Test
void testGetJobManagerRunners() {
assertThat(testInstance.getJobManagerRunners()).isEmpty();
final JobManagerRunner jobManagerRunner0 = TestingJobManagerRunner.newBuilder().build();
final JobManagerRunner jobManagerRunner1 = TestingJobManagerRunner.newBuilder().build();
testInstance.register(jobManagerRunner0);
testInstance.register(jobManagerRunner1);
assertThat(testInstance.getJobManagerRunners())
.containsExactlyInAnyOrder(jobManagerRunner0, jobManagerRunner1);
} |
@Override
public Path copy(final Path source, final Path copy, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
final String target = new DefaultUrlProvider(session.getHost()).toUrl(copy).find(DescriptiveUrl.Type.provider).getUrl();
if(session.getFeature(Lock.class) != null && status.getLockId() != null) {
// Indicate that the client has knowledge of that state token
session.getClient().copy(new DAVPathEncoder().encode(source), target, status.isExists(),
Collections.singletonMap(HttpHeaders.IF, String.format("(<%s>)", status.getLockId())));
}
else {
session.getClient().copy(new DAVPathEncoder().encode(source), target, status.isExists());
}
listener.sent(status.getLength());
return copy.withAttributes(source.attributes());
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Cannot copy {0}", e, source);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, source);
}
} | @Test
public void testCopyDirectory() throws Exception {
final Path directory = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
final String name = new AlphanumericRandomStringService().random();
final Path file = new Path(directory, name, EnumSet.of(Path.Type.file));
new DAVDirectoryFeature(session).mkdir(directory, new TransferStatus());
new DAVTouchFeature(session).touch(file, new TransferStatus());
final Path copy = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new DAVDirectoryFeature(session).mkdir(copy, new TransferStatus());
assertThrows(ConflictException.class, () -> new DAVCopyFeature(session).copy(directory, copy, new TransferStatus().exists(false), new DisabledConnectionCallback(), new DisabledStreamListener()));
new DAVCopyFeature(session).copy(directory, copy, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new DAVFindFeature(session).find(file));
assertTrue(new DAVFindFeature(session).find(copy));
assertTrue(new DAVFindFeature(session).find(new Path(copy, name, EnumSet.of(Path.Type.file))));
new DAVDeleteFeature(session).delete(Arrays.asList(copy, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public TimestampedKeyValueStore<K, V> build() {
KeyValueStore<Bytes, byte[]> store = storeSupplier.get();
if (!(store instanceof TimestampedBytesStore)) {
if (store.persistent()) {
store = new KeyValueToTimestampedKeyValueByteStoreAdapter(store);
} else {
store = new InMemoryTimestampedKeyValueStoreMarker(store);
}
}
return new MeteredTimestampedKeyValueStore<>(
maybeWrapCaching(maybeWrapLogging(store)),
storeSupplier.metricsScope(),
time,
keySerde,
valueSerde);
} | @Test
public void shouldNotWrapTimestampedByteStore() {
setUp();
when(supplier.get()).thenReturn(new RocksDBTimestampedStore("name", "metrics-scope"));
final TimestampedKeyValueStore<String, String> store = builder
.withLoggingDisabled()
.withCachingDisabled()
.build();
assertThat(((WrappedStateStore) store).wrapped(), instanceOf(RocksDBTimestampedStore.class));
} |
@SuppressWarnings("unchecked")
public static <W extends BoundedWindow> StateContext<W> nullContext() {
return (StateContext<W>) NULL_CONTEXT;
} | @Test
public void nullContextThrowsOnOptions() {
StateContext<BoundedWindow> context = StateContexts.nullContext();
thrown.expect(IllegalArgumentException.class);
context.getPipelineOptions();
} |
@Override
public void onStartup() {
config = RuleLoaderConfig.load();
List<String> masks = List.of(MASK_PORTAL_TOKEN);
ModuleRegistry.registerModule(RuleLoaderConfig.CONFIG_NAME, RuleLoaderStartupHook.class.getName(), Config.getNoneDecryptedInstance().getJsonMapConfigNoCache(RuleLoaderConfig.CONFIG_NAME), masks);
if(config.isEnabled()) {
// by default the rules for the service are loaded from the light-portal; however, it can be configured to load from the config folder.
if(RuleLoaderConfig.RULE_SOURCE_CONFIG_FOLDER.equals(config.getRuleSource())) {
// load the rules for the service from the externalized config folder. The filename is rules.yml
String ruleString = Config.getInstance().getStringFromFile("rules.yml");
rules = RuleMapper.string2RuleMap(ruleString);
if(logger.isInfoEnabled()) logger.info("Load YAML rules from config folder with size = " + rules.size());
// load the endpoint rule mapping from the rule-loader.yml
endpointRules = config.getEndpointRules();
} else {
// by default, load from light-portal
ServerConfig serverConfig = ServerConfig.getInstance();
Result<String> result = getServiceById(config.getPortalHost(), serverConfig.getServiceId());
if(result.isSuccess()) {
String serviceString = result.getResult();
if(logger.isDebugEnabled()) logger.debug("getServiceById result = " + serviceString);
Map<String, Object> objectMap = JsonMapper.string2Map(serviceString);
endpointRules = (Map<String, Object>)objectMap.get("endpointRules");
// need to get the rule bodies here to create a map of ruleId to ruleBody.
Iterator<Object> iterator = endpointRules.values().iterator();
String ruleString = "\n";
Set<String> ruleIdSet = new HashSet<>(); // use this set to ensure the same ruleId is only concatenated once.
while (iterator.hasNext()) {
Map<String, List> value = (Map<String, List>)iterator.next();
Iterator<List> iteratorList = value.values().iterator();
while(iteratorList.hasNext()) {
List<Map<String, String>> list = iteratorList.next();
for(Map<String, String> map: list) {
// this map may contain ruleId, roles and variables as keys; here we only need the ruleId in order to load the rule body.
String ruleId = map.get("ruleId");
if(!ruleIdSet.contains(ruleId)) {
if (logger.isDebugEnabled()) logger.debug("Load rule for ruleId = " + ruleId);
// get the rule content for each id and concatenate them together.
String r = getRuleById(config.getPortalHost(), DEFAULT_HOST, ruleId).getResult();
Map<String, Object> ruleMap = JsonMapper.string2Map(r);
ruleString = ruleString + ruleMap.get("value") + "\n";
ruleIdSet.add(ruleId);
}
}
}
}
rules = RuleMapper.string2RuleMap(ruleString);
if(logger.isInfoEnabled()) logger.info("Load YAML rules from light-portal with size = " + rules.size());
} else {
logger.error("Could not load rule for serviceId = " + serverConfig.getServiceId() + " error = " + result.getError());
}
}
if(rules != null) {
// create the rule engine with the rule map.
ruleEngine = new RuleEngine(rules, null);
// iterate all action classes to initialize them, ensuring that the jar files are deployed and the configuration is registered.
// This prevents runtime exceptions and also ensures that the configuration is part of the server info response.
loadPluginClass();
}
} else {
if(logger.isInfoEnabled()) logger.info("Rule Loader is not enabled and skipped loading rules from the portal.");
}
} | @Test
@Ignore
public void testRuleLoader() {
RuleLoaderStartupHook ruleLoaderStartupHook = new RuleLoaderStartupHook();
ruleLoaderStartupHook.onStartup();
} |
AwsCredentials credentialsEcs() {
String response = createRestClient(ecsIamRoleEndpoint, awsConfig).get().getBody();
return parseCredentials(response);
} | @Test
public void credentialsEcs() {
// given
String response = """
{
"Code": "Success",
"AccessKeyId": "Access1234",
"SecretAccessKey": "Secret1234",
"Token": "Token1234",
"Expiration": "2020-03-27T21:01:33Z"
}""";
stubFor(get(urlEqualTo("/"))
.willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(response)));
// when
AwsCredentials result = awsMetadataApi.credentialsEcs();
// then
assertEquals("Access1234", result.getAccessKey());
assertEquals("Secret1234", result.getSecretKey());
assertEquals("Token1234", result.getToken());
} |
@Override
protected void processRecord(RowData row) {
// limit the materialized table
if (materializedTable.size() - validRowPosition >= maxRowCount) {
cleanUp();
}
materializedTable.add(row);
} | @Test
void testLimitedSnapshot() throws Exception {
final ResolvedSchema schema =
ResolvedSchema.physical(
new String[] {"f0", "f1"},
new DataType[] {DataTypes.STRING(), DataTypes.INT()});
@SuppressWarnings({"unchecked", "rawtypes"})
final DataStructureConverter<RowData, Row> rowConverter =
(DataStructureConverter)
DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
try (TestMaterializedCollectBatchResult result =
new TestMaterializedCollectBatchResult(
CliClientTestUtils.createTestClient(schema),
2, // limit the materialized table to 2 rows
3,
createInternalBinaryRowDataConverter(
schema.toPhysicalRowDataType()))) { // with 3 rows overcommitment
result.isRetrieving = true;
result.processRecord(Row.of("D", 1));
result.processRecord(Row.of("A", 1));
result.processRecord(Row.of("B", 1));
result.processRecord(Row.of("A", 1));
assertRowEquals(
Arrays.asList(
null, null, Row.of("B", 1), Row.of("A", 1)), // two over-committed rows
result.getMaterializedTable(),
rowConverter);
assertThat(result.snapshot(1)).isEqualTo(TypedResult.payload(2));
assertRowEquals(
Collections.singletonList(Row.of("B", 1)),
result.retrievePage(1),
rowConverter);
assertRowEquals(
Collections.singletonList(Row.of("A", 1)),
result.retrievePage(2),
rowConverter);
result.processRecord(Row.of("C", 1));
assertRowEquals(
Arrays.asList(Row.of("A", 1), Row.of("C", 1)), // limit clean up has taken place
result.getMaterializedTable(),
rowConverter);
result.processRecord(Row.of("A", 1));
assertRowEquals(
Arrays.asList(null, Row.of("C", 1), Row.of("A", 1)),
result.getMaterializedTable(),
rowConverter);
}
} |
@Override
public boolean equals(Object object)
{
if (this == object)
{
return true;
}
if (object == null || getClass() != object.getClass())
{
return false;
}
if (!super.equals(object))
{
return false;
}
UpdateEntityResponse<?> that = (UpdateEntityResponse<?>) object;
return Objects.equals(_entity, that._entity);
} | @Test(dataProvider = "testEqualsDataProvider")
public void testEquals
(
boolean shouldEquals,
@Nonnull UpdateEntityResponse<TestRecordTemplateClass.Foo> updateEntityResponse,
@Nullable Object compareObject
)
{
assertEquals(updateEntityResponse.equals(compareObject), shouldEquals);
} |
@Override
public void onCycleComplete(com.netflix.hollow.api.producer.Status status, HollowProducer.ReadState readState, long version, Duration elapsed) {
boolean isCycleSuccess;
long cycleEndTimeNano = System.nanoTime();
if (status.getType() == com.netflix.hollow.api.producer.Status.StatusType.SUCCESS) {
isCycleSuccess = true;
consecutiveFailures = 0L;
lastCycleSuccessTimeNanoOptional = OptionalLong.of(cycleEndTimeNano);
} else {
isCycleSuccess = false;
consecutiveFailures++;
}
CycleMetrics.Builder cycleMetricsBuilder = new CycleMetrics.Builder()
.setConsecutiveFailures(consecutiveFailures)
.setCycleDurationMillis(elapsed.toMillis())
.setIsCycleSuccess(isCycleSuccess);
lastCycleSuccessTimeNanoOptional.ifPresent(cycleMetricsBuilder::setLastCycleSuccessTimeNano);
cycleMetricsReporting(cycleMetricsBuilder.build());
} | @Test
public void testCycleCompleteWithSuccess() {
final class TestProducerMetricsListener extends AbstractProducerMetricsListener {
@Override
public void cycleMetricsReporting(CycleMetrics cycleMetrics) {
Assert.assertNotNull(cycleMetrics);
Assert.assertEquals(0l, cycleMetrics.getConsecutiveFailures());
Assert.assertEquals(Optional.of(true), cycleMetrics.getIsCycleSuccess());
Assert.assertEquals(OptionalLong.of(TEST_CYCLE_DURATION_MILLIS.toMillis()), cycleMetrics.getCycleDurationMillis());
Assert.assertNotEquals(OptionalLong.of(TEST_LAST_CYCLE_NANOS), cycleMetrics.getLastCycleSuccessTimeNano());
Assert.assertNotEquals(OptionalLong.empty(), cycleMetrics.getLastCycleSuccessTimeNano());
}
}
AbstractProducerMetricsListener concreteProducerMetricsListener = new TestProducerMetricsListener();
concreteProducerMetricsListener.lastCycleSuccessTimeNanoOptional = OptionalLong.of(TEST_LAST_CYCLE_NANOS);
concreteProducerMetricsListener.onCycleStart(TEST_VERSION);
concreteProducerMetricsListener.onCycleComplete(TEST_STATUS_SUCCESS, mockReadState, TEST_VERSION, TEST_CYCLE_DURATION_MILLIS);
} |
@Override
public void close() {
userTransactionService.shutdown(true);
} | @Test
void assertClose() {
transactionManagerProvider.close();
verify(userTransactionService).shutdown(true);
} |
static byte[] generateRandomPayload(Integer recordSize, List<byte[]> payloadByteList, byte[] payload,
SplittableRandom random, boolean payloadMonotonic, long recordValue) {
if (!payloadByteList.isEmpty()) {
payload = payloadByteList.get(random.nextInt(payloadByteList.size()));
} else if (recordSize != null) {
for (int j = 0; j < payload.length; ++j)
payload[j] = (byte) (random.nextInt(26) + 65);
} else if (payloadMonotonic) {
payload = Long.toString(recordValue).getBytes(StandardCharsets.UTF_8);
} else {
throw new IllegalArgumentException("no payload File Path or record Size or payload-monotonic option provided");
}
return payload;
} | @Test
public void testGenerateRandomPayloadException() {
Integer recordSize = null;
byte[] payload = null;
List<byte[]> payloadByteList = new ArrayList<>();
SplittableRandom random = new SplittableRandom(0);
IllegalArgumentException thrown = assertThrows(IllegalArgumentException.class, () -> ProducerPerformance.generateRandomPayload(recordSize, payloadByteList, payload, random, false, 0L));
assertEquals("no payload File Path or record Size or payload-monotonic option provided", thrown.getMessage());
} |
static String joinAndEscapeStrings(final String[] strs,
final char delimiterChar, final char escapeChar) {
int len = strs.length;
// Escape each string in string array.
for (int index = 0; index < len; index++) {
if (strs[index] == null) {
return null;
}
strs[index] = escapeString(strs[index], delimiterChar, escapeChar);
}
// Join the strings after they have been escaped.
return StringUtils.join(strs, delimiterChar);
} | @Test
void testJoinAndEscapeStrings() throws Exception {
assertEquals("*!cluster!*!b**o***!xer!oozie**",
TimelineReaderUtils.joinAndEscapeStrings(
new String[]{"!cluster", "!b*o*!xer", "oozie*"}, '!', '*'));
assertEquals("*!cluster!*!b**o***!xer!!",
TimelineReaderUtils.joinAndEscapeStrings(
new String[]{"!cluster", "!b*o*!xer", "", ""}, '!', '*'));
assertNull(TimelineReaderUtils.joinAndEscapeStrings(
new String[]{"!cluster", "!b*o*!xer", null, ""}, '!', '*'));
} |
@Override
public void close() {
pos = 0;
buffer = null;
} | @Test
public void testClose() {
out.close();
assertEquals(0, out.position());
assertNull(out.buffer);
} |
@Override
public ObjectNode encode(LispTeAddress address, CodecContext context) {
checkNotNull(address, "LispTeAddress cannot be null");
final ObjectNode result = context.mapper().createObjectNode();
final ArrayNode jsonRecords = result.putArray(TE_RECORDS);
final JsonCodec<LispTeAddress.TeRecord> recordCodec =
context.codec(LispTeAddress.TeRecord.class);
for (final LispTeAddress.TeRecord record : address.getTeRecords()) {
jsonRecords.add(recordCodec.encode(record, context));
}
return result;
} | @Test
public void testLispTeAddressEncode() {
LispTeAddress address = new LispTeAddress.Builder()
.withTeRecords(ImmutableList.of(record1, record2))
.build();
ObjectNode addressJson = teAddressCodec.encode(address, context);
assertThat("errors in encoding Traffic Engineering address JSON",
addressJson, LispTeAddressJsonMatcher.matchesTeAddress(address));
} |
@Override
public void start() throws Exception {
LOG.info("Process starting.");
mRunning = true;
mJournalSystem.start();
startMasterComponents(false);
mServices.forEach(SimpleService::start);
// Perform the initial catchup before joining leader election,
// to avoid potential delay if this master is selected as leader
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_CATCHUP_PROTECT_ENABLED)) {
LOG.info("Waiting for journals to catch up.");
mJournalSystem.waitForCatchup();
}
LOG.info("Starting leader selector.");
mLeaderSelector.start(getRpcAddress());
while (!Thread.interrupted()) {
if (!mRunning) {
LOG.info("master process is not running. Breaking out");
break;
}
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_CATCHUP_PROTECT_ENABLED)) {
LOG.info("Waiting for journals to catch up.");
mJournalSystem.waitForCatchup();
}
LOG.info("Started in stand-by mode.");
mLeaderSelector.waitForState(NodeState.PRIMARY);
mLastGainPrimacyTime = CommonUtils.getCurrentMs();
if (!mRunning) {
break;
}
try {
if (!promote()) {
continue;
}
mServices.forEach(SimpleService::promote);
LOG.info("Primary started");
} catch (Throwable t) {
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_BACKUP_WHEN_CORRUPTED)) {
takeEmergencyBackup();
}
throw t;
}
mLeaderSelector.waitForState(NodeState.STANDBY);
mLastLosePrimacyTime = CommonUtils.getCurrentMs();
if (Configuration.getBoolean(PropertyKey.MASTER_JOURNAL_EXIT_ON_DEMOTION)) {
stop();
} else {
if (!mRunning) {
break;
}
// Dump important information asynchronously
ExecutorService es = null;
List<Future<Void>> dumpFutures = new ArrayList<>();
try {
es = Executors.newFixedThreadPool(
2, ThreadFactoryUtils.build("info-dumper-%d", true));
dumpFutures.addAll(ProcessUtils.dumpInformationOnFailover(es));
} catch (Throwable t) {
LOG.warn("Failed to dump metrics and jstacks before demotion", t);
}
// Shut down services like RPC, WebServer, Journal and all master components
LOG.info("Losing the leadership.");
mServices.forEach(SimpleService::demote);
demote();
// Block until information dump is done and close resources
for (Future<Void> f : dumpFutures) {
try {
f.get();
} catch (InterruptedException | ExecutionException e) {
LOG.warn("Failed to dump metrics and jstacks before demotion", e);
}
}
if (es != null) {
es.shutdownNow();
}
}
}
} | @Test
public void startStopStandbyStandbyServer() throws Exception {
Configuration.set(PropertyKey.STANDBY_MASTER_GRPC_ENABLED, true);
AlluxioMasterProcess master =
new AlluxioMasterProcess(new NoopJournalSystem(), new AlwaysStandbyPrimarySelector());
master.registerService(
RpcServerService.Factory.create(
master.getRpcBindAddress(), master, master.getRegistry()));
master.registerService(WebServerService.Factory.create(master.getWebBindAddress(), master));
master.registerService(MetricsService.Factory.create());
Thread t = new Thread(() -> {
try {
master.start();
} catch (Exception e) {
throw new RuntimeException(e);
}
});
t.start();
startStopTest(master,
true,
Configuration.getBoolean(PropertyKey.STANDBY_MASTER_WEB_ENABLED),
Configuration.getBoolean(PropertyKey.STANDBY_MASTER_METRICS_SINK_ENABLED));
} |
public static <T> T[] getBeans(Class<T> interfaceClass) {
Object object = serviceMap.get(interfaceClass.getName());
if(object == null) return null;
if(object instanceof Object[]) {
return (T[])object;
} else {
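// A single instance was registered: wrap it in a one-element array of the interface type.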
Object array = Array.newInstance(interfaceClass, 1);
Array.set(array, 0, object);
return (T[])array;
}
} | @Test
public void testArrayNotDefined() {
Dummy[] dummies = SingletonServiceFactory.getBeans(Dummy.class);
Assert.assertNull(dummies);
} |
public boolean isRoot() {
return mUri.getPath().equals(SEPARATOR)
|| (mUri.getPath().isEmpty() && hasAuthority());
} | @Test
public void isRootTests() {
assertFalse(new AlluxioURI(".").isRoot());
assertTrue(new AlluxioURI("/").isRoot());
assertTrue(new AlluxioURI("file:/").isRoot());
assertTrue(new AlluxioURI("alluxio://localhost:19998").isRoot());
assertTrue(new AlluxioURI("alluxio://localhost:19998/").isRoot());
assertTrue(new AlluxioURI("hdfs://localhost:19998").isRoot());
assertTrue(new AlluxioURI("hdfs://localhost:19998/").isRoot());
assertTrue(new AlluxioURI("file://localhost/").isRoot());
assertFalse(new AlluxioURI("file://localhost/a/b").isRoot());
assertFalse(new AlluxioURI("a/b").isRoot());
} |
public void createQprofileChangesForRuleUpdates(DbSession dbSession, Set<PluginRuleUpdate> pluginRuleUpdates) {
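// One RuleChangeDto is persisted per updated rule, then fanned out into a QProfileChangeDto per profile using that rule.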
List<QProfileChangeDto> changesToPersist = pluginRuleUpdates.stream()
.flatMap(pluginRuleUpdate -> {
RuleChangeDto ruleChangeDto = createNewRuleChange(pluginRuleUpdate);
insertRuleChange(dbSession, ruleChangeDto);
return findQualityProfilesForRule(dbSession, pluginRuleUpdate.getRuleUuid()).stream()
.map(qualityProfileUuid -> buildQprofileChangeDtoForRuleChange(qualityProfileUuid, ruleChangeDto));
}).toList();
if (!changesToPersist.isEmpty()) {
dbClient.qProfileChangeDao().bulkInsert(dbSession, changesToPersist);
}
} | @Test
public void updateWithoutCommit_whenOneRuleBelongingToTwoQualityProfilesChanged_thenInsertOneRuleChangeAndTwoQualityProfileChanges() {
List<ActiveRuleDto> activeRuleDtos = List.of(
new ActiveRuleDto().setProfileUuid("profileUuid1").setRuleUuid(RULE_UUID),
new ActiveRuleDto().setProfileUuid("profileUuid2").setRuleUuid(RULE_UUID));
when(activeRuleDao.selectByRuleUuid(any(), any())).thenReturn(activeRuleDtos);
PluginRuleUpdate pluginRuleUpdate = new PluginRuleUpdate();
pluginRuleUpdate.setNewCleanCodeAttribute(CleanCodeAttribute.CLEAR);
pluginRuleUpdate.setOldCleanCodeAttribute(CleanCodeAttribute.TESTED);
pluginRuleUpdate.setRuleUuid(RULE_UUID);
underTest.createQprofileChangesForRuleUpdates(dbSession, Set.of(pluginRuleUpdate));
verify(qualityProfileChangeDao, times(1)).bulkInsert(argThat(dbSession::equals),
argThat(qProfileChangeDtos ->
qProfileChangeDtos.stream()
.allMatch(dto -> "UPDATED".equals(dto.getChangeType()) && dto.getRuleChange() != null)));
} |
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("type", type)
.add("uuid", uuid)
.add("component", component)
.add("entity", entity)
.add("submitter", submitter)
.toString();
} | @Test
public void verify_toString() {
CeTask.Component component = new CeTask.Component("COMPONENT_UUID_1", "COMPONENT_KEY_1", "The component");
CeTask.Component entity = new CeTask.Component("ENTITY_UUID_1", "ENTITY_KEY_1", "The entity");
underTest.setType("TYPE_1");
underTest.setUuid("UUID_1");
underTest.setComponent(component);
underTest.setEntity(entity);
underTest.setSubmitter(new CeTask.User("UUID_USER_1", "LOGIN_1"));
underTest.setCharacteristics(ImmutableMap.of("k1", "v1", "k2", "v2"));
CeTask task = underTest.build();
assertThat(task).hasToString("CeTask{" +
"type=TYPE_1, " +
"uuid=UUID_1, " +
"component=Component{uuid='COMPONENT_UUID_1', key='COMPONENT_KEY_1', name='The component'}, " +
"entity=Component{uuid='ENTITY_UUID_1', key='ENTITY_KEY_1', name='The entity'}, " +
"submitter=User{uuid='UUID_USER_1', login='LOGIN_1'}" +
"}");
} |
public static Map<String, Collection<DataNode>> load(final String databaseName, final DatabaseType protocolType, final Map<String, DataSource> dataSourceMap,
final Collection<ShardingSphereRule> builtRules, final Collection<String> configuredTables) {
Collection<String> featureRequiredSingleTables = SingleTableLoadUtils.getFeatureRequiredSingleTables(builtRules);
if (configuredTables.isEmpty() && featureRequiredSingleTables.isEmpty()) {
return new LinkedHashMap<>();
}
Collection<String> excludedTables = SingleTableLoadUtils.getExcludedTables(builtRules);
Map<String, Collection<DataNode>> actualDataNodes = load(databaseName, dataSourceMap, excludedTables);
Collection<String> splitTables = SingleTableLoadUtils.splitTableLines(configuredTables);
if (splitTables.contains(SingleTableConstants.ALL_TABLES) || splitTables.contains(SingleTableConstants.ALL_SCHEMA_TABLES)) {
return actualDataNodes;
}
Map<String, Map<String, Collection<String>>> configuredTableMap = getConfiguredTableMap(databaseName, protocolType, splitTables);
return loadSpecifiedDataNodes(actualDataNodes, featureRequiredSingleTables, configuredTableMap);
} | @Test
void assertLoad() {
ShardingSphereRule builtRule = mock(ShardingSphereRule.class);
TableMapperRuleAttribute ruleAttribute = mock(TableMapperRuleAttribute.class, RETURNS_DEEP_STUBS);
when(ruleAttribute.getDistributedTableNames()).thenReturn(Arrays.asList("salary", "employee", "student"));
when(builtRule.getAttributes()).thenReturn(new RuleAttributes(ruleAttribute));
Map<String, Collection<DataNode>> actual = SingleTableDataNodeLoader.load(DefaultDatabase.LOGIC_NAME, databaseType, dataSourceMap, Collections.singleton(builtRule), configuredSingleTables);
assertFalse(actual.containsKey("employee"));
assertFalse(actual.containsKey("salary"));
assertFalse(actual.containsKey("student"));
assertTrue(actual.containsKey("dept"));
assertTrue(actual.containsKey("teacher"));
assertTrue(actual.containsKey("class"));
assertThat(actual.get("dept").iterator().next().getDataSourceName(), is("ds0"));
assertThat(actual.get("teacher").iterator().next().getDataSourceName(), is("ds1"));
assertThat(actual.get("class").iterator().next().getDataSourceName(), is("ds1"));
} |
@Override
public LogLevel getLogLevel() {
return log != null ? log.getLogLevel() : null;
} | @Test
public void testBaseStepGetLogLevelWontThrowNPEWithNullLog() {
when( mockHelper.logChannelInterfaceFactory.create( any(), any( LoggingObjectInterface.class ) ) ).thenAnswer(
(Answer<LogChannelInterface>) invocation -> {
( (BaseStep) invocation.getArguments()[ 0 ] ).getLogLevel();
return mockHelper.logChannelInterface;
} );
new BaseStep( mockHelper.stepMeta, mockHelper.stepDataInterface, 0, mockHelper.transMeta, mockHelper.trans )
.getLogLevel();
} |
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
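// Literal MAP params are merged recursively, inheriting the parent param's mode.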
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
} | @Test
public void testMergeStringMapOverwrite() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
"{'tomergemap': {'type': 'STRING_MAP', 'value': {'tomerge': 'hello', 'all': 'allval'}}}");
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap(
"{'tomergemap': {'type': 'STRING_MAP', 'value': {'tomerge': 'goodbye', 'new': 'newval'}}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
StringMapParamDefinition tomergemap = allParams.get("tomergemap").asStringMapParamDef();
assertEquals("goodbye", tomergemap.getValue().get("tomerge"));
assertEquals("allval", tomergemap.getValue().get("all"));
assertEquals("newval", tomergemap.getValue().get("new"));
assertEquals(ParamSource.DEFINITION, tomergemap.getSource());
} |
@Override
protected License open(final Local file) {
// Verify immediately and exit if not a valid receipt
final ReceiptVerifier verifier = new ReceiptVerifier(file);
if(verifier.verify(new DisabledLicenseVerifierCallback())) {
// Set name
final Receipt receipt = new Receipt(file, verifier.getGuid());
if(log.isInfoEnabled()) {
log.info(String.format("Valid receipt %s in %s", receipt, file));
}
// Copy to Application Support for users switching versions
final Local support = SupportDirectoryFinderFactory.get().find();
try {
file.copy(LocalFactory.get(support, String.format("%s.cyberduckreceipt",
PreferencesFactory.get().getProperty("application.name"))));
}
catch(AccessDeniedException e) {
log.warn(e.getMessage());
}
return receipt;
}
else {
log.error(String.format("Invalid receipt found in %s", file));
System.exit(APPSTORE_VALIDATION_FAILURE);
return null;
}
} | @Test
public void testOpen() throws Exception {
// Expect exit code 173
new ReceiptFactory().open();
} |
public static long validateMillisecondInstant(final Instant instant, final String messagePrefix) {
try {
if (instant == null) {
throw new IllegalArgumentException(messagePrefix + VALIDATE_MILLISECOND_NULL_SUFFIX);
}
return instant.toEpochMilli();
} catch (final ArithmeticException e) {
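// Instant.toEpochMilli overflows for instants too far before or after the epoch.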
throw new IllegalArgumentException(messagePrefix + VALIDATE_MILLISECOND_OVERFLOW_SUFFIX, e);
}
} | @Test
public void shouldReturnMillisecondsOnValidInstant() {
final Instant sampleInstant = Instant.now();
assertEquals(sampleInstant.toEpochMilli(), validateMillisecondInstant(sampleInstant, "sampleInstant"));
} |
@Bean("ReadCache")
public ReadCache provideReader(AnalysisCacheEnabled analysisCacheEnabled, AnalysisCacheMemoryStorage storage) {
if (analysisCacheEnabled.isEnabled()) {
storage.load();
return new ReadCacheImpl(storage);
}
return new NoOpReadCache();
} | @Test
public void provide_noop_reader_cache_when_disable() {
when(analysisCacheEnabled.isEnabled()).thenReturn(false);
var cache = cacheProvider.provideReader(analysisCacheEnabled, storage);
assertThat(cache).isInstanceOf(NoOpReadCache.class);
} |
@Override
public long getBackoffTime() {
return backoffTimeMS;
} | @Test
void testBackoffTime() {
final long backoffTimeMS = 10_000L;
final FailureRateRestartBackoffTimeStrategy restartStrategy =
new FailureRateRestartBackoffTimeStrategy(new ManualClock(), 1, 1, backoffTimeMS);
assertThat(restartStrategy.getBackoffTime()).isEqualTo(backoffTimeMS);
} |
@Override
public Optional<Lock> unlock(@Nonnull String resource, @Nullable String lockContext) {
return doUnlock(resource, getLockedByString(lockContext));
} | @Test
void unlockNonExistentLock() {
assertThat(lockService.unlock("test-resource", null)).isEmpty();
} |
public void setNeedClientAuth(Boolean needClientAuth) {
this.needClientAuth = needClientAuth;
} | @Test
public void testSetNeedClientAuth() throws Exception {
configuration.setNeedClientAuth(true);
configuration.configure(configurable);
assertTrue(configurable.isNeedClientAuth());
} |
@Override
public void exportData(JsonWriter writer) throws IOException {
throw new UnsupportedOperationException("Can not export 1.0 format from this version.");
} | @Test(expected = UnsupportedOperationException.class)
public void testExportDisabled() throws IOException {
JsonWriter writer = new JsonWriter(new StringWriter());
dataService.exportData(writer);
} |
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
} | @Test
public void testAllowedTypeCastingIntoLong() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'LONG','value': 123, 'name': 'tomerge'}}");
Map<String, ParamDefinition> paramsToMerge =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'STRING', 'value': '234', 'name': 'tomerge'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
assertEquals(Long.valueOf(234), allParams.get("tomerge").asLongParamDef().getValue());
} |
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
return this.processRequest(ctx.channel(), request, true);
} | @Test
public void testSingleAck_TopicCheck() throws RemotingCommandException {
AckMessageRequestHeader requestHeader = new AckMessageRequestHeader();
requestHeader.setTopic("wrongTopic");
requestHeader.setQueueId(0);
requestHeader.setOffset(0L);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.ACK_MESSAGE, requestHeader);
request.makeCustomHeaderToNet();
RemotingCommand response = ackMessageProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
assertThat(response.getRemark()).contains("not exist, apply first");
} |
public @CheckForNull String readLink() throws IOException {
return null;
} | @Test
public void testReadLink_AbstractBase() throws Exception {
// This test checks the method's behavior in the abstract base class,
// which generally does nothing.
VirtualFile root = new VirtualFileMinimalImplementation();
assertThat(root.readLink(), nullValue());
} |
@Override
public void connectTables(DeviceId deviceId, int fromTable, int toTable) {
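// Install a catch-all rule in fromTable whose only action is to jump to toTable.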
TrafficSelector.Builder selector = DefaultTrafficSelector.builder();
TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder();
treatment.transition(toTable);
FlowRule flowRule = DefaultFlowRule.builder()
.forDevice(deviceId)
.withSelector(selector.build())
.withTreatment(treatment.build())
.withPriority(DROP_PRIORITY)
.fromApp(appId)
.makePermanent()
.forTable(fromTable)
.build();
applyRule(flowRule, true);
} | @Test
public void testConnectTables() {
int testFromTable = 1;
int testToTable = 2;
fros = Sets.newConcurrentHashSet();
TrafficSelector.Builder selectorBuilder = DefaultTrafficSelector.builder();
TrafficTreatment.Builder treatmentBuilder = DefaultTrafficTreatment.builder();
target.connectTables(DEVICE_ID, testFromTable, testToTable);
FlowRule.Builder flowRuleBuilder = DefaultFlowRule.builder()
.forDevice(DEVICE_ID)
.withSelector(selectorBuilder.build())
.withTreatment(treatmentBuilder.transition(testToTable).build())
.withPriority(DROP_PRIORITY)
.fromApp(TEST_APP_ID)
.forTable(testFromTable)
.makePermanent();
validateFlowRule(flowRuleBuilder.build());
} |
public static Optional<String> getRowUniqueKey(final String rowPath) {
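// Path layout: <root>/<database>/schemas/<schema>/tables/<table>/<uniqueKey>; group 4 captures the key.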
Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)/schemas/([\\w\\-]+)/tables/([\\w\\-]+)/(\\w+)$", Pattern.CASE_INSENSITIVE);
Matcher matcher = pattern.matcher(rowPath);
return matcher.find() ? Optional.of(matcher.group(4)) : Optional.empty();
} | @Test
void assertGetRowUniqueKeyHappyPath() {
assertThat(ShardingSphereDataNode.getRowUniqueKey("/statistics/databases/db_name/schemas/db_schema/tables/tbl_name/key"), is(Optional.of("key")));
} |
@UdafFactory(description = "Build a value-to-count histogram of input Strings")
public static TableUdaf<String, Map<String, Long>, Map<String, Long>> histogramString() {
return histogram();
} | @Test
public void shouldNotExceedSizeLimit() {
final TableUdaf<String, Map<String, Long>, Map<String, Long>> udaf = HistogramUdaf.histogramString();
Map<String, Long> agg = udaf.initialize();
for (int thisValue = 1; thisValue < 2500; thisValue++) {
agg = udaf.aggregate(String.valueOf(thisValue), agg);
}
assertThat(agg.entrySet(), hasSize(1000));
assertThat(agg, hasEntry("1", 1L));
assertThat(agg, hasEntry("1000", 1L));
assertThat(agg, not(hasEntry("1001", 1L)));
} |
@Nonnull
@Override
public List<DataConnectionResource> listResources() {
try {
try (Connection connection = getConnection()) {
DatabaseMetaData databaseMetaData = connection.getMetaData();
ResourceReader reader = new ResourceReader();
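// Exclude engine-specific system schemas/catalogs depending on the detected dialect.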
switch (resolveDialect(databaseMetaData)) {
case H2:
reader.withCatalog(connection.getCatalog())
.exclude(
(catalog, schema, table) ->
H2_SYSTEM_SCHEMA_LIST.contains(schema)
);
break;
case POSTGRESQL:
reader.withCatalog(connection.getCatalog());
break;
case MYSQL:
reader.exclude(
(catalog, schema, table) ->
catalog != null && MYSQL_SYSTEM_CATALOG_LIST.contains(catalog.toUpperCase(ROOT))
);
break;
case MICROSOFT_SQL_SERVER:
reader
.withCatalog(connection.getCatalog())
.exclude(
(catalog, schema, table) ->
MSSQL_SYSTEM_SCHEMA_LIST.contains(schema)
|| MSSQL_SYSTEM_TABLE_LIST.contains(table)
);
break;
default:
// Nothing to do
}
return reader.listResources(connection);
}
} catch (Exception exception) {
throw new HazelcastException("Could not read resources for DataConnection " + getName(), exception);
}
} | @Test
public void list_resources_should_return_table() throws Exception {
jdbcDataConnection = new JdbcDataConnection(SHARED_DATA_CONNECTION_CONFIG);
executeJdbc(JDBC_URL_SHARED, "CREATE TABLE MY_TABLE (ID INT, NAME VARCHAR)");
List<DataConnectionResource> dataConnectionResources = jdbcDataConnection.listResources();
assertThat(dataConnectionResources).contains(
new DataConnectionResource("TABLE", DB_NAME_SHARED, "PUBLIC", "MY_TABLE")
);
} |
@Override
public void begin() throws TransactionException {
begin(DEFAULT_GLOBAL_TX_TIMEOUT);
} | @Test
public void rollBackNoXIDExceptionTest() throws TransactionException {
RootContext.unbind();
GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
tx.begin();
Assertions.assertThrows(TransactionException.class, tx::rollback);
} |
T call() throws IOException, RegistryException {
String apiRouteBase = "https://" + registryEndpointRequestProperties.getServerUrl() + "/v2/";
URL initialRequestUrl = registryEndpointProvider.getApiRoute(apiRouteBase);
return call(initialRequestUrl);
} | @Test
public void testCall_logNullExceptionMessage() throws IOException, RegistryException {
setUpRegistryResponse(new IOException());
try {
endpointCaller.call();
Assert.fail();
} catch (IOException ex) {
Mockito.verify(mockEventHandlers)
.dispatch(
LogEvent.error("\u001B[31;1mI/O error for image [serverUrl/imageName]:\u001B[0m"));
Mockito.verify(mockEventHandlers)
.dispatch(LogEvent.error("\u001B[31;1m java.io.IOException\u001B[0m"));
Mockito.verify(mockEventHandlers)
.dispatch(LogEvent.error("\u001B[31;1m (null exception message)\u001B[0m"));
Mockito.verifyNoMoreInteractions(mockEventHandlers);
}
} |
public static <T> RemoteIterator<T> remoteIteratorFromSingleton(
@Nullable T singleton) {
return new SingletonIterator<>(singleton);
} | @Test
public void testSingletonNotClosed() throws Throwable {
CloseCounter closeCounter = new CloseCounter();
RemoteIterator<CloseCounter> it = remoteIteratorFromSingleton(closeCounter);
verifyInvoked(it, 1, this::log);
close(it);
closeCounter.assertCloseCount(0);
} |
@GetMapping(value = "/json/{appId}/{clusterName}/{namespace:.+}")
public ResponseEntity<String> queryConfigAsJson(@PathVariable String appId,
@PathVariable String clusterName,
@PathVariable String namespace,
@RequestParam(value = "dataCenter", required = false) String dataCenter,
@RequestParam(value = "ip", required = false) String clientIp,
@RequestParam(value = "label", required = false) String clientLabel,
HttpServletRequest request,
HttpServletResponse response) throws IOException {
String result =
queryConfig(ConfigFileOutputFormat.JSON, appId, clusterName, namespace, dataCenter,
clientIp, clientLabel, request, response);
if (result == null) {
return NOT_FOUND_RESPONSE;
}
return new ResponseEntity<>(result, jsonResponseHeaders, HttpStatus.OK);
} | @Test
public void testQueryConfigAsJson() throws Exception {
String someKey = "someKey";
String someValue = "someValue";
Type responseType = new TypeToken<Map<String, String>>(){}.getType();
String someWatchKey = "someWatchKey";
Set<String> watchKeys = Sets.newHashSet(someWatchKey);
Map<String, String> configurations =
ImmutableMap.of(someKey, someValue);
ApolloConfig someApolloConfig = mock(ApolloConfig.class);
when(configController
.queryConfig(someAppId, someClusterName, someNamespace, someDataCenter, "-1", someClientIp, someClientLabel,null,
someRequest, someResponse)).thenReturn(someApolloConfig);
when(someApolloConfig.getConfigurations()).thenReturn(configurations);
when(watchKeysUtil
.assembleAllWatchKeys(someAppId, someClusterName, someNamespace, someDataCenter))
.thenReturn(watchKeys);
ResponseEntity<String> response =
configFileController
.queryConfigAsJson(someAppId, someClusterName, someNamespace, someDataCenter,
someClientIp, someClientLabel, someRequest, someResponse);
assertEquals(HttpStatus.OK, response.getStatusCode());
assertEquals(configurations, GSON.fromJson(response.getBody(), responseType));
} |
private RemotingCommand updateAndCreateSubscriptionGroup(ChannelHandlerContext ctx, RemotingCommand request)
throws RemotingCommandException {
long startTime = System.currentTimeMillis();
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
LOGGER.info("AdminBrokerProcessor#updateAndCreateSubscriptionGroup called by {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()));
SubscriptionGroupConfig config = RemotingSerializable.decode(request.getBody(), SubscriptionGroupConfig.class);
if (config != null) {
this.brokerController.getSubscriptionGroupManager().updateSubscriptionGroupConfig(config);
}
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
long executionTime = System.currentTimeMillis() - startTime;
LOGGER.info("executionTime of create subscriptionGroup:{} is {} ms", config == null ? null : config.getGroupName(), executionTime);
InvocationStatus status = response.getCode() == ResponseCode.SUCCESS ?
InvocationStatus.SUCCESS : InvocationStatus.FAILURE;
Attributes attributes = BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_INVOCATION_STATUS, status.getName())
.build();
BrokerMetricsManager.consumerGroupCreateExecuteTime.record(executionTime, attributes);
return response;
} | @Test
public void testUpdateAndCreateSubscriptionGroup() throws RemotingCommandException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UPDATE_AND_CREATE_SUBSCRIPTIONGROUP, null);
SubscriptionGroupConfig subscriptionGroupConfig = new SubscriptionGroupConfig();
subscriptionGroupConfig.setBrokerId(1);
subscriptionGroupConfig.setGroupName("groupId");
subscriptionGroupConfig.setConsumeEnable(Boolean.TRUE);
subscriptionGroupConfig.setConsumeBroadcastEnable(Boolean.TRUE);
subscriptionGroupConfig.setRetryMaxTimes(111);
subscriptionGroupConfig.setConsumeFromMinEnable(Boolean.TRUE);
request.setBody(JSON.toJSON(subscriptionGroupConfig).toString().getBytes());
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
} |
@SqlNullable
@Description("Returns the Point value that is the mathematical centroid of a Spherical Geography")
@ScalarFunction("ST_Centroid")
@SqlType(SPHERICAL_GEOGRAPHY_TYPE_NAME)
public static Slice stSphericalCentroid(@SqlType(SPHERICAL_GEOGRAPHY_TYPE_NAME) Slice input)
{
OGCGeometry geometry = EsriGeometrySerde.deserialize(input);
if (geometry.isEmpty()) {
return null;
}
// TODO: add support for other types e.g. POLYGON
validateSphericalType("ST_Centroid", geometry, EnumSet.of(POINT, MULTI_POINT));
if (geometry instanceof OGCPoint) {
return input;
}
OGCGeometryCollection geometryCollection = (OGCGeometryCollection) geometry;
for (int i = 0; i < geometryCollection.numGeometries(); i++) {
OGCGeometry g = geometryCollection.geometryN(i);
validateSphericalType("ST_Centroid", g, EnumSet.of(POINT));
Point p = (Point) g.getEsriGeometry();
checkLongitude(p.getX());
checkLatitude(p.getY());
}
Point centroid;
if (geometryCollection.numGeometries() == 1) {
centroid = (Point) geometryCollection.geometryN(0).getEsriGeometry();
}
else {
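// Average the points as 3D Cartesian unit vectors, then project the mean back onto the sphere.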
double x3DTotal = 0;
double y3DTotal = 0;
double z3DTotal = 0;
for (int i = 0; i < geometryCollection.numGeometries(); i++) {
CartesianPoint cp = new CartesianPoint((Point) geometryCollection.geometryN(i).getEsriGeometry());
x3DTotal += cp.getX();
y3DTotal += cp.getY();
z3DTotal += cp.getZ();
}
double centroidVectorLength = Math.sqrt(x3DTotal * x3DTotal + y3DTotal * y3DTotal + z3DTotal * z3DTotal);
if (centroidVectorLength == 0.0) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("Unexpected error. Average vector length adds to zero (%f, %f, %f)", x3DTotal, y3DTotal, z3DTotal));
}
centroid = new CartesianPoint(
x3DTotal / centroidVectorLength,
y3DTotal / centroidVectorLength,
z3DTotal / centroidVectorLength).asSphericalPoint();
}
return EsriGeometrySerde.serialize(new OGCPoint(centroid, geometryCollection.getEsriSpatialReference()));
} | @Test
public void testSTSphericalCentroid()
{
// Spherical centroid testing
assertSphericalCentroid("POINT (3 5)", new Point(3, 5));
assertSphericalCentroid("POINT EMPTY", null);
assertSphericalCentroid("MULTIPOINT EMPTY", null);
assertSphericalCentroid("MULTIPOINT (3 5)", new Point(3, 5));
assertSphericalCentroid("MULTIPOINT (0 -45, 0 45)", new Point(0, 0));
assertSphericalCentroid("MULTIPOINT (45 0, -45 0)", new Point(0, 0));
assertSphericalCentroid("MULTIPOINT (0 0, -180 0)", new Point(-90, 45));
assertSphericalCentroid("MULTIPOINT (0 -45, 0 45, 30 0)", new Point(12.36780515862267, 0));
assertSphericalCentroid("MULTIPOINT (0 -45, 0 45, 30 0, -30 0)", new Point(0, 0));
} |
public static String checkComponentKey(String key) {
checkArgument(!isNullOrEmpty(key), "Component key can't be empty");
checkArgument(key.length() <= MAX_COMPONENT_KEY_LENGTH, "Component key length (%s) is longer than the maximum authorized (%s). '%s' was provided.",
key.length(), MAX_COMPONENT_KEY_LENGTH, key);
return key;
} | @Test
void check_key() {
String key = repeat("a", 400);
assertThat(ComponentValidator.checkComponentKey(key)).isEqualTo(key);
} |
@JsonIgnore
public boolean isInlineWorkflow() {
return initiator.getType().isInline();
} | @Test
public void testRoundTripSerde() throws Exception {
for (String fileName :
Arrays.asList(
"sample-workflow-instance-created.json", "sample-workflow-instance-succeeded.json")) {
WorkflowInstance expected =
loadObject("fixtures/instances/" + fileName, WorkflowInstance.class);
String ser1 = MAPPER.writeValueAsString(expected);
WorkflowInstance actual =
MAPPER.readValue(MAPPER.writeValueAsString(expected), WorkflowInstance.class);
String ser2 = MAPPER.writeValueAsString(actual);
assertEquals(expected, actual);
assertEquals(ser1, ser2);
assertFalse(actual.isInlineWorkflow());
}
} |
@Override
public void start() {
boolean hasExternalPlugins = pluginRepository.getPlugins().stream().anyMatch(plugin -> plugin.getType().equals(PluginType.EXTERNAL));
try (DbSession session = dbClient.openSession(false)) {
PropertyDto property = Optional.ofNullable(dbClient.propertiesDao().selectGlobalProperty(session, PLUGINS_RISK_CONSENT))
.orElse(defaultPluginRiskConsentProperty());
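// Flag consent as REQUIRED when external plugins are present; clear the requirement once they are all removed.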
if (hasExternalPlugins && NOT_ACCEPTED == PluginRiskConsent.valueOf(property.getValue())) {
addWarningInSonarDotLog();
property.setValue(REQUIRED.name());
dbClient.propertiesDao().saveProperty(session, property);
session.commit();
} else if (!hasExternalPlugins && REQUIRED == PluginRiskConsent.valueOf(property.getValue())) {
dbClient.propertiesDao().deleteGlobalProperty(PLUGINS_RISK_CONSENT, session);
session.commit();
}
}
} | @Test
public void do_nothing_when_there_is_no_external_plugin() {
setupExternalPluginConsent(NOT_ACCEPTED);
setupBundledPlugin();
underTest.start();
assertThat(dbClient.propertiesDao().selectGlobalProperty(PLUGINS_RISK_CONSENT))
.extracting(PropertyDto::getValue)
.isEqualTo(NOT_ACCEPTED.name());
} |
public static String getGroupName(final String serviceNameWithGroup) {
if (StringUtils.isBlank(serviceNameWithGroup)) {
return StringUtils.EMPTY;
}
if (!serviceNameWithGroup.contains(Constants.SERVICE_INFO_SPLITER)) {
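// No group splitter ("@@") present, so the service belongs to the default group.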
return Constants.DEFAULT_GROUP;
}
return serviceNameWithGroup.split(Constants.SERVICE_INFO_SPLITER)[0];
} | @Test
void testGetGroupName() {
String validServiceName = "group@@serviceName";
assertEquals("group", NamingUtils.getGroupName(validServiceName));
} |
protected void setMethod() {
boolean activateBody = RestMeta.isActiveBody( wMethod.getText() );
boolean activateParams = RestMeta.isActiveParameters( wMethod.getText() );
wlBody.setEnabled( activateBody );
wBody.setEnabled( activateBody );
wApplicationType.setEnabled( activateBody );
wlParameters.setEnabled( activateParams );
wParameters.setEnabled( activateParams );
wGet.setEnabled( activateParams );
wlMatrixParameters.setEnabled( activateParams );
wMatrixParameters.setEnabled( activateParams );
wMatrixGet.setEnabled( activateParams );
} | @Test
public void testSetMethod_PATCH() {
doReturn( RestMeta.HTTP_METHOD_PATCH ).when( method ).getText();
dialog.setMethod();
verify( bodyl, times( 1 ) ).setEnabled( true );
verify( body, times( 1 ) ).setEnabled( true );
verify( type, times( 1 ) ).setEnabled( true );
verify( paramsl, times( 1 ) ).setEnabled( true );
verify( params, times( 1 ) ).setEnabled( true );
verify( paramsb, times( 1 ) ).setEnabled( true );
verify( matrixl, times( 1 ) ).setEnabled( true );
verify( matrix, times( 1 ) ).setEnabled( true );
verify( matrixb, times( 1 ) ).setEnabled( true );
} |
@Override
public int acquiredPermits() {
return get(acquiredPermitsAsync());
} | @Test
public void testAcquiredPermits() throws InterruptedException {
RPermitExpirableSemaphore semaphore = redisson.getPermitExpirableSemaphore("test-semaphore");
assertThat(semaphore.trySetPermits(2)).isTrue();
Assertions.assertEquals(0, semaphore.acquiredPermits());
String acquire1 = semaphore.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
assertThat(acquire1).isNotNull();
Assertions.assertEquals(1, semaphore.acquiredPermits());
String acquire2 = semaphore.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
assertThat(acquire2).isNotNull();
String acquire3 = semaphore.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
assertThat(acquire3).isNull();
Assertions.assertEquals(2, semaphore.acquiredPermits());
Thread.sleep(1100);
String acquire4 = semaphore.tryAcquire(200, 1000, TimeUnit.MILLISECONDS);
assertThat(acquire4).isNotNull();
Thread.sleep(1100);
Assertions.assertEquals(0, semaphore.acquiredPermits());
} |
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
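// Assemble an ArtifactResponse that echoes the ArtifactResolve ID and reports a Success status.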
return ArtifactResponseBuilder
.newInstance(artifactResponse)
.addID()
.addIssueInstant()
.addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
.addStatus(StatusBuilder
.newInstance(status)
.addStatusCode(statusCode, StatusCode.SUCCESS)
.build())
.addIssuer(issuer, entityId)
.addMessage(buildResponse(artifactResolveRequest, entityId, signType))
.addSignature(signatureService, signType)
.build();
} | @Test
void parseArtifactResolveSessionExpired() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException {
ArtifactResolveRequest artifactResolveRequest = getArtifactResolveRequest("success", true,false, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID);
artifactResolveRequest.getSamlSession().setResolveBeforeTime(System.currentTimeMillis());
ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(artifactResolveRequest, ENTRANCE_ENTITY_ID, TD);
assertEquals("urn:oasis:names:tc:SAML:2.0:status:Requester", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getValue());
assertEquals("urn:oasis:names:tc:SAML:2.0:status:RequestDenied", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getStatusCode().getValue());
} |
public Set<? extends AuthenticationRequest> getRequest(final Host bookmark, final LoginCallback prompt)
throws LoginCanceledException {
final StringBuilder url = new StringBuilder();
url.append(bookmark.getProtocol().getScheme().toString()).append("://");
url.append(bookmark.getHostname());
if(!(bookmark.getProtocol().getScheme().getPort() == bookmark.getPort())) {
url.append(":").append(bookmark.getPort());
}
final String context = PathNormalizer.normalize(bookmark.getProtocol().getContext());
// Custom authentication context
url.append(context);
if(bookmark.getProtocol().getDefaultHostname().endsWith("identity.api.rackspacecloud.com")
|| bookmark.getHostname().endsWith("identity.api.rackspacecloud.com")) {
return Collections.singleton(new Authentication20RAXUsernameKeyRequest(
URI.create(url.toString()),
bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword(), null)
);
}
final LoginOptions options = new LoginOptions(bookmark.getProtocol()).password(false).anonymous(false).publickey(false);
if(context.contains("1.0")) {
return Collections.singleton(new Authentication10UsernameKeyRequest(URI.create(url.toString()),
bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword()));
}
else if(context.contains("1.1")) {
return Collections.singleton(new Authentication11UsernameKeyRequest(URI.create(url.toString()),
bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword()));
}
else if(context.contains("2.0")) {
// Prompt for tenant
final String user;
final String tenant;
if(StringUtils.contains(bookmark.getCredentials().getUsername(), ':')) {
final String[] parts = StringUtils.splitPreserveAllTokens(bookmark.getCredentials().getUsername(), ':');
tenant = parts[0];
user = parts[1];
}
else {
user = bookmark.getCredentials().getUsername();
tenant = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
LocaleFactory.localizedString("Tenant Name", "Mosso"), options
.usernamePlaceholder(LocaleFactory.localizedString("Tenant Name", "Mosso"))).getUsername();
// Save tenant in username
bookmark.getCredentials().setUsername(String.format("%s:%s", tenant, bookmark.getCredentials().getUsername()));
}
final Set<AuthenticationRequest> requests = new LinkedHashSet<>();
requests.add(new Authentication20UsernamePasswordRequest(
URI.create(url.toString()),
user, bookmark.getCredentials().getPassword(), tenant)
);
requests.add(new Authentication20UsernamePasswordTenantIdRequest(
URI.create(url.toString()),
user, bookmark.getCredentials().getPassword(), tenant)
);
requests.add(new Authentication20AccessKeySecretKeyRequest(
URI.create(url.toString()),
user, bookmark.getCredentials().getPassword(), tenant));
return requests;
}
else if(context.contains("3")) {
// Prompt for project
final String user;
final String project;
final String domain;
if(StringUtils.contains(bookmark.getCredentials().getUsername(), ':')) {
final String[] parts = StringUtils.splitPreserveAllTokens(bookmark.getCredentials().getUsername(), ':');
if(parts.length == 3) {
project = parts[0];
domain = parts[1];
user = parts[2];
}
else {
project = parts[0];
user = parts[1];
domain = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
LocaleFactory.localizedString("Project Domain Name", "Mosso"), options
.usernamePlaceholder(LocaleFactory.localizedString("Project Domain Name", "Mosso"))).getUsername();
// Save project name and domain in username
bookmark.getCredentials().setUsername(String.format("%s:%s:%s", project, domain, bookmark.getCredentials().getUsername()));
}
}
else {
user = bookmark.getCredentials().getUsername();
final Credentials projectName = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
LocaleFactory.localizedString("Project Name", "Mosso"), options
.usernamePlaceholder(LocaleFactory.localizedString("Project Name", "Mosso")));
if(StringUtils.contains(bookmark.getCredentials().getUsername(), ':')) {
final String[] parts = StringUtils.splitPreserveAllTokens(projectName.getUsername(), ':');
project = parts[0];
domain = parts[1];
}
else {
project = projectName.getUsername();
domain = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
LocaleFactory.localizedString("Project Domain Name", "Mosso"), options
.usernamePlaceholder(LocaleFactory.localizedString("Project Domain Name", "Mosso"))).getUsername();
}
// Save project name and domain in username
bookmark.getCredentials().setUsername(String.format("%s:%s:%s", project, domain, bookmark.getCredentials().getUsername()));
}
final Set<AuthenticationRequest> requests = new LinkedHashSet<>();
requests.add(new Authentication3UsernamePasswordProjectRequest(
URI.create(url.toString()),
user, bookmark.getCredentials().getPassword(), project, domain)
);
return requests;
}
else {
log.warn(String.format("Unknown context version in %s. Default to v1 authentication.", context));
// Default to 1.0
return Collections.singleton(new Authentication10UsernameKeyRequest(URI.create(url.toString()),
bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword()));
}
} | @Test
public void testGetDefault2EmptyTenant() throws Exception {
final SwiftAuthenticationService s = new SwiftAuthenticationService();
final SwiftProtocol protocol = new SwiftProtocol() {
@Override
public String getContext() {
return "/v2.0/tokens";
}
};
final Host host = new Host(protocol, "region-b.geo-1.identity.hpcloudsvc.com", new Credentials("u", "P"));
assertEquals(Client.AuthVersion.v20,
s.getRequest(host,
new DisabledLoginCallback() {
@Override
public Credentials prompt(final Host bookmark, final String username, final String title, final String reason, final LoginOptions options) {
return new Credentials("");
}
}).iterator().next().getVersion());
assertEquals(":u", host.getCredentials().getUsername());
} |
@ConstantFunction(name = "add", argTypes = {INT, INT}, returnType = INT, isMonotonic = true)
public static ConstantOperator addInt(ConstantOperator first, ConstantOperator second) {
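// Math.addExact throws ArithmeticException on int overflow instead of silently wrapping.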
return ConstantOperator.createInt(Math.addExact(first.getInt(), second.getInt()));
} | @Test
public void addInt() {
assertEquals(20,
ScalarOperatorFunctions.addInt(O_INT_10, O_INT_10).getInt());
} |
public CreateTableBuilder withPkConstraintName(String pkConstraintName) {
this.pkConstraintName = validateConstraintName(pkConstraintName);
return this;
} | @Test
public void withPkConstraintName_throws_IAE_if_name_is_not_lowercase() {
assertThatThrownBy(() -> underTest.withPkConstraintName("Too"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Constraint name must be lower case and contain only alphanumeric chars or '_', got 'Too'");
} |
public static String prepareUrl(@NonNull String url) {
url = url.trim();
String lowerCaseUrl = url.toLowerCase(Locale.ROOT); // protocol names are case insensitive
if (lowerCaseUrl.startsWith("feed://")) {
Log.d(TAG, "Replacing feed:// with http://");
return prepareUrl(url.substring("feed://".length()));
} else if (lowerCaseUrl.startsWith("pcast://")) {
Log.d(TAG, "Removing pcast://");
return prepareUrl(url.substring("pcast://".length()));
} else if (lowerCaseUrl.startsWith("pcast:")) {
Log.d(TAG, "Removing pcast:");
return prepareUrl(url.substring("pcast:".length()));
} else if (lowerCaseUrl.startsWith("itpc")) {
Log.d(TAG, "Replacing itpc:// with http://");
return prepareUrl(url.substring("itpc://".length()));
} else if (lowerCaseUrl.startsWith(AP_SUBSCRIBE)) {
Log.d(TAG, "Removing antennapod-subscribe://");
return prepareUrl(url.substring(AP_SUBSCRIBE.length()));
} else if (lowerCaseUrl.contains(AP_SUBSCRIBE_DEEPLINK)) {
Log.d(TAG, "Removing " + AP_SUBSCRIBE_DEEPLINK);
String query = Uri.parse(url).getQueryParameter("url");
try {
return prepareUrl(URLDecoder.decode(query, "UTF-8"));
} catch (UnsupportedEncodingException e) {
return prepareUrl(query);
}
} else if (!(lowerCaseUrl.startsWith("http://") || lowerCaseUrl.startsWith("https://"))) {
Log.d(TAG, "Adding http:// at the beginning of the URL");
return "http://" + url;
} else {
return url;
}
} | @Test
public void testProtocolRelativeUrlIsAbsolute() {
final String in = "https://example.com";
final String inBase = "http://examplebase.com";
final String out = UrlChecker.prepareUrl(in, inBase);
assertEquals(in, out);
} |
@Override
public ExportResult<CalendarContainerResource> export(
UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation) {
if (!exportInformation.isPresent()) {
return exportCalendars(authData, Optional.empty());
} else {
StringPaginationToken paginationToken =
(StringPaginationToken) exportInformation.get().getPaginationData();
if (paginationToken != null && paginationToken.getToken().startsWith(CALENDAR_TOKEN_PREFIX)) {
// Next thing to export is more calendars
return exportCalendars(authData, Optional.of(paginationToken));
} else {
// Next thing to export is events
IdOnlyContainerResource idOnlyContainerResource =
(IdOnlyContainerResource) exportInformation.get().getContainerResource();
Optional<PaginationData> pageData = Optional.ofNullable(paginationToken);
return getCalendarEvents(authData,
idOnlyContainerResource.getId(),
pageData);
}
}
} | @Test
public void exportCalendarSubsequentSet() throws IOException {
setUpSingleCalendarResponse();
// Looking at subsequent page, with no page after it
PaginationData paginationData = new StringPaginationToken(CALENDAR_TOKEN_PREFIX + NEXT_TOKEN);
ExportInformation exportInformation = new ExportInformation(paginationData, null);
calendarListResponse.setNextPageToken(null);
// Run test
ExportResult<CalendarContainerResource> result =
googleCalendarExporter.export(UUID.randomUUID(), null, Optional.of(exportInformation));
// Check results
// Verify correct calls were made
InOrder inOrder = Mockito.inOrder(calendarListRequest);
inOrder.verify(calendarListRequest).setPageToken(NEXT_TOKEN);
inOrder.verify(calendarListRequest).execute();
// Check pagination token
ContinuationData continuationData = (ContinuationData) result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken).isNull();
} |
@Override
public ConfiguredDataSourceProvenance getProvenance() {
return provenance;
} | @Test
public void loadTest() throws URISyntaxException {
URI dataFile = JsonDataSourceTest.class.getResource("/org/tribuo/json/test.json").toURI();
RowProcessor<MockOutput> rowProcessor = buildRowProcessor();
JsonDataSource<MockOutput> source = new JsonDataSource<>(dataFile, rowProcessor, true);
MutableDataset<MockOutput> dataset = new MutableDataset<>(source);
assertEquals(20,dataset.size(),"Found an incorrect number of rows when loading the json file.");
DatasetProvenance prov = dataset.getProvenance();
List<ObjectMarshalledProvenance> datasetProvenance = ProvenanceUtil.marshalProvenance(prov);
assertFalse(datasetProvenance.isEmpty());
ObjectProvenance unmarshalledProvenance = ProvenanceUtil.unmarshalProvenance(datasetProvenance);
assertEquals(prov,unmarshalledProvenance);
} |
public Properties getProperties() {
return properties;
} | @Test
public void testHibernateProperties() {
assertNull(Configuration.INSTANCE.getProperties().getProperty("hibernate.types.nothing"));
assertEquals("def", Configuration.INSTANCE.getProperties().getProperty("hibernate.types.abc"));
} |
public static boolean checkParenthesis(String str) {
boolean result = true;
if (str != null) {
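// Compare the total counts of '(' and ')'; nesting order is not validated.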
int open = 0;
int closed = 0;
int i = 0;
while ((i = str.indexOf('(', i)) >= 0) {
i++;
open++;
}
i = 0;
while ((i = str.indexOf(')', i)) >= 0) {
i++;
closed++;
}
result = open == closed;
}
return result;
} | @Test
public void testCheckParenthesis() throws Exception {
String str = "fred:(((ddd))";
assertFalse(URISupport.checkParenthesis(str));
str += ")";
assertTrue(URISupport.checkParenthesis(str));
} |
@Override
protected Range get(ResultSet rs, int position, SharedSessionContractImplementor session, Object owner) throws SQLException {
Object pgObject = rs.getObject(position);
if (pgObject == null) {
return null;
}
String type = ReflectionUtils.invokeGetter(pgObject, "type");
String value = ReflectionUtils.invokeGetter(pgObject, "value");
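// The PGobject's type name selects the parser applied to its textual range value.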
switch (type) {
case "int4range":
return integerRange(value);
case "int8range":
return longRange(value);
case "numrange":
return bigDecimalRange(value);
case "tsrange":
return localDateTimeRange(value);
case "tstzrange":
return ZonedDateTime.class.equals(elementType) ? zonedDateTimeRange(value) : offsetDateTimeRange(value);
case "daterange":
return localDateRange(value);
default:
throw new HibernateException(
new IllegalStateException("The range type [" + type + "] is not supported!")
);
}
} | @Test
public void test() {
Restriction ageRestrictionInt = doInJPA(entityManager -> {
entityManager.persist(new Restriction());
Restriction restriction = new Restriction();
restriction.setRangeInt(int4Range);
restriction.setRangeIntEmpty(int4RangeEmpty);
restriction.setRangeLong(int8Range);
restriction.setRangeLongEmpty(int8RangeEmpty);
restriction.setRangeBigDecimal(numeric);
restriction.setRangeBigDecimalEmpty(numericEmpty);
restriction.setRangeLocalDateTime(localDateTimeRange);
restriction.setRangeLocalDateTimeEmpty(localDateTimeRangeEmpty);
restriction.setRangeZonedDateTime(tsTz);
restriction.setRangeZonedDateTimeEmpty(tsTzEmpty);
restriction.setRangeLocalDate(dateRange);
restriction.setRangeLocalDateEmpty(dateRangeEmpty);
restriction.setOffsetZonedDateTime(tsTzO);
entityManager.persist(restriction);
return restriction;
});
doInJPA(entityManager -> {
Restriction ar = entityManager.find(Restriction.class, ageRestrictionInt.getId());
assertEquals(int4Range, ar.getRangeInt());
assertEquals(int4RangeEmpty, ar.getRangeIntEmpty());
assertEquals(int8Range, ar.getRangeLong());
assertEquals(int8RangeEmpty, ar.getRangeLongEmpty());
assertEquals(numeric, ar.getRangeBigDecimal());
assertEquals(numericEmpty, ar.getRangeBigDecimalEmpty());
assertEquals(localDateTimeRange, ar.getRangeLocalDateTime());
assertEquals(localDateTimeRangeEmpty, ar.getRangeLocalDateTimeEmpty());
assertEquals(tsTzO, ar.getOffsetZonedDateTime());
assertEquals(dateRange, ar.getRangeLocalDate());
assertEquals(dateRangeEmpty, ar.getRangeLocalDateEmpty());
ZoneId zone = ar.getRangeZonedDateTime().getLowerBound().getValue().get().getZone();
ZonedDateTime lower = tsTz.getLowerBound().getValue().get().withZoneSameInstant(zone);
ZonedDateTime upper = tsTz.getUpperBound().getValue().get().withZoneSameInstant(zone);
assertEquals(ar.getRangeZonedDateTime(), Range.rightOpen(lower, upper));
ZoneId zoneEmpty = ar.getRangeZonedDateTimeEmpty().getLowerBound().getValue().get().getZone();
ZonedDateTime lowerEmpty = tsTzEmpty.getLowerBound().getValue().get().withZoneSameInstant(zoneEmpty);
ZonedDateTime upperEmpty = tsTzEmpty.getUpperBound().getValue().get().withZoneSameInstant(zoneEmpty);
assertEquals(ar.getRangeZonedDateTimeEmpty(), Range.rightOpen(lowerEmpty, upperEmpty));
});
} |
@Override
public CompletableFuture<T> toCompletableFuture()
{
return _task.toCompletionStage().toCompletableFuture();
} | @Test
public void testCreateStageFromRunnable_withExecutor() throws Exception
{
final String[] stringArr = new String[1];
String testResult = "testCreateStageFromCompletableFuture";
ParSeqBasedCompletionStage<Void> stageFromCompletionStage =
_parSeqBasedCompletionStageFactory.buildStageFromRunnableAsync(() -> {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
e.printStackTrace();
}
stringArr[0] = testResult;
}, _executor);
Assert.assertEquals(stringArr[0], null);
stageFromCompletionStage.toCompletableFuture().get(); //ensure completion
Assert.assertEquals(stringArr[0], testResult);
} |
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
Configuration yarnSiteConfig, boolean drfUsed,
boolean enableAsyncScheduler, boolean userPercentage,
FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
CapacityScheduler.class.getCanonicalName());
if (conf.getBoolean(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
int interval = conf.getInt(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
yarnSiteConfig.setInt(PREFIX +
"schedule-asynchronously.scheduling-interval-ms", interval);
}
// This should always be true to trigger the CS
// automatic queue refresh.
yarnSiteConfig.setBoolean(
YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
preemptionEnabled = true;
String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
int waitTimeBeforeKill = conf.getInt(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
waitTimeBeforeKill);
long waitBeforeNextStarvationCheck = conf.getLong(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
yarnSiteConfig.setLong(
CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
waitBeforeNextStarvationCheck);
} else {
if (preemptionMode ==
FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
}
}
// For auto created queue's auto deletion.
if (!userPercentage) {
String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
// Set the expiry-for-deletion interval to 10s, consistent with FS.
yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
}
if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
} else {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
}
// Make auto cs conf refresh enabled.
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
.class.getCanonicalName(), yarnSiteConfig));
int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
maxAssign);
}
float localityThresholdNode = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
if (localityThresholdNode !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
localityThresholdNode);
}
float localityThresholdRack = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
if (localityThresholdRack !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
yarnSiteConfig.setFloat(
CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
localityThresholdRack);
}
if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
sizeBasedWeight = true;
}
if (drfUsed) {
yarnSiteConfig.set(
CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
DominantResourceCalculator.class.getCanonicalName());
}
if (enableAsyncScheduler) {
yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
}
} | @SuppressWarnings("deprecation")
@Test
public void testSiteContinuousSchedulingConversion() {
yarnConfig.setBoolean(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
yarnConfig.setInt(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, 666);
converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false,
false, false, null);
assertTrue("Cont. scheduling", yarnConvertedConfig.getBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false));
assertEquals("Scheduling interval", 666,
yarnConvertedConfig.getInt(
"yarn.scheduler.capacity.schedule-asynchronously" +
".scheduling-interval-ms", -1));
} |
@Override
public ValidationTaskResult validateImpl(Map<String, String> optionMap) {
// Skip this test if NOSASL
if (mConf.get(PropertyKey.SECURITY_AUTHENTICATION_TYPE)
.equals(AuthType.NOSASL)) {
return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(),
"Impersonation validation is skipped for NOSASL", "");
}
ValidationTaskResult loadConfig = loadHdfsConfig();
if (loadConfig.getState() != ValidationUtils.State.OK) {
mAdvice.insert(0, "Validating the proxy user requires additional HDFS "
+ "configuration. ");
return loadConfig.setAdvice(mAdvice.toString());
}
// TODO(jiacheng): validate proxyuser.hosts for the cluster
// Validate proxyuser config for the current Alluxio user
try {
String alluxioUser = getCurrentUser();
return validateProxyUsers(alluxioUser);
} catch (UnauthenticatedException e) {
mMsg.append("Failed to authenticate in Alluxio: ");
mMsg.append(ExceptionUtils.asPlainText(e));
mAdvice.append("Please fix the authentication issue.");
return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
mMsg.toString(), mAdvice.toString());
}
} | @Test
public void proxyUserNotWildcard() {
String userName = System.getProperty("user.name");
// Configured proxy users and groups, but not wildcard
String proxyUserKey = String.format("hadoop.proxyuser.%s.users", userName);
String proxyGroupKey = String.format("hadoop.proxyuser.%s.groups", userName);
prepareHdfsConfFiles(ImmutableMap.of(proxyUserKey, "user1,user2", proxyGroupKey, "groups"));
HdfsProxyUserValidationTask task =
new HdfsProxyUserValidationTask("hdfs://namenode:9000/alluxio", mConf);
ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
assertEquals(ValidationUtils.State.WARNING, result.getState());
assertThat(result.getResult(), containsString(
String.format("%s=user1,user2 and %s=groups", proxyUserKey, proxyGroupKey)));
assertThat(result.getAdvice(), containsString(
"Please make sure that includes all users/groups Alluxio needs to impersonate as."));
} |