focal_method (string, length 13–60.9k) | test_case (string, length 25–109k)
---|---|
public List<Job> toScheduledJobs(Instant from, Instant upTo) {
List<Job> jobs = new ArrayList<>();
Instant nextRun = getNextRun(from);
while (nextRun.isBefore(upTo)) {
jobs.add(toJob(new ScheduledState(nextRun, this)));
nextRun = getNextRun(nextRun);
}
return jobs;
} | @Test
void testToScheduledJobsGetsAllJobsBetweenStartAndEndNoResults() {
final RecurringJob recurringJob = aDefaultRecurringJob()
.withCronExpression(Cron.weekly())
.build();
final List<Job> jobs = recurringJob.toScheduledJobs(now(), now().plusSeconds(5));
assertThat(jobs).isEmpty();
} |
@Override
public UserIdentity login(String username, Object credentials, ServletRequest request) {
if (!(request instanceof HttpServletRequest)) {
return null;
}
String doAsUser = request.getParameter(DO_AS);
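// No doAs parameter in the request: authenticate the caller itself via the plain SPNEGO fallback, if that is allowed.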
if (doAsUser == null && _fallbackToSpnegoAllowed) {
SpnegoUserIdentity fallbackIdentity = (SpnegoUserIdentity) _fallbackSpnegoLoginService.login(username, credentials, request);
SpnegoUserPrincipal fallbackPrincipal = (SpnegoUserPrincipal) fallbackIdentity.getUserPrincipal();
if (!fallbackIdentity.isEstablished()) {
LOG.info("Service user {} isn't authorized as spnego fallback principal", fallbackPrincipal.getName());
}
return fallbackIdentity;
} else {
SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request);
SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal();
LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName());
UserIdentity doAsIdentity = null;
if (doAsUser != null && !doAsUser.isEmpty()) {
doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser);
}
Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal);
Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet());
if (!serviceIdentity.isEstablished()) {
LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName());
return new SpnegoUserIdentity(subject, principal, null);
} else {
if (doAsIdentity == null) {
LOG.info("Couldn't authorize user {}", doAsUser);
}
return new SpnegoUserIdentity(subject, principal, doAsIdentity);
}
}
} | @Test
public void testInvalidAuthServiceUser() {
SpnegoLoginServiceWithAuthServiceLifecycle mockSpnegoLoginService = mock(SpnegoLoginServiceWithAuthServiceLifecycle.class);
SpnegoLoginServiceWithAuthServiceLifecycle mockFallbackLoginService = mock(SpnegoLoginServiceWithAuthServiceLifecycle.class);
SpnegoUserPrincipal servicePrincipal = new SpnegoUserPrincipal(TEST_SERVICE_USER, ENCODED_TOKEN);
Subject subject = new Subject(true, Collections.singleton(servicePrincipal), Collections.emptySet(), Collections.emptySet());
SpnegoUserIdentity result = new SpnegoUserIdentity(subject, servicePrincipal, null);
expect(mockSpnegoLoginService.login(anyString(), anyObject(), anyObject())).andReturn(result);
TestAuthorizer userAuthorizer = new TestAuthorizer(TEST_USER);
HttpServletRequest mockRequest = mock(HttpServletRequest.class);
expect(mockRequest.getParameter(DO_AS)).andReturn(TEST_USER);
replay(mockSpnegoLoginService);
TrustedProxyLoginService trustedProxyLoginService = new TrustedProxyLoginService(mockSpnegoLoginService, mockFallbackLoginService,
userAuthorizer, false);
UserIdentity doAsIdentity = trustedProxyLoginService.login(null, ENCODED_TOKEN, mockRequest);
assertNotNull(doAsIdentity);
assertFalse(((SpnegoUserIdentity) doAsIdentity).isEstablished());
} |
public Result parse(final String string) throws DateNotParsableException {
return this.parse(string, new Date());
} | @Test
public void testLast4hoursArtificialReference() throws Exception {
DateTime reference = DateTime.now(DateTimeZone.UTC).minusHours(7);
NaturalDateParser.Result last4 = naturalDateParserAntarctica.parse("last 4 hours", reference.toDate());
assertThat(last4.getFrom()).as("from should be exactly 4 hours in the past").isEqualTo(reference.minusHours(4));
assertThat(last4.getTo()).as("to should be the reference date").isEqualTo(reference);
reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 09:45:23");
DateTime fourHoursAgo = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 05:45:23");
last4 = naturalDateParserAntarctica.parse("last 4 hours", reference.toDate());
assertThat(last4.getFrom()).as("from should be exactly 4 hours in the past").isEqualTo(fourHoursAgo);
assertThat(last4.getTo()).as("to should be the reference date").isEqualTo(reference);
} |
public long residentMemorySizeEstimate() {
long size = 0;
size += Long.BYTES; // value.context.timestamp
size += Long.BYTES; // value.context.offset
if (topic != null) {
size += topic.toCharArray().length;
}
size += Integer.BYTES; // partition
for (final Header header : headers) {
size += header.key().toCharArray().length;
final byte[] value = header.value();
if (value != null) {
size += value.length;
}
}
return size;
} | @Test
public void shouldEstimateNullTopicAndEmptyHeadersAsZeroLength() {
final Headers headers = new RecordHeaders();
final ProcessorRecordContext context = new ProcessorRecordContext(
42L,
73L,
0,
null,
new RecordHeaders()
);
assertEquals(MIN_SIZE, context.residentMemorySizeEstimate());
} |
@EventListener
public void handleRedisKeyExpiredEvent(RedisKeyExpiredEvent<RdaSession> event) {
if (event.getValue() instanceof RdaSession) {
RdaSession session = (RdaSession) event.getValue();
if (!session.isFinished()) {
confirmService.sendConfirm(
session.getReturnUrl(),
session.getConfirmId(),
session.getConfirmSecret(),
false,
session.getApp(),
RdaError.TIMEOUT
);
}
}
} | @Test
void testHandleRedisKeyExpiredEventStatusInitialized() {
RdaSession session = new RdaSession();
session.setStatus(Status.INITIALIZED);
session.setReturnUrl("http://localhost");
session.setConfirmId("id");
session.setConfirmSecret("secret");
Mockito.when(event.getValue()).thenReturn(session);
timeoutService.handleRedisKeyExpiredEvent(event);
Mockito.verify(confirmService).sendConfirm(session.getReturnUrl(), "id", "secret", false, session.getApp(), RdaError.TIMEOUT);
} |
public static Document loadXMLFile( String filename ) throws KettleXMLException {
try {
return loadXMLFile( KettleVFS.getFileObject( filename ) );
} catch ( Exception e ) {
throw new KettleXMLException( e );
}
} | @Test
public void loadFile_ExceptionCheckingFile() throws Exception {
FileObject fileObjectMock = mock( FileObject.class );
doReturn( true ).when( fileObjectMock ).exists();
doThrow( new FileSystemException( DUMMY ) ).when( fileObjectMock ).isFile();
try {
XMLHandler.loadXMLFile( fileObjectMock );
} catch ( KettleXMLException e ) {
System.out.println( e.getMessage() );
assertTrue( e.getMessage().contains( "Unable to check if file" ) );
}
} |
public static String readLink(File f) {
/* NB: Use readSymbolicLink in java.nio.file.Path once available. Could
* use getCanonicalPath in File to get the target of the symlink but that
* does not indicate if the given path refers to a symlink.
*/
if (f == null) {
LOG.warn("Can not read a null symLink");
return "";
}
try {
return Shell.execCommand(
Shell.getReadlinkCommand(f.toString())).trim();
} catch (IOException x) {
return "";
}
} | @Test
public void testReadSymlinkWithAFileAsInput() throws IOException {
File file = new File(del, FILE);
String result = FileUtil.readLink(file);
Assert.assertEquals("", result);
Verify.delete(file);
} |
static GeneratedResource getGeneratedResource(EfestoCompilationOutput compilationOutput) {
if (compilationOutput instanceof EfestoRedirectOutput) {
return new GeneratedRedirectResource(((EfestoRedirectOutput) compilationOutput).getModelLocalUriId(),
((EfestoRedirectOutput) compilationOutput).getTargetEngine());
} else if (compilationOutput instanceof EfestoCallableOutput) {
return new GeneratedExecutableResource(((EfestoCallableOutput) compilationOutput).getModelLocalUriId(), ((EfestoCallableOutput) compilationOutput).getFullClassNames());
} else {
throw new KieCompilerServiceException("Unmanaged type " + compilationOutput.getClass().getName());
}
} | @Test
void getGeneratedResource() {
GeneratedResource retrieved = CompilationManagerUtils.getGeneratedResource(finalOutput);
commonEvaluateGeneratedExecutableResource(retrieved);
} |
@Override
public Driver merge(Driver other) {
checkArgument(parents == null || Objects.equals(parent(), other.parent()),
"Parent drivers are not the same");
// Merge the behaviours.
Map<Class<? extends Behaviour>, Class<? extends Behaviour>>
behaviours = Maps.newHashMap();
behaviours.putAll(this.behaviours);
other.behaviours().forEach(b -> behaviours.put(b, other.implementation(b)));
// Merge the properties.
ImmutableMap.Builder<String, String> properties = ImmutableMap.builder();
properties.putAll(other.properties());
// remove duplicated properties from this driver and merge
this.properties().entrySet().stream()
.filter(e -> !other.properties().containsKey(e.getKey()))
.forEach(properties::put);
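// Merge the parents. Parents sharing the same name are merged recursively; remaining parents from either driver are kept as-is.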
List<Driver> completeParents = new ArrayList<>();
if (parents != null) {
parents.forEach(parent -> other.parents().forEach(otherParent -> {
if (otherParent.name().equals(parent.name())) {
completeParents.add(parent.merge(otherParent));
} else if (!completeParents.contains(otherParent)) {
completeParents.add(otherParent);
} else if (!completeParents.contains(parent)) {
completeParents.add(parent);
}
}));
}
return new DefaultDriver(name, !completeParents.isEmpty() ? completeParents : other.parents(),
manufacturer, hwVersion, swVersion,
ImmutableMap.copyOf(behaviours), properties.build());
} | @Test
public void merge() {
DefaultDriver one = new DefaultDriver("foo.bar", new ArrayList<>(), "Circus", "lux", "1.2a",
ImmutableMap.of(TestBehaviour.class,
TestBehaviourImpl.class),
ImmutableMap.of("foo", "bar"));
Driver ddc =
one.merge(new DefaultDriver("foo.bar", new ArrayList<>(), "", "", "",
ImmutableMap.of(TestBehaviourTwo.class,
TestBehaviourTwoImpl.class),
ImmutableMap.of("goo", "wee")));
assertEquals("incorrect name", "foo.bar", ddc.name());
assertEquals("incorrect mfr", "Circus", ddc.manufacturer());
assertEquals("incorrect hw", "lux", ddc.hwVersion());
assertEquals("incorrect sw", "1.2a", ddc.swVersion());
assertEquals("incorrect behaviour count", 2, ddc.behaviours().size());
assertTrue("incorrect behaviour", ddc.hasBehaviour(TestBehaviourTwo.class));
assertEquals("incorrect property count", 2, ddc.properties().size());
assertEquals("incorrect key count", 2, ddc.keys().size());
assertEquals("incorrect property", "wee", ddc.value("goo"));
assertTrue("incorrect toString", ddc.toString().contains("Circus"));
} |
public static void runCommand(Config config) throws TerseException {
try {
ManifestWorkspace workspace = new ManifestWorkspace(config.out);
ClassLoader parent = ConnectPluginPath.class.getClassLoader();
ServiceLoaderScanner serviceLoaderScanner = new ServiceLoaderScanner();
ReflectionScanner reflectionScanner = new ReflectionScanner();
PluginSource classpathSource = PluginUtils.classpathPluginSource(parent);
ManifestWorkspace.SourceWorkspace<?> classpathWorkspace = workspace.forSource(classpathSource);
PluginScanResult classpathPlugins = discoverPlugins(classpathSource, reflectionScanner, serviceLoaderScanner);
Map<Path, Set<Row>> rowsByLocation = new LinkedHashMap<>();
Set<Row> classpathRows = enumerateRows(classpathWorkspace, classpathPlugins);
rowsByLocation.put(null, classpathRows);
ClassLoaderFactory factory = new ClassLoaderFactory();
try (DelegatingClassLoader delegatingClassLoader = factory.newDelegatingClassLoader(parent)) {
beginCommand(config);
for (Path pluginLocation : config.locations) {
PluginSource source = PluginUtils.isolatedPluginSource(pluginLocation, delegatingClassLoader, factory);
ManifestWorkspace.SourceWorkspace<?> pluginWorkspace = workspace.forSource(source);
PluginScanResult plugins = discoverPlugins(source, reflectionScanner, serviceLoaderScanner);
Set<Row> rows = enumerateRows(pluginWorkspace, plugins);
rowsByLocation.put(pluginLocation, rows);
for (Row row : rows) {
handlePlugin(config, row);
}
}
endCommand(config, workspace, rowsByLocation);
}
} catch (Throwable e) {
failCommand(config, e);
}
} | @Test
public void testSyncManifestsDryRunReadOnlyServices() {
PluginLocationType type = PluginLocationType.CLASS_HIERARCHY;
PluginLocation locationA = setupLocation(workspace.resolve("location-a"), type, TestPlugins.TestPlugin.NON_MIGRATED_MULTI_PLUGIN);
String subPath = "META-INF/services";
assertTrue(locationA.path.resolve(subPath).toFile().setReadOnly());
CommandResult res = runCommand(
"sync-manifests",
"--plugin-location",
locationA,
"--dry-run"
);
assertEquals(2, res.returnCode);
} |
public static List<String> splitToWhiteSpaceSeparatedTokens(String input) {
if (input == null) {
return new ArrayList<>();
}
StringTokenizer tokenizer = new StringTokenizer(input.trim(), QUOTE_CHAR + WHITESPACE, true);
List<String> tokens = new ArrayList<>();
StringBuilder quotedText = new StringBuilder();
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (QUOTE_CHAR.equals(token)) {
// if we have a quote, add the next tokens to the quoted text
// until the quoting has finished
quotedText.append(QUOTE_CHAR);
String buffer = quotedText.toString();
if (isSingleQuoted(buffer) || isDoubleQuoted(buffer)) {
tokens.add(buffer.substring(1, buffer.length() - 1));
quotedText = new StringBuilder();
}
} else if (WHITESPACE.equals(token)) {
// a white space, if in quote, add the white space, otherwise
// skip it
if (quotedText.length() > 0) {
quotedText.append(WHITESPACE);
}
} else {
if (quotedText.length() > 0) {
quotedText.append(token);
} else {
tokens.add(token);
}
}
}
if (quotedText.length() > 0) {
throw new IllegalArgumentException("Invalid quoting found in args " + quotedText);
}
return tokens;
} | @Test
public void testWhitespaceSeparatedArgsWithSpaces() {
List<String> args = splitToWhiteSpaceSeparatedTokens("\"arg 0 \" arg1 \"arg 2\"");
assertEquals("arg 0 ", args.get(0));
assertEquals("arg1", args.get(1));
assertEquals("arg 2", args.get(2));
} |
@Override
public double rand() {
// faster calculation by inversion
boolean inv = p > 0.5;
double np = n * Math.min(p, 1.0 - p);
// Poisson's approximation for extremely low np
int x;
if (np < 1E-6) {
x = PoissonDistribution.tinyLambdaRand(np);
} else {
RandomNumberGenerator rng;
if (np < 55) {
// inversion method, using chop-down search from 0
if (p <= 0.5) {
rng = new ModeSearch(p);
} else {
rng = new ModeSearch(1.0 - p); // faster calculation by inversion
}
} else {
// ratio of uniforms method
if (p <= 0.5) {
rng = new Patchwork(p);
} else {
rng = new Patchwork(1.0 - p); // faster calculation by inversion
}
}
x = rng.rand();
}
// undo inversion
return inv ? n - x : x;
} | @Test
public void testRandOverflow() {
System.out.println("rand overflow");
MathEx.setSeed(19650218);
BinomialDistribution instance = new BinomialDistribution(1000, 0.999000999000999);
assertEquals(999, instance.rand(), 1E-7);
} |
public Optional<Column> findColumn(final ColumnName columnName) {
return findColumnMatching(withName(columnName));
} | @Test
public void shouldGetHeaderColumns() {
assertThat(SOME_SCHEMA.findColumn(H0), is(Optional.of(
Column.of(H0, HEADERS_TYPE, Namespace.HEADERS, 0, Optional.empty())
)));
assertThat(SCHEMA_WITH_EXTRACTED_HEADERS.findColumn(H0), is(Optional.of(
Column.of(H0, BYTES, Namespace.HEADERS, 0, Optional.of("key0"))
)));
assertThat(SCHEMA_WITH_EXTRACTED_HEADERS.findColumn(H1), is(Optional.of(
Column.of(H1, BYTES, Namespace.HEADERS, 1, Optional.of("key1"))
)));
} |
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
} | @SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testParameterizedArrays() {
GenericArrayClass<Boolean> function =
new GenericArrayClass<Boolean>() {
private static final long serialVersionUID = 1L;
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function, TypeInformation.of(new TypeHint<Boolean[]>() {}));
assertThat(ti).isInstanceOf(ObjectArrayTypeInfo.class);
ObjectArrayTypeInfo<?, ?> oati = (ObjectArrayTypeInfo<?, ?>) ti;
assertThat(oati.getComponentInfo()).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO);
} |
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
if (!"alerts".equals(notification.getType())) {
return null;
}
// Retrieve useful values
String projectId = notification.getFieldValue("projectId");
String projectKey = notification.getFieldValue("projectKey");
String projectName = notification.getFieldValue("projectName");
String projectVersion = notification.getFieldValue("projectVersion");
String branchName = notification.getFieldValue("branch");
String alertName = notification.getFieldValue("alertName");
String alertText = notification.getFieldValue("alertText");
String alertLevel = notification.getFieldValue("alertLevel");
String ratingMetricsInOneString = notification.getFieldValue("ratingMetrics");
boolean isNewAlert = Boolean.parseBoolean(notification.getFieldValue("isNewAlert"));
String fullProjectName = computeFullProjectName(projectName, branchName);
// Generate text
String subject = generateSubject(fullProjectName, alertLevel, isNewAlert);
String messageBody = generateMessageBody(projectName, projectKey, projectVersion, branchName, alertName, alertText, isNewAlert, ratingMetricsInOneString);
// And finally return the email that will be sent
return new EmailMessage()
.setMessageId("alerts/" + projectId)
.setSubject(subject)
.setPlainTextMessage(messageBody);
} | @Test
public void shouldFormatBackToGreenMessage() {
Notification notification = createNotification("Passed", "", "OK", "false");
EmailMessage message = template.format(notification);
assertThat(message.getMessageId(), is("alerts/45"));
assertThat(message.getSubject(), is("\"Foo\" is back to green"));
assertThat(message.getMessage(), is("" +
"Project: Foo\n" +
"Version: V1-SNAP\n" +
"Quality gate status: Passed\n" +
"\n" +
"\n" +
"More details at: http://nemo.sonarsource.org/dashboard?id=org.sonar.foo:foo"));
} |
@Override
public Collection<TimeSeriesEntry<V, L>> lastEntries(int count) {
return get(lastEntriesAsync(count));
} | @Test
public void testLastEntries() {
RTimeSeries<String, String> t = redisson.getTimeSeries("test");
t.add(1, "10");
t.add(2, "20", "200");
t.add(3, "30");
Collection<TimeSeriesEntry<String, String>> s = t.lastEntries(2);
assertThat(s).containsExactly(new TimeSeriesEntry<>(2, "20", "200"),
new TimeSeriesEntry<>(3, "30"));
assertThat(t.size()).isEqualTo(3);
} |
@Override
public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) {
RFuture<List<byte[]>> f = executorService.readAsync((String)null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
return syncFuture(f);
} | @Test
public void testClusterGetKeysInSlot() {
List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10);
assertThat(keys).isEmpty();
} |
@Override
public void log(Request request, Response response) {
try {
RequestLogEntry.Builder builder = new RequestLogEntry.Builder();
String peerAddress = request.getRemoteAddr();
int peerPort = request.getRemotePort();
long startTime = request.getTimeStamp();
long endTime = System.currentTimeMillis();
Integer statusCodeOverride = (Integer) request.getAttribute(HttpRequestDispatch.ACCESS_LOG_STATUS_CODE_OVERRIDE);
builder.peerAddress(peerAddress)
.peerPort(peerPort)
.localPort(getLocalPort(request))
.timestamp(Instant.ofEpochMilli(startTime))
.duration(Duration.ofMillis(Math.max(0, endTime - startTime)))
.responseSize(response.getHttpChannel().getBytesWritten())
.requestSize(request.getHttpInput().getContentReceived())
.statusCode(statusCodeOverride != null ? statusCodeOverride : response.getCommittedMetaData().getStatus());
addNonNullValue(builder, request.getMethod(), RequestLogEntry.Builder::httpMethod);
addNonNullValue(builder, request.getRequestURI(), RequestLogEntry.Builder::rawPath);
addNonNullValue(builder, request.getProtocol(), RequestLogEntry.Builder::httpVersion);
addNonNullValue(builder, request.getScheme(), RequestLogEntry.Builder::scheme);
addNonNullValue(builder, request.getHeader("User-Agent"), RequestLogEntry.Builder::userAgent);
addNonNullValue(builder, getServerName(request), RequestLogEntry.Builder::hostString);
addNonNullValue(builder, request.getHeader("Referer"), RequestLogEntry.Builder::referer);
addNonNullValue(builder, request.getQueryString(), RequestLogEntry.Builder::rawQuery);
HttpRequest jdiscRequest = (HttpRequest) request.getAttribute(HttpRequest.class.getName());
if (jdiscRequest != null) {
addNonNullValue(builder, jdiscRequest.getUserPrincipal(), RequestLogEntry.Builder::userPrincipal);
}
String requestFilterId = (String) request.getAttribute(RequestUtils.JDISC_REQUEST_CHAIN);
addNonNullValue(builder, requestFilterId, (b, chain) -> b.addExtraAttribute("request-chain", chain));
String responseFilterId = (String) request.getAttribute(RequestUtils.JDISC_RESPONSE_CHAIN);
addNonNullValue(builder, responseFilterId, (b, chain) -> b.addExtraAttribute("response-chain", chain));
UUID connectionId = (UUID) request.getAttribute(JettyConnectionLogger.CONNECTION_ID_REQUEST_ATTRIBUTE);
addNonNullValue(builder, connectionId, (b, uuid) -> b.connectionId(uuid.toString()));
String remoteAddress = getRemoteAddress(request);
if (!Objects.equal(remoteAddress, peerAddress)) {
builder.remoteAddress(remoteAddress);
}
int remotePort = getRemotePort(request);
if (remotePort != peerPort) {
builder.remotePort(remotePort);
}
LOGGED_REQUEST_HEADERS.forEach(header -> {
String value = request.getHeader(header);
if (value != null) {
builder.addExtraAttribute(header, value);
}
});
X509Certificate[] clientCert = (X509Certificate[]) request.getAttribute(RequestUtils.SERVLET_REQUEST_X509CERT);
if (clientCert != null && clientCert.length > 0) {
builder.sslPrincipal(clientCert[0].getSubjectX500Principal());
}
AccessLogEntry accessLogEntry = (AccessLogEntry) request.getAttribute(JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY);
if (accessLogEntry != null) {
var extraAttributes = accessLogEntry.getKeyValues();
if (extraAttributes != null) {
extraAttributes.forEach(builder::addExtraAttributes);
}
addNonNullValue(builder, accessLogEntry.getHitCounts(), RequestLogEntry.Builder::hitCounts);
addNonNullValue(builder, accessLogEntry.getTrace(), RequestLogEntry.Builder::traceNode);
accessLogEntry.getContent().ifPresent(builder::content);
}
http2StreamId(request).ifPresent(streamId -> builder.addExtraAttribute("http2-stream-id", Integer.toString(streamId)));
requestLog.log(builder.build());
} catch (Exception e) {
// Catching any exceptions here as it is unclear how Jetty handles exceptions from a RequestLog.
logger.log(Level.SEVERE, "Failed to log access log entry: " + e.getMessage(), e);
}
} | @Test
void requireThatStatusCodeCanBeOverridden() {
Request jettyRequest = createRequestBuilder()
.uri("http", "localhost", 12345, "/api/", null)
.build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
new AccessLogRequestLog(requestLog).log(jettyRequest, JettyMockResponseBuilder.newBuilder().build());
assertEquals(200, requestLog.entries().remove(0).statusCode().getAsInt());
jettyRequest.setAttribute(HttpRequestDispatch.ACCESS_LOG_STATUS_CODE_OVERRIDE, 404);
new AccessLogRequestLog(requestLog).log(jettyRequest, JettyMockResponseBuilder.newBuilder().build());
assertEquals(404, requestLog.entries().remove(0).statusCode().getAsInt());
} |
public JsonReader newJsonReader(Reader reader) {
JsonReader jsonReader = new JsonReader(reader);
jsonReader.setStrictness(strictness == null ? Strictness.LEGACY_STRICT : strictness);
return jsonReader;
} | @Test
public void testNewJsonReader_Default() throws IOException {
String json = "test"; // String without quotes
JsonReader jsonReader = new Gson().newJsonReader(new StringReader(json));
assertThrows(MalformedJsonException.class, jsonReader::nextString);
jsonReader.close();
} |
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
Configuration conf = FlinkOptions.fromMap(context.getCatalogTable().getOptions());
checkArgument(!StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.PATH)),
"Option [path] should not be empty.");
setupTableOptions(conf.getString(FlinkOptions.PATH), conf);
ResolvedSchema schema = context.getCatalogTable().getResolvedSchema();
sanityCheck(conf, schema);
setupConfOptions(conf, context.getObjectIdentifier(), context.getCatalogTable(), schema);
setupSortOptions(conf, context.getConfiguration());
return new HoodieTableSink(conf, schema);
} | @Test
void testInferAvroSchemaForSink() {
// infer the schema if not specified
final HoodieTableSink tableSink1 =
(HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(MockContext.getInstance(this.conf));
final Configuration conf1 = tableSink1.getConf();
assertThat(conf1.get(FlinkOptions.SOURCE_AVRO_SCHEMA), is(INFERRED_SCHEMA));
// set up the explicit schema using the file path
this.conf.setString(FlinkOptions.SOURCE_AVRO_SCHEMA_PATH, AVRO_SCHEMA_FILE_PATH);
HoodieTableSink tableSink2 =
(HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(MockContext.getInstance(this.conf));
Configuration conf2 = tableSink2.getConf();
assertNull(conf2.get(FlinkOptions.SOURCE_AVRO_SCHEMA), "expect schema string as null");
// infer special avro data types that needs namespace
this.conf.removeConfig(FlinkOptions.SOURCE_AVRO_SCHEMA_PATH);
ResolvedSchema schema3 = SchemaBuilder.instance()
.field("f_decimal", DataTypes.DECIMAL(3, 2).notNull())
.field("f_map", DataTypes.MAP(DataTypes.VARCHAR(20), DataTypes.VARCHAR(10)))
.field("f_array", DataTypes.ARRAY(DataTypes.VARCHAR(10)))
.field("f_record", DataTypes.ROW(DataTypes.FIELD("r1", DataTypes.VARCHAR(10)), DataTypes.FIELD("r2", DataTypes.INT())))
.primaryKey("f_decimal")
.build();
final HoodieTableSink tableSink3 =
(HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(MockContext.getInstance(this.conf, schema3, ""));
final Configuration conf3 = tableSink3.getConf();
final String expected = AvroSchemaConverter.convertToSchema(schema3.toSinkRowDataType().getLogicalType(), AvroSchemaUtils.getAvroRecordQualifiedName("t1")).toString();
assertThat(conf3.get(FlinkOptions.SOURCE_AVRO_SCHEMA), is(expected));
} |
public String getMd5(String input) {
byte[] md5;
// MessageDigest instance is NOT thread-safe
synchronized (mdInst) {
mdInst.update(input.getBytes(UTF_8));
md5 = mdInst.digest();
}
int j = md5.length;
char str[] = new char[j * 2];
int k = 0;
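// convert each digest byte into two hexadecimal characters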
for (int i = 0; i < j; i++) {
byte byte0 = md5[i];
str[k++] = hexDigits[byte0 >>> 4 & 0xf];
str[k++] = hexDigits[byte0 & 0xf];
}
return new String(str);
} | @Test
void test() {
MD5Utils sharedMd5Utils = new MD5Utils();
final String[] input = {
"provider-appgroup-one/org.apache.dubbo.config.spring.api.HelloService:dubboorg.apache.dubbo.config.spring.api.HelloService{REGISTRY_CLUSTER=registry-one, anyhost=true, application=provider-app, background=false, compiler=javassist, deprecated=false, dubbo=2.0.2, dynamic=true, file.cache=false, generic=false, group=group-one, interface=org.apache.dubbo.config.spring.api.HelloService, logger=slf4j, metadata-type=remote, methods=sayHello, organization=test, owner=com.test, release=, service-name-mapping=true, side=provider}",
"provider-appgroup-two/org.apache.dubbo.config.spring.api.DemoService:dubboorg.apache.dubbo.config.spring.api.DemoService{REGISTRY_CLUSTER=registry-two, anyhost=true, application=provider-app, background=false, compiler=javassist, deprecated=false, dubbo=2.0.2, dynamic=true, file.cache=false, generic=false, group=group-two, interface=org.apache.dubbo.config.spring.api.DemoService, logger=slf4j, metadata-type=remote, methods=sayName,getBox, organization=test, owner=com.test, release=, service-name-mapping=true, side=provider}"
};
final String[] result = {sharedMd5Utils.getMd5(input[0]), new MD5Utils().getMd5(input[1])};
System.out.println("Expected result: " + Arrays.asList(result));
int nThreads = 8;
CountDownLatch latch = new CountDownLatch(nThreads);
List<Throwable> errors = Collections.synchronizedList(new ArrayList<>());
ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
try {
for (int i = 0; i < nThreads; i++) {
MD5Utils md5Utils = i < nThreads / 2 ? sharedMd5Utils : new MD5Utils();
executorService.submit(new Md5Task(input[i % 2], result[i % 2], md5Utils, latch, errors));
}
latch.await();
Assertions.assertEquals(Collections.EMPTY_LIST, errors);
Assertions.assertEquals(0, latch.getCount());
} catch (Throwable e) {
Assertions.fail(StringUtils.toString(e));
} finally {
executorService.shutdown();
}
} |
@Override
public <R> QueryResult<R> query(final Query<R> query, final PositionBound positionBound, final QueryConfig config) {
return internal.query(query, positionBound, config);
} | @Test
public void shouldTimeIteratorDuration() {
final MultiVersionedKeyQuery<String, String> query = MultiVersionedKeyQuery.withKey(KEY);
final PositionBound bound = PositionBound.unbounded();
final QueryConfig config = new QueryConfig(false);
when(inner.query(any(), any(), any())).thenReturn(
QueryResult.forResult(new LogicalSegmentIterator(Collections.emptyListIterator(), RAW_KEY, 0L, 0L, ResultOrder.ANY)));
final KafkaMetric iteratorDurationAvgMetric = getMetric("iterator-duration-avg");
final KafkaMetric iteratorDurationMaxMetric = getMetric("iterator-duration-max");
assertThat(iteratorDurationAvgMetric, not(nullValue()));
assertThat(iteratorDurationMaxMetric, not(nullValue()));
assertThat((Double) iteratorDurationAvgMetric.metricValue(), equalTo(Double.NaN));
assertThat((Double) iteratorDurationMaxMetric.metricValue(), equalTo(Double.NaN));
final QueryResult<VersionedRecordIterator<String>> first = store.query(query, bound, config);
try (final VersionedRecordIterator<String> iterator = first.getResult()) {
// nothing to do, just close immediately
mockTime.sleep(2);
}
assertThat((double) iteratorDurationAvgMetric.metricValue(), equalTo(2.0 * TimeUnit.MILLISECONDS.toNanos(1)));
assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(2.0 * TimeUnit.MILLISECONDS.toNanos(1)));
final QueryResult<VersionedRecordIterator<String>> second = store.query(query, bound, config);
try (final VersionedRecordIterator<String> iterator = second.getResult()) {
// nothing to do, just close immediately
mockTime.sleep(3);
}
assertThat((double) iteratorDurationAvgMetric.metricValue(), equalTo(2.5 * TimeUnit.MILLISECONDS.toNanos(1)));
assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(3.0 * TimeUnit.MILLISECONDS.toNanos(1)));
} |
public boolean isNamespaceReferencedWithHotRestart(@Nonnull String namespace) {
return nodeEngine.getConfig()
.getCacheConfigs()
.values()
.stream()
.filter(cacheConfig -> cacheConfig.getDataPersistenceConfig().isEnabled())
.map(CacheSimpleConfig::getUserCodeNamespace)
.anyMatch(namespace::equals)
|| getCacheConfigs()
.stream()
.filter(cacheConfig -> cacheConfig.getHotRestartConfig().isEnabled())
.map(CacheConfig::getUserCodeNamespace)
.anyMatch(namespace::equals);
} | @Test
public void testIsNamespaceReferencedWithHotRestart_withSimpleCacheConfigs_true() {
CacheService cacheService = new TestCacheService(mockNodeEngine, true);
CacheSimpleConfig cacheConfigMock = Mockito.mock(CacheSimpleConfig.class);
DataPersistenceConfig dataPersistenceConfigMock = Mockito.mock(DataPersistenceConfig.class);
when(dataPersistenceConfigMock.isEnabled()).thenReturn(true);
when(cacheConfigMock.getDataPersistenceConfig()).thenReturn(dataPersistenceConfigMock);
when(cacheConfigMock.getUserCodeNamespace()).thenReturn("ns1");
when(cacheConfigMock.getDataPersistenceConfig()).thenReturn(dataPersistenceConfigMock);
when(mockConfig.getCacheConfigs()).thenReturn(Map.of("test-cache", cacheConfigMock));
assertTrue(cacheService.isNamespaceReferencedWithHotRestart("ns1"));
} |
@Override
public Map<String, Object> encode(Object object) throws EncodeException {
if (object == null) {
return Collections.emptyMap();
}
ObjectParamMetadata metadata =
classToMetadata.computeIfAbsent(object.getClass(), ObjectParamMetadata::parseObjectType);
return metadata.objectFields.stream()
.map(field -> this.FieldValuePair(object, field))
.filter(fieldObjectPair -> fieldObjectPair.right.isPresent())
.collect(Collectors.toMap(this::fieldName,
fieldObjectPair -> fieldObjectPair.right.get()));
} | @Test
void defaultEncoder_withOverriddenParamName() {
HashSet<Object> expectedNames = new HashSet<>();
expectedNames.add("fooAlias");
expectedNames.add("bar");
final NormalObjectWithOverriddenParamName normalObject =
new NormalObjectWithOverriddenParamName("fooz", "barz");
final Map<String, Object> encodedMap = encoder.encode(normalObject);
assertThat(encodedMap.keySet()).as("@Param ignored").isEqualTo(expectedNames);
} |
@Udf
public String concat(@UdfParameter final String... jsonStrings) {
if (jsonStrings == null) {
return null;
}
final List<JsonNode> nodes = new ArrayList<>(jsonStrings.length);
boolean allObjects = true;
for (final String jsonString : jsonStrings) {
if (jsonString == null) {
return null;
}
final JsonNode node = UdfJsonMapper.parseJson(jsonString);
if (node.isMissingNode()) {
return null;
}
if (allObjects && !node.isObject()) {
allObjects = false;
}
nodes.add(node);
}
JsonNode result = nodes.get(0);
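// If every input is a JSON object, merge them field by field; otherwise concatenate them as arrays (toArrayNode wraps non-array values).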
if (allObjects) {
for (int i = 1; i < nodes.size(); i++) {
result = concatObjects((ObjectNode) result, (ObjectNode) nodes.get(i));
}
} else {
for (int i = 1; i < nodes.size(); i++) {
result = concatArrays(toArrayNode(result), toArrayNode(nodes.get(i)));
}
}
return UdfJsonMapper.writeValueAsJson(result);
} | @Test
public void shouldWrapPrimitivesInArrays() {
// When:
final String result = udf.concat("null", "null");
// Then:
assertEquals("[null,null]", result);
} |
@VisibleForTesting
ImmutableList<EventWithContext> eventsFromAggregationResult(EventFactory eventFactory, AggregationEventProcessorParameters parameters, AggregationResult result)
throws EventProcessorException {
final ImmutableList.Builder<EventWithContext> eventsWithContext = ImmutableList.builder();
final Set<String> sourceStreams = eventStreamService.buildEventSourceStreams(getStreams(parameters),
result.sourceStreams());
for (final AggregationKeyResult keyResult : result.keyResults()) {
if (!satisfiesConditions(keyResult)) {
LOG.debug("Skipping result <{}> because the conditions <{}> don't match", keyResult, config.conditions());
continue;
}
final String keyString = String.join("|", keyResult.key());
final String eventMessage = createEventMessageString(keyString, keyResult);
// Extract event time and range from the key result or use query time range as fallback.
// These can be different, e.g. during catch up processing.
final DateTime eventTime = keyResult.timestamp().orElse(result.effectiveTimerange().to());
final Event event = eventFactory.createEvent(eventDefinition, eventTime, eventMessage);
// The keyResult timestamp is set to the end of the range
event.setTimerangeStart(keyResult.timestamp().map(t -> t.minus(config.searchWithinMs())).orElse(parameters.timerange().getFrom()));
event.setTimerangeEnd(keyResult.timestamp().orElse(parameters.timerange().getTo()));
event.setReplayInfo(EventReplayInfo.builder()
.timerangeStart(event.getTimerangeStart())
.timerangeEnd(event.getTimerangeEnd())
.query(config.query())
.streams(sourceStreams)
.filters(config.filters())
.build());
sourceStreams.forEach(event::addSourceStream);
final Map<String, Object> fields = new HashMap<>();
// Each group value will be a separate field in the message to make it usable as event fields.
//
// Example result:
// groupBy=["application_name", "username"]
// result-key=["sshd", "jane"]
//
// Message fields:
// application_name=sshd
// username=jane
for (int i = 0; i < config.groupBy().size(); i++) {
try {
fields.put(config.groupBy().get(i), keyResult.key().get(i));
} catch (IndexOutOfBoundsException e) {
throw new EventProcessorException(
"Couldn't create events for: " + eventDefinition.title() + " (possibly due to non-existing grouping fields)",
false, eventDefinition.id(), eventDefinition, e);
}
}
// Group By fields need to be saved on the event so they are available to the subsequent notification events
event.setGroupByFields(fields.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString())));
// The field name for the series value is composed of the series function and field. We don't take the
// series ID into account because it would be very hard to use for the user. That means a series with
// the same function and field but different ID would overwrite a previous one.
// This shouldn't be a problem though, because the same function and field will always compute the same
// value.
//
// Examples:
// aggregation_value_count_source=42
// aggregation_value_card_anonid=23
for (AggregationSeriesValue seriesValue : keyResult.seriesValues()) {
final String function = seriesValue.series().type().toLowerCase(Locale.ROOT);
final Optional<String> field = fieldFromSeries(seriesValue.series());
final String fieldName = field.map(f -> String.format(Locale.ROOT, "aggregation_value_%s_%s", function, f))
.orElseGet(() -> String.format(Locale.ROOT, "aggregation_value_%s", function));
fields.put(fieldName, seriesValue.value());
}
// This is the concatenated key value
fields.put("aggregation_key", keyString);
// TODO: Can we find a useful source value?
final Message message = messageFactory.createMessage(eventMessage, "", result.effectiveTimerange().to());
message.addFields(fields);
// Ask any event query modifier for its state and collect it into the event modifier state
final Map<String, Object> eventModifierState = eventQueryModifiers.stream()
.flatMap(modifier -> modifier.eventModifierData(result.additionalResults()).entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
LOG.debug("Creating event {}/{} - {} {} ({})", eventDefinition.title(), eventDefinition.id(), keyResult.key(), seriesString(keyResult), fields);
eventsWithContext.add(EventWithContext.builder()
.event(event)
.messageContext(message)
.eventModifierState(eventModifierState)
.build());
}
return eventsWithContext.build();
} | @Test
public void testEventsFromAggregationResultWithEventModifierState() throws EventProcessorException {
final DateTime now = DateTime.now(DateTimeZone.UTC);
final AbsoluteRange timerange = AbsoluteRange.create(now.minusHours(1), now.minusHours(1).plusMillis(SEARCH_WINDOW_MS));
// We expect to get the end of the aggregation timerange as event time
final TestEvent event1 = new TestEvent(timerange.to());
final TestEvent event2 = new TestEvent(timerange.to());
when(eventFactory.createEvent(any(EventDefinition.class), any(DateTime.class), anyString()))
.thenReturn(event1) // first invocation return value
.thenReturn(event2); // second invocation return value
final EventDefinitionDto eventDefinitionDto = buildEventDefinitionDto(ImmutableSet.of("stream-2"), ImmutableList.of(), null, emptyList());
final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder()
.timerange(timerange)
.build();
final EventQuerySearchTypeSupplier queryModifier = new EventQuerySearchTypeSupplier() {
@Nonnull
@Override
public Set<SearchType> additionalSearchTypes(EventDefinition eventDefinition) {
fail("Should not be called in this test, we only look at the result in isolation");
return Set.of();
}
@Override
public @Nonnull Map<String, Object> eventModifierData(Map<String, SearchType.Result> results) {
assertThat(results).hasSize(1);
assertThat(results.containsKey("query-modifier")).isTrue();
assertThat(results.get("query-modifier").id()).isEqualTo("test");
return Map.of("query-modifier", results.get("query-modifier").id());
}
};
final AggregationEventProcessor eventProcessor = new AggregationEventProcessor(
eventDefinitionDto, searchFactory, eventProcessorDependencyCheck, stateService, moreSearch,
eventStreamService, messages, notificationService, permittedStreams,
Set.of(queryModifier), messageFactory);
final AggregationResult result = AggregationResult.builder()
.effectiveTimerange(timerange)
.totalAggregatedMessages(1)
.sourceStreams(ImmutableSet.of("stream-1", "stream-2"))
.keyResults(ImmutableList.of(
AggregationKeyResult.builder()
.key(ImmutableList.of("one", "two"))
.timestamp(timerange.to())
.seriesValues(ImmutableList.of(
AggregationSeriesValue.builder()
.key(ImmutableList.of("a"))
.value(42.0d)
.series(Count.builder()
.id("abc123")
.field("source")
.build())
.build(),
AggregationSeriesValue.builder()
.key(ImmutableList.of("a"))
.value(23.0d)
.series(Count.builder()
.id("abc123-no-field")
.build())
.build(),
AggregationSeriesValue.builder()
.key(ImmutableList.of("a"))
.value(1.0d)
.series(Cardinality.builder()
.id("xyz789")
.field("source")
.build())
.build()
))
.build()
))
.additionalResults(ImmutableMap.of(
"query-modifier", PivotResult.builder()
.id("test")
.effectiveTimerange(timerange)
.total(1)
.build()
))
.build();
final ImmutableList<EventWithContext> eventsWithContext = eventProcessor.eventsFromAggregationResult(eventFactory, parameters, result);
assertThat(eventsWithContext).hasSize(1);
assertThat(eventsWithContext.get(0).eventModifierState()).hasSize(1);
} |
@Nonnull
public static List<JetSqlRow> evaluate(
@Nullable Expression<Boolean> predicate,
@Nullable List<Expression<?>> projection,
@Nonnull Stream<JetSqlRow> rows,
@Nonnull ExpressionEvalContext context
) {
return rows
.map(row -> evaluate(predicate, projection, row, context))
.filter(Objects::nonNull)
.collect(Collectors.toList());
} | @Test
public void test_evaluate() {
List<Object[]> rows = asList(new Object[]{0, "a"}, new Object[]{1, "b"});
List<JetSqlRow> evaluated = ExpressionUtil.evaluate(null, null, rows.stream().map(v -> new JetSqlRow(TEST_SS, v)), createExpressionEvalContext());
assertThat(toList(evaluated, JetSqlRow::getValues)).containsExactlyElementsOf(rows);
} |
public Optional<String> getNodeName(String nodeId) {
return nodeNameCache.getUnchecked(nodeId);
} | @Test
public void getNodeNameReturnsEmptyOptionalIfNodeIdIsInvalid() {
when(cluster.nodeIdToName("node_id")).thenReturn(Optional.empty());
assertThat(nodeInfoCache.getNodeName("node_id")).isEmpty();
} |
@SuppressWarnings("ShouldNotSubclass")
public final ThrowableSubject hasCauseThat() {
// provides a more helpful error message if hasCauseThat() methods are chained too deep
// e.g. assertThat(new Exception()).hCT().hCT()....
// TODO(diamondm) in keeping with other subjects' behavior this should still NPE if the subject
// *itself* is null, since there's no context to lose. See also b/37645583
if (actual == null) {
check("getCause()")
.withMessage("Causal chain is not deep enough - add a .isNotNull() check?")
.fail();
return ignoreCheck()
.that(
new Throwable() {
@Override
@SuppressWarnings("UnsynchronizedOverridesSynchronized")
public Throwable fillInStackTrace() {
setStackTrace(new StackTraceElement[0]); // for old versions of Android
return this;
}
});
}
return check("getCause()").that(actual.getCause());
} | @Test
public void hasCauseThat_null() {
assertThat(new Exception("foobar")).hasCauseThat().isNull();
} |
@Override
public V put(final K key, final V value) {
final Entry<K, V>[] table = this.table;
final int hash = key.hashCode();
final int index = HashUtil.indexFor(hash, table.length, mask);
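// look for an existing entry in the hash bucket; on a hit, move it to the top and replace its value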
for (Entry<K, V> e = table[index]; e != null; e = e.hashNext) {
final K entryKey;
if ((entryKey = e.key) == key || entryKey.equals(key)) {
moveToTop(e);
return e.setValue(value);
}
}
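// no existing entry: create one, link it into the bucket and place it at the top of the access-ordered list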
final Entry<K, V> e = new Entry<>(key, value);
e.hashNext = table[index];
table[index] = e;
final Entry<K, V> top = this.top;
e.next = top;
if (top != null) {
top.previous = e;
} else {
back = e;
}
this.top = e;
_size += 1;
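// evict the eldest entry if requested, otherwise grow the hash table once the capacity is exceeded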
if (removeEldestEntry(back)) {
remove(eldestKey());
} else if (_size > capacity) {
rehash(HashUtil.nextCapacity(capacity));
}
return null;
} | @Test
public void testPutGet() {
final LinkedHashMap<Integer, String> tested = new LinkedHashMap<>();
for (int i = 0; i < 1000; ++i) {
tested.put(i, Integer.toString(i));
}
Assert.assertEquals(1000, tested.size());
for (int i = 0; i < 1000; ++i) {
Assert.assertEquals(Integer.toString(i), tested.get(i));
}
for (int i = 0; i < 1000; ++i) {
Assert.assertEquals(Integer.toString(i), tested.put(i, Integer.toString(i + 1)));
}
Assert.assertEquals(1000, tested.size());
for (int i = 0; i < 1000; ++i) {
Assert.assertEquals(Integer.toString(i + 1), tested.get(i));
}
} |
private PDStructureTreeRoot getStructureTreeRoot()
{
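// climb the chain of parents until reaching a node that is no longer a structure element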
PDStructureNode parent = this.getParent();
while (parent instanceof PDStructureElement)
{
parent = ((PDStructureElement) parent).getParent();
}
if (parent instanceof PDStructureTreeRoot)
{
return (PDStructureTreeRoot) parent;
}
return null;
} | @Test
void testClassMap() throws IOException
{
Set<Revisions<PDAttributeObject>> attributeSet = new HashSet<>();
Set<String> classSet = new HashSet<>();
try (PDDocument doc = Loader.loadPDF(
RandomAccessReadBuffer.createBufferFromStream(PDStructureElementTest.class
.getResourceAsStream("PDFBOX-2725-878725.pdf"))))
{
PDStructureTreeRoot structureTreeRoot = doc.getDocumentCatalog().getStructureTreeRoot();
checkElement(structureTreeRoot.getK(), attributeSet, structureTreeRoot.getClassMap(), classSet);
}
// collect attributes and check their count.
assertEquals(72, attributeSet.size());
int cnt = attributeSet.stream().map(attributes -> attributes.size()).reduce(0, Integer::sum);
assertEquals(45, cnt);
assertEquals(10, classSet.size());
} |
public static ArrayNode generateRowArrayNode(TableModel tm) {
ArrayNode array = MAPPER.createArrayNode();
for (TableModel.Row r : tm.getRows()) {
array.add(toJsonNode(r, tm));
}
return array;
} | @Test
public void basic() {
TableModel tm = new TableModel(FOO, BAR);
tm.addRow().cell(FOO, 1).cell(BAR, 2);
tm.addRow().cell(FOO, 3).cell(BAR, 4);
ArrayNode array = TableUtils.generateRowArrayNode(tm);
Assert.assertEquals("wrong results", ARRAY_AS_STRING, array.toString());
} |
void precheckMaxResultLimitOnLocalPartitions(String mapName) {
// check if feature is enabled
if (!isPreCheckEnabled) {
return;
}
// limit number of local partitions to check to keep runtime constant
PartitionIdSet localPartitions = mapServiceContext.getCachedOwnedPartitions();
int partitionsToCheck = min(localPartitions.size(), maxLocalPartitionsLimitForPreCheck);
if (partitionsToCheck == 0) {
return;
}
// calculate size of local partitions
int localPartitionSize = getLocalPartitionSize(mapName, localPartitions, partitionsToCheck);
if (localPartitionSize == 0) {
return;
}
// check local result size
long localResultLimit = getNodeResultLimit(partitionsToCheck);
if (localPartitionSize > localResultLimit * MAX_RESULT_LIMIT_FACTOR_FOR_PRECHECK) {
var localMapStatsProvider = mapServiceContext.getLocalMapStatsProvider();
if (localMapStatsProvider != null && localMapStatsProvider.hasLocalMapStatsImpl(mapName)) {
localMapStatsProvider.getLocalMapStatsImpl(mapName).incrementQueryResultSizeExceededCount();
}
throw new QueryResultSizeExceededException(maxResultLimit, " Result size exceeded in local pre-check.");
}
} | @Test
public void testLocalPreCheckEnabledWitPartitionBelowLimit() {
int[] partitionsSizes = {848};
populatePartitions(partitionsSizes);
initMocksWithConfiguration(200000, 1);
limiter.precheckMaxResultLimitOnLocalPartitions(ANY_MAP_NAME);
} |
@Override
public void defineDataTableType(DataTableType tableType) {
dataTableTypeRegistry.defineDataTableType(tableType);
} | @Test
void should_define_data_table_parameter_type() {
DataTableType expected = new DataTableType(Date.class, (DataTable dataTable) -> null);
registry.defineDataTableType(expected);
} |
void refreshRouteTable(String group) {
if (isShutdown) {
return;
}
final String groupName = group;
Status status = null;
try {
RouteTable instance = RouteTable.getInstance();
Configuration oldConf = instance.getConfiguration(groupName);
String oldLeader = Optional.ofNullable(instance.selectLeader(groupName)).orElse(PeerId.emptyPeer())
.getEndpoint().toString();
// fix issue #3661 https://github.com/alibaba/nacos/issues/3661
status = instance.refreshLeader(this.cliClientService, groupName, rpcRequestTimeoutMs);
if (!status.isOk()) {
Loggers.RAFT.error("Fail to refresh leader for group : {}, status is : {}", groupName, status);
}
status = instance.refreshConfiguration(this.cliClientService, groupName, rpcRequestTimeoutMs);
if (!status.isOk()) {
Loggers.RAFT
.error("Fail to refresh route configuration for group : {}, status is : {}", groupName, status);
}
} catch (Exception e) {
Loggers.RAFT.error("Fail to refresh raft metadata info for group : {}, error is : {}", groupName, e);
}
} | @Test
void testRefreshRouteTable() {
server.refreshRouteTable(groupId);
verify(cliClientServiceMock, times(1)).connect(peerId1.getEndpoint());
verify(cliClientServiceMock).getLeader(eq(peerId1.getEndpoint()), any(CliRequests.GetLeaderRequest.class), eq(null));
} |
public RowMetaInterface getPrevStepFields( String stepname ) throws KettleStepException {
return getPrevStepFields( findStep( stepname ) );
} | @Test
public void testGetPrevStepFields() throws KettleStepException {
DataGridMeta dgm = new DataGridMeta();
dgm.allocate( 2 );
dgm.setFieldName( new String[] { "id" } );
dgm.setFieldType( new String[] { ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_INTEGER ) } );
List<List<String>> dgm1Data = new ArrayList<>();
dgm1Data.add( singletonList( "1" ) );
dgm1Data.add( singletonList( "2" ) );
dgm.setDataLines( dgm1Data );
DataGridMeta dgm2 = new DataGridMeta();
dgm2.allocate( 2 );
dgm2.setFieldName( new String[] { "foo" } );
dgm2.setFieldType( new String[] { ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_STRING ) } );
List<List<String>> dgm1Data2 = new ArrayList<>();
dgm1Data2.add( singletonList( "3" ) );
dgm1Data2.add( singletonList( "4" ) );
dgm2.setDataLines( dgm1Data2 );
StepMeta dg = new StepMeta( "input1", dgm );
StepMeta dg2 = new StepMeta( "input2", dgm2 );
TextFileOutputMeta textFileOutputMeta = new TextFileOutputMeta();
StepMeta textFileOutputStep = new StepMeta( "BACKLOG-21039", textFileOutputMeta );
TransHopMeta hop = new TransHopMeta( dg, textFileOutputStep, true );
TransHopMeta hop2 = new TransHopMeta( dg2, textFileOutputStep, true );
transMeta.addStep( dg );
transMeta.addStep( dg2 );
transMeta.addStep( textFileOutputStep );
transMeta.addTransHop( hop );
transMeta.addTransHop( hop2 );
RowMetaInterface allRows = transMeta.getPrevStepFields( textFileOutputStep, null, null );
assertNotNull( allRows );
assertEquals( 2, allRows.size() );
assertEquals( "id", allRows.getValueMeta( 0 ).getName() );
assertEquals( "foo", allRows.getValueMeta( 1 ).getName() );
assertEquals( ValueMetaInterface.TYPE_INTEGER, allRows.getValueMeta( 0 ).getType() );
assertEquals( ValueMetaInterface.TYPE_STRING, allRows.getValueMeta( 1 ).getType() );
RowMetaInterface rows1 = transMeta.getPrevStepFields( textFileOutputStep, "input1", null );
assertNotNull( rows1 );
assertEquals( 1, rows1.size() );
assertEquals( "id", rows1.getValueMeta( 0 ).getName() );
assertEquals( ValueMetaInterface.TYPE_INTEGER, rows1.getValueMeta( 0 ).getType() );
RowMetaInterface rows2 = transMeta.getPrevStepFields( textFileOutputStep, "input2", null );
assertNotNull( rows2 );
assertEquals( 1, rows2.size() );
assertEquals( "foo", rows2.getValueMeta( 0 ).getName() );
assertEquals( ValueMetaInterface.TYPE_STRING, rows2.getValueMeta( 0 ).getType() );
dgm.setFieldName( new String[] { "id", "name" } );
dgm.setFieldType( new String[] {
ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_INTEGER ),
ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_STRING ),
} );
allRows = transMeta.getPrevStepFields( textFileOutputStep, null, null );
assertNotNull( allRows );
assertEquals( 3, allRows.size() );
assertEquals( "id", allRows.getValueMeta( 0 ).getName() );
assertEquals( "name", allRows.getValueMeta( 1 ).getName() );
assertEquals( "foo", allRows.getValueMeta( 2 ).getName() );
assertEquals( ValueMetaInterface.TYPE_INTEGER, allRows.getValueMeta( 0 ).getType() );
assertEquals( ValueMetaInterface.TYPE_STRING, allRows.getValueMeta( 1 ).getType() );
assertEquals( ValueMetaInterface.TYPE_STRING, allRows.getValueMeta( 2 ).getType() );
rows1 = transMeta.getPrevStepFields( textFileOutputStep, "input1", null );
assertNotNull( rows1 );
assertEquals( 2, rows1.size() );
assertEquals( "id", rows1.getValueMeta( 0 ).getName() );
assertEquals( "name", rows1.getValueMeta( 1 ).getName() );
assertEquals( ValueMetaInterface.TYPE_INTEGER, rows1.getValueMeta( 0 ).getType() );
assertEquals( ValueMetaInterface.TYPE_STRING, rows1.getValueMeta( 1 ).getType() );
} |
@Override
public Response toResponse(Throwable exception) {
debugLog(exception);
if (exception instanceof WebApplicationException w) {
var res = w.getResponse();
if (res.getStatus() >= 500) {
log(w);
}
return res;
}
if (exception instanceof AuthenticationException) {
return Response.status(Status.UNAUTHORIZED).build();
}
if (exception instanceof ValidationException ve) {
if (ve.seeOther() != null) {
return Response.seeOther(ve.seeOther()).build();
}
return buildContentNegotiatedErrorResponse(ve.localizedMessage(), Status.BAD_REQUEST);
}
// the remaining exceptions are unexpected, let's log them
log(exception);
if (exception instanceof FederationException fe) {
var errorMessage = new Message(FEDERATION_ERROR_MESSAGE, fe.reason().name());
return buildContentNegotiatedErrorResponse(errorMessage, Status.INTERNAL_SERVER_ERROR);
}
var status = Status.INTERNAL_SERVER_ERROR;
var errorMessage = new Message(SERVER_ERROR_MESSAGE, (String) null);
return buildContentNegotiatedErrorResponse(errorMessage, status);
} | @Test
void toResponse_propagateWebApplicationException_forbidden() {
when(uriInfo.getRequestUri()).thenReturn(REQUEST_URI);
var status = 500;
var ex = new ServerErrorException(status);
// when
var res = mapper.toResponse(ex);
// then
assertEquals(status, res.getStatus());
} |
public synchronized ApplicationDescription saveApplication(InputStream stream) {
try (InputStream ais = stream) {
byte[] cache = toByteArray(ais);
InputStream bis = new ByteArrayInputStream(cache);
boolean plainXml = isPlainXml(cache);
ApplicationDescription desc = plainXml ?
parsePlainAppDescription(bis) : parseZippedAppDescription(bis);
checkState(!appFile(desc.name(), APP_XML).exists(),
"Application %s already installed", desc.name());
if (plainXml) {
expandPlainApplication(cache, desc);
} else {
bis.reset();
boolean isSelfContainedJar = expandZippedApplication(bis, desc);
if (isSelfContainedJar) {
bis.reset();
stageSelfContainedJar(bis, desc);
}
/*
* Reset the ZIP file and reparse the app description now
* that the ZIP is expanded onto the filesystem. This way any
* file referenced as part of the description (i.e. app.png)
* can be loaded into the app description.
*/
bis.reset();
desc = parseZippedAppDescription(bis);
bis.reset();
saveApplication(bis, desc, isSelfContainedJar);
}
installArtifacts(desc);
return desc;
} catch (IOException e) {
throw new ApplicationException("Unable to save application", e);
}
} | @Test
public void saveZippedApp() throws IOException {
InputStream stream = getClass().getResourceAsStream("app.zip");
ApplicationDescription app = aar.saveApplication(stream);
validate(app);
stream.close();
} |
public static int indexOfOutOfQuotes(String str, String searched) {
return indexOfOutOfQuotes(str, searched, 0);
} | @Test
public void test_indexOfOutOfQuotes() {
assertThat(indexOfOutOfQuotes("bla\"bla\"bla", "bla")).isEqualTo(0);
assertThat(indexOfOutOfQuotes("\"bla\"bla", "bla")).isEqualTo(5);
assertThat(indexOfOutOfQuotes("\"bla\"", "bla")).isEqualTo(-1);
assertThat(indexOfOutOfQuotes("bla\"bla\"bla", "bla", 0)).isEqualTo(0);
assertThat(indexOfOutOfQuotes("bla\"bla\"bla", "bla", 1)).isEqualTo(8);
assertThat(indexOfOutOfQuotes("bla\"bla\"bla", "bla", 9)).isEqualTo(-1);
} |
public static <T extends PluginInfo> void unloadIncompatiblePlugins(Map<String, T> pluginsByKey) {
// Loop as long as the previous pass removed some plugins. That allows supporting dependencies
// on many levels, for example D extends C, which extends B, which requires A. If A is not installed,
// then B, C and D must be ignored. That cannot be achieved with a single iteration over the plugins.
var validator = new PluginRequirementsValidator<>(pluginsByKey);
Set<String> removedKeys = new HashSet<>();
do {
removedKeys.clear();
for (T plugin : pluginsByKey.values()) {
if (!validator.isCompatible(plugin)) {
removedKeys.add(plugin.getKey());
}
}
for (String removedKey : removedKeys) {
pluginsByKey.remove(removedKey);
}
} while (!removedKeys.isEmpty());
} | @Test
public void unloadIncompatiblePlugins_removes_incompatible_plugins() {
PluginInfo pluginE = new PluginInfo("pluginE");
PluginInfo pluginD = new PluginInfo("pluginD")
.setBasePlugin("pluginC");
PluginInfo pluginC = new PluginInfo("pluginC")
.setBasePlugin("pluginB");
PluginInfo pluginB = new PluginInfo("pluginB")
.addRequiredPlugin(RequiredPlugin.parse("pluginA:1.0"));
Map<String, PluginInfo> plugins = new HashMap<>();
plugins.put(pluginB.getKey(), pluginB);
plugins.put(pluginC.getKey(), pluginC);
plugins.put(pluginD.getKey(), pluginD);
plugins.put(pluginE.getKey(), pluginE);
PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);
assertThat(plugins).contains(Map.entry(pluginE.getKey(), pluginE));
} |
@Override
public String builder(final String paramName, final ServerWebExchange exchange) {
return HostAddressUtils.acquireHost(exchange);
} | @Test
public void testBuilderWithNullParamName() {
assertEquals(testhost, hostParameterData.builder(null, exchange));
} |
public static <T> Inner<T> create() {
return new Inner<T>();
} | @Test
@Category(NeedsRunner.class)
public void testFilterMultipleFields() {
// Pass only elements where field1 + field2 >= 100.
PCollection<AutoValue_FilterTest_Simple> filtered =
pipeline
.apply(
Create.of(
new AutoValue_FilterTest_Simple("", 52, 48),
new AutoValue_FilterTest_Simple("", 52, 2),
new AutoValue_FilterTest_Simple("", 70, 33)))
.apply(
Filter.<AutoValue_FilterTest_Simple>create()
.whereFieldNames(
Lists.newArrayList("field2", "field3"),
r -> r.getInt32("field2") + r.getInt32("field3") >= 100));
PAssert.that(filtered)
.containsInAnyOrder(
new AutoValue_FilterTest_Simple("", 52, 48),
new AutoValue_FilterTest_Simple("", 70, 33));
pipeline.run();
} |
private void fail(final ChannelHandlerContext ctx, int length) {
fail(ctx, String.valueOf(length));
} | @Test
public void testTooLongLine2() throws Exception {
EmbeddedChannel ch = new EmbeddedChannel(new LenientLineBasedFrameDecoder(16, false, false, false));
assertFalse(ch.writeInbound(copiedBuffer("12345678901234567", CharsetUtil.US_ASCII)));
try {
ch.writeInbound(copiedBuffer("890\r\nfirst\r\n", CharsetUtil.US_ASCII));
fail();
} catch (Exception e) {
assertThat(e, is(instanceOf(TooLongFrameException.class)));
}
ByteBuf buf = ch.readInbound();
ByteBuf buf2 = copiedBuffer("first\r\n", CharsetUtil.US_ASCII);
assertThat(buf, is(buf2));
assertThat(ch.finish(), is(false));
buf.release();
buf2.release();
} |
public static String addSuffixIfNot(CharSequence str, CharSequence suffix) {
return appendIfMissing(str, suffix, suffix);
} | @Test
public void addSuffixIfNotTest() {
String str = "hutool";
String result = CharSequenceUtil.addSuffixIfNot(str, "tool");
assertEquals(str, result);
result = CharSequenceUtil.addSuffixIfNot(str, " is Good");
assertEquals(str + " is Good", result);
// https://gitee.com/dromara/hutool/issues/I4NS0F
result = CharSequenceUtil.addSuffixIfNot("", "/");
assertEquals("/", result);
} |
@Override
protected int compareFirst(final Path p1, final Path p2) {
final long d1 = p1.attributes().getModificationDate();
final long d2 = p2.attributes().getModificationDate();
if(d1 == d2) {
return 0;
}
if(ascending) {
return d1 > d2 ? 1 : -1;
}
return d1 > d2 ? -1 : 1;
} | @Test
public void testCompareFirst() {
assertEquals(0, new TimestampComparator(true).compareFirst(new Path("/a", EnumSet.of(Path.Type.file)), new Path("/b", EnumSet.of(Path.Type.file))));
final Path p1 = new Path("/a", EnumSet.of(Path.Type.file));
p1.attributes().setModificationDate(System.currentTimeMillis());
final Path p2 = new Path("/b", EnumSet.of(Path.Type.file));
p2.attributes().setModificationDate(System.currentTimeMillis() - 1000);
assertEquals(1, new TimestampComparator(true).compareFirst(p1, p2));
} |
public static String buildLikeValue(String value, WildcardPosition wildcardPosition) {
String escapedValue = escapePercentAndUnderscore(value);
String wildcard = "%";
switch (wildcardPosition) {
case BEFORE:
escapedValue = wildcard + escapedValue;
break;
case AFTER:
escapedValue += wildcard;
break;
case BEFORE_AND_AFTER:
escapedValue = wildcard + escapedValue + wildcard;
break;
default:
throw new UnsupportedOperationException("Unhandled WildcardPosition: " + wildcardPosition);
}
return escapedValue;
} | @Test
void buildLikeValue_with_special_characters() {
String escapedValue = "like-\\/_/%//-value";
String wildcard = "%";
assertThat(buildLikeValue("like-\\_%/-value", BEFORE)).isEqualTo(wildcard + escapedValue);
assertThat(buildLikeValue("like-\\_%/-value", AFTER)).isEqualTo(escapedValue + wildcard);
assertThat(buildLikeValue("like-\\_%/-value", BEFORE_AND_AFTER)).isEqualTo(wildcard + escapedValue + wildcard);
} |
@Override
public Long createPost(PostSaveReqVO createReqVO) {
// Validate correctness
validatePostForCreateOrUpdate(null, createReqVO.getName(), createReqVO.getCode());
// Insert the post
PostDO post = BeanUtils.toBean(createReqVO, PostDO.class);
postMapper.insert(post);
return post.getId();
} | @Test
public void testCreatePost_success() {
// Prepare parameters
PostSaveReqVO reqVO = randomPojo(PostSaveReqVO.class,
o -> o.setStatus(randomEle(CommonStatusEnum.values()).getStatus()))
.setId(null); // prevent the id from being set
// Invoke
Long postId = postService.createPost(reqVO);
// Assert
assertNotNull(postId);
// Verify the persisted record's attributes are correct
PostDO post = postMapper.selectById(postId);
assertPojoEquals(reqVO, post, "id");
} |
public static HttpResponseStatus parseLine(CharSequence line) {
return (line instanceof AsciiString) ? parseLine((AsciiString) line) : parseLine(line.toString());
} | @Test
public void parseLineStringCodeAndPhrase() {
assertSame(HttpResponseStatus.OK, parseLine("200 OK"));
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException {
var metaDataCopy = msg.getMetaData().copy();
String msgData = msg.getData();
boolean msgChanged = false;
JsonNode dataNode = JacksonUtil.toJsonNode(msgData);
if (dataNode.isObject()) {
switch (copyFrom) {
case METADATA:
ObjectNode msgDataNode = (ObjectNode) dataNode;
Map<String, String> metaDataMap = metaDataCopy.getData();
for (Map.Entry<String, String> entry : metaDataMap.entrySet()) {
String mdKey = entry.getKey();
String mdValue = entry.getValue();
if (matches(mdKey)) {
msgChanged = true;
msgDataNode.put(mdKey, mdValue);
}
}
msgData = JacksonUtil.toString(msgDataNode);
break;
case DATA:
Iterator<Map.Entry<String, JsonNode>> iteratorNode = dataNode.fields();
while (iteratorNode.hasNext()) {
Map.Entry<String, JsonNode> entry = iteratorNode.next();
String msgKey = entry.getKey();
JsonNode msgValue = entry.getValue();
if (matches(msgKey)) {
msgChanged = true;
String value = msgValue.isTextual() ?
msgValue.asText() : JacksonUtil.toString(msgValue);
metaDataCopy.putValue(msgKey, value);
}
}
break;
default:
log.debug("Unexpected CopyFrom value: {}. Allowed values: {}", copyFrom, TbMsgSource.values());
}
}
ctx.tellSuccess(msgChanged ? TbMsg.transformMsg(msg, metaDataCopy, msgData) : msg);
} | @Test
void givenMsgDataNotJSONObject_whenOnMsg_thenVerifyOutput() throws Exception {
TbMsg msg = getTbMsg(deviceId, TbMsg.EMPTY_JSON_ARRAY);
node.onMsg(ctx, msg);
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx).tellSuccess(newMsgCaptor.capture());
verify(ctx, never()).tellFailure(any(), any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
} |
public String encode(String name, String value) {
return encode(new DefaultCookie(name, value));
} | @Test
public void testEncodingSingleCookieV0() throws ParseException {
int maxAge = 50;
String result = "myCookie=myValue; Max-Age=50; Expires=(.+?); Path=/apathsomewhere;" +
" Domain=.adomainsomewhere; Secure; SameSite=Lax; Partitioned";
DefaultCookie cookie = new DefaultCookie("myCookie", "myValue");
cookie.setDomain(".adomainsomewhere");
cookie.setMaxAge(maxAge);
cookie.setPath("/apathsomewhere");
cookie.setSecure(true);
cookie.setSameSite(SameSite.Lax);
cookie.setPartitioned(true);
String encodedCookie = ServerCookieEncoder.STRICT.encode(cookie);
Matcher matcher = Pattern.compile(result).matcher(encodedCookie);
assertTrue(matcher.find());
Date expiresDate = DateFormatter.parseHttpDate(matcher.group(1));
long diff = (expiresDate.getTime() - System.currentTimeMillis()) / 1000;
// 2 secs should be fine
assertTrue(Math.abs(diff - maxAge) <= 2);
} |
public synchronized NumaResourceAllocation allocateNumaNodes(
Container container) throws ResourceHandlerException {
NumaResourceAllocation allocation = allocate(container.getContainerId(),
container.getResource());
if (allocation != null) {
try {
// Update state store.
context.getNMStateStore().storeAssignedResources(container,
NUMA_RESOURCE_TYPE, Arrays.asList(allocation));
} catch (IOException e) {
releaseNumaResource(container.getContainerId());
throw new ResourceHandlerException(e);
}
}
return allocation;
} | @Test
public void testAllocateNumaNodeWhenNoNumaCpuResourcesAvailable()
throws Exception {
NumaResourceAllocation nodeInfo = numaResourceAllocator
.allocateNumaNodes(getContainer(
ContainerId.fromString("container_1481156246874_0001_01_000001"),
Resource.newInstance(2048, 600)));
Assert.assertNull("Should not assign numa nodes when there"
+ " are no sufficient cpu resources available.", nodeInfo);
} |
@Override
public boolean isDetected() {
return "true".equalsIgnoreCase(system.envVariable("GITLAB_CI"));
} | @Test
public void isDetected() {
setEnvVariable("GITLAB_CI", "true");
assertThat(underTest.isDetected()).isTrue();
setEnvVariable("GITLAB_CI", null);
assertThat(underTest.isDetected()).isFalse();
} |
public static PartitionGroupReleaseStrategy.Factory loadPartitionGroupReleaseStrategyFactory(
final Configuration configuration) {
final boolean partitionReleaseDuringJobExecution =
configuration.get(JobManagerOptions.PARTITION_RELEASE_DURING_JOB_EXECUTION);
if (partitionReleaseDuringJobExecution) {
return new RegionPartitionGroupReleaseStrategy.Factory();
} else {
return new NotReleasingPartitionGroupReleaseStrategy.Factory();
}
} | @Test
public void featureEnabledByDefault() {
final Configuration emptyConfiguration = new Configuration();
final PartitionGroupReleaseStrategy.Factory factory =
PartitionGroupReleaseStrategyFactoryLoader.loadPartitionGroupReleaseStrategyFactory(
emptyConfiguration);
assertThat(factory).isInstanceOf(RegionPartitionGroupReleaseStrategy.Factory.class);
} |
@VisibleForTesting
FSEditLog getEditLog() {
return editLog;
} | @Test
public void testTailer() throws IOException, InterruptedException,
ServiceFailedException {
Configuration conf = getConf();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
conf.setLong(EditLogTailer.DFS_HA_TAILEDITS_MAX_TXNS_PER_LOCK_KEY, 3);
HAUtil.setAllowStandbyReads(conf, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0)
.build();
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
try {
for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
NameNodeAdapter.mkdirs(nn1, getDirPath(i),
new PermissionStatus("test","test", new FsPermission((short)00755)),
true);
}
HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
assertEquals("Inconsistent number of applied txns on Standby",
nn1.getNamesystem().getEditLog().getLastWrittenTxId(),
nn2.getNamesystem().getFSImage().getLastAppliedTxId() + 1);
for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,
getDirPath(i), false, false, false).isDirectory());
}
for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
NameNodeAdapter.mkdirs(nn1, getDirPath(i),
new PermissionStatus("test","test", new FsPermission((short)00755)),
true);
}
HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
assertEquals("Inconsistent number of applied txns on Standby",
nn1.getNamesystem().getEditLog().getLastWrittenTxId(),
nn2.getNamesystem().getFSImage().getLastAppliedTxId() + 1);
for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,
getDirPath(i), false, false, false).isDirectory());
}
} finally {
cluster.shutdown();
}
} |
public void doDeleteRevisions( PurgeUtilitySpecification purgeSpecification ) throws PurgeDeletionException {
if ( purgeSpecification != null ) {
getLogger().setCurrentFilePath( purgeSpecification.getPath() );
logConfiguration( purgeSpecification );
if ( purgeSpecification.getPath() != null && !purgeSpecification.getPath().isEmpty() ) {
processRevisionDeletion( purgeSpecification );
}
// Now do shared objects if required
if ( purgeSpecification.isSharedObjects() ) {
if ( purgeSpecification.isPurgeFiles() ) {
for ( String sharedObjectpath : sharedObjectFolders ) {
purgeSpecification.fileFilter = "*";
purgeSpecification.setPath( sharedObjectpath );
processRevisionDeletion( purgeSpecification );
}
} else {
throw new PurgeDeletionException( "Must purge files before shared objects" );
}
}
}
} | @Test
public void doPurgeUtilVersionCountTest() throws PurgeDeletionException {
IUnifiedRepository mockRepo = mock( IUnifiedRepository.class );
final HashMap<String, List<VersionSummary>> versionListMap = processVersionMap( mockRepo );
UnifiedRepositoryPurgeService purgeService = getPurgeService( mockRepo );
PurgeUtilitySpecification spec = new PurgeUtilitySpecification();
spec.setVersionCount( 3 );
spec.setPath( "/" );
purgeService.doDeleteRevisions( spec );
verifyVersionCountDeletion( versionListMap, mockRepo, "1", spec.getVersionCount() );
verifyVersionCountDeletion( versionListMap, mockRepo, "2", spec.getVersionCount() );
} |
public static String configsTopic(final KsqlConfig ksqlConfig) {
return toKsqlInternalTopic(ksqlConfig, KSQL_CONFIGS_TOPIC_SUFFIX);
} | @Test
public void shouldReturnConfigsTopic() {
// Given/When
final String commandTopic = ReservedInternalTopics.configsTopic(ksqlConfig);
// Then
assertThat(commandTopic, is("_confluent-ksql-default__configs"));
} |
@Override
public RequestTemplate parseRequestTemplate(final Method method, final ShenyuClientFactoryBean shenyuClientFactoryBean) {
final RequestTemplate requestTemplate = new RequestTemplate();
requestTemplate.setMethod(method);
requestTemplate.setReturnType(method.getReturnType());
for (final Annotation methodAnnotation : method.getAnnotations()) {
this.processAnnotationOnMethod(requestTemplate, methodAnnotation, method, shenyuClientFactoryBean);
}
return requestTemplate;
} | @Test
public void parseRequestTplTest() {
SpringMvcContract contract = new SpringMvcContract();
RequestTemplate template = contract.parseRequestTemplate(FIND_BY_ID, bean);
assertSame(template.getMethod(), FIND_BY_ID);
assertEquals(template.getPath(), "/findById");
assertEquals(template.getHttpMethod(), ShenyuRequest.HttpMethod.GET);
template = contract.parseRequestTemplate(INSERT, bean);
assertSame(template.getMethod(), INSERT);
assertEquals(template.getPath(), "/insert");
assertEquals(template.getHttpMethod(), ShenyuRequest.HttpMethod.POST);
template = contract.parseRequestTemplate(UPDATE, bean);
assertSame(template.getMethod(), UPDATE);
assertEquals(template.getPath(), "/update");
assertEquals(template.getHttpMethod(), ShenyuRequest.HttpMethod.PUT);
template = contract.parseRequestTemplate(DEL, bean);
assertSame(template.getMethod(), DEL);
assertEquals(template.getPath(), "/delete");
assertEquals(template.getHttpMethod(), ShenyuRequest.HttpMethod.DELETE);
} |
@Override
public Optional<String> canUpgradeTo(final DataSource other) {
final List<String> issues = PROPERTIES.stream()
.filter(prop -> !prop.isCompatible(this, other))
.map(prop -> getCompatMessage(other, prop))
.collect(Collectors.toList());
checkSchemas(getSchema(), other.getSchema())
.map(s -> getCompatMessage(other, SCHEMA_PROP) + ". (" + s + ")")
.ifPresent(issues::add);
final String err = String.join("\n\tAND ", issues);
return err.isEmpty() ? Optional.empty() : Optional.of(err);
} | @Test
public void shouldEnforceSameTimestampColumn() {
// Given:
final KsqlStream<String> streamA = new KsqlStream<>(
"sql",
SourceName.of("A"),
SOME_SCHEMA,
Optional.empty(),
true,
topic,
false
);
final KsqlStream<String> streamB = new KsqlStream<>(
"sql",
SourceName.of("A"),
SOME_SCHEMA,
Optional.of(new TimestampColumn(ColumnName.of("foo"), Optional.empty())),
true,
topic,
false
);
// When:
final Optional<String> err = streamA.canUpgradeTo(streamB);
// Then:
assertThat(err.isPresent(), is(true));
assertThat(err.get(), containsString("has timestampColumn = Optional.empty which is not upgradeable to Optional[TimestampColumn{column=`foo`, format=Optional.empty}]"));
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testUnknownProducerErrorShouldBeRetriedForFutureBatchesWhenFirstFails() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0));
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1000L, 10L);
sender.runOnce(); // receive the response.
assertTrue(request1.isDone());
assertEquals(1000L, request1.get().offset());
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertEquals(OptionalLong.of(1000L), transactionManager.lastAckedOffset(tp0));
// Send second ProduceRequest
Future<RecordMetadata> request2 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
// Send the third ProduceRequest, in parallel with the second. It should be retried even though the
// lastAckedOffset > logStartOffset when its UnknownProducerResponse comes back.
Future<RecordMetadata> request3 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(3, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertFalse(request2.isDone());
assertFalse(request3.isDone());
assertEquals(2, client.inFlightRequestCount());
sendIdempotentProducerResponse(1, tp0, Errors.UNKNOWN_PRODUCER_ID, -1L, 1010L);
sender.runOnce(); // receive response 2, should reset the sequence numbers and be retried.
sender.runOnce(); // bump epoch and retry request 2
// We should have reset the sequence number state of the partition because the state was lost on the broker.
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertFalse(request2.isDone());
assertFalse(request3.isDone());
assertEquals(2, client.inFlightRequestCount());
assertEquals((short) 1, transactionManager.producerIdAndEpoch().epoch);
// receive the original response 3. note the expected sequence is still the originally assigned sequence.
sendIdempotentProducerResponse(2, tp0, Errors.UNKNOWN_PRODUCER_ID, -1, 1010L);
sender.runOnce(); // receive response 3
assertEquals(1, client.inFlightRequestCount());
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
assertEquals(2, transactionManager.sequenceNumber(tp0));
sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1011L, 1010L);
sender.runOnce(); // receive response 2, don't send request 3 since we can have at most 1 in flight when retrying
assertTrue(request2.isDone());
assertFalse(request3.isDone());
assertFalse(client.hasInFlightRequests());
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertEquals(1011L, request2.get().offset());
assertEquals(OptionalLong.of(1011L), transactionManager.lastAckedOffset(tp0));
sender.runOnce(); // resend request 3.
assertEquals(1, client.inFlightRequestCount());
sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1012L, 1010L);
sender.runOnce(); // receive response 3.
assertFalse(client.hasInFlightRequests());
assertTrue(request3.isDone());
assertEquals(1012L, request3.get().offset());
assertEquals(OptionalLong.of(1012L), transactionManager.lastAckedOffset(tp0));
} |
public boolean isDistributing() {
return (state & MASK_DISTRIBUTING) != 0;
} | @Test
public void isDistributing() {
LacpState state = new LacpState((byte) 0x20);
assertTrue(state.isDistributing());
} |
@Override
protected void consume(CharSequence token, TokenQueue output) {
// do nothing
} | @Test
public void shouldConsume() {
BlackHoleTokenChannel channel = new BlackHoleTokenChannel("ABC");
TokenQueue output = mock(TokenQueue.class);
CodeReader codeReader = new CodeReader("ABCD");
assertThat(channel.consume(codeReader, output)).isTrue();
assertThat(codeReader.getLinePosition()).isOne();
assertThat(codeReader.getColumnPosition()).isEqualTo(3);
verifyNoInteractions(output);
} |
@JsonProperty
public Collection<String> getStreamIds() {
return message.getStreamIds();
} | @Test
public void testGetStreamIds() throws Exception {
assertThat(messageSummary.getStreamIds()).containsAll(STREAM_IDS);
} |
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
Iterable<RedisClusterNode> res = clusterGetNodes();
for (RedisClusterNode redisClusterNode : res) {
if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) {
return redisClusterNode;
}
}
return null;
} | @Test
public void testClusterGetNodeForSlot() {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
assertThat(node1.getId()).isNotEqualTo(node2.getId());
} |
@VisibleForTesting
static Map<String, Object> serializableHeaders(Map<String, Object> headers) {
Map<String, Object> returned = new HashMap<>();
if (headers != null) {
for (Map.Entry<String, Object> h : headers.entrySet()) {
Object value = h.getValue();
if (value instanceof List<?>) {
// Transformation for List type headers
value =
((List<?>) value)
.stream().map(RabbitMqMessage::getTransformedValue).collect(Collectors.toList());
} else if (!(value instanceof Serializable)) {
value = getTransformedValue(value);
}
returned.put(h.getKey(), value);
}
}
return returned;
} | @Test(expected = UnsupportedOperationException.class)
public void testSerializableHeadersThrowsIfValueIsNotSerializable() {
Map<String, Object> rawHeaders = new HashMap<>();
Object notSerializableObject = Optional.of(new Object());
rawHeaders.put("key1", notSerializableObject);
RabbitMqMessage.serializableHeaders(rawHeaders);
} |
@Override
public boolean add(String e) {
return get(addAsync(e));
} | @Test
public void testFirstLast() {
RLexSortedSet set = redisson.getLexSortedSet("simple");
set.add("a");
set.add("b");
set.add("c");
set.add("d");
Assertions.assertEquals("a", set.first());
Assertions.assertEquals("d", set.last());
} |
public static Coin parseCoin(final String str) {
try {
long satoshis = btcToSatoshi(new BigDecimal(str));
return Coin.valueOf(satoshis);
} catch (ArithmeticException e) {
throw new IllegalArgumentException(e); // Repackage exception to honor method contract
}
} | @Test(expected = IllegalArgumentException.class)
public void testParseCoinOverprecise() {
parseCoin("0.000000011");
} |
void snapshotSession(final ClientSession session)
{
final String responseChannel = session.responseChannel();
final byte[] encodedPrincipal = session.encodedPrincipal();
final int length = MessageHeaderEncoder.ENCODED_LENGTH + ClientSessionEncoder.BLOCK_LENGTH +
ClientSessionEncoder.responseChannelHeaderLength() + responseChannel.length() +
ClientSessionEncoder.encodedPrincipalHeaderLength() + encodedPrincipal.length;
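// Sessions that fit within the publication's max payload are written via tryClaim;
// larger sessions are encoded into offerBuffer and offered instead, which may fragment.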
if (length <= publication.maxPayloadLength())
{
idleStrategy.reset();
while (true)
{
final long result = publication.tryClaim(length, bufferClaim);
if (result > 0)
{
final MutableDirectBuffer buffer = bufferClaim.buffer();
final int offset = bufferClaim.offset();
encodeSession(session, responseChannel, encodedPrincipal, buffer, offset);
bufferClaim.commit();
break;
}
checkResultAndIdle(result);
}
}
else
{
final int offset = 0;
encodeSession(session, responseChannel, encodedPrincipal, offerBuffer, offset);
offer(offerBuffer, offset, length);
}
} | @Test
void snapshotSessionUsesOfferIfDataDoesNotFitIntoMaxPayloadSize()
{
final String responseChannel = "aeron:udp?endpoint=localhost:8080|alias=long time ago";
final byte[] encodedPrincipal = new byte[1000];
ThreadLocalRandom.current().nextBytes(encodedPrincipal);
final ContainerClientSession session =
new ContainerClientSession(8, -3, responseChannel, encodedPrincipal, null);
final int length = MessageHeaderEncoder.ENCODED_LENGTH + ClientSessionEncoder.BLOCK_LENGTH +
ClientSessionEncoder.responseChannelHeaderLength() + responseChannel.length() +
ClientSessionEncoder.encodedPrincipalHeaderLength() + encodedPrincipal.length;
when(publication.maxPayloadLength()).thenReturn(20);
when(publication.offer(any(), eq(0), eq(length)))
.thenReturn(BACK_PRESSURED, ADMIN_ACTION)
.thenAnswer(mockOffer());
serviceSnapshotTaker.snapshotSession(session);
final InOrder inOrder = inOrder(idleStrategy, publication);
inOrder.verify(publication).maxPayloadLength();
inOrder.verify(idleStrategy).reset();
inOrder.verify(publication).offer(any(), anyInt(), anyInt());
inOrder.verify(idleStrategy).idle();
inOrder.verify(publication).offer(any(), anyInt(), anyInt());
inOrder.verify(idleStrategy).idle();
inOrder.verify(publication).offer(any(), anyInt(), anyInt());
inOrder.verifyNoMoreInteractions();
clientSessionDecoder.wrapAndApplyHeader(buffer, 0, messageHeaderDecoder);
assertEquals(session.id(), clientSessionDecoder.clusterSessionId());
assertEquals(session.responseStreamId(), clientSessionDecoder.responseStreamId());
assertEquals(responseChannel, clientSessionDecoder.responseChannel());
assertEquals(encodedPrincipal.length, clientSessionDecoder.encodedPrincipalLength());
final byte[] snapshotPrincipal = new byte[encodedPrincipal.length];
clientSessionDecoder.getEncodedPrincipal(snapshotPrincipal, 0, snapshotPrincipal.length);
assertArrayEquals(encodedPrincipal, snapshotPrincipal);
} |
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.about_menu_option:
Navigation.findNavController(requireView())
.navigate(MainFragmentDirections.actionMainFragmentToAboutAnySoftKeyboardFragment());
return true;
case R.id.tweaks_menu_option:
Navigation.findNavController(requireView())
.navigate(MainFragmentDirections.actionMainFragmentToMainTweaksFragment());
return true;
case R.id.backup_prefs:
mDialogController.showDialog(R.id.backup_prefs);
return true;
case R.id.restore_prefs:
mDialogController.showDialog(R.id.restore_prefs);
return true;
default:
return super.onOptionsItemSelected(item);
}
} | @Test
public void testRestorePickerCancel() throws Exception {
final var shadowApplication = Shadows.shadowOf((Application) getApplicationContext());
final MainFragment fragment = startFragment();
final FragmentActivity activity = fragment.getActivity();
fragment.onOptionsItemSelected(
Shadows.shadowOf(activity).getOptionsMenu().findItem(R.id.restore_prefs));
TestRxSchedulers.foregroundFlushAllJobs();
Assert.assertNotSame(
GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
Assert.assertTrue(
GeneralDialogTestUtil.getLatestShownDialog()
.getButton(DialogInterface.BUTTON_POSITIVE)
.callOnClick());
TestRxSchedulers.foregroundAdvanceBy(1);
// this will open the System's file chooser
ShadowActivity.IntentForResult fileRequest =
shadowApplication.getNextStartedActivityForResult();
Assert.assertNotNull(fileRequest);
Assert.assertEquals(Intent.ACTION_OPEN_DOCUMENT, fileRequest.intent.getAction());
final var backupFile = Files.createTempFile("ask-backup", ".xml");
Intent resultData = new Intent();
resultData.setData(Uri.fromFile(backupFile.toFile()));
Shadows.shadowOf(activity)
.receiveResult(fileRequest.intent, Activity.RESULT_CANCELED, resultData);
TestRxSchedulers.drainAllTasks();
// pick cancel
Assert.assertSame(
GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
} |
@Override
public void accept(Props props) {
if (isClusterEnabled(props)) {
checkClusterProperties(props);
}
} | @Test
public void accept_throws_MessageException_if_no_node_type_is_configured() {
TestAppSettings settings = new TestAppSettings(of(CLUSTER_ENABLED.getKey(), "true"));
ClusterSettings clusterSettings = new ClusterSettings(network);
Props props = settings.getProps();
assertThatThrownBy(() -> clusterSettings.accept(props))
.isInstanceOf(MessageException.class)
.hasMessage("Property sonar.cluster.node.type is mandatory");
} |
@Override
public int hashCode()
{
//taken from java.lang.Long
return (int)(value ^ (value >> 32));
} | @Test
void testHashCode()
{
for (int i = -1000; i < 3000; i += 200)
{
COSInteger test1 = COSInteger.get(i);
COSInteger test2 = COSInteger.get(i);
assertEquals(test1.hashCode(), test2.hashCode());
COSInteger test3 = COSInteger.get(i + 1);
assertNotSame(test3.hashCode(), test1.hashCode());
}
} |
public static <NodeT, EdgeT> Set<NodeT> reachableNodes(
Network<NodeT, EdgeT> network, Set<NodeT> startNodes, Set<NodeT> endNodes) {
Set<NodeT> visitedNodes = new HashSet<>();
Queue<NodeT> queuedNodes = new ArrayDeque<>();
queuedNodes.addAll(startNodes);
// Perform a breadth-first traversal rooted at the start nodes.
while (!queuedNodes.isEmpty()) {
NodeT currentNode = queuedNodes.remove();
// If we have already visited this node or it is a terminal node then do not add any
// successors.
if (!visitedNodes.add(currentNode) || endNodes.contains(currentNode)) {
continue;
}
queuedNodes.addAll(network.successors(currentNode));
}
return visitedNodes;
} | @Test
public void testReachableNodesWithPathAroundBoundaryNode() {
// Since there is a path around J, we will include E, G, and H
assertEquals(
ImmutableSet.of("I", "J", "E", "G", "H", "K", "L"),
Networks.reachableNodes(createNetwork(), ImmutableSet.of("I"), ImmutableSet.of("J")));
} |
public boolean overlaps(Domain other)
{
checkCompatibility(other);
return !this.intersect(other).isNone();
} | @Test
public void testOverlaps()
{
assertTrue(Domain.all(BIGINT).overlaps(Domain.all(BIGINT)));
assertFalse(Domain.all(BIGINT).overlaps(Domain.none(BIGINT)));
assertTrue(Domain.all(BIGINT).overlaps(Domain.notNull(BIGINT)));
assertTrue(Domain.all(BIGINT).overlaps(Domain.onlyNull(BIGINT)));
assertTrue(Domain.all(BIGINT).overlaps(Domain.singleValue(BIGINT, 0L)));
assertFalse(Domain.none(BIGINT).overlaps(Domain.all(BIGINT)));
assertFalse(Domain.none(BIGINT).overlaps(Domain.none(BIGINT)));
assertFalse(Domain.none(BIGINT).overlaps(Domain.notNull(BIGINT)));
assertFalse(Domain.none(BIGINT).overlaps(Domain.onlyNull(BIGINT)));
assertFalse(Domain.none(BIGINT).overlaps(Domain.singleValue(BIGINT, 0L)));
assertTrue(Domain.notNull(BIGINT).overlaps(Domain.all(BIGINT)));
assertFalse(Domain.notNull(BIGINT).overlaps(Domain.none(BIGINT)));
assertTrue(Domain.notNull(BIGINT).overlaps(Domain.notNull(BIGINT)));
assertFalse(Domain.notNull(BIGINT).overlaps(Domain.onlyNull(BIGINT)));
assertTrue(Domain.notNull(BIGINT).overlaps(Domain.singleValue(BIGINT, 0L)));
assertTrue(Domain.onlyNull(BIGINT).overlaps(Domain.all(BIGINT)));
assertFalse(Domain.onlyNull(BIGINT).overlaps(Domain.none(BIGINT)));
assertFalse(Domain.onlyNull(BIGINT).overlaps(Domain.notNull(BIGINT)));
assertTrue(Domain.onlyNull(BIGINT).overlaps(Domain.onlyNull(BIGINT)));
assertFalse(Domain.onlyNull(BIGINT).overlaps(Domain.singleValue(BIGINT, 0L)));
assertTrue(Domain.singleValue(BIGINT, 0L).overlaps(Domain.all(BIGINT)));
assertFalse(Domain.singleValue(BIGINT, 0L).overlaps(Domain.none(BIGINT)));
assertTrue(Domain.singleValue(BIGINT, 0L).overlaps(Domain.notNull(BIGINT)));
assertFalse(Domain.singleValue(BIGINT, 0L).overlaps(Domain.onlyNull(BIGINT)));
assertTrue(Domain.singleValue(BIGINT, 0L).overlaps(Domain.singleValue(BIGINT, 0L)));
} |
public boolean hasMajorAndMinorVersionHigherOrEqualTo(String majorAndMinorVersion) {
return hasMajorAndMinorVersionHigherOrEqualTo(new VersionNumber(majorAndMinorVersion));
} | @Test
void hasMajorAndMinorVersionHigherOrEqualTo() {
assertThat(v("6.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("6.0.0"))).isTrue();
assertThat(v("6.1.1").hasMajorAndMinorVersionHigherOrEqualTo(v("6.1.0"))).isTrue();
assertThat(v("6.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("5.0.0"))).isTrue();
assertThat(v("10.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("9.0.0"))).isTrue();
assertThat(v("10.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("1.0.0"))).isTrue();
assertThat(v("5.0.1").hasMajorAndMinorVersionHigherOrEqualTo(v("5.0.0"))).isTrue();
assertThat(v("10.6").hasMajorAndMinorVersionHigherOrEqualTo(v("10.0.0"))).isTrue();
assertThat(v("7.0.0-beta.2").hasMajorAndMinorVersionHigherOrEqualTo(v("6.0.0"))).isTrue();
assertThat(v("7.0.0-beta.1").hasMajorAndMinorVersionHigherOrEqualTo(v("7.0.0-alpha.1"))).isTrue();
assertThat(v("7.0.0-beta.3").hasMajorAndMinorVersionHigherOrEqualTo(v("7.0.0-beta.2"))).isTrue();
assertThat(v("6.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("6.1.0"))).isFalse();
assertThat(v("5.0.1").hasMajorAndMinorVersionHigherOrEqualTo(v("6.0.0"))).isFalse();
assertThat(v("1.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("10.0.0"))).isFalse();
assertThat(v("9.0.0").hasMajorAndMinorVersionHigherOrEqualTo(v("10.0.0"))).isFalse();
assertThat(v("10.6").hasMajorAndMinorVersionHigherOrEqualTo(v("11.0.0"))).isFalse();
assertThat(v("1.8.0_241").hasMajorAndMinorVersionHigherOrEqualTo(v("21"))).isFalse();
} |
@Override public boolean containsKey(long key) {
return hsa.get(key) != NULL_ADDRESS;
} | @Test
public void testContainsKey_fail() {
long key = newKey();
assertFalseKV(map.containsKey(key), key, 0);
} |
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
Map<String, Long> recentlyUnloadedBundles,
Map<String, Long> recentlyUnloadedBrokers) {
final var conf = context.brokerConfiguration();
decisionCache.clear();
stats.clear();
Map<String, BrokerLookupData> availableBrokers;
try {
availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
.get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
} catch (ExecutionException | InterruptedException | TimeoutException e) {
counter.update(Failure, Unknown);
log.warn("Failed to fetch available brokers. Stop unloading.", e);
return decisionCache;
}
try {
final var loadStore = context.brokerLoadDataStore();
stats.setLoadDataStore(loadStore);
boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
var skipReason = stats.update(
context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
if (skipReason.isPresent()) {
if (debugMode) {
log.warn(CANNOT_CONTINUE_UNLOAD_MSG
+ " Skipped the load stat update. Reason:{}.",
skipReason.get());
}
counter.update(Skip, skipReason.get());
return decisionCache;
}
counter.updateLoadData(stats.avg, stats.std);
if (debugMode) {
log.info("brokers' load stats:{}", stats);
}
// counters for skipped brokers, used to pick the Skip reason reported at the end
int numOfBrokersWithEmptyLoadData = 0;
int numOfBrokersWithFewBundles = 0;
final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
boolean transfer = conf.isLoadBalancerTransferEnabled();
if (stats.std() > targetStd
|| isUnderLoaded(context, stats.peekMinBroker(), stats)
|| isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
unloadConditionHitCount++;
} else {
unloadConditionHitCount = 0;
}
if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
}
counter.update(Skip, HitCount);
return decisionCache;
}
while (true) {
if (!stats.hasTransferableBrokers()) {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ " Exhausted target transfer brokers.");
}
break;
}
UnloadDecision.Reason reason;
if (stats.std() > targetStd) {
reason = Overloaded;
} else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
reason = Underloaded;
if (debugMode) {
log.info(String.format("broker:%s is underloaded:%s although "
+ "load std:%.2f <= targetStd:%.2f. "
+ "Continuing unload for this underloaded broker.",
stats.peekMinBroker(),
context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
stats.std(), targetStd));
}
} else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
reason = Overloaded;
if (debugMode) {
log.info(String.format("broker:%s is overloaded:%s although "
+ "load std:%.2f <= targetStd:%.2f. "
+ "Continuing unload for this overloaded broker.",
stats.peekMaxBroker(),
context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
stats.std(), targetStd));
}
} else {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ "The overall cluster load meets the target, std:{} <= targetStd:{}."
+ "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
}
break;
}
String maxBroker = stats.pollMaxBroker();
String minBroker = stats.peekMinBroker();
Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
if (maxBrokerLoadData.isEmpty()) {
log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " MaxBrokerLoadData is empty.", maxBroker));
numOfBrokersWithEmptyLoadData++;
continue;
}
if (minBrokerLoadData.isEmpty()) {
log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
numOfBrokersWithEmptyLoadData++;
continue;
}
double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
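// Offload half of the load gap so the max and min brokers converge toward each other.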
double offload = (maxLoad - minLoad) / 2;
BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
+ brokerLoadData.getMsgThroughputOut();
double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
+ minBrokerLoadData.get().getMsgThroughputOut();
double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
if (debugMode) {
log.info(String.format(
"Attempting to shed load from broker:%s%s, which has the max resource "
+ "usage:%.2f%%, targetStd:%.2f,"
+ " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
maxBroker, transfer ? " to broker:" + minBroker : "",
maxLoad * 100,
targetStd,
offload * 100,
offloadThroughput / KB
));
}
double trafficMarkedToOffload = 0;
double trafficMarkedToGain = 0;
Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " TopBundlesLoadData is empty.", maxBroker));
numOfBrokersWithEmptyLoadData++;
continue;
}
var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
if (maxBrokerTopBundlesLoadData.size() == 1) {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " Sole namespace bundle:%s is overloading the broker. ",
maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
continue;
}
Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
var minBrokerTopBundlesLoadDataIter =
minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
null;
if (maxBrokerTopBundlesLoadData.isEmpty()) {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " Broker overloaded despite having no bundles", maxBroker));
continue;
}
int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
for (var e : maxBrokerTopBundlesLoadData) {
String bundle = e.bundleName();
if (channel != null && !channel.isOwner(bundle, maxBroker)) {
if (debugMode) {
log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " MaxBroker:%s is not the owner.", bundle, maxBroker));
}
continue;
}
if (recentlyUnloadedBundles.containsKey(bundle)) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " Bundle has been recently unloaded at ts:%d.",
bundle, recentlyUnloadedBundles.get(bundle)));
}
continue;
}
if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " This unload can't meet "
+ "affinity(isolation) or anti-affinity group policies.", bundle));
}
continue;
}
if (remainingTopBundles <= 1) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
+ " less than or equal to 1.",
bundle, maxBroker));
}
break;
}
var bundleData = e.stats();
double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
boolean swap = false;
List<Unload> minToMaxUnloads = new ArrayList<>();
double minBrokerBundleSwapThroughput = 0.0;
if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
// see if we can swap bundles from min to max broker to balance better.
if (transfer && minBrokerTopBundlesLoadDataIter != null) {
var maxBrokerNewThroughput =
maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
- maxBrokerBundleThroughput;
var minBrokerNewThroughput =
minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
+ maxBrokerBundleThroughput;
while (minBrokerTopBundlesLoadDataIter.hasNext()) {
var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
if (!isTransferable(context, availableBrokers,
minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
continue;
}
var minBrokerBundleThroughput =
minBrokerBundleData.stats().msgThroughputIn
+ minBrokerBundleData.stats().msgThroughputOut;
var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
if (maxBrokerNewThroughputTmp < maxBrokerThroughput
&& minBrokerNewThroughputTmp < maxBrokerThroughput) {
minToMaxUnloads.add(new Unload(minBroker,
minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
minBrokerNewThroughput = minBrokerNewThroughputTmp;
minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
if (minBrokerNewThroughput <= maxBrokerNewThroughput
&& maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
swap = true;
break;
}
}
}
}
if (!swap) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
+ "greater than the target :%.2f KByte/s.",
bundle,
(trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
trafficMarkedToGain / KB,
(trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
offloadThroughput / KB));
}
break;
}
}
Unload unload;
if (transfer) {
if (swap) {
minToMaxUnloads.forEach(minToMaxUnload -> {
if (debugMode) {
log.info("Decided to gain bundle:{} from min broker:{}",
minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
}
var decision = new UnloadDecision();
decision.setUnload(minToMaxUnload);
decision.succeed(reason);
decisionCache.add(decision);
});
if (debugMode) {
log.info(String.format(
"Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
trafficMarkedToGain += minBrokerBundleSwapThroughput;
}
}
unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
} else {
unload = new Unload(maxBroker, bundle);
}
var decision = new UnloadDecision();
decision.setUnload(unload);
decision.succeed(reason);
decisionCache.add(decision);
trafficMarkedToOffload += maxBrokerBundleThroughput;
remainingTopBundles--;
if (debugMode) {
log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
+ " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
+ " Target:%.2f KByte/s.",
bundle, maxBrokerBundleThroughput / KB,
trafficMarkedToOffload / KB,
trafficMarkedToGain / KB,
(trafficMarkedToOffload - trafficMarkedToGain) / KB,
offloadThroughput / KB));
}
}
if (trafficMarkedToOffload > 0) {
var adjustedOffload =
(trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
stats.offload(maxLoad, minLoad, adjustedOffload);
if (debugMode) {
log.info(
String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
stats, maxLoad, minLoad, adjustedOffload));
}
} else {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " There is no bundle that can be unloaded in top bundles load data. "
+ "Consider splitting bundles owned by the broker "
+ "to make each bundle serve less traffic "
+ "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
+ " to report more bundles in the top bundles load data.", maxBroker));
}
} // while end
if (debugMode) {
log.info("decisionCache:{}", decisionCache);
}
if (decisionCache.isEmpty()) {
UnloadDecision.Reason reason;
if (numOfBrokersWithEmptyLoadData > 0) {
reason = NoLoadData;
} else if (numOfBrokersWithFewBundles > 0) {
reason = NoBundles;
} else {
reason = HitCount;
}
counter.update(Skip, reason);
} else {
unloadConditionHitCount = 0;
}
} catch (Throwable e) {
log.error("Failed to process unloading. ", e);
this.counter.update(Failure, Unknown);
}
return decisionCache;
} | @Test
public void testTargetStd() {
UnloadCounter counter = new UnloadCounter();
TransferShedder transferShedder = new TransferShedder(counter);
var ctx = getContext();
BrokerRegistry brokerRegistry = mock(BrokerRegistry.class);
doReturn(CompletableFuture.completedFuture(Map.of(
"broker1:8080", mock(BrokerLookupData.class),
"broker2:8080", mock(BrokerLookupData.class),
"broker3:8080", mock(BrokerLookupData.class)
))).when(brokerRegistry).getAvailableBrokerLookupDataAsync();
doReturn(brokerRegistry).when(ctx).brokerRegistry();
ctx.brokerConfiguration().setLoadBalancerDebugModeEnabled(true);
var brokerLoadDataStore = ctx.brokerLoadDataStore();
brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080"));
brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 20, "broker2:8080"));
brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 30, "broker3:8080"));
var topBundlesLoadDataStore = ctx.topBundleLoadDataStore();
topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 30, 30));
topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 40, 40));
topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 50, 50));
var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
assertTrue(res.isEmpty());
assertEquals(counter.getBreakdownCounters().get(Skip).get(HitCount).get(), 1);
assertEquals(counter.getLoadAvg(), 0.2000000063578288);
assertEquals(counter.getLoadStd(), 0.08164966587949089);
} |
void readEntries(ReadHandle lh, long firstEntry, long lastEntry, boolean shouldCacheEntry,
final AsyncCallbacks.ReadEntriesCallback callback, Object ctx) {
final PendingReadKey key = new PendingReadKey(firstEntry, lastEntry);
Map<PendingReadKey, PendingRead> pendingReadsForLedger =
cachedPendingReads.computeIfAbsent(lh.getId(), (l) -> new ConcurrentHashMap<>());
boolean listenerAdded = false;
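// Keep retrying until this read is attached to a pending read: a matching pending read may
// complete and be evicted between the lookup and addListener, in which case we look up again.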
while (!listenerAdded) {
AtomicBoolean createdByThisThread = new AtomicBoolean();
FindPendingReadOutcome findBestCandidateOutcome = findPendingRead(key,
pendingReadsForLedger, createdByThisThread);
PendingRead pendingRead = findBestCandidateOutcome.pendingRead;
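// If the matched pending read only partially covers the requested range, read the missing
// left/right segments from storage separately and merge them with the pending read's result.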
if (findBestCandidateOutcome.needsAdditionalReads()) {
AsyncCallbacks.ReadEntriesCallback wrappedCallback = new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
PendingReadKey missingOnLeft = findBestCandidateOutcome.missingOnLeft;
PendingReadKey missingOnRight = findBestCandidateOutcome.missingOnRight;
if (missingOnRight != null && missingOnLeft != null) {
AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromLeft, Object dummyCtx1) {
AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromRight,
Object dummyCtx2) {
List<Entry> finalResult =
new ArrayList<>(entriesFromLeft.size()
+ entries.size() + entriesFromRight.size());
finalResult.addAll(entriesFromLeft);
finalResult.addAll(entries);
finalResult.addAll(entriesFromRight);
callback.readEntriesComplete(finalResult, ctx);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception,
Object dummyCtx3) {
entries.forEach(Entry::release);
entriesFromLeft.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh,
missingOnRight.startEntry, missingOnRight.endEntry,
shouldCacheEntry, readFromRightCallback, null);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx4) {
entries.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry, missingOnLeft.endEntry,
shouldCacheEntry, readFromLeftCallback, null);
} else if (missingOnLeft != null) {
AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromLeft,
Object dummyCtx5) {
List<Entry> finalResult =
new ArrayList<>(entriesFromLeft.size() + entries.size());
finalResult.addAll(entriesFromLeft);
finalResult.addAll(entries);
callback.readEntriesComplete(finalResult, ctx);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception,
Object dummyCtx6) {
entries.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry, missingOnLeft.endEntry,
shouldCacheEntry, readFromLeftCallback, null);
} else if (missingOnRight != null) {
AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
new AsyncCallbacks.ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entriesFromRight,
Object dummyCtx7) {
List<Entry> finalResult =
new ArrayList<>(entriesFromRight.size() + entries.size());
finalResult.addAll(entries);
finalResult.addAll(entriesFromRight);
callback.readEntriesComplete(finalResult, ctx);
}
@Override
public void readEntriesFailed(ManagedLedgerException exception,
Object dummyCtx8) {
entries.forEach(Entry::release);
callback.readEntriesFailed(exception, ctx);
}
};
rangeEntryCache.asyncReadEntry0(lh, missingOnRight.startEntry, missingOnRight.endEntry,
shouldCacheEntry, readFromRightCallback, null);
}
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
callback.readEntriesFailed(exception, ctx);
}
};
listenerAdded = pendingRead.addListener(wrappedCallback, ctx, key.startEntry, key.endEntry);
} else {
listenerAdded = pendingRead.addListener(callback, ctx, key.startEntry, key.endEntry);
}
if (createdByThisThread.get()) {
CompletableFuture<List<EntryImpl>> readResult = rangeEntryCache.readFromStorage(lh, firstEntry,
lastEntry, shouldCacheEntry);
pendingRead.attach(readResult);
}
}
} | @Test
public void simpleConcurrentReadNoMatch() throws Exception {
long firstEntry = 100;
long endEntry = 199;
long firstEntrySecondRead = 1000;
long endEntrySecondRead = 1099;
boolean shouldCacheEntry = false;
PreparedReadFromStorage read1 =
prepareReadFromStorage(lh, rangeEntryCache, firstEntry, endEntry, shouldCacheEntry);
PreparedReadFromStorage read2 =
prepareReadFromStorage(lh, rangeEntryCache, firstEntrySecondRead, endEntrySecondRead, shouldCacheEntry);
PendingReadsManager pendingReadsManager = new PendingReadsManager(rangeEntryCache);
CapturingReadEntriesCallback callback = new CapturingReadEntriesCallback();
pendingReadsManager.readEntries(lh, firstEntry, endEntry, shouldCacheEntry, callback, CTX);
CapturingReadEntriesCallback callback2 = new CapturingReadEntriesCallback();
pendingReadsManager.readEntries(lh, firstEntrySecondRead, endEntrySecondRead, shouldCacheEntry, callback2, CTX2);
read1.storageReadCompleted();
callback.get();
read2.storageReadCompleted();
callback2.get();
assertSame(callback.getCtx(), CTX);
assertSame(callback2.getCtx(), CTX2);
verifyRange(callback.entries, firstEntry, endEntry);
verifyRange(callback2.entries, firstEntrySecondRead, endEntrySecondRead);
} |
public void check(Search search, Predicate<String> hasReadPermissionForStream) {
checkUserIsPermittedToSeeStreams(search.streamIdsForPermissionsCheck(), hasReadPermissionForStream);
checkMissingRequirements(search);
} | @Test
public void failsForMissingCapabilities() {
final Search search = searchWithCapabilityRequirements("awesomeness");
assertThatExceptionOfType(MissingCapabilitiesException.class)
.isThrownBy(() -> sut.check(search, id -> true))
.satisfies(ex -> assertThat(ex.getMissingRequirements()).containsOnlyKeys("awesomeness"));
} |
public DistroDataProcessor findDataProcessor(String processType) {
return dataProcessorMap.get(processType);
} | @Test
void testFindDataProcessor() {
DistroDataProcessor distroDataProcessor = componentHolder.findDataProcessor(type);
assertEquals(this.distroDataProcessor, distroDataProcessor);
} |
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
.directS3Upload(true)
.timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
.timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
.parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
.name(file.getName());
final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
.createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
if(log.isDebugEnabled()) {
log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
}
final MultipartOutputStream proxy = new MultipartOutputStream(createFileUploadResponse, file, status);
return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy, partsize),
new SDSAttributesAdapter(session), status) {
@Override
public Node getStatus() {
return proxy.getResult();
}
};
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
}
} | @Test
public void testWriteEncrypted() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).createRoom(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
final byte[] content = RandomUtils.nextBytes(new HostPreferences(session.getHost()).getInteger("sds.upload.multipart.chunksize") + 1);
final Path test = new Path(room, new NFDNormalizer().normalize(String.format("ä%s", new AlphanumericRandomStringService().random())).toString(), EnumSet.of(Path.Type.file));
{
final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setChecksum(new MD5ChecksumCompute().compute(new ByteArrayInputStream(content), new TransferStatus()));
status.setModified(1632127025217L);
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
assertEquals(content.length, out.getStatus().getSize(), 0L);
}
assertNotNull(test.attributes().getVersionId());
assertTrue(new DefaultFindFeature(session).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(test));
final PathAttributes attr = new SDSAttributesFinderFeature(session, nodeid).find(test);
assertEquals(test.attributes().getVersionId(), attr.getVersionId());
assertEquals(1632127025217L, attr.getModificationDate());
assertEquals(1632127025217L, new DefaultAttributesFinderFeature(session).find(test).getModificationDate());
final byte[] compare = new byte[content.length];
final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
String previousVersion = attr.getVersionId();
// Overwrite
{
final byte[] change = RandomUtils.nextBytes(256);
final TransferStatus status = new TransferStatus();
status.setLength(change.length);
final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
final StatusOutputStream<Node> out = writer.write(test, status.exists(true), new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(change), out);
assertNotEquals(previousVersion, new SDSAttributesAdapter(session).toAttributes(out.getStatus()).getVersionId());
}
// Read with previous version must fail
try {
test.attributes().withVersionId(previousVersion);
new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(test, new TransferStatus(), new DisabledConnectionCallback() {
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
fail();
}
catch(NotfoundException e) {
// Expected
}
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static Writer createWriter(Configuration conf, Writer.Option... opts
) throws IOException {
Writer.CompressionOption compressionOption =
Options.getOption(Writer.CompressionOption.class, opts);
CompressionType kind;
if (compressionOption != null) {
kind = compressionOption.getValue();
} else {
kind = getDefaultCompressionType(conf);
opts = Options.prependOptions(opts, Writer.compression(kind));
}
switch (kind) {
default:
case NONE:
return new Writer(conf, opts);
case RECORD:
return new RecordCompressWriter(conf, opts);
case BLOCK:
return new BlockCompressWriter(conf, opts);
}
} | @SuppressWarnings("deprecation")
@Test
public void testRecursiveSeqFileCreate() throws IOException {
FileSystem fs = FileSystem.getLocal(conf);
Path parentDir = new Path(GenericTestUtils.getTempPath(
"recursiveCreateDir"));
Path name = new Path(parentDir, "file");
boolean createParent = false;
try {
SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
RandomDatum.class, 512, (short) 1, 4096, createParent,
CompressionType.NONE, null, new Metadata());
fail("Expected an IOException due to missing parent");
} catch (IOException ioe) {
// Expected
}
try {
createParent = true;
SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
RandomDatum.class, 512, (short) 1, 4096, createParent,
CompressionType.NONE, null, new Metadata());
// should succeed, fails if exception thrown
} finally {
fs.deleteOnExit(parentDir);
fs.close();
}
} |
@Override
public double cdf(double k) {
int L = Math.max(0, m + n - N);
if (k < L) {
return 0.0;
} else if (k >= Math.min(m, n)) {
return 1.0;
}
double p = 0.0;
for (int i = L; i <= k; i++) {
p += p(i);
}
return p;
} | @Test
public void testCdf() {
System.out.println("cdf");
HyperGeometricDistribution instance = new HyperGeometricDistribution(100, 30, 70);
instance.rand();
assertEquals(3.404564e-26, instance.cdf(0), 1E-30);
assertEquals(7.152988e-23, instance.cdf(1), 1E-27);
assertEquals(3.583732e-20, instance.cdf(2), 1E-25);
assertEquals(0.4013632, instance.cdf(20), 1E-7);
assertEquals(0.5891093, instance.cdf(21), 1E-7);
assertEquals(0.9999568, instance.cdf(28), 1E-7);
assertEquals(0.9999981, instance.cdf(29), 1E-7);
assertEquals(1.0, instance.cdf(30), 1E-7);
assertEquals(1.0, instance.cdf(31), 1E-7);
} |
@Override
public Set keySet() {
return null;
} | @Test
public void testKeySet() throws Exception {
assertNull(NULL_QUERY_CACHE.keySet());
} |
public Map<String, ClusterMetadata> getClusters() {
return clusters;
} | @Test
void testGetClusters() {
Map<String, ClusterMetadata> clusters = serviceMetadata.getClusters();
assertNotNull(clusters);
assertEquals(0, clusters.size());
} |
@NotNull
@Override
public List<InetAddress> lookup(@NotNull String host) throws UnknownHostException {
InetAddress address = InetAddress.getByName(host);
if (configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY).orElse(SONAR_VALIDATE_WEBHOOKS_DEFAULT_VALUE)
&& (address.isLoopbackAddress() || address.isAnyLocalAddress() || isLocalAddress(address))) {
throw new IllegalArgumentException("Invalid URL: loopback and wildcard addresses are not allowed for webhooks.");
}
return Collections.singletonList(address);
} | @Test
public void lookup_dont_fail_on_classic_host_with_validation_enabled() throws UnknownHostException {
when(configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY))
.thenReturn(Optional.of(true));
Assertions.assertThat(underTest.lookup("sonarsource.com").toString()).contains("sonarsource.com/");
} |
private KsqlScalarFunction createFunction(
final Class theClass,
final UdfDescription udfDescriptionAnnotation,
final Udf udfAnnotation,
final Method method,
final String path,
final String sensorName,
final Class<? extends Kudf> udfClass
) {
// sanity check
FunctionLoaderUtils
.instantiateFunctionInstance(method.getDeclaringClass(), udfDescriptionAnnotation.name());
final FunctionInvoker invoker = FunctionLoaderUtils.createFunctionInvoker(method);
final String functionName = udfDescriptionAnnotation.name();
LOGGER.info("Adding function " + functionName + " for method " + method);
final List<ParameterInfo> parameters = FunctionLoaderUtils
.createParameters(method, functionName, typeParser);
final ParamType javaReturnSchema = FunctionLoaderUtils
.getReturnType(method, udfAnnotation.schema(), typeParser);
final SchemaProvider schemaProviderFunction = FunctionLoaderUtils
.handleUdfReturnSchema(
theClass,
javaReturnSchema,
udfAnnotation.schema(),
typeParser,
udfAnnotation.schemaProvider(),
udfDescriptionAnnotation.name(),
method.isVarArgs()
);
return KsqlScalarFunction.create(
schemaProviderFunction,
javaReturnSchema,
parameters,
FunctionName.of(functionName.toUpperCase()),
udfClass,
getUdfFactory(method, udfDescriptionAnnotation, functionName, invoker, sensorName),
udfAnnotation.description(),
path,
method.isVarArgs()
);
} | @Test
@SuppressWarnings("rawtypes")
public void shouldConfigureConfigurableUdaf() throws Exception {
// Given:
final UdafFactoryInvoker creator
= createUdafLoader().createUdafFactoryInvoker(
TestUdaf.class.getMethod("createSumInt"),
FunctionName.of("test-udf"),
"desc",
new String[]{""},
"",
"");
final AggregateFunctionInitArguments initArgs = new AggregateFunctionInitArguments(
Collections.singletonList(0),
ImmutableMap.of("ksql.functions.test_udaf.init", 100L)
);
// When:
final KsqlAggregateFunction function = creator.createFunction(initArgs, Collections.emptyList());
final Object initvalue = function.getInitialValueSupplier().get();
// Then:
assertThat(initvalue, is(100L));
} |
public static void popupViewAnimation(View... views) {
int offset = 500;
final int offsetInterval = 200;
for (View view : views) {
if (view != null) {
Animation animation = AnimationUtils.loadAnimation(view.getContext(), R.anim.link_popup);
animation.setStartOffset(offset);
view.startAnimation(animation);
}
offset += offsetInterval;
}
} | @Test
public void testPopupAnimation() {
View v1 = Mockito.mock(View.class);
View v2 = Mockito.mock(View.class);
Mockito.doReturn(mApplication).when(v1).getContext();
Mockito.doReturn(mApplication).when(v2).getContext();
SetupSupport.popupViewAnimation(v1, v2);
ArgumentCaptor<Animation> animation1Captor = ArgumentCaptor.forClass(Animation.class);
ArgumentCaptor<Animation> animation2Captor = ArgumentCaptor.forClass(Animation.class);
Mockito.verify(v1).startAnimation(animation1Captor.capture());
Mockito.verify(v2).startAnimation(animation2Captor.capture());
Animation animation1 = animation1Captor.getValue();
Animation animation2 = animation2Captor.getValue();
Assert.assertEquals(500, animation1.getStartOffset());
Assert.assertEquals(700, animation2.getStartOffset());
} |
OutputT apply(InputT input) throws UserCodeExecutionException {
Optional<UserCodeExecutionException> latestError = Optional.empty();
long waitFor = 0L;
while (waitFor != BackOff.STOP) {
try {
sleepIfNeeded(waitFor);
incIfPresent(getCallCounter());
return getThrowableFunction().apply(input);
} catch (UserCodeExecutionException e) {
if (!e.shouldRepeat()) {
throw e;
}
latestError = Optional.of(e);
} catch (InterruptedException ignored) {
}
try {
incIfPresent(getBackoffCounter());
waitFor = getBackOff().nextBackOffMillis();
} catch (IOException e) {
throw new UserCodeExecutionException(e);
}
}
throw latestError.orElse(
new UserCodeExecutionException("failed to process for input: " + input));
} | @Test
public void givenCallerQuotaErrorsExceedsLimit_emitsIntoFailurePCollection() {
PCollectionTuple pct =
pipeline
.apply(Create.of(1))
.apply(
ParDo.of(
new DoFnWithRepeaters(
new CallerImpl(LIMIT + 1, UserCodeQuotaException.class),
new SetupTeardownImpl(0)))
.withOutputTags(OUTPUT_TAG, TupleTagList.of(FAILURE_TAG)));
PAssert.that(pct.get(OUTPUT_TAG)).empty();
PAssert.that(pct.get(FAILURE_TAG)).containsInAnyOrder(UserCodeQuotaException.class.getName());
pipeline.run();
} |
@Override
public String getNniLinks(String target) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
String reply = null;
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return null;
}
try {
StringBuilder request = new StringBuilder();
request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE)
.append(ANGLE_RIGHT + NEW_LINE)
.append(buildStartTag(VOLT_PORTS));
if (target != null) {
int nni;
try {
nni = Integer.parseInt(target);
if (nni <= ZERO) {
log.error("Invalid integer for nnilink-id:{}", target);
return null;
}
} catch (NumberFormatException e) {
log.error("Non-number input for nnilink-id:{}", target);
return null;
}
request.append(buildStartTag(ETH_NNILINK_PORTS))
.append(buildStartTag(ETH_NNILINK_PORT))
.append(buildStartTag(NNILINK_ID, false))
.append(target)
.append(buildEndTag(NNILINK_ID))
.append(buildEndTag(ETH_NNILINK_PORT))
.append(buildEndTag(ETH_NNILINK_PORTS));
} else {
request.append(buildEmptyTag(ETH_NNILINK_PORTS));
}
request.append(buildEndTag(VOLT_PORTS))
.append(VOLT_NE_CLOSE);
reply = controller.getDevicesMap()
.get(ncDeviceId)
.getSession()
.get(request.toString(), REPORT_ALL);
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return reply;
} | @Test
public void testInvalidGetNniLinksInput() throws Exception {
String reply;
String target;
for (int i = ZERO; i < INVALID_GET_TCS.length; i++) {
target = INVALID_GET_TCS[i];
reply = voltConfig.getNniLinks(target);
assertNull("Incorrect response for INVALID_GET_TCS", reply);
}
} |
public static TaskExecutorProcessSpec processSpecFromConfig(final Configuration config) {
try {
return createMemoryProcessSpec(
config, PROCESS_MEMORY_UTILS.memoryProcessSpecFromConfig(config));
} catch (IllegalConfigurationException e) {
throw new IllegalConfigurationException(
"TaskManager memory configuration failed: " + e.getMessage(), e);
}
} | @Test
public void testConsistencyCheckOfDerivedNetworkMemoryLessThanMinFails() {
final Configuration configuration =
setupConfigWithFlinkAndTaskHeapToDeriveGivenNetworkMem(500);
configuration.set(TaskManagerOptions.NETWORK_MEMORY_MIN, MemorySize.parse("900m"));
configuration.set(TaskManagerOptions.NETWORK_MEMORY_MAX, MemorySize.parse("1000m"));
// internal validation should fail
assertThatExceptionOfType(IllegalConfigurationException.class)
.isThrownBy(() -> TaskExecutorProcessUtils.processSpecFromConfig(configuration));
} |
public DenseMatrix selectColumns(int[] columnIndices) {
if (columnIndices == null || columnIndices.length == 0) {
throw new IllegalArgumentException("Invalid column indices.");
}
DenseMatrix returnVal = new DenseMatrix(dim1,columnIndices.length);
for (int i = 0; i < dim1; i++) {
for (int j = 0; j < columnIndices.length; j++) {
int curIdx = columnIndices[j];
if (curIdx < 0 || curIdx >= dim2) {
throw new IllegalArgumentException("Invalid column index, expected [0, " + dim2 +"), found " + curIdx);
}
returnVal.values[i][j] = get(i,curIdx);
}
}
return returnVal;
} | @Test
public void selectColumnsTest() {
DenseMatrix a = generateSquareRandom(8, new Random(42));
DenseMatrix columns = a.selectColumns(new int[] {0,5,7});
assertEquals(8, columns.getShape()[0]);
assertEquals(3, columns.getShape()[1]);
assertEquals(a.getColumn(0), columns.getColumn(0));
assertEquals(a.getColumn(5), columns.getColumn(1));
assertEquals(a.getColumn(7), columns.getColumn(2));
} |
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
if (left == null && right == null) {
return true;
}
if (left == null || right == null) {
return false;
}
if (!left.getSchema().getName().equals(right.getSchema().getName())) {
return false;
}
extractCommonObjectSchema(left, right);
return compare(left, right);
} | @Test
public void differentList() {
SimpleHollowDataset dataset = SimpleHollowDataset.fromClassDefinitions(Movie.class);
FakeHollowSchemaIdentifierMapper idMapper = new FakeHollowSchemaIdentifierMapper(dataset);
HollowObjectMapper objMapper = new HollowObjectMapper(HollowWriteStateCreator.createWithSchemas(dataset.getSchemas()));
FlatRecordWriter flatRecordWriter = new FlatRecordWriter(dataset, idMapper);
Movie movie1 = new Movie();
movie1.awardsReceived = new ArrayList<>();
movie1.awardsReceived.add(new Award("Oscar", 2020));
movie1.awardsReceived.add(new Award("Golden Globe", 2025));
Movie movie2 = new Movie();
movie2.awardsReceived = new ArrayList<>();
movie2.awardsReceived.add(new Award("Oscar", 2020));
movie2.awardsReceived.add(new Award("Golden Globe", 2026));
flatRecordWriter.reset();
objMapper.writeFlat(movie1, flatRecordWriter);
FlatRecord flatRecord1 = flatRecordWriter.generateFlatRecord();
flatRecordWriter.reset();
objMapper.writeFlat(movie2, flatRecordWriter);
FlatRecord flatRecord2 = flatRecordWriter.generateFlatRecord();
Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(flatRecord1), new FlatRecordTraversalObjectNode(flatRecord2))).isFalse();
} |
@Override
public MongoPaginationHelper<T> sort(Bson sort) {
return new DefaultMongoPaginationHelper<>(collection, filter, sort, perPage, includeGrandTotal,
grandTotalFilter, collation);
} | @Test
void testSort() {
assertThat(paginationHelper.sort(ascending("_id")).page(1))
.isEqualTo(paginationHelper.sort(ascending("_id")).page(1, alwaysTrue()))
.isEqualTo(paginationHelper.sort(SortOrder.ASCENDING.toBsonSort("_id")).page(1))
.isEqualTo(paginationHelper.sort(SortOrder.ASCENDING.toBsonSort("_id")).page(1, alwaysTrue()))
.containsExactlyElementsOf(DTOs);
assertThat(paginationHelper.sort(ascending("name")).page(1))
.isEqualTo(paginationHelper.sort(ascending("name")).page(1, alwaysTrue()))
.isEqualTo(paginationHelper.sort(SortOrder.ASCENDING.toBsonSort("name")).page(1))
.isEqualTo(paginationHelper.sort(SortOrder.ASCENDING.toBsonSort("name")).page(1, alwaysTrue()))
.containsExactlyElementsOf(DTOs);
assertThat(paginationHelper.sort(descending("_id")).page(1))
.isEqualTo(paginationHelper.sort(descending("_id")).page(1, alwaysTrue()))
.isEqualTo(paginationHelper.sort(SortOrder.DESCENDING.toBsonSort("_id")).page(1))
.isEqualTo(paginationHelper.sort(SortOrder.DESCENDING.toBsonSort("_id")).page(1, alwaysTrue()))
.containsExactlyElementsOf(Lists.reverse(DTOs));
assertThat(paginationHelper.sort(descending("name")).page(1))
.isEqualTo(paginationHelper.sort(descending("name")).page(1, alwaysTrue()))
.isEqualTo(paginationHelper.sort(SortOrder.DESCENDING.toBsonSort("name")).page(1))
.isEqualTo(paginationHelper.sort(SortOrder.DESCENDING.toBsonSort("name")).page(1, alwaysTrue()))
.containsExactlyElementsOf(Lists.reverse(DTOs));
} |
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
} | @Test
public void shouldThrowOnAlterMissingSource() {
// Given:
alterSource = new AlterSourceCommand(STREAM_NAME, DataSourceType.KSTREAM.getKsqlType(), NEW_COLUMNS);
// When:
final KsqlException e = assertThrows(KsqlException.class,
() -> cmdExec.execute(SQL_TEXT, alterSource, false, NO_QUERY_SOURCES));
// Then:
assertThat(e.getMessage(), is("Source s1 does not exist."));
} |
public QueryConfiguration applyOverrides(QueryConfigurationOverrides overrides)
{
Map<String, String> sessionProperties;
if (overrides.getSessionPropertiesOverrideStrategy() == OVERRIDE) {
sessionProperties = new HashMap<>(overrides.getSessionPropertiesOverride());
}
else {
sessionProperties = new HashMap<>(this.sessionProperties);
if (overrides.getSessionPropertiesOverrideStrategy() == SUBSTITUTE) {
sessionProperties.putAll(overrides.getSessionPropertiesOverride());
}
}
overrides.getSessionPropertiesToRemove().forEach(sessionProperties::remove);
return new QueryConfiguration(
overrides.getCatalogOverride().orElse(catalog),
overrides.getSchemaOverride().orElse(schema),
Optional.ofNullable(overrides.getUsernameOverride().orElse(username.orElse(null))),
Optional.ofNullable(overrides.getPasswordOverride().orElse(password.orElse(null))),
Optional.of(sessionProperties),
isReusableTable,
Optional.of(partitions));
} | @Test
public void testSessionPropertyRemovalWithOverrides()
{
overrides.setSessionPropertiesToRemove("property_1, property_2");
overrides.setSessionPropertiesOverrideStrategy(OVERRIDE);
QueryConfiguration removed = new QueryConfiguration(
CATALOG_OVERRIDE,
SCHEMA_OVERRIDE,
Optional.of(USERNAME_OVERRIDE),
Optional.of(PASSWORD_OVERRIDE),
Optional.of(ImmutableMap.of("property_3", "value_3")),
Optional.of(CLIENT_TAGS),
Optional.empty());
assertEquals(CONFIGURATION_1.applyOverrides(overrides), removed);
} |
@Override
public void readTags(BiConsumer<String, String> tagReader) {
for (int i = 0; i < tagPtr; i += 2) {
String tag = tags[i];
String tagValue = tags[i + 1];
tagReader.accept(tag, tagValue);
}
} | @Test
public void testReadTags() {
MetricDescriptorImpl descriptor = new MetricDescriptorImpl(mock(Supplier.class))
.withTag("tag0", "tag0Value")
.withTag("tag1", "tag1Value")
.withTag("tag2", "tag2Value")
.withTag("tag3", "tag3Value")
.withTag("tag4", "tag4Value");
BiConsumer<String, String> tagConsumerMock = mock(BiConsumer.class);
descriptor.readTags(tagConsumerMock);
InOrder inOrder = inOrder(tagConsumerMock);
inOrder.verify(tagConsumerMock).accept("tag0", "tag0Value");
inOrder.verify(tagConsumerMock).accept("tag1", "tag1Value");
inOrder.verify(tagConsumerMock).accept("tag2", "tag2Value");
inOrder.verify(tagConsumerMock).accept("tag3", "tag3Value");
inOrder.verify(tagConsumerMock).accept("tag4", "tag4Value");
inOrder.verifyNoMoreInteractions();
} |
@Override
public OUT nextRecord(OUT record) throws IOException {
OUT returnRecord = null;
do {
returnRecord = super.nextRecord(record);
} while (returnRecord == null && !reachedEnd());
return returnRecord;
} | @Test
void ignoreSingleCharPrefixComments() {
try {
final String fileContent =
"#description of the data\n"
+ "#successive commented line\n"
+ "this is|1|2.0|\n"
+ "a test|3|4.0|\n"
+ "#next|5|6.0|\n";
final FileInputSplit split = createTempFile(fileContent);
final TupleTypeInfo<Tuple3<String, Integer, Double>> typeInfo =
TupleTypeInfo.getBasicTupleTypeInfo(String.class, Integer.class, Double.class);
final CsvInputFormat<Tuple3<String, Integer, Double>> format =
new TupleCsvInputFormat<>(PATH, "\n", "|", typeInfo);
format.setCommentPrefix("#");
final Configuration parameters = new Configuration();
format.configure(parameters);
format.open(split);
Tuple3<String, Integer, Double> result = new Tuple3<>();
result = format.nextRecord(result);
assertThat(result.f0).isEqualTo("this is");
assertThat(result.f1).isOne();
assertThat(result.f2).isEqualTo(new Double(2.0));
result = format.nextRecord(result);
assertThat(result.f0).isEqualTo("a test");
assertThat(result.f1).isEqualTo(Integer.valueOf(3));
assertThat(result.f2).isEqualTo(new Double(4.0));
result = format.nextRecord(result);
assertThat(result).isNull();
} catch (Exception ex) {
ex.printStackTrace();
fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage());
}
} |
@Override
public void batchWriteAppend(long journalId, DataOutputBuffer buffer) throws InterruptedException, JournalException {
if (currentTransaction == null) {
throw new JournalException("failed to append because no running txn!");
}
// id is the key
DatabaseEntry theKey = new DatabaseEntry();
TupleBinding<Long> idBinding = TupleBinding.getPrimitiveBinding(Long.class);
idBinding.objectToEntry(journalId, theKey);
// entity is the value
DatabaseEntry theData = new DatabaseEntry(buffer.getData(), 0, buffer.getLength());
JournalException exception = null;
for (int i = 0; i < RETRY_TIME; i++) {
try {
// sleep before retry
if (i != 0) {
Thread.sleep(SLEEP_INTERVAL_SEC * 1000L);
}
OperationStatus status = currentJournalDB.put(currentTransaction, theKey, theData);
if (status != OperationStatus.SUCCESS) {
throw new JournalException(String.format(
"failed to append journal after retried %d times! status[%s] db[%s] key[%s] data[%s]",
i + 1, status, currentJournalDB, theKey, theData));
}
// success
uncommittedEntries.add(Pair.create(theKey, theData));
return;
} catch (DatabaseException e) {
String errMsg = String.format(
"failed to append journal after retried %d times! key[%s] value[%s] txn[%s] db[%s]",
i + 1, theKey, theData, currentTransaction, currentJournalDB);
LOG.error(errMsg, e);
exception = new JournalException(errMsg);
exception.initCause(e);
} catch (JournalException e) {
LOG.error("failed to write journal", e);
exception = e;
}
}
// failed after retried
throw exception;
} | @Test(expected = JournalException.class)
public void testAppendNoBegin(
@Mocked CloseSafeDatabase database,
@Mocked BDBEnvironment environment) throws Exception {
BDBJEJournal journal = new BDBJEJournal(environment, database);
String data = "petals on a wet black bough";
DataOutputBuffer buffer = new DataOutputBuffer();
Text.writeString(buffer, data);
journal.batchWriteAppend(1, buffer);
Assert.fail();
} |
static String getRelativeFileInternal(File canonicalBaseFile, File canonicalFileToRelativize) {
List<String> basePath = getPathComponents(canonicalBaseFile);
List<String> pathToRelativize = getPathComponents(canonicalFileToRelativize);
//if the roots aren't the same (i.e. different drives on a windows machine), we can't construct a relative
//path from one to the other, so just return the canonical file
if (!basePath.get(0).equals(pathToRelativize.get(0))) {
return canonicalFileToRelativize.getPath();
}
int commonDirs;
StringBuilder sb = new StringBuilder();
for (commonDirs=1; commonDirs<basePath.size() && commonDirs<pathToRelativize.size(); commonDirs++) {
if (!basePath.get(commonDirs).equals(pathToRelativize.get(commonDirs))) {
break;
}
}
boolean first = true;
for (int i=commonDirs; i<basePath.size(); i++) {
if (!first) {
sb.append(File.separatorChar);
} else {
first = false;
}
sb.append("..");
}
first = true;
for (int i=commonDirs; i<pathToRelativize.size(); i++) {
if (first) {
if (sb.length() != 0) {
sb.append(File.separatorChar);
}
first = false;
} else {
sb.append(File.separatorChar);
}
sb.append(pathToRelativize.get(i));
}
if (sb.length() == 0) {
return ".";
}
return sb.toString();
} | @Test
public void pathUtilTest10() {
File[] roots = File.listRoots();
File basePath = new File(roots[0] + "some" + File.separatorChar);
File relativePath = new File(roots[0] + "some" + File.separatorChar + "dir");
String path = PathUtil.getRelativeFileInternal(basePath, relativePath);
Assert.assertEquals(path, "dir");
} |
public Bson createDbQuery(final List<String> filters, final String query) {
try {
final var searchQuery = searchQueryParser.parse(query);
final var filterExpressionFilters = dbFilterParser.parse(filters, attributes);
return buildDbQuery(searchQuery, filterExpressionFilters);
} catch (IllegalArgumentException e) {
throw new BadRequestException("Invalid argument in search query: " + e.getMessage());
}
} | @Test
void combinesSearchQueryAndFilterExpressionsToSingleQuery() {
final SearchQuery searchQuery = mock(SearchQuery.class);
doReturn(List.of(Filters.eq("title", "carramba"))).when(searchQuery).toBsonFilterList();
doReturn(searchQuery).when(searchQueryParser).parse(eq("title:carramba"));
doReturn(List.of(Filters.eq("nvmd", "lalala"), Filters.eq("hohoho", "42")))
.when(dbFilterParser)
.parse(List.of("nvmd:lalala", "hohoho:42"), attributes);
final Bson dbQuery = toTest.createDbQuery(List.of("nvmd:lalala", "hohoho:42"), "title:carramba");
assertEquals(Filters.and(
Filters.eq("title", "carramba"),
Filters.eq("nvmd", "lalala"),
Filters.eq("hohoho", "42")
), dbQuery);
} |
public static <T> Point<T> interpolate(Point<T> p1, Point<T> p2, Instant targetTime) {
checkNotNull(p1, "Cannot perform interpolation when the first input points is null");
checkNotNull(p2, "Cannot perform interpolation when the second input points is null");
checkNotNull(targetTime, "Cannot perform interpolation when the targetTime is null");
checkArgument(
p1.time().isBefore(p2.time()) || p1.time().equals(p2.time()),
"The input points must be in chronological order"
);
TimeWindow window = TimeWindow.of(p1.time(), p2.time());
checkArgument(
window.contains(targetTime),
"The targetTime is outside the required time window"
);
if (p1.time().equals(targetTime)) {
return (new PointBuilder<T>(p1)).build();
} else if (p2.time().equals(targetTime)) {
return (new PointBuilder<T>(p2)).build();
} else {
double fraction = window.toFractionOfRange(targetTime);
//build an interpolated point
LatLong interpolatedLatLong = interpolateLatLong(p1.latLong(), p2.latLong(), fraction);
Double interpolatedCourseInDegrees = interpolateCourse(
isNull(p1.course()) ? null : p1.course().inDegrees(),
isNull(p2.course()) ? null : p2.course().inDegrees(),
fraction
);
//correct the interpolated course when one of the input values was null
if (interpolatedCourseInDegrees == null) {
interpolatedCourseInDegrees = Spherical.courseInDegrees(p1.latLong(), p2.latLong());
}
double interpolatedSpeed = interpolateSpeed(p1, p2, fraction);
Distance interpolatedAltitude = interpolate(
p1.altitude(),
p2.altitude(),
fraction
);
//return a copy of the 1st input point but with corrected trajectory data
return (new PointBuilder<T>(p1))
.latLong(interpolatedLatLong)
.course(Course.ofDegrees(interpolatedCourseInDegrees))
.speed(Speed.ofKnots(interpolatedSpeed))
.altitude(interpolatedAltitude)
.time(targetTime)
.build();
}
} | @Test
public void testInterpolatePoint() {
Point<String> p1 = (new PointBuilder<String>())
.time(Instant.EPOCH)
.altitude(Distance.ofFeet(1000.0))
.courseInDegrees(120.0)
.latLong(new LatLong(0.0, 10.0))
.speedInKnots(200.0)
.build();
Point<String> p2 = (new PointBuilder<String>())
.time(Instant.EPOCH.plusSeconds(8))
.altitude(Distance.ofFeet(500.0))
.courseInDegrees(130.0)
.latLong(new LatLong(5.0, 15.0))
.speedInKnots(300.0)
.build();
Point<String> testPoint = interpolate(p1, p2, Instant.EPOCH.plusSeconds(4));
double TOLERANCE = 0.0001;
assertEquals(
Instant.EPOCH.plusSeconds(4),
testPoint.time()
);
assertEquals(
750.0,
testPoint.altitude().inFeet(),
TOLERANCE
);
assertEquals(
125.0,
testPoint.course().inDegrees(),
TOLERANCE
);
assertEquals(LatLong.of(2.5, 12.5), testPoint.latLong());
assertEquals(
250.0,
testPoint.speed().inKnots(),
TOLERANCE
);
} |