focal_method | test_case |
---|---|
public static String utf8Str(Object obj) {
return str(obj, CharsetUtil.CHARSET_UTF_8);
} | @Test
public void wrapAllTest() {
String[] strings = StrUtil.wrapAll("`", "`", StrUtil.splitToArray("1,2,3,4", ','));
assertEquals("[`1`, `2`, `3`, `4`]", StrUtil.utf8Str(strings));
strings = StrUtil.wrapAllWithPair("`", StrUtil.splitToArray("1,2,3,4", ','));
assertEquals("[`1`, `2`, `3`, `4`]", StrUtil.utf8Str(strings));
} |
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
Http2HeadersSink sink = new Http2HeadersSink(
streamId, headers, maxHeaderListSize, validateHeaders);
// Check for dynamic table size updates, which must occur at the beginning:
// https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
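// Illustrative example (not from this method): a size update uses a 5-bit-prefix
// integer (pattern 001xxxxx), so the bytes 0x3F 0xE1 0x1F decode to a new maximum
// size of 31 + 97 + 31*128 = 4096.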
decodeDynamicTableSizeUpdates(in);
decode(in, sink);
// Now that we've read all of our headers we can perform the validation steps. We must
// delay throwing until this point to prevent dynamic table corruption.
sink.finish();
} | @Test
public void testLiteralNeverIndexedWithLargeValue() throws Http2Exception {
// Ignore header that exceeds max header size
final StringBuilder sb = new StringBuilder();
sb.append("1004");
sb.append(hex("name"));
sb.append("7F813F");
for (int i = 0; i < 8192; i++) {
sb.append("61"); // 'a'
}
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() throws Throwable {
decode(sb.toString());
}
});
} |
public static void main(String[] args) {
var mammoth = new Mammoth();
mammoth.observe();
mammoth.timePasses();
mammoth.observe();
mammoth.timePasses();
mammoth.observe();
} | @Test
void shouldExecuteWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public static List<Converter> getConverters() {
return Arrays.asList(CONVERTERS);
} | @Test
public void testSerialization() {
InternalSerializationService ss = new DefaultSerializationServiceBuilder().build();
for (Converter converter : Converters.getConverters()) {
assertSame(converter, ss.toObject(ss.toData(converter)));
}
ss.dispose();
} |
public List<PrometheusQueryResult> queryMetric(String queryString,
long startTimeMs,
long endTimeMs) throws IOException {
URI queryUri = URI.create(_prometheusEndpoint.toURI() + QUERY_RANGE_API_PATH);
HttpPost httpPost = new HttpPost(queryUri);
List<NameValuePair> data = new ArrayList<>();
data.add(new BasicNameValuePair(QUERY, queryString));
/* "start" and "end" are expected to be unix timestamp in seconds (number of seconds since the Unix epoch).
They accept values with a decimal point (up to 64 bits). The samples returned are inclusive of the "end"
timestamp provided.
*/
data.add(new BasicNameValuePair(START, String.valueOf((double) startTimeMs / SEC_TO_MS)));
data.add(new BasicNameValuePair(END, String.valueOf((double) endTimeMs / SEC_TO_MS)));
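// e.g. (illustrative) startTimeMs = 1_693_000_000_123L becomes the string
// "1.693000000123E9", i.e. seconds with the millisecond fraction preserved.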
// step is expected to be in seconds, and accept values with a decimal point (up to 64 bits).
data.add(new BasicNameValuePair(STEP, String.valueOf((double) _samplingIntervalMs / SEC_TO_MS)));
httpPost.setEntity(new UrlEncodedFormEntity(data));
try (CloseableHttpResponse response = _httpClient.execute(httpPost)) {
int responseCode = response.getStatusLine().getStatusCode();
HttpEntity entity = response.getEntity();
InputStream content = entity.getContent();
String responseString = IOUtils.toString(content, StandardCharsets.UTF_8);
if (responseCode != HttpServletResponse.SC_OK) {
throw new IOException(String.format("Received non-success response code on Prometheus API HTTP call,"
+ " response code = %d, response body = %s",
responseCode, responseString));
}
PrometheusResponse prometheusResponse = GSON.fromJson(responseString, PrometheusResponse.class);
if (prometheusResponse == null) {
throw new IOException(String.format(
"No response received from Prometheus API query, response body = %s", responseString));
}
if (!SUCCESS.equals(prometheusResponse.status())) {
throw new IOException(String.format(
"Prometheus API query was not successful, response body = %s", responseString));
}
if (prometheusResponse.data() == null
|| prometheusResponse.data().result() == null) {
throw new IOException(String.format(
"Response from Prometheus HTTP API is malformed, response body = %s", responseString));
}
EntityUtils.consume(entity);
return prometheusResponse.data().result();
}
} | @Test
public void testSuccessfulResponseDeserialized() throws Exception {
this.serverBootstrap.registerHandler(PrometheusAdapter.QUERY_RANGE_API_PATH, new HttpRequestHandler() {
@Override
public void handle(HttpRequest request, HttpResponse response, HttpContext context) {
response.setStatusCode(HttpServletResponse.SC_OK);
response.setEntity(buildSuccessResponseEntity());
}
});
HttpHost httpHost = this.start();
PrometheusAdapter prometheusAdapter
= new PrometheusAdapter(this.httpclient, httpHost, SAMPLING_INTERVAL_MS);
final List<PrometheusQueryResult> prometheusQueryResults = prometheusAdapter.queryMetric(
"kafka_server_BrokerTopicMetrics_OneMinuteRate{name=\"BytesOutPerSec\",topic=\"\"}",
START_TIME_MS, END_TIME_MS);
assertEquals(expectedResults().toString(), prometheusQueryResults.toString());
assertEquals(expectedResults(), prometheusQueryResults);
} |
@Override
public Region createRegion(RegionId regionId, String name, Region.Type type,
List<Set<NodeId>> masterNodeIds) {
checkNotNull(regionId, REGION_ID_NULL);
checkNotNull(name, NAME_NULL);
checkNotNull(type, REGION_TYPE_NULL);
return store.createRegion(regionId, name, type, genAnnots(regionId),
masterNodeIds == null ? of() : masterNodeIds);
} | @Test(expected = IllegalArgumentException.class)
public void duplicateCreate() {
service.createRegion(RID1, "R1", METRO, MASTERS);
service.createRegion(RID1, "R2", CAMPUS, MASTERS);
} |
public static void delete(Collection<ResourceId> resourceIds, MoveOptions... moveOptions)
throws IOException {
if (resourceIds.isEmpty()) {
// Short-circuit.
return;
}
Collection<ResourceId> resourceIdsToDelete;
if (Sets.newHashSet(moveOptions)
.contains(MoveOptions.StandardMoveOptions.IGNORE_MISSING_FILES)) {
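// Match the resources up front and keep only those that exist, so the
// filesystem delete below never sees a missing file.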
resourceIdsToDelete =
FluentIterable.from(matchResources(Lists.newArrayList(resourceIds)))
.filter(matchResult -> !matchResult.status().equals(Status.NOT_FOUND))
.transformAndConcat(
new Function<MatchResult, Iterable<Metadata>>() {
@SuppressFBWarnings(
value = "NP_METHOD_PARAMETER_TIGHTENS_ANNOTATION",
justification = "https://github.com/google/guava/issues/920")
@Nonnull
@Override
public Iterable<Metadata> apply(@Nonnull MatchResult input) {
try {
return Lists.newArrayList(input.metadata());
} catch (IOException e) {
throw new RuntimeException(
String.format("Failed to get metadata from MatchResult: %s.", input),
e);
}
}
})
.transform(
new Function<Metadata, ResourceId>() {
@SuppressFBWarnings(
value = "NP_METHOD_PARAMETER_TIGHTENS_ANNOTATION",
justification = "https://github.com/google/guava/issues/920")
@Nonnull
@Override
public ResourceId apply(@Nonnull Metadata input) {
return input.resourceId();
}
})
.toList();
} else {
resourceIdsToDelete = resourceIds;
}
if (resourceIdsToDelete.isEmpty()) {
return;
}
getFileSystemInternal(resourceIdsToDelete.iterator().next().getScheme())
.delete(resourceIdsToDelete);
} | @Test
public void testDeleteIgnoreMissingFiles() throws Exception {
Path existingPath = temporaryFolder.newFile().toPath();
Path nonExistentPath = existingPath.resolveSibling("non-existent");
createFileWithContent(existingPath, "content1");
FileSystems.delete(
toResourceIds(ImmutableList.of(existingPath, nonExistentPath), false /* isDirectory */));
} |
public static SqlArgument of(final SqlType type) {
return new SqlArgument(type, null, null);
} | @SuppressWarnings("UnstableApiUsage")
@Test
public void shouldImplementHashCodeAndEqualsProperly() {
new EqualsTester()
.addEqualityGroup(SqlArgument.of(SqlArray.of(SqlTypes.STRING)), SqlArgument.of(SqlArray.of(SqlTypes.STRING)))
.addEqualityGroup(
SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.STRING), SqlTypes.INTEGER)),
SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.STRING), SqlTypes.INTEGER)))
.addEqualityGroup(
SqlArgument.of(SqlLambda.of(2)),
SqlArgument.of(SqlLambda.of(2)))
.addEqualityGroup(
SqlArgument.of(SqlLambda.of(4)))
.addEqualityGroup(SqlArgument.of(null, null), SqlArgument.of(null, null))
.addEqualityGroup(SqlArgument.of(SqlIntervalUnit.INSTANCE), SqlArgument.of(SqlIntervalUnit.INSTANCE))
.testEquals();
} |
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
final BrickApiClient client = new BrickApiClient(session);
if(status.isExists()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", target, file));
}
new BrickDeleteFeature(session).delete(Collections.singletonList(target), callback, new Delete.DisabledCallback());
}
final FileActionEntity entity = new FileActionsApi(client)
.copy(new CopyPathBody().destination(StringUtils.removeStart(target.getAbsolute(), String.valueOf(Path.DELIMITER))),
StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)));
listener.sent(status.getLength());
if(entity.getFileMigrationId() != null) {
this.poll(client, entity);
}
return target.withAttributes(file.attributes());
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Cannot copy {0}", e, file);
}
} | @Test
public void testCopyDirectory() throws Exception {
final Path directory = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
final String name = new AlphanumericRandomStringService().random();
final Path file = new Path(directory, name, EnumSet.of(Path.Type.file));
new BrickDirectoryFeature(session).mkdir(directory, new TransferStatus());
final Local local = new Local(System.getProperty("java.io.tmpdir"), file.getName());
final byte[] random = RandomUtils.nextBytes(2547);
IOUtils.write(random, local.getOutputStream(false));
final TransferStatus status = new TransferStatus().withLength(random.length);
new BrickUploadFeature(session, new BrickWriteFeature(session)).upload(file, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
new DisabledStreamListener(), status, new DisabledLoginCallback());
local.delete();
assertTrue(new BrickFindFeature(session).find(file));
final Path copy = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new BrickCopyFeature(session).copy(directory, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new BrickFindFeature(session).find(file));
assertTrue(new BrickFindFeature(session).find(copy));
assertTrue(new BrickFindFeature(session).find(new Path(copy, name, EnumSet.of(Path.Type.file))));
new BrickDeleteFeature(session).delete(Arrays.asList(copy, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static void deleteDirectory(File directory) throws IOException {
requireNonNull(directory, DIRECTORY_CAN_NOT_BE_NULL);
if (!directory.exists()) {
return;
}
Path path = directory.toPath();
if (Files.isSymbolicLink(path)) {
throw new IOException(format("Directory '%s' is a symbolic link", directory));
}
if (directory.isFile()) {
throw new IOException(format("Directory '%s' is a file", directory));
}
deleteDirectoryImpl(path);
} | @Test
public void deleteDirectory_throws_NPE_if_argument_is_null() throws IOException {
expectDirectoryCanNotBeNullNPE(() -> FileUtils2.deleteDirectory(null));
} |
public int getSinkDefaultDOP() {
// load includes query engine execution and storage engine execution
// so we can't let query engine use up resources
// At the same time, the improvement of performance and concurrency is not linear
// but the memory usage increases linearly, so we control the slope of concurrent growth
int avgCoreNum = getAvgNumHardwareCoresOfBe();
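// Worked examples (mirroring the test below): 23 cores -> 23/3 = 7;
// 32 cores -> 32/4 = 8; 256 cores -> capped at 32.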
if (avgCoreNum <= 24) {
return Math.max(1, avgCoreNum / 3);
} else {
return Math.min(32, avgCoreNum / 4);
}
} | @Test
public void getSinkDefaultDOP() {
BackendResourceStat stat = BackendResourceStat.getInstance();
stat.setNumHardwareCoresOfBe(0L, 23);
stat.setNumHardwareCoresOfBe(1L, 23);
assertThat(stat.getAvgNumHardwareCoresOfBe()).isEqualTo(23);
assertThat(stat.getSinkDefaultDOP()).isEqualTo(23 / 3);
stat.reset();
stat.setNumHardwareCoresOfBe(0L, 32);
stat.setNumHardwareCoresOfBe(1L, 32);
assertThat(stat.getAvgNumHardwareCoresOfBe()).isEqualTo(32);
assertThat(stat.getSinkDefaultDOP()).isEqualTo(32 / 4);
stat.reset();
stat.setNumHardwareCoresOfBe(0L, 32 * 2 * 4);
stat.setNumHardwareCoresOfBe(1L, 32 * 2 * 4);
assertThat(stat.getAvgNumHardwareCoresOfBe()).isEqualTo(32 * 2 * 4);
assertThat(stat.getSinkDefaultDOP()).isEqualTo(32);
} |
@Override
@CheckForNull
public ScannerReport.Changesets readChangesets(int componentRef) {
ensureInitialized();
return delegate.readChangesets(componentRef);
} | @Test
public void readChangesets_returns_null_if_no_changeset() {
assertThat(underTest.readChangesets(COMPONENT_REF)).isNull();
} |
@Override
public void run() {
if (processor != null) {
processor.execute();
} else {
if (!beforeHook()) {
logger.info("before-feature hook returned [false], aborting: {}", this);
} else {
scenarios.forEachRemaining(this::processScenario);
}
afterFeature();
}
} | @Test
void testCallArgNull() {
run("call-arg-null.feature");
} |
public StreamsMetadata getLocalMetadata() {
return localMetadata.get();
} | @Test
public void shouldGetLocalMetadataWithRightActiveStandbyInfo() {
assertEquals(hostOne, metadataState.getLocalMetadata().hostInfo());
assertEquals(hostToActivePartitions.get(hostOne), metadataState.getLocalMetadata().topicPartitions());
assertEquals(hostToStandbyPartitions.get(hostOne), metadataState.getLocalMetadata().standbyTopicPartitions());
assertEquals(storeNames, metadataState.getLocalMetadata().stateStoreNames());
assertEquals(storeNames.stream().filter(s -> !s.equals(globalTable)).collect(Collectors.toSet()),
metadataState.getLocalMetadata().standbyStateStoreNames());
} |
@ConstantFunction(name = "replace", argTypes = {VARCHAR, VARCHAR, VARCHAR}, returnType = VARCHAR)
public static ConstantOperator replace(ConstantOperator value, ConstantOperator target,
ConstantOperator replacement) {
return ConstantOperator.createVarchar(value.getVarchar().replace(target.getVarchar(), replacement.getVarchar()));
} | @Test
public void testReplace() {
assertEquals("20240806", ScalarOperatorFunctions.replace(
new ConstantOperator("2024-08-06", Type.VARCHAR),
new ConstantOperator("-", Type.VARCHAR),
new ConstantOperator("", Type.VARCHAR)
).getVarchar());
} |
@Override
public synchronized void
registerProviderService(NetworkId networkId,
VirtualProviderService virtualProviderService) {
Set<VirtualProviderService> services =
servicesByNetwork.computeIfAbsent(networkId, k -> new HashSet<>());
services.add(virtualProviderService);
} | @Test
public void registerProviderServiceTest() {
TestProvider1 provider1 = new TestProvider1();
virtualProviderManager.registerProvider(provider1);
TestProviderService1 providerService1 = new TestProviderService1();
virtualProviderManager.registerProviderService(NETWORK_ID1, providerService1);
assertEquals(providerService1,
virtualProviderManager.getProviderService(NETWORK_ID1, TestProvider1.class));
} |
public static IntrinsicMapTaskExecutor withSharedCounterSet(
List<Operation> operations,
CounterSet counters,
ExecutionStateTracker executionStateTracker) {
return new IntrinsicMapTaskExecutor(operations, counters, executionStateTracker);
} | @Test
public void testGetProgressAndRequestSplit() throws Exception {
TestOutputReceiver receiver =
new TestOutputReceiver(counterSet, NameContextsForTests.nameContextForTest());
TestReadOperation operation = new TestReadOperation(receiver, createContext("ReadOperation"));
ExecutionStateTracker stateTracker = ExecutionStateTracker.newForTest();
try (IntrinsicMapTaskExecutor executor =
IntrinsicMapTaskExecutor.withSharedCounterSet(
Arrays.asList(new Operation[] {operation}), counterSet, stateTracker)) {
operation.setProgress(approximateProgressAtIndex(1L));
Assert.assertEquals(positionAtIndex(1L), positionFromProgress(executor.getWorkerProgress()));
Assert.assertEquals(
positionAtIndex(1L),
positionFromSplitResult(executor.requestDynamicSplit(splitRequestAtIndex(1L))));
}
} |
public static String substVars(String val, PropertyContainer pc1) {
return substVars(val, pc1, null);
} | @Test(timeout = 1000)
public void substVarsShouldNotGoIntoInfiniteLoop() {
context.putProperty("v1", "if");
context.putProperty("v2", "${v3}");
context.putProperty("v3", "${v4}");
context.putProperty("v4", "${v2}c");
expectedException.expect(Exception.class);
OptionHelper.substVars(text, context);
} |
public Plan validateReservationSubmissionRequest(
ReservationSystem reservationSystem, ReservationSubmissionRequest request,
ReservationId reservationId) throws YarnException {
String message;
if (reservationId == null) {
message = "Reservation id cannot be null. Please try again specifying "
+ " a valid reservation id by creating a new reservation id.";
throw RPCUtil.getRemoteException(message);
}
// Check if it is a managed queue
String queue = request.getQueue();
Plan plan = getPlanFromQueue(reservationSystem, queue,
AuditConstants.SUBMIT_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.SUBMIT_RESERVATION_REQUEST);
return plan;
} | @Test
public void testSubmitReservationExceedsGangSize() {
ReservationSubmissionRequest request =
createSimpleReservationSubmissionRequest(1, 1, 1, 5, 4);
Resource resource = Resource.newInstance(512, 1);
when(plan.getTotalCapacity()).thenReturn(resource);
Plan plan = null;
try {
plan =
rrValidator.validateReservationSubmissionRequest(rSystem, request,
ReservationSystemTestUtil.getNewReservationId());
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message.startsWith(
"The size of the largest gang in the reservation definition"));
Assert.assertTrue(message.contains(
"exceed the capacity available "));
LOG.info(message);
}
} |
public void unzip(String from, boolean remove) throws IOException {
String to = Helper.pruneFileEnd(from);
unzip(from, to, remove);
} | @Test
public void testUnzip() throws Exception {
String to = "./target/tmp/test";
Helper.removeDir(new File(to));
new Unzipper().unzip("./src/test/resources/com/graphhopper/util/test.zip", to, false);
assertTrue(new File("./target/tmp/test/file2 bäh").exists());
assertTrue(new File("./target/tmp/test/folder1").isDirectory());
assertTrue(new File("./target/tmp/test/folder1/folder 3").isDirectory());
Helper.removeDir(new File(to));
} |
public static TemplateEngine createEngine() {
return TemplateFactory.create();
} | @Test
public void thymeleafEngineTest() {
// String template
TemplateEngine engine = TemplateUtil.createEngine(
new TemplateConfig("templates").setCustomEngine(ThymeleafEngine.class));
Template template = engine.getTemplate("<h3 th:text=\"${message}\"></h3>");
String result = template.render(Dict.create().set("message", "Hutool"));
assertEquals("<h3>Hutool</h3>", result);
// ClassPath template
engine = TemplateUtil.createEngine(
new TemplateConfig("templates", ResourceMode.CLASSPATH).setCustomEngine(ThymeleafEngine.class));
template = engine.getTemplate("thymeleaf_test.ttl");
result = template.render(Dict.create().set("message", "Hutool"));
assertEquals("<h3>Hutool</h3>", result);
} |
@Override
public void readOne(TProtocol in, TProtocol out) throws TException {
readOneStruct(in, out);
} | @Test
public void testUnionWithStructWithUnknownField() throws Exception {
CountingErrorHandler countingHandler = new CountingErrorHandler();
BufferedProtocolReadToWrite p =
new BufferedProtocolReadToWrite(ThriftSchemaConverter.toStructType(UnionV3.class), countingHandler);
ByteArrayOutputStream in = new ByteArrayOutputStream();
final ByteArrayOutputStream out = new ByteArrayOutputStream();
UnionV3 validUnion = UnionV3.aStruct(new StructV1("a valid struct"));
StructV2 structV2 = new StructV2("a valid struct");
structV2.setAge("a valid age");
UnionThatLooksLikeUnionV3 unionWithUnknownStructField = UnionThatLooksLikeUnionV3.aStruct(structV2);
validUnion.write(protocol(in));
unionWithUnknownStructField.write(protocol(in));
ByteArrayInputStream baos = new ByteArrayInputStream(in.toByteArray());
// both should not throw
p.readOne(protocol(baos), protocol(out));
p.readOne(protocol(baos), protocol(out));
assertEquals(1, countingHandler.recordCountOfMissingFields);
assertEquals(1, countingHandler.fieldIgnoredCount);
in = new ByteArrayOutputStream();
validUnion.write(protocol(in));
unionWithUnknownStructField.write(protocol(in));
baos = new ByteArrayInputStream(in.toByteArray());
// both should not throw
p.readOne(protocol(baos), protocol(out));
p.readOne(protocol(baos), protocol(out));
assertEquals(2, countingHandler.recordCountOfMissingFields);
assertEquals(2, countingHandler.fieldIgnoredCount);
} |
@Override
@NonNull
public Mono<ServerResponse> handle(@NonNull ServerRequest request) {
return request.bodyToMono(Unstructured.class)
.switchIfEmpty(Mono.error(() -> new ExtensionConvertException(
"Cannot read body to " + scheme.groupVersionKind())))
.flatMap(client::create)
.flatMap(createdExt -> ServerResponse
.created(URI.create(pathPattern() + "/" + createdExt.getMetadata().getName()))
.contentType(MediaType.APPLICATION_JSON)
.bodyValue(createdExt));
} | @Test
void shouldReturnErrorWhenNoBodyProvided() {
var serverRequest = MockServerRequest.builder()
.body(Mono.empty());
var scheme = Scheme.buildFromType(FakeExtension.class);
var getHandler = new ExtensionCreateHandler(scheme, client);
var responseMono = getHandler.handle(serverRequest);
StepVerifier.create(responseMono)
.verifyError(ExtensionConvertException.class);
} |
public PDFMergerUtility()
{
sources = new ArrayList<>();
} | @Test
void testPDFMergerUtility() throws IOException
{
checkMergeIdentical("PDFBox.GlobalResourceMergeTest.Doc01.decoded.pdf",
"PDFBox.GlobalResourceMergeTest.Doc02.decoded.pdf",
"GlobalResourceMergeTestResult1.pdf",
IOUtils.createMemoryOnlyStreamCache());
// once again, with scratch file
checkMergeIdentical("PDFBox.GlobalResourceMergeTest.Doc01.decoded.pdf",
"PDFBox.GlobalResourceMergeTest.Doc02.decoded.pdf",
"GlobalResourceMergeTestResult2.pdf",
IOUtils.createTempFileOnlyStreamCache());
} |
public void createPipe(CreatePipeStmt stmt) throws DdlException {
try {
lock.writeLock().lock();
Pair<Long, String> dbIdAndName = resolvePipeNameUnlock(stmt.getPipeName());
boolean existed = nameToId.containsKey(dbIdAndName);
if (existed) {
if (!stmt.isIfNotExists() && !stmt.isReplace()) {
ErrorReport.reportSemanticException(ErrorCode.ERR_PIPE_EXISTS);
}
if (stmt.isIfNotExists()) {
return;
} else if (stmt.isReplace()) {
LOG.info("Pipe {} already exist, replace it with a new one", stmt.getPipeName());
Pipe pipe = pipeMap.get(nameToId.get(dbIdAndName));
dropPipeImpl(pipe);
}
}
// Add pipe
long id = GlobalStateMgr.getCurrentState().getNextId();
Pipe pipe = Pipe.fromStatement(id, stmt);
putPipe(pipe);
repo.addPipe(pipe);
} finally {
lock.writeLock().unlock();
}
} | @Test
public void executeAutoIngest() throws Exception {
mockRepoExecutor();
mockTaskExecution(Constants.TaskRunState.SUCCESS);
// auto_ingest=false
String pipeP3 = "p3";
String p3Sql = "create pipe p3 properties('auto_ingest'='false') as " +
"insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')";
createPipe(p3Sql);
Pipe pipe = getPipe(pipeP3);
Assert.assertEquals(Pipe.State.RUNNING, pipe.getState());
pipe.poll();
pipe.schedule();
pipe.schedule();
pipe.poll();
// schedule task
pipe.schedule();
// finalize task
pipe.schedule();
// trigger eos
pipe.schedule();
Assert.assertTrue(pipe.getPipeSource().eos());
Assert.assertEquals(Pipe.State.FINISHED, pipe.getState());
// auto_ingest=true
String pipeP4 = "p4";
String p4Sql = "create pipe p4 properties('auto_ingest'='true') as " +
"insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')";
createPipe(p4Sql);
pipe = getPipe(pipeP4);
Assert.assertEquals(Pipe.State.RUNNING, pipe.getState());
pipe.poll();
pipe.schedule();
pipe.poll();
pipe.schedule();
Assert.assertFalse(pipe.getPipeSource().eos());
Assert.assertEquals(Pipe.State.RUNNING, pipe.getState());
} |
@PostConstruct
public void applyPluginMetadata() {
if (taskPreference() != null) {
for (ConfigurationProperty configurationProperty : configuration) {
if (isValidPluginConfiguration(configurationProperty.getConfigKeyName())) {
Boolean isSecure = pluginConfigurationFor(configurationProperty.getConfigKeyName()).getOption(Property.SECURE);
configurationProperty.handleSecureValueConfiguration(isSecure);
}
}
}
} | @Test
public void postConstructShouldDoNothingForPluggableTaskWithoutCorrespondingPlugin() throws Exception {
ConfigurationProperty configurationProperty = ConfigurationPropertyMother.create("KEY1");
Configuration configuration = new Configuration(configurationProperty);
PluggableTask task = new PluggableTask(new PluginConfiguration("abc.def", "1"), configuration);
assertFalse(configurationProperty.isSecure());
task.applyPluginMetadata();
assertFalse(configurationProperty.isSecure());
} |
public Status status() { return status; } | @Test
public void test_autoscaling_limits_when_min_equals_max() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, min)).build();
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.25, 120);
assertEquals(Autoscaling.Status.unavailable, fixture.autoscale().status());
} |
@Override
protected void rename(
List<HadoopResourceId> srcResourceIds,
List<HadoopResourceId> destResourceIds,
MoveOptions... moveOptions)
throws IOException {
if (moveOptions.length > 0) {
throw new UnsupportedOperationException("Support for move options is not yet implemented.");
}
for (int i = 0; i < srcResourceIds.size(); ++i) {
final Path srcPath = srcResourceIds.get(i).toPath();
final Path destPath = destResourceIds.get(i).toPath();
// this enforces src and dest file systems to match
final org.apache.hadoop.fs.FileSystem fs = srcPath.getFileSystem(configuration);
// rename in HDFS requires the target directory to exist or silently fails (BEAM-4861)
mkdirs(destPath);
boolean success = fs.rename(srcPath, destPath);
// If the failure was due to the file already existing, delete and retry (BEAM-5036).
// This should be the exceptional case, so handle here rather than incur the overhead of
// testing first
if (!success && fs.exists(srcPath) && fs.exists(destPath)) {
LOG.debug(LOG_DELETING_EXISTING_FILE, Path.getPathWithoutSchemeAndAuthority(destPath));
fs.delete(destPath, false); // not recursive
success = fs.rename(srcPath, destPath);
}
if (!success) {
if (!fs.exists(srcPath)) {
throw new FileNotFoundException(
String.format(
"Unable to rename resource %s to %s as source not found.", srcPath, destPath));
} else if (fs.exists(destPath)) {
throw new FileAlreadyExistsException(
String.format(
"Unable to rename resource %s to %s as destination already exists and couldn't be deleted.",
srcPath, destPath));
} else {
throw new IOException(
String.format(
"Unable to rename resource %s to %s. No further information provided by underlying filesystem.",
srcPath, destPath));
}
}
}
} | @Test(expected = FileNotFoundException.class)
public void testRenameRetryScenario() throws Exception {
testRename();
// retry the knowing that sources are already moved to destination
fileSystem.rename(
ImmutableList.of(testPath("testFileA"), testPath("testFileB")),
ImmutableList.of(testPath("renameFileA"), testPath("renameFileB")));
} |
public static KeyValueIterator<Windowed<GenericKey>, GenericRow> fetch(
final ReadOnlySessionStore<GenericKey, GenericRow> store,
final GenericKey key
) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlySessionStore<GenericKey, GenericRow>> stores = getStores(store);
final Function<ReadOnlySessionStore<GenericKey, GenericRow>,
KeyValueIterator<Windowed<GenericKey>, GenericRow>> fetchFunc
= sessionStore -> fetchUncached(sessionStore, key);
return findFirstNonEmptyIterator(stores, fetchFunc);
} | @Test
public void shouldCallUnderlyingStoreSingleKey() throws IllegalAccessException {
when(provider.stores(any(), any())).thenReturn(ImmutableList.of(meteredSessionStore));
SERDES_FIELD.set(meteredSessionStore, serdes);
when(serdes.rawKey(any())).thenReturn(BYTES);
when(meteredSessionStore.wrapped()).thenReturn(wrappedSessionStore);
when(wrappedSessionStore.wrapped()).thenReturn(sessionStore);
when(sessionStore.fetch(any())).thenReturn(storeIterator);
when(storeIterator.hasNext()).thenReturn(false);
SessionStoreCacheBypass.fetch(store, SOME_KEY);
verify(sessionStore).fetch(new Bytes(BYTES));
} |
@Override
public RouteContext createRouteContext(final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final SingleRule rule,
final ConfigurationProperties props, final ConnectionContext connectionContext) {
if (1 == database.getResourceMetaData().getStorageUnits().size()) {
return createSingleDataSourceRouteContext(rule, database, queryContext);
}
RouteContext result = new RouteContext();
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SingleMetaDataValidatorFactory.newInstance(sqlStatementContext.getSqlStatement()).ifPresent(optional -> optional.validate(rule, sqlStatementContext, database));
Collection<QualifiedTable> singleTables = getSingleTables(database, rule, result, sqlStatementContext);
SingleRouteEngineFactory.newInstance(singleTables, sqlStatementContext.getSqlStatement()).ifPresent(optional -> optional.route(result, rule));
return result;
} | @Test
void assertCreateRouteContextWithSingleDataSource() throws SQLException {
SingleRule rule = new SingleRule(new SingleRuleConfiguration(),
DefaultDatabase.LOGIC_NAME, new H2DatabaseType(), Collections.singletonMap("foo_ds", new MockedDataSource(mockConnection())), Collections.emptyList());
rule.getAttributes().getAttribute(DataNodeRuleAttribute.class).getAllDataNodes().put("t_order", Collections.singleton(createDataNode("foo_ds")));
ShardingSphereDatabase database = mockSingleDatabase();
RouteContext actual = new SingleSQLRouter().createRouteContext(
createQueryContext(), mock(RuleMetaData.class), database, rule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet));
assertThat(actual.getRouteUnits().size(), is(1));
RouteUnit routeUnit = actual.getRouteUnits().iterator().next();
assertThat(routeUnit.getDataSourceMapper().getLogicName(), is("foo_ds"));
assertThat(routeUnit.getDataSourceMapper().getActualName(), is("foo_ds"));
assertFalse(routeUnit.getTableMappers().isEmpty());
} |
@Override
public boolean isDataDriven( RestMeta meta ) {
// this step is data driven no matter what.
// either the url, method, body, headers, and/or parameters come from the previous step
return true;
} | @Test
public void testIsDataDriven() throws Exception {
assertTrue( consumer.isDataDriven( meta ) );
} |
@Override
public void connect(final SocketAddress endpoint, final int timeout) throws IOException {
final CountDownLatch signal = new CountDownLatch(1);
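// Run the blocking connect on a worker thread so the latch below can enforce
// the caller-supplied timeout.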
final Thread t = threadFactory.newThread(new Runnable() {
@Override
public void run() {
try {
connect(endpoint);
}
catch(IOException e) {
exception = e;
}
finally {
signal.countDown();
}
}
});
t.start();
try {
// Wait for #run to finish
if(!signal.await(timeout, TimeUnit.MILLISECONDS)) {
throw new SocketTimeoutException();
}
}
catch(InterruptedException e) {
final SocketTimeoutException s = new SocketTimeoutException(e.getMessage());
s.initCause(e);
throw s;
}
if(exception != null) {
throw exception;
}
} | @Test(expected = SocketTimeoutException.class)
public void testConnect() throws Exception {
new UDTSocket().connect(new InetSocketAddress("localhost", 11111), 1000);
} |
@Bean
public PluginDataHandler dividePluginDataHandler() {
return new DividePluginDataHandler();
} | @Test
public void testDividePluginDataHandler() {
applicationContextRunner.run(context -> {
PluginDataHandler handler = context.getBean("dividePluginDataHandler", PluginDataHandler.class);
assertNotNull(handler);
}
);
} |
public static String escapeHtml4(CharSequence html) {
Html4Escape escape = new Html4Escape();
return escape.replace(html).toString();
} | @Test
public void escapeHtml4Test() {
String escapeHtml4 = EscapeUtil.escapeHtml4("<a>你好</a>");
assertEquals("<a>你好</a>", escapeHtml4);
String result = EscapeUtil.unescapeHtml4("振荡器类型");
assertEquals("振荡器类型", result);
String escape = EscapeUtil.escapeHtml4("*@-_+./(123你好)");
assertEquals("*@-_+./(123你好)", escape);
} |
void addPeerClusterWatches(@Nonnull Set<String> newPeerClusters, @Nonnull FailoutConfig failoutConfig)
{
final Set<String> existingPeerClusters = _peerWatches.keySet();
if (newPeerClusters.isEmpty())
{
removePeerClusterWatches();
return;
}
final Set<String> peerClustersToAdd = new HashSet<>(newPeerClusters);
peerClustersToAdd.removeAll(existingPeerClusters);
if (!peerClustersToAdd.isEmpty())
{
addClusterWatches(peerClustersToAdd, failoutConfig);
}
final Set<String> peerClustersToRemove = new HashSet<>(existingPeerClusters);
peerClustersToRemove.removeAll(newPeerClusters);
if (!peerClustersToRemove.isEmpty())
{
removeClusterWatches(peerClustersToRemove);
}
} | @Test
public void testAddPeerClusterWatchesWithPeerClusterAdded()
{
_manager.addPeerClusterWatches(new HashSet<>(Arrays.asList(PEER_CLUSTER_NAME1)), mock(FailoutConfig.class));
_manager.addPeerClusterWatches(new HashSet<>(Arrays.asList(PEER_CLUSTER_NAME1, PEER_CLUSTER_NAME2)), mock(FailoutConfig.class));
verify(_loadBalancerState, times(1)).listenToCluster(eq(PEER_CLUSTER_NAME2), any());
verify(_loadBalancerState, times(1)).listenToCluster(eq(PEER_CLUSTER_NAME1), any());
verify(_warmUpHandler, times(1)).warmUpConnections(eq(PEER_CLUSTER_NAME1), any());
verify(_warmUpHandler, times(1)).warmUpConnections(eq(PEER_CLUSTER_NAME2), any());
verify(_warmUpHandler, never()).cancelPendingRequests(any());
} |
public static boolean isAnyNullOrEmptyAfterTrim(String... values) {
if (values == null) {
return false;
}
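// Note: despite the name, this matches values that are NOT null/empty after
// trim, i.e. "is any filled" (see the assertions in isAnyFilledTest below).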
return Arrays.stream(values).anyMatch(s -> !isNullOrEmptyAfterTrim(s));
} | @Test
void isAnyFilledTest() {
assertTrue(isAnyNullOrEmptyAfterTrim("test-string-1", "test-string-2"));
assertTrue(isAnyNullOrEmptyAfterTrim("test-string-1", ""));
assertFalse(isAnyNullOrEmptyAfterTrim("", "", null));
} |
public void addFilter(Filter filter) {
filterChain.addFilter(filter);
} | @Test
void testAddFilter() {
final var target = mock(Target.class);
final var filterManager = new FilterManager();
verifyNoMoreInteractions(target);
final var filter = mock(Filter.class);
when(filter.execute(any(Order.class))).thenReturn("filter");
filterManager.addFilter(filter);
final Order order = mock(Order.class);
assertEquals("filter", filterManager.filterRequest(order));
verify(filter, times(1)).execute(any(Order.class));
verifyNoMoreInteractions(target, filter, order);
} |
@Override
public Set<String> get(URL url) {
String serviceInterface = url.getServiceInterface();
String registryCluster = getRegistryCluster(url);
MetadataReport metadataReport = metadataReportInstance.getMetadataReport(registryCluster);
if (metadataReport == null) {
return Collections.emptySet();
}
return metadataReport.getServiceAppMapping(serviceInterface, url);
} | @Test
void testGet() {
Set<String> set = new HashSet<>();
set.add("app1");
MetadataReportInstance reportInstance = mock(MetadataReportInstance.class);
Mockito.when(reportInstance.getMetadataReport(any())).thenReturn(metadataReport);
when(metadataReport.getServiceAppMapping(any(), any())).thenReturn(set);
mapping.metadataReportInstance = reportInstance;
Set<String> result = mapping.get(url);
assertEquals(set, result);
} |
@Override
public Validation validate(Validation val) {
if (StringUtils.isBlank(systemEnvironment.getPropertyImpl("jetty.home"))) {
systemEnvironment.setProperty("jetty.home", systemEnvironment.getPropertyImpl("user.dir"));
}
systemEnvironment.setProperty("jetty.base", systemEnvironment.getPropertyImpl("jetty.home"));
File home = new File(systemEnvironment.getPropertyImpl("jetty.home"));
File work = new File(systemEnvironment.getPropertyImpl("jetty.home"), "work");
if (home.exists()) {
if (work.exists()) {
try {
FileUtils.deleteDirectory(work);
} catch (IOException e) {
String message = format("Error trying to remove Jetty working directory {0}: {1}",
work.getAbsolutePath(), e);
return val.addError(new RuntimeException(message));
}
}
work.mkdir();
}
return Validation.SUCCESS;
} | @Test
public void shouldRecreateWorkDirIfItExists() throws IOException {
File oldWorkDir = new File(homeDir, "work");
oldWorkDir.mkdir();
new File(oldWorkDir, "junk.txt").createNewFile();
when(systemEnvironment.getPropertyImpl("jetty.home")).thenReturn(homeDir.getAbsolutePath());
Validation val = new Validation();
jettyWorkDirValidator.validate(val);
assertThat(val.isSuccessful(), is(true));
File recreatedWorkDir = new File(homeDir, "work");
assertThat(recreatedWorkDir.exists(), is(true));
assertThat(recreatedWorkDir.listFiles().length, is(0));
} |
@Override
public void setGPSLocation(double latitude, double longitude) {
} | @Test
public void setGPSLocation() {
List<SensorsDataAPI.AutoTrackEventType> types = new ArrayList<>();
types.add(SensorsDataAPI.AutoTrackEventType.APP_START);
types.add(SensorsDataAPI.AutoTrackEventType.APP_END);
mSensorsAPI.setGPSLocation(1000.0, 45.5, "GPS");
} |
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testBraceletOfClayBreak()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", BREAK_BRACELET_OF_CLAY, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_BRACELET_OF_CLAY, 28);
} |
public Arguments parse(String[] args) {
JCommander jCommander = new JCommander(this);
jCommander.setProgramName("jsonschema2pojo");
try {
jCommander.parse(args);
if (this.showHelp) {
jCommander.usage();
exit(EXIT_OKAY);
} else if (printVersion) {
Properties properties = new Properties();
properties.load(getClass().getResourceAsStream("version.properties"));
jCommander.getConsole().println(jCommander.getProgramName() + " version " + properties.getProperty("version"));
exit(EXIT_OKAY);
}
} catch (IOException | ParameterException e) {
System.err.println(e.getMessage());
jCommander.usage();
exit(EXIT_ERROR);
}
return this;
} | @Test
public void missingArgsCausesHelp() {
ArgsForTest args = (ArgsForTest) new ArgsForTest().parse(new String[] {});
assertThat(args.status, is(1));
assertThat(new String(systemErrCapture.toByteArray(), StandardCharsets.UTF_8), is(containsString("--target")));
assertThat(new String(systemErrCapture.toByteArray(), StandardCharsets.UTF_8), is(containsString("--source")));
assertThat(new String(systemOutCapture.toByteArray(), StandardCharsets.UTF_8), is(containsString("Usage: jsonschema2pojo")));
} |
public static boolean canImplicitlyCast(final SqlDecimal s1, final SqlDecimal s2) {
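// e.g. DECIMAL(5,2) -> DECIMAL(6,3): scale 2 <= 3 and integer digits
// (5 - 2) = 3 <= (6 - 3) = 3, so the cast is allowed (see the test below).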
return s1.getScale() <= s2.getScale()
&& (s1.getPrecision() - s1.getScale()) <= (s2.getPrecision() - s2.getScale());
} | @Test
public void shouldAllowImplicitlyCastOnHigherPrecisionAndScale() {
// Given:
final SqlDecimal s1 = SqlTypes.decimal(5, 2);
final SqlDecimal s2 = SqlTypes.decimal(6, 3);
// When:
final boolean compatible = DecimalUtil.canImplicitlyCast(s1, s2);
// Then:
assertThat(compatible, is(true));
} |
@Override
public MetadataReport getMetadataReport(URL url) {
url = url.setPath(MetadataReport.class.getName()).removeParameters(EXPORT_KEY, REFER_KEY);
String key = url.toServiceString(NAMESPACE_KEY);
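// Fast path: reuse a previously created report for this key before taking the lock.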
MetadataReport metadataReport = serviceStoreMap.get(key);
if (metadataReport != null) {
return metadataReport;
}
// Lock the metadata access process to ensure a single instance of the metadata instance
lock.lock();
try {
metadataReport = serviceStoreMap.get(key);
if (metadataReport != null) {
return metadataReport;
}
boolean check = url.getParameter(CHECK_KEY, true) && url.getPort() != 0;
try {
metadataReport = createMetadataReport(url);
} catch (Exception e) {
if (!check) {
logger.warn(PROXY_FAILED_EXPORT_SERVICE, "", "", "The metadata reporter failed to initialize", e);
} else {
throw e;
}
}
if (check && metadataReport == null) {
throw new IllegalStateException("Can not create metadata Report " + url);
}
if (metadataReport != null) {
serviceStoreMap.put(key, metadataReport);
}
return metadataReport;
} finally {
// Release the lock
lock.unlock();
}
} | @Test
void testGetForDiffGroup() {
URL url1 = URL.valueOf("zookeeper://" + NetUtils.getLocalAddress().getHostName()
+ ":4444/org.apache.dubbo.TestService?version=1.0.0&application=vic&group=aaa");
URL url2 = URL.valueOf("zookeeper://" + NetUtils.getLocalAddress().getHostName()
+ ":4444/org.apache.dubbo.TestService?version=1.0.0&application=vic&group=bbb");
MetadataReport metadataReport1 = metadataReportFactory.getMetadataReport(url1);
MetadataReport metadataReport2 = metadataReportFactory.getMetadataReport(url2);
Assertions.assertNotEquals(metadataReport1, metadataReport2);
} |
private boolean isEmpty(ConsumerRecords<K, V> records) {
return records == null || records.isEmpty();
} | @Test
public void when_partitionAddedWhileJobDown_then_consumedFromBeginning() throws Exception {
String sinkListName = randomName();
IList<Entry<Integer, String>> sinkList = instance().getList(sinkListName);
Pipeline p = Pipeline.create();
Properties properties = properties();
properties.setProperty("auto.offset.reset", "latest");
p.readFrom(KafkaSources.<Integer, String>kafka(properties, topic1Name))
.withoutTimestamps()
.writeTo(Sinks.list(sinkList));
Job job = instance().getJet().newJob(p, new JobConfig().setProcessingGuarantee(EXACTLY_ONCE));
assertTrueEventually(() -> {
// This might add multiple `0` events to the topic - we need to do this because the source starts from
// the latest position and we don't exactly know when it starts, so we try repeatedly
kafkaTestSupport.produce(topic1Name, 0, "0");
assertFalse(sinkList.isEmpty());
assertEquals(entry(0, "0"), sinkList.get(0));
});
job.suspend();
assertThat(job).eventuallyHasStatus(SUSPENDED);
// Note that the job might not have consumed all the zeroes from the topic at this point
// When
kafkaTestSupport.setPartitionCount(topic1Name, INITIAL_PARTITION_COUNT + 2);
// We produce to a partition that didn't exist during the previous job execution.
// The job must start reading the new partition from the beginning, otherwise it would miss this item.
Entry<Integer, String> event = produceEventToNewPartition();
job.resume();
// All events after the resume will be loaded: the non-consumed zeroes, and the possibly multiple
// events added in produceEventToNewPartition(). But they must include the event added to the new partition.
assertTrueEventually(() -> assertThat(sinkList).contains(event));
} |
public void putAll(Headers headers) {
for (int i = 0; i < headers.size(); i++) {
addNormal(headers.originalName(i), headers.name(i), headers.value(i));
}
} | @Test
void putAll() {
Headers headers = new Headers();
headers.add("Via", "duct");
headers.add("Cookie", "this=that");
headers.add("Cookie", "frizzle=frazzle");
Headers other = new Headers();
other.add("cookie", "a=b");
other.add("via", "com");
headers.putAll(other);
// Only check the order per field, not for the entire set.
Truth.assertThat(headers.getAll("Via")).containsExactly("duct", "com").inOrder();
Truth.assertThat(headers.getAll("coOkiE"))
.containsExactly("this=that", "frizzle=frazzle", "a=b")
.inOrder();
Truth.assertThat(headers.size()).isEqualTo(5);
} |
public static LocalDateTime beginOfDay(LocalDateTime time) {
return time.with(LocalTime.MIN);
} | @Test
public void beginOfDayTest() {
final LocalDateTime localDateTime = LocalDateTimeUtil.parse("2020-01-23T12:23:56");
final LocalDateTime beginOfDay = LocalDateTimeUtil.beginOfDay(localDateTime);
assertEquals("2020-01-23T00:00", beginOfDay.toString());
} |
@Override
public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels,
HttpServletRequest hsr) throws IOException {
// Step1. Check the parameters to ensure that the parameters are not empty.
if (newNodeToLabels == null) {
routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
throw new IllegalArgumentException("Parameter error, newNodeToLabels must not be empty.");
}
List<NodeToLabelsEntry> nodeToLabelsEntries = newNodeToLabels.getNodeToLabels();
if (CollectionUtils.isEmpty(nodeToLabelsEntries)) {
routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), REPLACE_LABELSONNODES, UNKNOWN,
TARGET_WEB_SERVICE, "Parameter error, " +
"nodeToLabelsEntries must not be empty.");
throw new IllegalArgumentException("Parameter error, " +
"nodeToLabelsEntries must not be empty.");
}
try {
// Step2. We map the NodeId and NodeToLabelsEntry in the request.
Map<String, NodeToLabelsEntry> nodeIdToLabels = new HashMap<>();
newNodeToLabels.getNodeToLabels().forEach(nodeIdToLabel -> {
String nodeId = nodeIdToLabel.getNodeId();
nodeIdToLabels.put(nodeId, nodeIdToLabel);
});
// Step3. We map SubCluster with NodeToLabelsEntryList
Map<SubClusterInfo, NodeToLabelsEntryList> subClusterToNodeToLabelsEntryList =
new HashMap<>();
nodeIdToLabels.forEach((nodeId, nodeToLabelsEntry) -> {
SubClusterInfo subClusterInfo = getNodeSubcluster(nodeId);
NodeToLabelsEntryList nodeToLabelsEntryList = subClusterToNodeToLabelsEntryList.
getOrDefault(subClusterInfo, new NodeToLabelsEntryList());
nodeToLabelsEntryList.getNodeToLabels().add(nodeToLabelsEntry);
subClusterToNodeToLabelsEntryList.put(subClusterInfo, nodeToLabelsEntryList);
});
// Step4. Traverse the subCluster and call the replaceLabelsOnNodes interface.
long startTime = clock.getTime();
final HttpServletRequest hsrCopy = clone(hsr);
StringBuilder builder = new StringBuilder();
subClusterToNodeToLabelsEntryList.forEach((subClusterInfo, nodeToLabelsEntryList) -> {
SubClusterId subClusterId = subClusterInfo.getSubClusterId();
try {
DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorForSubCluster(
subClusterInfo);
interceptor.replaceLabelsOnNodes(nodeToLabelsEntryList, hsrCopy);
builder.append("subCluster-").append(subClusterId.getId()).append(":Success,");
} catch (Exception e) {
LOG.error("replaceLabelsOnNodes Failed. subClusterId = {}.", subClusterId, e);
builder.append("subCluster-").append(subClusterId.getId()).append(":Failed,");
}
});
long stopTime = clock.getTime();
RouterAuditLogger.logSuccess(getUser().getShortUserName(), REPLACE_LABELSONNODES,
TARGET_WEB_SERVICE);
routerMetrics.succeededReplaceLabelsOnNodesRetrieved(stopTime - startTime);
// Step5. return call result.
return Response.status(Status.OK).entity(builder.toString()).build();
} catch (Exception e) {
routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), REPLACE_LABELSONNODES, UNKNOWN,
TARGET_WEB_SERVICE, e.getLocalizedMessage());
throw e;
}
} | @Test
public void testReplaceLabelsOnNodes() throws Exception {
// subCluster0 -> node0:0 -> label:NodeLabel0
// subCluster1 -> node1:1 -> label:NodeLabel1
// subCluster2 -> node2:2 -> label:NodeLabel2
// subCluster3 -> node3:3 -> label:NodeLabel3
NodeToLabelsEntryList nodeToLabelsEntryList = new NodeToLabelsEntryList();
for (int i = 0; i < NUM_SUBCLUSTER; i++) {
// labels
List<String> labels = new ArrayList<>();
labels.add("NodeLabel" + i);
// nodes
String nodeId = "node" + i + ":" + i;
NodeToLabelsEntry nodeToLabelsEntry = new NodeToLabelsEntry(nodeId, labels);
List<NodeToLabelsEntry> nodeToLabelsEntries = nodeToLabelsEntryList.getNodeToLabels();
nodeToLabelsEntries.add(nodeToLabelsEntry);
}
// one of the results:
// subCluster-0:Success,subCluster-1:Success,subCluster-3:Success,subCluster-2:Success,
// We can't confirm the complete return order.
Response response = interceptor.replaceLabelsOnNodes(nodeToLabelsEntryList, null);
Assert.assertNotNull(response);
Assert.assertEquals(200, response.getStatus());
Object entityObject = response.getEntity();
Assert.assertNotNull(entityObject);
String entityValue = String.valueOf(entityObject);
String[] entities = entityValue.split(",");
Assert.assertNotNull(entities);
Assert.assertEquals(4, entities.length);
String expectValue =
"subCluster-0:Success,subCluster-1:Success,subCluster-2:Success,subCluster-3:Success,";
for (String entity : entities) {
Assert.assertTrue(expectValue.contains(entity));
}
} |
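
The Step3 grouping above is a getOrDefault-then-put accumulation keyed by sub-cluster. Below is a minimal, self-contained sketch of the same pattern, using plain strings as hypothetical stand-ins for SubClusterInfo and NodeToLabelsEntry:

import java.util.*;

public class GroupBySubClusterSketch {
    public static void main(String[] args) {
        // Hypothetical node -> sub-cluster mapping.
        Map<String, String> nodeToSubCluster = Map.of(
                "node0:0", "sc0", "node1:1", "sc1", "node2:2", "sc0");
        Map<String, List<String>> subClusterToNodes = new HashMap<>();
        nodeToSubCluster.forEach((node, subCluster) -> {
            // Same getOrDefault-then-put idiom as the focal method above.
            List<String> nodes = subClusterToNodes.getOrDefault(subCluster, new ArrayList<>());
            nodes.add(node);
            subClusterToNodes.put(subCluster, nodes);
        });
        // e.g. {sc0=[node0:0, node2:2], sc1=[node1:1]} (iteration order may vary)
        System.out.println(subClusterToNodes);
    }
}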
@Override
public DirectPipelineResult run(Pipeline pipeline) {
try {
options =
MAPPER
.readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class)
.as(DirectOptions.class);
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
performRewrites(pipeline);
MetricsEnvironment.setMetricsSupported(true);
try {
DirectGraphVisitor graphVisitor = new DirectGraphVisitor();
pipeline.traverseTopologically(graphVisitor);
@SuppressWarnings("rawtypes")
KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create();
pipeline.traverseTopologically(keyedPValueVisitor);
DisplayDataValidator.validatePipeline(pipeline);
DisplayDataValidator.validateOptions(options);
ExecutorService metricsPool =
Executors.newCachedThreadPool(
new ThreadFactoryBuilder()
.setThreadFactory(MoreExecutors.platformThreadFactory())
.setDaemon(false) // non-daemon, so pending metric commits are not silently dropped on shutdown
.setNameFormat("direct-metrics-counter-committer")
.build());
DirectGraph graph = graphVisitor.getGraph();
EvaluationContext context =
EvaluationContext.create(
clockSupplier.get(),
Enforcement.bundleFactoryFor(enabledEnforcements, graph),
graph,
keyedPValueVisitor.getKeyedPValues(),
metricsPool);
TransformEvaluatorRegistry registry =
TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options);
PipelineExecutor executor =
ExecutorServiceParallelExecutor.create(
options.getTargetParallelism(),
registry,
Enforcement.defaultModelEnforcements(enabledEnforcements),
context,
metricsPool);
executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options));
DirectPipelineResult result = new DirectPipelineResult(executor, context);
if (options.isBlockOnRun()) {
try {
result.waitUntilFinish();
} catch (UserCodeException userException) {
throw new PipelineExecutionException(userException.getCause());
} catch (Throwable t) {
if (t instanceof RuntimeException) {
throw (RuntimeException) t;
}
throw new RuntimeException(t);
}
}
return result;
} finally {
MetricsEnvironment.setMetricsSupported(false);
}
} | @Test
public void testUnencodableOutputFromBoundedRead() throws Exception {
Pipeline p = getPipeline();
p.apply(GenerateSequence.from(0).to(10)).setCoder(new LongNoDecodeCoder());
thrown.expectCause(isA(CoderException.class));
thrown.expectMessage("Cannot decode a long");
p.run();
} |
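
The options handling at the top of run() is a defensive copy via a JSON round-trip, which also validates that the options serialize cleanly. A generic Jackson sketch of the same idiom, with a hypothetical Settings POJO standing in for PipelineOptions:

import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonRoundTripCopySketch {
    // Hypothetical settings POJO; not a real Beam type.
    public static class Settings {
        public int parallelism = 4;
        public boolean blockOnRun = true;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        Settings original = new Settings();
        // Serialize-then-deserialize yields an independent copy (and fails fast
        // if the object cannot be represented as JSON).
        Settings copy = mapper.readValue(mapper.writeValueAsBytes(original), Settings.class);
        copy.parallelism = 8;
        System.out.println(original.parallelism + " vs " + copy.parallelism); // 4 vs 8
    }
}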
static void writeImageJson(Optional<Path> imageJsonOutputPath, JibContainer jibContainer)
throws IOException {
if (imageJsonOutputPath.isPresent()) {
ImageMetadataOutput metadataOutput = ImageMetadataOutput.fromJibContainer(jibContainer);
Files.write(
imageJsonOutputPath.get(), metadataOutput.toJson().getBytes(StandardCharsets.UTF_8));
}
} | @Test
public void testWriteImageJson()
throws InvalidImageReferenceException, IOException, DigestException {
String imageId = "sha256:61bb3ec31a47cb730eb58a38bbfa813761a51dca69d10e39c24c3d00a7b2c7a9";
String digest = "sha256:3f1be7e19129edb202c071a659a4db35280ab2bb1a16f223bfd5d1948657b6fc";
when(mockJibContainer.getTargetImage())
.thenReturn(ImageReference.parse("eclipse-temurin:8-jre"));
when(mockJibContainer.getImageId()).thenReturn(DescriptorDigest.fromDigest(imageId));
when(mockJibContainer.getDigest()).thenReturn(DescriptorDigest.fromDigest(digest));
when(mockJibContainer.getTags()).thenReturn(ImmutableSet.of("latest", "tag-2"));
Path outputPath = temporaryFolder.getRoot().toPath().resolve("jib-image.json");
JibCli.writeImageJson(Optional.of(outputPath), mockJibContainer);
String outputJson = new String(Files.readAllBytes(outputPath), StandardCharsets.UTF_8);
ImageMetadataOutput metadataOutput =
JsonTemplateMapper.readJson(outputJson, ImageMetadataOutput.class);
assertThat(metadataOutput.getImage()).isEqualTo("eclipse-temurin:8-jre");
assertThat(metadataOutput.getImageId()).isEqualTo(imageId);
assertThat(metadataOutput.getImageDigest()).isEqualTo(digest);
assertThat(metadataOutput.getTags()).containsExactly("latest", "tag-2");
} |
static SVNClientManager newSvnClientManager(SvnConfiguration configuration) {
ISVNOptions options = SVNWCUtil.createDefaultOptions(true);
final char[] passwordValue = getCharsOrNull(configuration.password());
final char[] passPhraseValue = getCharsOrNull(configuration.passPhrase());
ISVNAuthenticationManager authManager = SVNWCUtil.createDefaultAuthenticationManager(
null,
configuration.username(),
passwordValue,
configuration.privateKey(),
passPhraseValue,
false);
return SVNClientManager.newInstance(options, authManager);
} | @Test
public void newSvnClientManager_whenPasswordConfigured_shouldNotReturnNull() {
when(config.password()).thenReturn("password");
when(config.passPhrase()).thenReturn("passPhrase");
assertThat(newSvnClientManager(config)).isNotNull();
} |
boolean sendRecords() {
int processed = 0;
recordBatch(toSend.size());
final SourceRecordWriteCounter counter =
toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
for (final SourceRecord preTransformRecord : toSend) {
ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
final SourceRecord record = transformationChain.apply(context, preTransformRecord);
final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
if (producerRecord == null || context.failed()) {
counter.skipRecord();
recordDropped(preTransformRecord);
processed++;
continue;
}
log.trace("{} Appending record to the topic {} with key {}, value {}", this, record.topic(), record.key(), record.value());
Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord);
try {
final String topic = producerRecord.topic();
maybeCreateTopic(topic);
producer.send(
producerRecord,
(recordMetadata, e) -> {
if (e != null) {
if (producerClosed) {
log.trace("{} failed to send record to {}; this is expected as the producer has already been closed", AbstractWorkerSourceTask.this, topic, e);
} else {
log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e);
}
log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
producerSendFailed(context, false, producerRecord, preTransformRecord, e);
if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
counter.skipRecord();
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
}
} else {
counter.completeRecord();
log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
AbstractWorkerSourceTask.this,
recordMetadata.topic(), recordMetadata.partition(),
recordMetadata.offset());
recordSent(preTransformRecord, producerRecord, recordMetadata);
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
if (topicTrackingEnabled) {
recordActiveTopic(producerRecord.topic());
}
}
});
// Note that this will cause retries to take place within a transaction
} catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
this, producerRecord.topic(), producerRecord.partition(), e);
toSend = toSend.subList(processed, toSend.size());
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
counter.retryRemaining();
return false;
} catch (ConnectException e) {
log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
this, producerRecord.topic(), producerRecord.partition(), e);
log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
throw e;
} catch (KafkaException e) {
producerSendFailed(context, true, producerRecord, preTransformRecord, e);
}
processed++;
recordDispatched(preTransformRecord);
}
toSend = null;
batchDispatched();
return true;
} | @Test
public void testSendRecordsTopicDescribeRetriesMidway() {
createWorkerTask();
// Differentiate only by Kafka partition so we can reuse conversion expectations
SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
SourceRecord record3 = new SourceRecord(PARTITION, OFFSET, OTHER_TOPIC, 3, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
expectPreliminaryCalls(TOPIC);
expectPreliminaryCalls(OTHER_TOPIC);
when(admin.describeTopics(anyString()))
.thenReturn(Collections.emptyMap())
.thenThrow(new RetriableException(new TimeoutException("timeout")))
.thenReturn(Collections.emptyMap());
when(admin.createOrFindTopics(any(NewTopic.class))).thenAnswer(
(Answer<TopicAdmin.TopicCreationResponse>) invocation -> {
NewTopic newTopic = invocation.getArgument(0);
return createdTopic(newTopic.name());
});
// Try to send 3 records: the first topic describe passes, the second fails with a retriable error, so the last record should be saved for retry
workerTask.toSend = Arrays.asList(record1, record2, record3);
workerTask.sendRecords();
assertEquals(Collections.singletonList(record3), workerTask.toSend);
// Next they all succeed
workerTask.sendRecords();
assertNull(workerTask.toSend);
verify(admin, times(3)).describeTopics(anyString());
ArgumentCaptor<NewTopic> newTopicCaptor = ArgumentCaptor.forClass(NewTopic.class);
verify(admin, times(2)).createOrFindTopics(newTopicCaptor.capture());
assertEquals(Arrays.asList(TOPIC, OTHER_TOPIC), newTopicCaptor.getAllValues()
.stream()
.map(NewTopic::name)
.collect(Collectors.toList()));
} |
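
On a retriable send failure, the method keeps only the unprocessed tail with subList so the next call resumes where it left off. A minimal sketch of that resume pattern:

import java.util.Arrays;
import java.util.List;

public class RetryTailSketch {
    public static void main(String[] args) {
        List<String> toSend = Arrays.asList("r1", "r2", "r3");
        int processed = 2; // r1 and r2 were dispatched before the retriable failure
        // Keep only the unprocessed tail, as sendRecords() does above.
        toSend = toSend.subList(processed, toSend.size());
        System.out.println(toSend); // [r3] -- retried on the next sendRecords() call
    }
}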
static NavigableMap<Integer, Long> buildFilteredLeaderEpochMap(NavigableMap<Integer, Long> leaderEpochs) {
List<Integer> epochsWithNoMessages = new ArrayList<>();
Map.Entry<Integer, Long> previousEpochAndOffset = null;
for (Map.Entry<Integer, Long> currentEpochAndOffset : leaderEpochs.entrySet()) {
if (previousEpochAndOffset != null && previousEpochAndOffset.getValue().equals(currentEpochAndOffset.getValue())) {
epochsWithNoMessages.add(previousEpochAndOffset.getKey());
}
previousEpochAndOffset = currentEpochAndOffset;
}
if (epochsWithNoMessages.isEmpty()) {
return leaderEpochs;
}
TreeMap<Integer, Long> filteredLeaderEpochs = new TreeMap<>(leaderEpochs);
for (Integer epochWithNoMessage : epochsWithNoMessages) {
filteredLeaderEpochs.remove(epochWithNoMessage);
}
return filteredLeaderEpochs;
} | @Test
public void testBuildFilteredLeaderEpochMap() {
TreeMap<Integer, Long> leaderEpochToStartOffset = new TreeMap<>();
leaderEpochToStartOffset.put(0, 0L);
leaderEpochToStartOffset.put(1, 0L);
leaderEpochToStartOffset.put(2, 0L);
leaderEpochToStartOffset.put(3, 30L);
leaderEpochToStartOffset.put(4, 40L);
leaderEpochToStartOffset.put(5, 60L);
leaderEpochToStartOffset.put(6, 60L);
leaderEpochToStartOffset.put(7, 70L);
leaderEpochToStartOffset.put(8, 70L);
TreeMap<Integer, Long> expectedLeaderEpochs = new TreeMap<>();
expectedLeaderEpochs.put(2, 0L);
expectedLeaderEpochs.put(3, 30L);
expectedLeaderEpochs.put(4, 40L);
expectedLeaderEpochs.put(6, 60L);
expectedLeaderEpochs.put(8, 70L);
NavigableMap<Integer, Long> refinedLeaderEpochMap = RemoteLogManager.buildFilteredLeaderEpochMap(leaderEpochToStartOffset);
assertEquals(expectedLeaderEpochs, refinedLeaderEpochMap);
} |
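
A compact way to read buildFilteredLeaderEpochMap: whenever two consecutive epochs share a start offset, the earlier epoch produced no messages and is dropped, while the last epoch of each run is kept. A standalone sketch of the same rule, written as an equivalent single-pass formulation rather than the method's exact two-pass code:

import java.util.*;

public class EpochFilterSketch {
    // Keeps only the last epoch of each run of equal start offsets.
    static NavigableMap<Integer, Long> filter(NavigableMap<Integer, Long> epochs) {
        TreeMap<Integer, Long> out = new TreeMap<>();
        Map.Entry<Integer, Long> prev = null;
        for (Map.Entry<Integer, Long> cur : epochs.entrySet()) {
            if (prev != null && !prev.getValue().equals(cur.getValue())) {
                out.put(prev.getKey(), prev.getValue()); // prev has messages: keep it
            }
            prev = cur;
        }
        if (prev != null) {
            out.put(prev.getKey(), prev.getValue()); // the final epoch is always kept
        }
        return out;
    }

    public static void main(String[] args) {
        TreeMap<Integer, Long> epochs = new TreeMap<>(Map.of(0, 0L, 1, 0L, 2, 0L, 3, 30L));
        System.out.println(filter(epochs)); // {2=0, 3=30}
    }
}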
public static String getTypeName(final int type) {
switch (type) {
case START_EVENT_V3:
return "Start_v3";
case STOP_EVENT:
return "Stop";
case QUERY_EVENT:
return "Query";
case ROTATE_EVENT:
return "Rotate";
case INTVAR_EVENT:
return "Intvar";
case LOAD_EVENT:
return "Load";
case NEW_LOAD_EVENT:
return "New_load";
case SLAVE_EVENT:
return "Slave";
case CREATE_FILE_EVENT:
return "Create_file";
case APPEND_BLOCK_EVENT:
return "Append_block";
case DELETE_FILE_EVENT:
return "Delete_file";
case EXEC_LOAD_EVENT:
return "Exec_load";
case RAND_EVENT:
return "RAND";
case XID_EVENT:
return "Xid";
case USER_VAR_EVENT:
return "User var";
case FORMAT_DESCRIPTION_EVENT:
return "Format_desc";
case TABLE_MAP_EVENT:
return "Table_map";
case PRE_GA_WRITE_ROWS_EVENT:
return "Write_rows_event_old";
case PRE_GA_UPDATE_ROWS_EVENT:
return "Update_rows_event_old";
case PRE_GA_DELETE_ROWS_EVENT:
return "Delete_rows_event_old";
case WRITE_ROWS_EVENT_V1:
return "Write_rows_v1";
case UPDATE_ROWS_EVENT_V1:
return "Update_rows_v1";
case DELETE_ROWS_EVENT_V1:
return "Delete_rows_v1";
case BEGIN_LOAD_QUERY_EVENT:
return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT:
return "Execute_load_query";
case INCIDENT_EVENT:
return "Incident";
case HEARTBEAT_LOG_EVENT:
case HEARTBEAT_LOG_EVENT_V2:
return "Heartbeat";
case IGNORABLE_LOG_EVENT:
return "Ignorable";
case ROWS_QUERY_LOG_EVENT:
return "Rows_query";
case WRITE_ROWS_EVENT:
return "Write_rows";
case UPDATE_ROWS_EVENT:
return "Update_rows";
case DELETE_ROWS_EVENT:
return "Delete_rows";
case GTID_LOG_EVENT:
return "Gtid";
case ANONYMOUS_GTID_LOG_EVENT:
return "Anonymous_Gtid";
case PREVIOUS_GTIDS_LOG_EVENT:
return "Previous_gtids";
case PARTIAL_UPDATE_ROWS_EVENT:
return "Update_rows_partial";
case TRANSACTION_CONTEXT_EVENT:
return "Transaction_context";
case VIEW_CHANGE_EVENT:
return "view_change";
case XA_PREPARE_LOG_EVENT:
return "Xa_prepare";
case TRANSACTION_PAYLOAD_EVENT:
return "transaction_payload";
default:
return "Unknown type:" + type;
}
} | @Test
public void getTypeNameInputPositiveOutputNotNull3() {
// Arrange
final int type = 39;
// Act
final String actual = LogEvent.getTypeName(type);
// Assert result
Assert.assertEquals("Update_rows_partial", actual);
} |
static EndTransactionMarker deserializeValue(ControlRecordType type, ByteBuffer value) {
ensureTransactionMarkerControlType(type);
if (value.remaining() < CURRENT_END_TXN_MARKER_VALUE_SIZE)
throw new InvalidRecordException("Invalid value size found for end transaction marker. Must have " +
"at least " + CURRENT_END_TXN_MARKER_VALUE_SIZE + " bytes, but found only " + value.remaining());
short version = value.getShort(0);
if (version < 0)
throw new InvalidRecordException("Invalid version found for end transaction marker: " + version +
". May indicate data corruption");
if (version > CURRENT_END_TXN_MARKER_VERSION)
log.debug("Received end transaction marker value version {}. Parsing as version {}", version,
CURRENT_END_TXN_MARKER_VERSION);
int coordinatorEpoch = value.getInt(2);
return new EndTransactionMarker(type, coordinatorEpoch);
} | @Test
public void testIllegalNegativeVersion() {
ByteBuffer buffer = ByteBuffer.allocate(2);
buffer.putShort((short) -1);
buffer.flip();
assertThrows(InvalidRecordException.class, () -> EndTransactionMarker.deserializeValue(ControlRecordType.ABORT, buffer));
} |
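
The reads at offsets 0 and 2 imply a value layout of a 2-byte version followed by a 4-byte coordinator epoch. A sketch of building a well-formed buffer, assuming CURRENT_END_TXN_MARKER_VALUE_SIZE is 6 bytes:

import java.nio.ByteBuffer;

public class EndTxnMarkerBufferSketch {
    public static void main(String[] args) {
        ByteBuffer value = ByteBuffer.allocate(6); // assumed minimum marker value size
        value.putShort((short) 0); // version, read via getShort(0) above
        value.putInt(42);          // coordinatorEpoch, read via getInt(2) above
        value.flip();
        System.out.println("version=" + value.getShort(0) + " epoch=" + value.getInt(2));
    }
}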
@Override
public String toString() {
return (value == null) ? "(null)" : '"' + getValue() + '"';
} | @Test
void testUnresolvedReference() {
var reference = ModelReference.unresolved(Optional.of("myModelId"),
Optional.of(new UrlReference("https://host:my/path")),
Optional.of(new FileReference("foo.txt")));
assertEquals("myModelId https://host:my/path foo.txt", reference.toString());
assertEquals(reference, ModelReference.valueOf(reference.toString()));
} |
public void reset() {
this.count = 0;
this.msgRateIn = 0;
this.msgThroughputIn = 0;
this.msgRateOut = 0;
this.msgThroughputOut = 0;
this.averageMsgSize = 0;
this.storageSize = 0;
this.backlogSize = 0;
this.bytesInCounter = 0;
this.msgInCounter = 0;
this.bytesOutCounter = 0;
this.msgOutCounter = 0;
this.publishers.clear();
this.publishersMap.clear();
this.subscriptions.clear();
this.waitingPublishers = 0;
this.replication.clear();
this.deduplicationStatus = null;
this.topicEpoch = null;
this.nonContiguousDeletedMessagesRanges = 0;
this.nonContiguousDeletedMessagesRangesSerializedSize = 0;
this.offloadedStorageSize = 0;
this.lastOffloadLedgerId = 0;
this.lastOffloadFailureTimeStamp = 0;
this.lastOffloadSuccessTimeStamp = 0;
this.publishRateLimitedTimes = 0L;
this.earliestMsgPublishTimeInBacklogs = 0L;
this.delayedMessageIndexSizeInBytes = 0;
this.compaction.reset();
this.ownerBroker = null;
this.bucketDelayedIndexStats.clear();
this.backlogQuotaLimitSize = 0;
this.backlogQuotaLimitTime = 0;
this.oldestBacklogMessageAgeSeconds = -1;
this.oldestBacklogMessageSubscriptionName = null;
} | @Test
public void testReset() {
TopicStatsImpl stats = new TopicStatsImpl();
stats.earliestMsgPublishTimeInBacklogs = 1L;
stats.reset();
assertEquals(stats.earliestMsgPublishTimeInBacklogs, 0L);
} |
@Nullable
public static ValueReference of(Object value) {
if (value instanceof Boolean) {
return of((Boolean) value);
} else if (value instanceof Double) {
return of((Double) value);
} else if (value instanceof Float) {
return of((Float) value);
} else if (value instanceof Integer) {
return of((Integer) value);
} else if (value instanceof Long) {
return of((Long) value);
} else if (value instanceof String) {
return of((String) value);
} else if (value instanceof Enum) {
return of((Enum) value);
} else if (value instanceof EncryptedValue encryptedValue) {
return of(encryptedValue);
} else {
return null;
}
} | @Test
public void deserializeEnum() throws IOException {
assertThat(objectMapper.readValue("{\"@type\":\"string\",\"@value\":\"A\"}", ValueReference.class)).isEqualTo(ValueReference.of(TestEnum.A));
assertThat(objectMapper.readValue("{\"@type\":\"string\",\"@value\":\"B\"}", ValueReference.class)).isEqualTo(ValueReference.of(TestEnum.B));
} |
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
if (!BuiltInQPChangeNotification.TYPE.equals(notification.getType())) {
return null;
}
BuiltInQPChangeNotificationBuilder profilesNotification = parse(notification);
StringBuilder message = new StringBuilder("The following built-in profiles have been updated:\n\n");
profilesNotification.getProfiles().stream()
.sorted(Comparator.comparing(Profile::getLanguageName).thenComparing(Profile::getProfileName))
.forEach(profile -> {
message.append("\"")
.append(profile.getProfileName())
.append("\" - ")
.append(profile.getLanguageName())
.append(": ")
.append(server.getPublicRootUrl()).append("/profiles/changelog?language=")
.append(profile.getLanguageKey())
.append("&name=")
.append(encode(profile.getProfileName()))
.append("&since=")
.append(formatDate(new Date(profile.getStartDate())))
.append("&to=")
.append(formatDate(new Date(profile.getEndDate())))
.append("\n");
int newRules = profile.getNewRules();
if (newRules > 0) {
message.append(" ").append(newRules).append(" new rule")
.append(plural(newRules))
.append('\n');
}
int updatedRules = profile.getUpdatedRules();
if (updatedRules > 0) {
message.append(" ").append(updatedRules).append(" rule")
.append(updatedRules > 1 ? "s have been updated" : " has been updated")
.append("\n");
}
int removedRules = profile.getRemovedRules();
if (removedRules > 0) {
message.append(" ").append(removedRules).append(" rule")
.append(plural(removedRules))
.append(" removed\n");
}
message.append("\n");
});
message.append("This is a good time to review your quality profiles and update them to benefit from the latest evolutions: ");
message.append(server.getPublicRootUrl()).append("/profiles");
// And finally return the email that will be sent
return new EmailMessage()
.setMessageId(BuiltInQPChangeNotification.TYPE)
.setSubject("Built-in quality profiles have been updated")
.setPlainTextMessage(message.toString());
} | @Test
public void notification_contains_count_of_updated_rules() {
String profileName = newProfileName();
String languageKey = newLanguageKey();
String languageName = newLanguageName();
BuiltInQPChangeNotificationBuilder notification = new BuiltInQPChangeNotificationBuilder()
.addProfile(Profile.newBuilder()
.setProfileName(profileName)
.setLanguageKey(languageKey)
.setLanguageName(languageName)
.setUpdatedRules(2)
.build());
EmailMessage emailMessage = underTest.format(notification.build());
assertMessage(emailMessage, "\n 2 rules have been updated\n");
} |
@ScalarOperator(CAST)
@SqlType(StandardTypes.DOUBLE)
public static double castToDouble(@SqlType(StandardTypes.SMALLINT) long value)
{
return value;
} | @Test
public void testCastToDouble()
{
assertFunction("cast(SMALLINT'37' as double)", DOUBLE, 37.0);
assertFunction("cast(SMALLINT'17' as double)", DOUBLE, 17.0);
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (final GarbageCollectorMXBean gc : garbageCollectors) {
final String name = WHITESPACE.matcher(gc.getName()).replaceAll("-");
gauges.put(name(name, "count"), (Gauge<Long>) gc::getCollectionCount);
gauges.put(name(name, "time"), (Gauge<Long>) gc::getCollectionTime);
}
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasGaugesForGcCountsAndElapsedTimes() {
assertThat(metrics.getMetrics().keySet())
.containsOnly("PS-OldGen.time", "PS-OldGen.count");
} |
public static <IN, OUT> CompletableFuture<OUT> thenComposeAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
Function<? super IN, ? extends CompletionStage<OUT>> composeFun) {
return completableFuture.isDone()
? completableFuture.thenCompose(composeFun)
: completableFuture.thenComposeAsync(composeFun, executor);
} | @Test
void testComposeAsyncIfNotDone() {
testFutureContinuation(
(CompletableFuture<?> future, Executor executor) ->
FutureUtils.thenComposeAsyncIfNotDone(future, executor, o -> null));
} |
@Override
List<DiscoveryNode> resolveNodes() {
try {
return lookup();
} catch (TimeoutException e) {
logger.warning(String.format("DNS lookup for serviceDns '%s' failed: DNS resolution timeout", serviceDns));
return Collections.emptyList();
} catch (UnknownHostException e) {
logger.warning(String.format("DNS lookup for serviceDns '%s' failed: unknown host", serviceDns));
return Collections.emptyList();
} catch (Exception e) {
logger.warning(String.format("DNS lookup for serviceDns '%s' failed", serviceDns), e);
return Collections.emptyList();
}
} | @Test
public void resolveTimeout() {
// given
ILogger logger = mock(ILogger.class);
DnsEndpointResolver dnsEndpointResolver = new DnsEndpointResolver(logger, SERVICE_DNS, UNSET_PORT, TEST_DNS_TIMEOUT_SECONDS, timingOutLookupProvider());
// when
List<DiscoveryNode> result = dnsEndpointResolver.resolveNodes();
// then
assertEquals(0, result.size());
verify(logger).warning(String.format("DNS lookup for serviceDns '%s' failed: DNS resolution timeout", SERVICE_DNS));
verify(logger, never()).warning(anyString(), any(Throwable.class));
} |
@VisibleForTesting
Entity exportNativeEntity(Collector collector, EntityDescriptorIds entityDescriptorIds) {
final SidecarCollectorEntity collectorEntity = SidecarCollectorEntity.create(
ValueReference.of(collector.name()),
ValueReference.of(collector.serviceType()),
ValueReference.of(collector.nodeOperatingSystem()),
ValueReference.of(collector.executablePath()),
collector.executeParameters() != null ? ValueReference.of(collector.executeParameters()) : null,
collector.validationParameters() != null ? ValueReference.of(collector.validationParameters()) : null,
ValueReference.of(collector.defaultTemplate()));
final JsonNode data = objectMapper.convertValue(collectorEntity, JsonNode.class);
return EntityV1.builder()
.id(ModelId.of(entityDescriptorIds.getOrThrow(collector.id(), ModelTypes.SIDECAR_COLLECTOR_V1)))
.type(TYPE_V1)
.data(data)
.build();
} | @Test
@MongoDBFixtures("SidecarCollectorFacadeTest.json")
public void exportNativeEntity() {
final Collector collector = collectorService.find("5b4c920b4b900a0024af0001");
final EntityDescriptor descriptor = EntityDescriptor.create(collector.id(), ModelTypes.SIDECAR_COLLECTOR_V1);
final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
final Entity entity = facade.exportNativeEntity(collector, entityDescriptorIds);
assertThat(entity).isInstanceOf(EntityV1.class);
assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
assertThat(entity.type()).isEqualTo(ModelTypes.SIDECAR_COLLECTOR_V1);
final EntityV1 entityV1 = (EntityV1) entity;
final SidecarCollectorEntity collectorEntity = objectMapper.convertValue(entityV1.data(), SidecarCollectorEntity.class);
assertThat(collectorEntity.name()).isEqualTo(ValueReference.of("filebeat"));
assertThat(collectorEntity.serviceType()).isEqualTo(ValueReference.of("exec"));
assertThat(collectorEntity.nodeOperatingSystem()).isEqualTo(ValueReference.of("linux"));
assertThat(collectorEntity.executablePath()).isEqualTo(ValueReference.of("/usr/lib/graylog-sidecar/filebeat"));
assertThat(collectorEntity.executeParameters()).isEqualTo(ValueReference.of("-c %s"));
assertThat(collectorEntity.validationParameters()).isEqualTo(ValueReference.of("test config -c %s"));
assertThat(collectorEntity.defaultTemplate()).isEqualTo(ValueReference.of(""));
} |
@Override
public Class<? extends HistogramFunctionBuilder> builder() {
return HistogramFunctionBuilder.class;
} | @Test
public void testBuilder() throws IllegalAccessException, InstantiationException {
HistogramFunctionInst inst = new HistogramFunctionInst();
inst.accept(
MeterEntity.newService("service-test", Layer.GENERAL),
new BucketedValues(
BUCKETS, new long[] {
1,
4,
10,
10
})
);
final StorageBuilder storageBuilder = inst.builder().newInstance();
// Simulate what the storage layer does: convert the DataTable to its string form.
final HashMapConverter.ToStorage hashMapConverter = new HashMapConverter.ToStorage();
storageBuilder.entity2Storage(inst, hashMapConverter);
final Map<String, Object> map = hashMapConverter.obtain();
map.put(DATASET, ((DataTable) map.get(DATASET)).toStorageData());
final HistogramFunction inst2 = (HistogramFunction) storageBuilder.storage2Entity(
new HashMapConverter.ToEntity(map));
Assertions.assertEquals(inst, inst2);
// HistogramFunction equality doesn't cover the dataset, so compare it explicitly.
Assertions.assertEquals(inst.getDataset(), inst2.getDataset());
} |
public FEELFnResult<String> invoke(@ParameterName("list") List<?> list, @ParameterName("delimiter") String delimiter) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
if (list.isEmpty()) {
return FEELFnResult.ofResult(""); // If list is empty, the result is the empty string
}
StringJoiner sj = new StringJoiner(delimiter != null ? delimiter : ""); // If delimiter is null, the string elements are joined without a separator
for (Object element : list) {
if (element == null) {
continue; // Null elements in the list parameter are ignored.
} else if (element instanceof CharSequence) {
sj.add((CharSequence) element);
} else {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains an element which is not a string"));
}
}
return FEELFnResult.ofResult(sj.toString());
} | @Test
void setStringJoinFunctionNullValues() {
FunctionTestUtil.assertResultError(stringJoinFunction.invoke(null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(stringJoinFunction.invoke((List<?>) null, null), InvalidParametersEvent.class);
} |
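
The null handling above (null elements skipped, a null delimiter treated as empty) can be mirrored with a plain StringJoiner, leaving out the FEEL error plumbing. A minimal sketch:

import java.util.Arrays;
import java.util.List;
import java.util.StringJoiner;

public class StringJoinSketch {
    static String join(List<?> list, String delimiter) {
        StringJoiner sj = new StringJoiner(delimiter != null ? delimiter : "");
        for (Object e : list) {
            if (e instanceof CharSequence) {
                sj.add((CharSequence) e); // null elements are simply skipped
            }
        }
        return sj.toString();
    }

    public static void main(String[] args) {
        System.out.println(join(Arrays.asList("a", null, "b"), "_")); // a_b
        System.out.println(join(Arrays.asList("a", "b", "c"), null)); // abc
    }
}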
@Override
public final void isEqualTo(@Nullable Object other) {
super.isEqualTo(other);
} | @Test
@GwtIncompatible("GWT behavior difference")
public void testJ2clCornerCaseDoubleVsFloat() {
// Under GWT, 1.23f.toString() is different than 1.23d.toString(), so the message omits types.
// TODO(b/35377736): Consider making Truth add the types anyway.
expectFailureWhenTestingThat(1.23).isEqualTo(1.23f);
assertFailureKeys("expected", "an instance of", "but was", "an instance of");
} |
@Override
protected CompletableFuture<Void> getCompletionFuture() {
return sourceThread.getCompletionFuture();
} | @Test
void testTriggeringCheckpointAfterSourceThreadFinished() throws Exception {
ResultPartition[] partitionWriters = new ResultPartition[2];
try (NettyShuffleEnvironment env =
new NettyShuffleEnvironmentBuilder()
.setNumNetworkBuffers(partitionWriters.length * 2)
.build()) {
for (int i = 0; i < partitionWriters.length; ++i) {
partitionWriters[i] =
PartitionTestUtils.createPartition(
env, ResultPartitionType.PIPELINED_BOUNDED, 1);
partitionWriters[i].setup();
}
final CompletableFuture<Long> checkpointCompleted = new CompletableFuture<>();
try (StreamTaskMailboxTestHarness<String> testHarness =
new StreamTaskMailboxTestHarnessBuilder<>(
SourceStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
.modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
.setCheckpointResponder(
new TestCheckpointResponder() {
@Override
public void acknowledgeCheckpoint(
JobID jobID,
ExecutionAttemptID executionAttemptID,
long checkpointId,
CheckpointMetrics checkpointMetrics,
TaskStateSnapshot subtaskState) {
super.acknowledgeCheckpoint(
jobID,
executionAttemptID,
checkpointId,
checkpointMetrics,
subtaskState);
checkpointCompleted.complete(checkpointId);
}
})
.addAdditionalOutput(partitionWriters)
.setupOperatorChain(new StreamSource<>(new MockSource(0, 0, 1)))
.finishForSingletonOperatorChain(StringSerializer.INSTANCE)
.build()) {
testHarness.processAll();
CompletableFuture<Void> taskFinished =
testHarness.getStreamTask().getCompletionFuture();
do {
testHarness.processAll();
} while (!taskFinished.isDone());
Future<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2);
// Notifies the result partition that all records are processed after the
// last checkpoint is triggered.
checkState(
checkpointFuture instanceof CompletableFuture,
"The trigger future should also be a CompletableFuture.");
((CompletableFuture<?>) checkpointFuture)
.thenAccept(
(ignored) -> {
for (ResultPartition resultPartition : partitionWriters) {
resultPartition.onSubpartitionAllDataProcessed(0);
}
});
checkpointCompleted.whenComplete(
(id, error) ->
testHarness.getStreamTask().notifyCheckpointCompleteAsync(2));
testHarness.finishProcessing();
assertThat(checkpointFuture.isDone()).isTrue();
// Each result partition should have emitted 1 barrier, 1 max watermark and 1
// EndOfUserRecordEvent.
for (ResultPartition resultPartition : partitionWriters) {
assertThat(resultPartition.getNumberOfQueuedBuffers()).isEqualTo(3);
}
}
} finally {
for (ResultPartitionWriter writer : partitionWriters) {
if (writer != null) {
writer.close();
}
}
}
} |
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
if (message == null) {
return null;
}
long messageFormat = 0;
Header header = null;
Properties properties = null;
Map<Symbol, Object> daMap = null;
Map<Symbol, Object> maMap = null;
Map<String,Object> apMap = null;
Map<Object, Object> footerMap = null;
Section body = convertBody(message);
if (message.isPersistent()) {
if (header == null) {
header = new Header();
}
header.setDurable(true);
}
byte priority = message.getPriority();
if (priority != Message.DEFAULT_PRIORITY) {
if (header == null) {
header = new Header();
}
header.setPriority(UnsignedByte.valueOf(priority));
}
String type = message.getType();
if (type != null) {
if (properties == null) {
properties = new Properties();
}
properties.setSubject(type);
}
MessageId messageId = message.getMessageId();
if (messageId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setMessageId(getOriginalMessageId(message));
}
ActiveMQDestination destination = message.getDestination();
if (destination != null) {
if (properties == null) {
properties = new Properties();
}
properties.setTo(destination.getQualifiedName());
if (maMap == null) {
maMap = new HashMap<>();
}
maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
}
ActiveMQDestination replyTo = message.getReplyTo();
if (replyTo != null) {
if (properties == null) {
properties = new Properties();
}
properties.setReplyTo(replyTo.getQualifiedName());
if (maMap == null) {
maMap = new HashMap<>();
}
maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
}
String correlationId = message.getCorrelationId();
if (correlationId != null) {
if (properties == null) {
properties = new Properties();
}
try {
properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
} catch (AmqpProtocolException e) {
properties.setCorrelationId(correlationId);
}
}
long expiration = message.getExpiration();
if (expiration != 0) {
long ttl = expiration - System.currentTimeMillis();
if (ttl < 0) {
ttl = 1;
}
if (header == null) {
header = new Header();
}
header.setTtl(new UnsignedInteger((int) ttl));
if (properties == null) {
properties = new Properties();
}
properties.setAbsoluteExpiryTime(new Date(expiration));
}
long timeStamp = message.getTimestamp();
if (timeStamp != 0) {
if (properties == null) {
properties = new Properties();
}
properties.setCreationTime(new Date(timeStamp));
}
// JMSX Message Properties
int deliveryCount = message.getRedeliveryCounter();
if (deliveryCount > 0) {
if (header == null) {
header = new Header();
}
header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
}
String userId = message.getUserID();
if (userId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
}
String groupId = message.getGroupID();
if (groupId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setGroupId(groupId);
}
int groupSequence = message.getGroupSequence();
if (groupSequence > 0) {
if (properties == null) {
properties = new Properties();
}
properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
}
final Map<String, Object> entries;
try {
entries = message.getProperties();
} catch (IOException e) {
throw JMSExceptionSupport.create(e);
}
for (Map.Entry<String, Object> entry : entries.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
if (key.startsWith(JMS_AMQP_PREFIX)) {
if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
// skip transformer appended properties
continue;
} else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
// skip transformer appended properties
continue;
} else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
continue;
} else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
if (header == null) {
header = new Header();
}
continue;
} else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
continue;
} else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (maMap == null) {
maMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
maMap.put(Symbol.valueOf(name), value);
continue;
} else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
if (header == null) {
header = new Header();
}
header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
continue;
} else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
continue;
} else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
continue;
} else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
continue;
} else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (daMap == null) {
daMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
daMap.put(Symbol.valueOf(name), value);
continue;
} else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (footerMap == null) {
footerMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
footerMap.put(Symbol.valueOf(name), value);
continue;
}
} else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
// strip off the scheduled message properties
continue;
}
// The property didn't map into any other slot so we store it in the
// Application Properties section of the message.
if (apMap == null) {
apMap = new HashMap<>();
}
apMap.put(key, value);
int messageType = message.getDataStructureType();
if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
// Record the data structure type so advisory messages can be recognized downstream
Object data = message.getDataStructure();
if(data != null) {
apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
}
}
}
final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
encoder.setByteBuffer(buffer);
if (header != null) {
encoder.writeObject(header);
}
if (daMap != null) {
encoder.writeObject(new DeliveryAnnotations(daMap));
}
if (maMap != null) {
encoder.writeObject(new MessageAnnotations(maMap));
}
if (properties != null) {
encoder.writeObject(properties);
}
if (apMap != null) {
encoder.writeObject(new ApplicationProperties(apMap));
}
if (body != null) {
encoder.writeObject(body);
}
if (footerMap != null) {
encoder.writeObject(new Footer(footerMap));
}
return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
} | @Test
public void testConvertCompressedBytesMessageToAmqpMessageWithAmqpValueBody() throws Exception {
byte[] expectedPayload = new byte[] { 8, 16, 24, 32 };
ActiveMQBytesMessage outbound = createBytesMessage(true);
outbound.setShortProperty(JMS_AMQP_ORIGINAL_ENCODING, AMQP_VALUE_BINARY);
outbound.writeBytes(expectedPayload);
outbound.storeContent();
outbound.onSend();
JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
EncodedMessage encoded = transformer.transform(outbound);
assertNotNull(encoded);
Message amqp = encoded.decode();
assertNotNull(amqp.getBody());
assertTrue(amqp.getBody() instanceof AmqpValue);
assertTrue(((AmqpValue) amqp.getBody()).getValue() instanceof Binary);
assertEquals(4, ((Binary) ((AmqpValue) amqp.getBody()).getValue()).getLength());
Binary amqpData = (Binary) ((AmqpValue) amqp.getBody()).getValue();
Binary inputData = new Binary(expectedPayload);
assertTrue(inputData.equals(amqpData));
} |
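
The transformer lazily allocates each AMQP section only when some field actually needs it, via repeated null checks. A compact sketch of that lazy-init idiom with a hypothetical generic helper (not part of the transformer itself):

import java.util.function.Supplier;

public class LazyInitSketch {
    // Returns the existing instance, or creates one on first use --
    // the same null-check idiom repeated throughout transform() above.
    static <T> T lazyGet(T current, Supplier<T> factory) {
        return current != null ? current : factory.get();
    }

    public static void main(String[] args) {
        StringBuilder header = null;
        header = lazyGet(header, StringBuilder::new); // allocated on first need
        header.append("durable=true");
        System.out.println(header);
    }
}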
public static KafkaRoutineLoadJob fromCreateStmt(CreateRoutineLoadStmt stmt) throws UserException {
// check db and table
Database db = GlobalStateMgr.getCurrentState().getDb(stmt.getDBName());
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDBName());
}
Table table = db.getTable(stmt.getTableName());
if (table == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, stmt.getTableName());
}
long tableId = table.getId();
Locker locker = new Locker();
locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(tableId), LockType.READ);
try {
unprotectedCheckMeta(db, stmt.getTableName(), stmt.getRoutineLoadDesc());
Load.checkMergeCondition(stmt.getMergeConditionStr(), (OlapTable) table, table.getFullSchema(), false);
} finally {
locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(tableId), LockType.READ);
}
// init kafka routine load job
long id = GlobalStateMgr.getCurrentState().getNextId();
KafkaRoutineLoadJob kafkaRoutineLoadJob = new KafkaRoutineLoadJob(id, stmt.getName(),
db.getId(), tableId, stmt.getKafkaBrokerList(), stmt.getKafkaTopic());
kafkaRoutineLoadJob.setOptional(stmt);
kafkaRoutineLoadJob.checkCustomProperties();
return kafkaRoutineLoadJob;
} | @Test
public void testSerializationCsv(@Mocked GlobalStateMgr globalStateMgr,
@Injectable Database database,
@Injectable OlapTable table) throws UserException {
CreateRoutineLoadStmt createRoutineLoadStmt = initCreateRoutineLoadStmt();
Map<String, String> jobProperties = createRoutineLoadStmt.getJobProperties();
jobProperties.put("format", "csv");
jobProperties.put("trim_space", "true");
jobProperties.put("enclose", "'");
jobProperties.put("escape", "\\");
jobProperties.put("timezone", "Asia/Shanghai");
createRoutineLoadStmt.checkJobProperties();
RoutineLoadDesc routineLoadDesc = new RoutineLoadDesc(columnSeparator, null, null, null, partitionNames);
Deencapsulation.setField(createRoutineLoadStmt, "routineLoadDesc", routineLoadDesc);
List<Pair<Integer, Long>> partitionIdToOffset = Lists.newArrayList();
for (String s : kafkaPartitionString.split(",")) {
partitionIdToOffset.add(new Pair<>(Integer.valueOf(s), 0L));
}
Deencapsulation.setField(createRoutineLoadStmt, "kafkaPartitionOffsets", partitionIdToOffset);
Deencapsulation.setField(createRoutineLoadStmt, "kafkaBrokerList", serverAddress);
Deencapsulation.setField(createRoutineLoadStmt, "kafkaTopic", topicName);
long dbId = 1L;
long tableId = 2L;
new Expectations() {
{
database.getTable(tableNameString);
minTimes = 0;
result = table;
database.getId();
minTimes = 0;
result = dbId;
table.getId();
minTimes = 0;
result = tableId;
table.isOlapOrCloudNativeTable();
minTimes = 0;
result = true;
}
};
new MockUp<KafkaUtil>() {
@Mock
public List<Integer> getAllKafkaPartitions(String brokerList, String topic,
ImmutableMap<String, String> properties) throws UserException {
return Lists.newArrayList(1, 2, 3);
}
};
String createSQL = "CREATE ROUTINE LOAD db1.job1 ON table1 " +
"PROPERTIES('format' = 'csv', 'trim_space' = 'true') " +
"FROM KAFKA('kafka_broker_list' = 'http://127.0.0.1:8080','kafka_topic' = 'topic1');";
KafkaRoutineLoadJob job = KafkaRoutineLoadJob.fromCreateStmt(createRoutineLoadStmt);
job.setOrigStmt(new OriginStatement(createSQL, 0));
Assert.assertEquals("csv", job.getFormat());
Assert.assertTrue(job.isTrimspace());
Assert.assertEquals((byte) "'".charAt(0), job.getEnclose());
Assert.assertEquals((byte) "\\".charAt(0), job.getEscape());
String data = GsonUtils.GSON.toJson(job, KafkaRoutineLoadJob.class);
KafkaRoutineLoadJob newJob = GsonUtils.GSON.fromJson(data, KafkaRoutineLoadJob.class);
Assert.assertEquals("csv", newJob.getFormat());
Assert.assertTrue(newJob.isTrimspace());
Assert.assertEquals((byte) "'".charAt(0), newJob.getEnclose());
Assert.assertEquals((byte) "\\".charAt(0), newJob.getEscape());
} |
static Schema toGenericAvroSchema(
String schemaName, List<TableFieldSchema> fieldSchemas, @Nullable String namespace) {
String nextNamespace = namespace == null ? null : String.format("%s.%s", namespace, schemaName);
List<Field> avroFields = new ArrayList<>();
for (TableFieldSchema bigQueryField : fieldSchemas) {
avroFields.add(convertField(bigQueryField, nextNamespace));
}
return Schema.createRecord(
schemaName,
"Translated Avro Schema for " + schemaName,
namespace == null ? "org.apache.beam.sdk.io.gcp.bigquery" : namespace,
false,
avroFields);
} | @Test
public void testSchemaCollisionsInAvroConversion() {
TableSchema schema = new TableSchema();
schema.setFields(
Lists.newArrayList(
new TableFieldSchema()
.setName("key_value_pair_1")
.setType("RECORD")
.setMode("REPEATED")
.setFields(
Lists.newArrayList(
new TableFieldSchema().setName("key").setType("STRING"),
new TableFieldSchema()
.setName("value")
.setType("RECORD")
.setFields(
Lists.newArrayList(
new TableFieldSchema()
.setName("string_value")
.setType("STRING"),
new TableFieldSchema().setName("int_value").setType("INTEGER"),
new TableFieldSchema().setName("double_value").setType("FLOAT"),
new TableFieldSchema()
.setName("float_value")
.setType("FLOAT"))))),
new TableFieldSchema()
.setName("key_value_pair_2")
.setType("RECORD")
.setMode("REPEATED")
.setFields(
Lists.newArrayList(
new TableFieldSchema().setName("key").setType("STRING"),
new TableFieldSchema()
.setName("value")
.setType("RECORD")
.setFields(
Lists.newArrayList(
new TableFieldSchema()
.setName("string_value")
.setType("STRING"),
new TableFieldSchema().setName("int_value").setType("INTEGER"),
new TableFieldSchema().setName("double_value").setType("FLOAT"),
new TableFieldSchema()
.setName("float_value")
.setType("FLOAT"))))),
new TableFieldSchema()
.setName("key_value_pair_3")
.setType("RECORD")
.setMode("REPEATED")
.setFields(
Lists.newArrayList(
new TableFieldSchema().setName("key").setType("STRING"),
new TableFieldSchema()
.setName("value")
.setType("RECORD")
.setFields(
Lists.newArrayList(
new TableFieldSchema()
.setName("key_value_pair_1")
.setType("RECORD")
.setMode("REPEATED")
.setFields(
Lists.newArrayList(
new TableFieldSchema()
.setName("key")
.setType("STRING"),
new TableFieldSchema()
.setName("value")
.setType("RECORD")
.setFields(
Lists.newArrayList(
new TableFieldSchema()
.setName("string_value")
.setType("STRING"),
new TableFieldSchema()
.setName("int_value")
.setType("INTEGER"),
new TableFieldSchema()
.setName("double_value")
.setType("FLOAT"),
new TableFieldSchema()
.setName("float_value")
.setType("FLOAT"))))))))),
new TableFieldSchema().setName("platform").setType("STRING")));
// Calling toString is sufficient here, since it exercises Avro's schema conversion (including name resolution)
String output = BigQueryAvroUtils.toGenericAvroSchema("root", schema.getFields()).toString();
assertThat(output.length(), greaterThan(0));
} |
public synchronized ConnectionProfile createBQDestinationConnectionProfile(
String connectionProfileId) {
LOG.info(
"Creating BQ Destination Connection Profile {} in project {}.",
connectionProfileId,
projectId);
try {
ConnectionProfile.Builder connectionProfileBuilder =
ConnectionProfile.newBuilder()
.setDisplayName(connectionProfileId)
.setStaticServiceIpConnectivity(StaticServiceIpConnectivity.getDefaultInstance())
.setBigqueryProfile(BigQueryProfile.newBuilder());
CreateConnectionProfileRequest request =
CreateConnectionProfileRequest.newBuilder()
.setParent(LocationName.of(projectId, location).toString())
.setConnectionProfile(connectionProfileBuilder)
.setConnectionProfileId(connectionProfileId)
.build();
ConnectionProfile reference = datastreamClient.createConnectionProfileAsync(request).get();
createdConnectionProfileIds.add(connectionProfileId);
LOG.info(
"Successfully created BQ Destination Connection Profile {} in project {}.",
connectionProfileId,
projectId);
return reference;
} catch (ExecutionException | InterruptedException e) {
throw new DatastreamResourceManagerException(
"Failed to create BQ destination connection profile. ", e);
}
} | @Test
public void testCreateBQDestinationConnectionShouldCreateSuccessfully()
throws ExecutionException, InterruptedException {
ConnectionProfile connectionProfile = ConnectionProfile.getDefaultInstance();
when(datastreamClient
.createConnectionProfileAsync(any(CreateConnectionProfileRequest.class))
.get())
.thenReturn(connectionProfile);
assertThat(testManager.createBQDestinationConnectionProfile(CONNECTION_PROFILE_ID))
.isEqualTo(connectionProfile);
} |
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
return resolveRequestConfig(propertyName);
} else if (propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
&& !propertyName.startsWith(KSQL_STREAMS_PREFIX)) {
return resolveKsqlConfig(propertyName);
}
return resolveStreamsConfig(propertyName, strict);
} | @Test
public void shouldResolveProducerConfig() {
assertThat(resolver.resolve(ProducerConfig.BUFFER_MEMORY_CONFIG, true),
is(resolvedItem(ProducerConfig.BUFFER_MEMORY_CONFIG, PRODUCER_CONFIG_DEF)));
} |
public Matrix mm(Transpose transA, Matrix A, Transpose transB, Matrix B) {
return mm(transA, A, transB, B, 1.0f, 0.0f);
} | @Test
public void testMm() {
System.out.println("mm");
float[][] A = {
{ 0.7220180f, 0.07121225f, 0.6881997f},
{-0.2648886f, -0.89044952f, 0.3700456f},
{-0.6391588f, 0.44947578f, 0.6240573f}
};
float[][] B = {
{0.6881997f, -0.07121225f, 0.7220180f},
{0.3700456f, 0.89044952f, -0.2648886f},
{0.6240573f, -0.44947578f, -0.6391588f}
};
float[][] C = {
{ 0.9527204f, -0.2973347f, 0.06257778f},
{-0.2808735f, -0.9403636f, -0.19190231f},
{ 0.1159052f, 0.1652528f, -0.97941688f}
};
float[][] D = {
{ 0.9887140f, 0.1482942f, -0.0212965f},
{ 0.1482942f, -0.9889421f, -0.0015881f},
{-0.0212965f, -0.0015881f, -0.9997719f}
};
float[][] E = {
{0.0000f, 0.0000f, 1.0000f},
{0.0000f, -1.0000f, 0.0000f},
{1.0000f, 0.0000f, 0.0000f}
};
Matrix a = Matrix.of(A);
Matrix b = Matrix.of(B);
float[][] F = b.mm(a).transpose().toArray();
assertTrue(MathEx.equals(a.mm(b).toArray(), C, 1E-6f));
assertTrue(MathEx.equals(a.mt(b).toArray(), D, 1E-6f));
assertTrue(MathEx.equals(a.tm(b).toArray(), E, 1E-6f));
assertTrue(MathEx.equals(a.tt(b).toArray(), F, 1E-6f));
} |
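
The test exercises C = op(A)·op(B) under the four transpose combinations. As a sanity reference, a naive triple-loop multiply reproduces the plain a.mm(b) case; a small sketch with toy 2x2 inputs:

public class NaiveMatMulSketch {
    // Naive C = A * B for square float matrices -- a plausible reference
    // implementation for checking optimized mm results against.
    static float[][] mm(float[][] a, float[][] b) {
        int n = a.length;
        float[][] c = new float[n][n];
        for (int i = 0; i < n; i++)
            for (int k = 0; k < n; k++)
                for (int j = 0; j < n; j++)
                    c[i][j] += a[i][k] * b[k][j];
        return c;
    }

    public static void main(String[] args) {
        float[][] a = {{1, 2}, {3, 4}};
        float[][] b = {{5, 6}, {7, 8}};
        float[][] c = mm(a, b);
        System.out.println(c[0][0] + " " + c[0][1]); // 19.0 22.0
        System.out.println(c[1][0] + " " + c[1][1]); // 43.0 50.0
    }
}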
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
final ArchiveEventCode eventCode,
final MutableDirectBuffer buffer,
final int offset,
final StringBuilder builder)
{
int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
HEADER_DECODER.wrap(buffer, offset + encodedLength);
encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
switch (eventCode)
{
case CMD_IN_CONNECT:
CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendConnect(builder);
break;
case CMD_IN_CLOSE_SESSION:
CLOSE_SESSION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendCloseSession(builder);
break;
case CMD_IN_START_RECORDING:
START_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording(builder);
break;
case CMD_IN_STOP_RECORDING:
STOP_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecording(builder);
break;
case CMD_IN_REPLAY:
REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplay(builder);
break;
case CMD_IN_STOP_REPLAY:
STOP_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplay(builder);
break;
case CMD_IN_LIST_RECORDINGS:
LIST_RECORDINGS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordings(builder);
break;
case CMD_IN_LIST_RECORDINGS_FOR_URI:
LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingsForUri(builder);
break;
case CMD_IN_LIST_RECORDING:
LIST_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecording(builder);
break;
case CMD_IN_EXTEND_RECORDING:
EXTEND_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording(builder);
break;
case CMD_IN_RECORDING_POSITION:
RECORDING_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendRecordingPosition(builder);
break;
case CMD_IN_TRUNCATE_RECORDING:
TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTruncateRecording(builder);
break;
case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingSubscription(builder);
break;
case CMD_IN_STOP_POSITION:
STOP_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopPosition(builder);
break;
case CMD_IN_FIND_LAST_MATCHING_RECORD:
FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendFindLastMatchingRecord(builder);
break;
case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingSubscriptions(builder);
break;
case CMD_IN_START_BOUNDED_REPLAY:
BOUNDED_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartBoundedReplay(builder);
break;
case CMD_IN_STOP_ALL_REPLAYS:
STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopAllReplays(builder);
break;
case CMD_IN_REPLICATE:
REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate(builder);
break;
case CMD_IN_STOP_REPLICATION:
STOP_REPLICATION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplication(builder);
break;
case CMD_IN_START_POSITION:
START_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartPosition(builder);
break;
case CMD_IN_DETACH_SEGMENTS:
DETACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDetachSegments(builder);
break;
case CMD_IN_DELETE_DETACHED_SEGMENTS:
DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDeleteDetachedSegments(builder);
break;
case CMD_IN_PURGE_SEGMENTS:
PURGE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeSegments(builder);
break;
case CMD_IN_ATTACH_SEGMENTS:
ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAttachSegments(builder);
break;
case CMD_IN_MIGRATE_SEGMENTS:
MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendMigrateSegments(builder);
break;
case CMD_IN_AUTH_CONNECT:
AUTH_CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAuthConnect(builder);
break;
case CMD_IN_KEEP_ALIVE:
KEEP_ALIVE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendKeepAlive(builder);
break;
case CMD_IN_TAGGED_REPLICATE:
TAGGED_REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTaggedReplicate(builder);
break;
case CMD_IN_START_RECORDING2:
START_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording2(builder);
break;
case CMD_IN_EXTEND_RECORDING2:
EXTEND_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording2(builder);
break;
case CMD_IN_STOP_RECORDING_BY_IDENTITY:
STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingByIdentity(builder);
break;
case CMD_IN_PURGE_RECORDING:
PURGE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeRecording(builder);
break;
case CMD_IN_REPLICATE2:
REPLICATE_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate2(builder);
break;
case CMD_IN_REQUEST_REPLAY_TOKEN:
REPLAY_TOKEN_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplayToken(builder);
break;
default:
builder.append(": unknown command");
}
} | @Test
void controlRequestListRecordingsForUri()
{
internalEncodeLogHeader(buffer, 0, 32, 32, () -> 100_000_000L);
final ListRecordingsForUriRequestEncoder requestEncoder = new ListRecordingsForUriRequestEncoder();
requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
.controlSessionId(9)
.correlationId(78)
.fromRecordingId(45)
.recordCount(10)
.streamId(200)
.channel("CH");
dissectControlRequest(CMD_IN_LIST_RECORDINGS_FOR_URI, buffer, 0, builder);
assertEquals("[0.100000000] " + CONTEXT + ": " + CMD_IN_LIST_RECORDINGS_FOR_URI.name() + " [32/32]:" +
" controlSessionId=9" +
" correlationId=78" +
" fromRecordingId=45" +
" recordCount=10" +
" streamId=200" +
" channel=CH",
builder.toString());
} |
public static URL valueOf(String url) {
return valueOf(url, false);
} | @Test
void test_equals() throws Exception {
URL url1 = URL.valueOf(
"dubbo://admin:hello1234@10.20.130.230:20880/context/path?version=1.0.0&application=morgan");
assertURLStrDecoder(url1);
Map<String, String> params = new HashMap<String, String>();
params.put("version", "1.0.0");
params.put("application", "morgan");
URL url2 = new ServiceConfigURL("dubbo", "admin", "hello1234", "10.20.130.230", 20880, "context/path", params);
assertURLStrDecoder(url2);
assertEquals(url1, url2);
} |
public static AztecCode encode(String data) {
return encode(data.getBytes(StandardCharsets.ISO_8859_1));
} | @Test(expected = IllegalArgumentException.class)
public void testBorderCompact4CaseFailed() {
// Compact(4) can hold 608 bits of information, but at most 504 can be data. The rest must
// be error correction
String alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
// encodes as 26 * 5 * 4 = 520 bits of data, which exceeds the 504-bit limit
String alphabet4 = alphabet + alphabet + alphabet + alphabet;
Encoder.encode(alphabet4, 0, -4);
} |
public boolean satisfies(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if (this.vcpu < other.vcpu) return false;
if (this.memoryGiB < other.memoryGiB) return false;
if (this.diskGb < other.diskGb) return false;
if (this.bandwidthGbps < other.bandwidthGbps) return false;
if (this.gpuResources.lessThan(other.gpuResources)) return false;
// Why doesn't a fast disk satisfy a slow disk? Because if slow disk is explicitly specified
// (i.e. not "any"), you should not randomly, sometimes get a faster disk, as that means you may
// draw conclusions about performance on the basis of better resources than you think you have
if (other.diskSpeed != DiskSpeed.any && other.diskSpeed != this.diskSpeed) return false;
// Same reasoning as the above
if (other.storageType != StorageType.any && other.storageType != this.storageType) return false;
// Same reasoning as the above
if (other.architecture != Architecture.any && other.architecture != this.architecture) return false;
return true;
} | @Test
void testSatisfies() {
var hostResources = new NodeResources(1, 2, 3, 1);
assertTrue(hostResources.satisfies(new NodeResources(1, 2, 3, 1)));
assertTrue(hostResources.satisfies(new NodeResources(1, 1, 1, 1)));
assertFalse(hostResources.satisfies(new NodeResources(2, 2, 3, 1)));
assertFalse(hostResources.satisfies(new NodeResources(1, 3, 3, 1)));
assertFalse(hostResources.satisfies(new NodeResources(1, 2, 4, 1)));
var gpuHostResources = new NodeResources(1, 2, 3, 1,
NodeResources.DiskSpeed.fast,
NodeResources.StorageType.local,
NodeResources.Architecture.x86_64,
new NodeResources.GpuResources(1, 16));
assertTrue(gpuHostResources.satisfies(new NodeResources(1, 2, 3, 1,
NodeResources.DiskSpeed.fast,
NodeResources.StorageType.local,
NodeResources.Architecture.x86_64,
new NodeResources.GpuResources(1, 16))));
assertFalse(gpuHostResources.satisfies(new NodeResources(1, 2, 3, 1,
NodeResources.DiskSpeed.fast,
NodeResources.StorageType.local,
NodeResources.Architecture.x86_64,
new NodeResources.GpuResources(1, 32))));
assertFalse(hostResources.satisfies(gpuHostResources));
} |
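The comments above explain why a specific (non-"any") request must match exactly. A minimal, self-contained model of that asymmetric check, reduced to the disk-speed dimension only:

// Sketch: "any" is satisfied by anything, a specific request only by an exact
// match — a host never silently upgrades a slow request to a fast disk.
enum DiskSpeed { fast, slow, any }

final class DiskCheck {
    static boolean satisfies(DiskSpeed host, DiskSpeed requested) {
        return requested == DiskSpeed.any || requested == host;
    }

    public static void main(String[] args) {
        System.out.println(satisfies(DiskSpeed.fast, DiskSpeed.any));  // true
        System.out.println(satisfies(DiskSpeed.fast, DiskSpeed.slow)); // false: no silent upgrade
    }
}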
@Override
public void triggerOnIndexCreation() {
try (DbSession dbSession = dbClient.openSession(false)) {
// remove already existing indexing task, if any
removeExistingIndexationTasks(dbSession);
dbClient.branchDao().updateAllNeedIssueSync(dbSession);
List<BranchDto> branchInNeedOfIssueSync = dbClient.branchDao().selectBranchNeedingIssueSync(dbSession);
LOG.info("{} branch found in need of issue sync.", branchInNeedOfIssueSync.size());
if (branchInNeedOfIssueSync.isEmpty()) {
return;
}
List<String> projectUuids = branchInNeedOfIssueSync.stream().map(BranchDto::getProjectUuid).distinct().collect(toCollection(ArrayList<String>::new));
LOG.info("{} projects found in need of issue sync.", projectUuids.size());
sortProjectUuids(dbSession, projectUuids);
Map<String, List<BranchDto>> branchesByProject = branchInNeedOfIssueSync.stream()
.collect(Collectors.groupingBy(BranchDto::getProjectUuid));
List<CeTaskSubmit> tasks = new ArrayList<>();
for (String projectUuid : projectUuids) {
List<BranchDto> branches = branchesByProject.get(projectUuid);
for (BranchDto branch : branches) {
tasks.add(buildTaskSubmit(branch));
}
}
ceQueue.massSubmit(tasks);
dbSession.commit();
}
} | @Test
public void order_by_last_analysis_date() {
BranchDto dto = new BranchDto()
.setBranchType(BRANCH)
.setKey("branch_1")
.setUuid("branch_uuid1")
.setProjectUuid("project_uuid1")
.setIsMain(false);
dbClient.branchDao().insert(dbTester.getSession(), dto);
dbTester.commit();
insertSnapshot("analysis_1", "project_uuid1", 1);
BranchDto dto2 = new BranchDto()
.setBranchType(BRANCH)
.setKey("branch_2")
.setUuid("branch_uuid2")
.setProjectUuid("project_uuid2")
.setIsMain(false);
dbClient.branchDao().insert(dbTester.getSession(), dto2);
dbTester.commit();
insertSnapshot("analysis_2", "project_uuid2", 2);
underTest.triggerOnIndexCreation();
verify(ceQueue, times(2)).prepareSubmit();
ArgumentCaptor<Collection<CeTaskSubmit>> captor = ArgumentCaptor.forClass(Collection.class);
verify(ceQueue, times(1)).massSubmit(captor.capture());
List<Collection<CeTaskSubmit>> captures = captor.getAllValues();
assertThat(captures).hasSize(1);
Collection<CeTaskSubmit> tasks = captures.get(0);
assertThat(tasks).hasSize(2);
assertThat(tasks)
.extracting(p -> p.getComponent().get().getUuid())
.containsExactly("branch_uuid2", "branch_uuid1");
assertThat(logTester.logs(Level.INFO))
.contains("2 projects found in need of issue sync.");
} |
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
} | @Test
public void testMergeUpstreamMergeWithLessStrictMode() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
"{'workflow_default_param': {'type': 'STRING','value': 'default_value','mode': 'MUTABLE_ON_START', 'meta': {'source': 'SYSTEM_DEFAULT'}}}");
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap(
"{'workflow_default_param': {'type': 'STRING','value': 'parent_wf_defined_value','mode': 'MUTABLE', 'meta': {'source': 'DEFINITION'}}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, upstreamDefinitionMergeContext);
assertEquals(1, allParams.size());
assertEquals("parent_wf_defined_value", allParams.get("workflow_default_param").getValue());
assertEquals(ParamMode.MUTABLE_ON_START, allParams.get("workflow_default_param").getMode());
assertEquals(ParamSource.SUBWORKFLOW, allParams.get("workflow_default_param").getSource());
} |
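mergeParams recurses into literal MAP parameters and overwrites leaves. Stripped of the mode/source bookkeeping, the core shape is a plain recursive map merge; a sketch assuming values are either nested Maps or leaves:

import java.util.Map;

// Sketch of the recursive merge: nested maps merge key-by-key, leaves are
// overwritten by the incoming value (mirroring paramToMerge winning above).
final class MapMerge {
    @SuppressWarnings("unchecked")
    static void merge(Map<String, Object> base, Map<String, Object> incoming) {
        incoming.forEach((key, value) -> {
            Object existing = base.get(key);
            if (existing instanceof Map && value instanceof Map) {
                merge((Map<String, Object>) existing, (Map<String, Object>) value);
            } else {
                base.put(key, value);
            }
        });
    }
}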
@NonNull
@Override
public HealthResponse healthResponse(final Map<String, Collection<String>> queryParams) {
final String type = queryParams.getOrDefault(CHECK_TYPE_QUERY_PARAM, Collections.emptyList())
.stream()
.findFirst()
.orElse(null);
final Collection<HealthStateView> views = getViews(queryParams);
final String responseBody;
try {
responseBody = mapper.writeValueAsString(views);
} catch (final Exception e) {
LOGGER.error("Failed to serialize health state views: {}", views, e);
throw new RuntimeException(e);
}
final boolean healthy = healthStatusChecker.isHealthy(type);
final int status;
if (healthy) {
// HTTP OK
status = 200;
} else {
// HTTP Service unavailable
status = 503;
}
return new HealthResponse(healthy, responseBody, MEDIA_TYPE, status);
} | @Test
void shouldHandleZeroHealthStateViewsCorrectly() {
// given
// when
when(healthStatusChecker.isHealthy(isNull())).thenReturn(true);
final HealthResponse response = jsonHealthResponseProvider.healthResponse(Collections.emptyMap());
// then
assertThat(response.isHealthy()).isTrue();
assertThat(response.getContentType()).isEqualTo(MediaType.APPLICATION_JSON);
assertThat(response.getMessage()).isEqualToIgnoringWhitespace("[]");
verifyNoInteractions(healthStateAggregator);
} |
@Override
public boolean equals(Object toBeCompared) {
if (toBeCompared instanceof ControllerInfo) {
ControllerInfo that = (ControllerInfo) toBeCompared;
return Objects.equals(this.type, that.type) &&
Objects.equals(this.ip, that.ip) &&
Objects.equals(this.port, that.port);
}
return false;
} | @Test
public void testListEquals() {
String target1 = "ptcp:6653:192.168.1.1";
ControllerInfo controllerInfo1 = new ControllerInfo(target1);
String target2 = "ptcp:6653:192.168.1.1";
ControllerInfo controllerInfo2 = new ControllerInfo(target2);
String target3 = "tcp:192.168.1.1:6653";
ControllerInfo controllerInfo3 = new ControllerInfo(target3);
String target4 = "tcp:192.168.1.1:6653";
ControllerInfo controllerInfo4 = new ControllerInfo(target4);
List<ControllerInfo> list1 = new ArrayList<>(Arrays.asList(controllerInfo1, controllerInfo3));
List<ControllerInfo> list2 = new ArrayList<>(Arrays.asList(controllerInfo2, controllerInfo4));
assertTrue("wrong equals list method", list1.equals(list2));
} |
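An equals over type, ip, and port must be paired with a hashCode over the same fields to keep hash-based collections correct; a hypothetical companion, assuming the three fields used above (the real ControllerInfo may differ):

@Override
public int hashCode() {
    // Must use exactly the fields compared in equals(): type, ip, port.
    return java.util.Objects.hash(type, ip, port);
}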
public InstantAndValue<T> remove(MetricKey metricKey) {
return counters.remove(metricKey);
} | @Test
public void testRemoveWithNullKey() {
LastValueTracker<Double> lastValueTracker = new LastValueTracker<>();
assertThrows(NullPointerException.class, () -> lastValueTracker.remove(null));
} |
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
CruiseConfig configForEdit;
CruiseConfig config;
LOGGER.debug("[Config Save] Loading config holder");
configForEdit = deserializeConfig(content);
if (callback != null) callback.call(configForEdit);
config = preprocessAndValidate(configForEdit);
return new GoConfigHolder(config, configForEdit);
} | @Test
void shouldLoadConfigWithEnvironment() throws Exception {
String content = configWithEnvironments(
"""
<environments>
<environment name='uat' />
<environment name='prod' />
</environments>""", CONFIG_SCHEMA_VERSION);
EnvironmentsConfig environmentsConfig = xmlLoader.loadConfigHolder(content).config.getEnvironments();
EnvironmentPipelineMatchers matchers = environmentsConfig.matchers();
assertThat(matchers.size()).isEqualTo(2);
} |
@Override
public Reader createReader(ResultSubpartitionView subpartitionView) throws IOException {
checkState(!fileChannel.isOpen());
final FileChannel fc = FileChannel.open(filePath, StandardOpenOption.READ);
return new FileBufferReader(fc, memorySegmentSize, subpartitionView);
} | @TestTemplate
void testReadNextBuffer() throws Exception {
final int numberOfBuffers = 3;
try (final BoundedData data = createBoundedData()) {
writeBuffers(data, numberOfBuffers);
final BoundedData.Reader reader = data.createReader();
final Buffer buffer1 = reader.nextBuffer();
final Buffer buffer2 = reader.nextBuffer();
assertThat(buffer1).isNotNull();
assertThat(buffer2).isNotNull();
// there are only two available memory segments for reading data
assertThat(reader.nextBuffer()).isNull();
// cleanup
buffer1.recycleBuffer();
buffer2.recycleBuffer();
}
} |
public static <T> boolean contains(T[] array, T value) {
return indexOf(array, value) > INDEX_NOT_FOUND;
} | @Test
public void containsTest() {
Integer[] a = {1, 2, 3, 4, 3, 6};
boolean contains = ArrayUtil.contains(a, 3);
assertTrue(contains);
long[] b = {1, 2, 3, 4, 3, 6};
boolean contains2 = ArrayUtil.contains(b, 3);
assertTrue(contains2);
} |
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
} | @Test
public void testWindowsLikeBackSlashes() throws ScanException {
List<Token> tl = new TokenStream("c:\\hello\\world.%i", new AlmostAsIsEscapeUtil()).tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(new Token(Token.LITERAL, "c:\\hello\\world."));
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "i"));
assertEquals(witness, tl);
} |
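tokenize() is a single-pass state machine: one switch per character plus an end-of-stream flush. A self-contained two-state sketch of that structure (only LITERAL and KEYWORD are modeled; '%' starts a keyword):

import java.util.ArrayList;
import java.util.List;

final class MiniTokenizer {
    enum State { LITERAL, KEYWORD }

    static List<String> tokenize(String pattern) {
        List<String> tokens = new ArrayList<>();
        StringBuilder buf = new StringBuilder();
        State state = State.LITERAL;
        for (char c : pattern.toCharArray()) {
            if (c == '%') {
                if (buf.length() > 0) tokens.add(state + ":" + buf); // flush pending token
                buf.setLength(0);
                state = State.KEYWORD;
            } else if (state == State.KEYWORD && !Character.isLetter(c)) {
                tokens.add(state + ":" + buf); // keyword ends at first non-letter
                buf.setLength(0);
                state = State.LITERAL;
                buf.append(c);
            } else {
                buf.append(c);
            }
        }
        if (buf.length() > 0) tokens.add(state + ":" + buf); // EOS flush, as in tokenize()
        return tokens;
    }
}

On "c:\\hello\\world.%i" this yields [LITERAL:c:\hello\world., KEYWORD:i], matching the witness in the test above.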
@Override
public void excludeFiles(String[] filenames) {
if (filenames != null && filenames.length > 0) {
EXCFILE = filenames;
this.FILEFILTER = true;
}
} | @Test
public void testExcludeFiles() {
testf.excludeFiles(INCL);
for (TestData td : TESTDATA) {
String theFile = td.file;
boolean expect = td.exclfile;
testf.isFiltered(theFile, null);
String line = testf.filter(theFile);
if (line != null) {
assertTrue(expect, "Expect to accept " + theFile);
} else {
assertFalse(expect, "Expect to reject " + theFile);
}
}
} |
@Udf
public Long round(@UdfParameter final long val) {
return val;
} | @Test
public void shouldRoundLong() {
assertThat(udf.round(123L), is(123L));
} |
@Override
public String getNamenodes() {
final Map<String, Map<String, Object>> info = new LinkedHashMap<>();
if (membershipStore == null) {
return "{}";
}
try {
// Get the values from the store
GetNamenodeRegistrationsRequest request =
GetNamenodeRegistrationsRequest.newInstance();
GetNamenodeRegistrationsResponse response =
membershipStore.getNamenodeRegistrations(request);
// Order the namenodes
final List<MembershipState> namenodes = response.getNamenodeMemberships();
if (namenodes == null || namenodes.size() == 0) {
return JSON.toString(info);
}
List<MembershipState> namenodesOrder = new ArrayList<>(namenodes);
Collections.sort(namenodesOrder, MembershipState.NAME_COMPARATOR);
// Dump namenodes information into JSON
for (MembershipState namenode : namenodesOrder) {
Map<String, Object> innerInfo = new HashMap<>();
Map<String, Object> map = getJson(namenode);
innerInfo.putAll(map);
long dateModified = namenode.getDateModified();
long lastHeartbeat = getSecondsSince(dateModified);
innerInfo.put("lastHeartbeat", lastHeartbeat);
MembershipStats stats = namenode.getStats();
long used = stats.getTotalSpace() - stats.getAvailableSpace();
innerInfo.put("used", used);
info.put(namenode.getNamenodeKey(),
Collections.unmodifiableMap(innerInfo));
}
} catch (IOException e) {
LOG.error("Enable to fetch json representation of namenodes {}",
e.getMessage());
return "{}";
}
return JSON.toString(info);
} | @Test
public void testNamenodeStatsDataSource() throws IOException, JSONException {
RBFMetrics metrics = getRouter().getMetrics();
String jsonString = metrics.getNamenodes();
JSONObject jsonObject = new JSONObject(jsonString);
Iterator<?> keys = jsonObject.keys();
int nnsFound = 0;
while (keys.hasNext()) {
// Validate each entry against our mocks
JSONObject json = jsonObject.getJSONObject((String) keys.next());
String nameserviceId = json.getString("nameserviceId");
String namenodeId = json.getString("namenodeId");
MembershipState mockEntry =
this.findMockNamenode(nameserviceId, namenodeId);
assertNotNull(mockEntry);
assertEquals(json.getString("state"), mockEntry.getState().toString());
MembershipStats stats = mockEntry.getStats();
assertEquals(json.getLong("numOfActiveDatanodes"),
stats.getNumOfActiveDatanodes());
assertEquals(json.getLong("numOfDeadDatanodes"),
stats.getNumOfDeadDatanodes());
assertEquals(json.getLong("numOfStaleDatanodes"),
stats.getNumOfStaleDatanodes());
assertEquals(json.getLong("numOfDecommissioningDatanodes"),
stats.getNumOfDecommissioningDatanodes());
assertEquals(json.getLong("numOfDecomActiveDatanodes"),
stats.getNumOfDecomActiveDatanodes());
assertEquals(json.getLong("numOfDecomDeadDatanodes"),
stats.getNumOfDecomDeadDatanodes());
assertEquals(json.getLong("numOfInMaintenanceLiveDataNodes"),
stats.getNumOfInMaintenanceLiveDataNodes());
assertEquals(json.getLong("numOfInMaintenanceDeadDataNodes"),
stats.getNumOfInMaintenanceDeadDataNodes());
assertEquals(json.getLong("numOfEnteringMaintenanceDataNodes"),
stats.getNumOfEnteringMaintenanceDataNodes());
assertEquals(json.getLong("numOfBlocks"), stats.getNumOfBlocks());
assertEquals(json.getString("rpcAddress"), mockEntry.getRpcAddress());
assertEquals(json.getString("webScheme"), mockEntry.getWebScheme());
assertEquals(json.getString("webAddress"), mockEntry.getWebAddress());
nnsFound++;
}
// Validate all memberships are present
assertEquals(getActiveMemberships().size() + getStandbyMemberships().size(),
nnsFound);
} |
public static StatsSingleNote getNoteInfo(Note note) {
StatsSingleNote infos = new StatsSingleNote();
int words;
int chars;
if (note.isChecklist()) {
infos.setChecklistCompletedItemsNumber(
StringUtils.countMatches(note.getContent(), CHECKED_SYM));
infos.setChecklistItemsNumber(infos.getChecklistCompletedItemsNumber() +
StringUtils.countMatches(note.getContent(), UNCHECKED_SYM));
}
infos.setTags(TagsHelper.retrieveTags(note).size());
words = getWords(note);
chars = getChars(note);
infos.setWords(words);
infos.setChars(chars);
int attachmentsAll = 0;
int images = 0;
int videos = 0;
int audioRecordings = 0;
int sketches = 0;
int files = 0;
for (Attachment attachment : note.getAttachmentsList()) {
if (MIME_TYPE_IMAGE.equals(attachment.getMime_type())) {
images++;
} else if (MIME_TYPE_VIDEO.equals(attachment.getMime_type())) {
videos++;
} else if (MIME_TYPE_AUDIO.equals(attachment.getMime_type())) {
audioRecordings++;
} else if (MIME_TYPE_SKETCH.equals(attachment.getMime_type())) {
sketches++;
} else if (MIME_TYPE_FILES.equals(attachment.getMime_type())) {
files++;
}
attachmentsAll++;
}
infos.setAttachments(attachmentsAll);
infos.setImages(images);
infos.setVideos(videos);
infos.setAudioRecordings(audioRecordings);
infos.setSketches(sketches);
infos.setFiles(files);
if (note.getCategory() != null) {
infos.setCategoryName(note.getCategory().getName());
}
return infos;
} | @Test
public void getNoteInfo() {
var contextMock = getContextMock();
try (
MockedStatic<OmniNotes> omniNotes = mockStatic(OmniNotes.class);
MockedStatic<BuildHelper> buildVersionHelper = mockStatic(BuildHelper.class);
) {
omniNotes.when(OmniNotes::getAppContext).thenReturn(contextMock);
buildVersionHelper.when(() -> BuildHelper.isAboveOrEqual(VERSION_CODES.N)).thenReturn(true);
var info = NotesHelper.getNoteInfo(new Note());
assertEquals(0, info.getChars());
assertEquals(0, info.getWords());
assertEquals(0, info.getChecklistCompletedItemsNumber());
}
} |
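The five per-type counters above could be built in one pass with groupingBy/counting; a sketch assuming only the Attachment#getMime_type() accessor already used above:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Alternative to the manual counters: one stream pass, counts keyed by mime type.
static Map<String, Long> countByMimeType(List<Attachment> attachments) {
    return attachments.stream()
        .collect(Collectors.groupingBy(Attachment::getMime_type, Collectors.counting()));
}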
@Override public GrpcServerRequest request() {
return request;
} | @Test void request() {
assertThat(response.request()).isSameAs(request);
} |
@Override
public String toString() {
return this.toJSONString(0);
} | @Test
@Disabled
public void toStringTest() {
final String str = "{\"code\": 500, \"data\":null}";
final JSONObject jsonObject = new JSONObject(str);
Console.log(jsonObject);
jsonObject.getConfig().setIgnoreNullValue(true);
Console.log(jsonObject.toStringPretty());
} |
NettyPartitionRequestClient createPartitionRequestClient(ConnectionID connectionId)
throws IOException, InterruptedException {
// We map the input ConnectionID to a new value to restrict the number of tcp connections
connectionId =
new ConnectionID(
connectionId.getResourceID(),
connectionId.getAddress(),
connectionId.getConnectionIndex() % maxNumberOfConnections);
while (true) {
final CompletableFuture<NettyPartitionRequestClient> newClientFuture =
new CompletableFuture<>();
final CompletableFuture<NettyPartitionRequestClient> clientFuture =
clients.putIfAbsent(connectionId, newClientFuture);
final NettyPartitionRequestClient client;
if (clientFuture == null) {
try {
client = connectWithRetries(connectionId);
} catch (Throwable e) {
newClientFuture.completeExceptionally(
new IOException("Could not create Netty client.", e));
clients.remove(connectionId, newClientFuture);
throw e;
}
newClientFuture.complete(client);
} else {
try {
client = clientFuture.get();
} catch (ExecutionException e) {
ExceptionUtils.rethrowIOException(ExceptionUtils.stripExecutionException(e));
return null;
}
}
// Make sure to increment the reference count before handing a client
// out to ensure correct bookkeeping for channel closing.
if (client.validateClientAndIncrementReferenceCounter()) {
return client;
} else if (client.canBeDisposed()) {
client.closeConnection();
} else {
destroyPartitionRequestClient(connectionId, client);
}
}
} | @TestTemplate
void testNettyClientConnectRetryMultipleThread() throws Exception {
NettyTestUtil.NettyServerAndClient serverAndClient = createNettyServerAndClient();
UnstableNettyClient unstableNettyClient =
new UnstableNettyClient(serverAndClient.client(), 2);
PartitionRequestClientFactory factory =
new PartitionRequestClientFactory(
unstableNettyClient, 2, 1, connectionReuseEnabled);
List<CompletableFuture<NettyPartitionRequestClient>> futures = new ArrayList<>();
for (int i = 0; i < 10; i++) {
futures.add(
CompletableFuture.supplyAsync(
() -> {
try {
return factory.createPartitionRequestClient(
serverAndClient.getConnectionID(RESOURCE_ID, 0));
} catch (Exception e) {
throw new CompletionException(e);
}
},
EXECUTOR_EXTENSION.getExecutor()));
}
futures.forEach(
runnableFuture ->
assertThatFuture(runnableFuture).eventuallySucceeds().isNotNull());
shutdown(serverAndClient);
} |
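The factory resolves races with a classic compute-once-per-key pattern: each thread offers a CompletableFuture via putIfAbsent, the winner connects, and losers wait on the published future. A generic sketch of just that pattern (the retry and reference-counting logic of the real factory is omitted):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

final class OncePerKey<K, V> {
    private final ConcurrentMap<K, CompletableFuture<V>> cache = new ConcurrentHashMap<>();

    V get(K key, Function<K, V> create) throws Exception {
        CompletableFuture<V> mine = new CompletableFuture<>();
        CompletableFuture<V> existing = cache.putIfAbsent(key, mine);
        if (existing != null) {
            return existing.get();           // another thread won the race: wait for its result
        }
        try {
            V value = create.apply(key);     // we won: do the expensive work exactly once
            mine.complete(value);
            return value;
        } catch (RuntimeException e) {
            mine.completeExceptionally(e);
            cache.remove(key, mine);         // allow a later retry, as the factory does
            throw e;
        }
    }
}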
@Override
public void reset() throws IOException {
createDirectory(PATH_DATA.getKey());
createDirectory(PATH_WEB.getKey());
createDirectory(PATH_LOGS.getKey());
File tempDir = createOrCleanTempDirectory(PATH_TEMP.getKey());
try (AllProcessesCommands allProcessesCommands = new AllProcessesCommands(tempDir)) {
allProcessesCommands.clean();
}
} | @Test
public void reset_deletes_content_of_temp_dir_but_not_temp_dir_itself_if_it_already_exists() throws Exception {
assertThat(tempDir.mkdir()).isTrue();
Object tempDirKey = getFileKey(tempDir);
File fileInTempDir = new File(tempDir, "someFile.txt");
assertThat(fileInTempDir.createNewFile()).isTrue();
File subDirInTempDir = new File(tempDir, "subDir");
assertThat(subDirInTempDir.mkdir()).isTrue();
underTest.reset();
assertThat(tempDir).exists();
assertThat(fileInTempDir).doesNotExist();
assertThat(subDirInTempDir).doesNotExist();
assertThat(getFileKey(tempDir)).isEqualTo(tempDirKey);
} |
@Override
public boolean removeFirstOccurrence(Object o) {
return remove(o, 1);
} | @Test
public void testRemoveFirstOccurrence() {
RDeque<Integer> queue1 = redisson.getDeque("deque1");
queue1.addFirst(3);
queue1.addFirst(1);
queue1.addFirst(2);
queue1.addFirst(3);
queue1.removeFirstOccurrence(3);
assertThat(queue1).containsExactly(2, 1, 3);
} |
@Override
public String getValue(EvaluationContext context) {
// Use variable name if we just provide this.
if (variableName != null && variable == null) {
variable = context.lookupVariable(variableName);
return (variable != null ? variable.toString() : "");
}
String propertyName = pathExpression;
String propertyPath = null;
int delimiterIndex = -1;
// Search for a delimiter to isolate property name.
for (String delimiter : PROPERTY_NAME_DELIMITERS) {
delimiterIndex = pathExpression.indexOf(delimiter);
if (delimiterIndex != -1) {
propertyName = pathExpression.substring(0, delimiterIndex);
propertyPath = pathExpression.substring(delimiterIndex);
break;
}
}
Object variableValue = getProperty(variable, propertyName);
if (log.isDebugEnabled()) {
log.debug("propertyName: {}", propertyName);
log.debug("propertyPath: {}", propertyPath);
log.debug("variableValue: {}", variableValue);
}
if (propertyPath != null) {
if (variableValue.getClass().equals(String.class)) {
if (propertyPath.startsWith("/")) {
// This is a JSON Pointer or XPath expression to apply.
String variableString = String.valueOf(variableValue);
if (variableString.trim().startsWith("{") || variableString.trim().startsWith("[")) {
variableValue = getJsonPointerValue(variableString, propertyPath);
} else if (variableString.trim().startsWith("<")) {
variableValue = getXPathValue(variableString, propertyPath);
} else {
log.warn("Got a path query expression but content seems not to be JSON nor XML...");
variableValue = null;
}
}
} else if (variableValue.getClass().isArray()) {
if (propertyPath.matches(ARRAY_INDEX_REGEXP)) {
Matcher m = ARRAY_INDEX_PATTERN.matcher(propertyPath);
if (m.matches()) {
String arrayIndex = m.group(1);
Object[] variableValues = (Object[]) variableValue;
try {
variableValue = variableValues[Integer.parseInt(arrayIndex)];
} catch (ArrayIndexOutOfBoundsException ae) {
log.warn("Expression asked for " + arrayIndex + " but array is smaller (" + variableValues.length
+ "). Returning null.");
variableValue = null;
}
}
}
} else if (Map.class.isAssignableFrom(variableValue.getClass())) {
if (propertyPath.matches(MAP_INDEX_REGEXP)) {
Matcher m = MAP_INDEX_PATTERN.matcher(propertyPath);
if (m.matches()) {
String mapKey = m.group(1);
Map variableValues = (Map) variableValue;
variableValue = variableValues.get(mapKey);
}
}
}
}
return String.valueOf(variableValue);
} | @Test
void testJSONPointerValueInArray() {
String jsonString = "[{\"foo\":{\"bar\":111222},\"quantity\":1}]";
EvaluableRequest request = new EvaluableRequest(jsonString, null);
// Create new expression evaluating JSON Pointer path.
VariableReferenceExpression exp = new VariableReferenceExpression(request, "body/0/quantity");
String result = exp.getValue(new EvaluationContext());
assertEquals("1", result);
// Test with a nested expression
exp = new VariableReferenceExpression(request, "body/0/foo/bar");
result = exp.getValue(new EvaluationContext());
assertEquals("111222", result);
} |
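The "body/0/quantity" lookup works because getJsonPointerValue() is handed an RFC 6901 JSON Pointer ("/0/quantity"). Jackson's JsonNode.at() evaluates such pointers directly; a sketch of the assumed behavior:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Resolve a JSON Pointer against a JSON document; missing paths yield null.
static String jsonPointerValue(String json, String pointer) throws Exception {
    JsonNode node = new ObjectMapper().readTree(json).at(pointer);
    return node.isMissingNode() ? null : node.asText();
}

With the test's payload, jsonPointerValue(jsonString, "/0/quantity") returns "1" and "/0/foo/bar" returns "111222".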
public static URL urlForResource(String location) throws MalformedURLException, FileNotFoundException {
if (location == null) {
throw new NullPointerException("location is required");
}
URL url = null;
if (!location.matches(SCHEME_PATTERN)) {
url = Loader.getResourceBySelfClassLoader(location);
} else if (location.startsWith(CLASSPATH_SCHEME)) {
String path = location.substring(CLASSPATH_SCHEME.length());
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.length() == 0) {
throw new MalformedURLException("path is required");
}
url = Loader.getResourceBySelfClassLoader(path);
} else {
url = new URL(location);
}
if (url == null) {
throw new FileNotFoundException(location);
}
return url;
} | @Test
public void testExplicitClasspathUrlWithRootPath() throws Exception {
Assertions.assertThrows(MalformedURLException.class, () -> {
LocationUtil.urlForResource(LocationUtil.CLASSPATH_SCHEME + "/");
});
} |
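urlForResource() takes one of three branches depending on the scheme; a usage sketch, assuming "logback.xml" is present on the classpath:

import java.net.URL;

static void demo() throws Exception {
    URL a = LocationUtil.urlForResource("logback.xml");           // no scheme: classloader lookup
    URL b = LocationUtil.urlForResource("classpath:logback.xml"); // explicit classpath scheme
    URL c = LocationUtil.urlForResource("file:/tmp/logback.xml"); // any other scheme: new URL(location)
    System.out.println(a + "\n" + b + "\n" + c);
}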
@Override
public boolean onOptionsItemSelected(@NonNull MenuItem item) {
MainSettingsActivity mainSettingsActivity = (MainSettingsActivity) getActivity();
if (mainSettingsActivity == null) return super.onOptionsItemSelected(item);
if (item.getItemId() == R.id.add_user_word) {
createEmptyItemForAdd();
return true;
}
return super.onOptionsItemSelected(item);
} | @Test
public void testAddNewWordFromMenuAtEmptyState() {
UserDictionaryEditorFragment fragment = startEditorFragment();
RecyclerView wordsRecyclerView = fragment.getView().findViewById(R.id.words_recycler_view);
Assert.assertNotNull(wordsRecyclerView);
Assert.assertEquals(1 /*empty view*/, wordsRecyclerView.getAdapter().getItemCount());
Assert.assertEquals(
R.id.word_editor_view_type_empty_view_row,
wordsRecyclerView.getAdapter().getItemViewType(0));
final MenuItem menuItem = Mockito.mock(MenuItem.class);
Mockito.doReturn(R.id.add_user_word).when(menuItem).getItemId();
fragment.onOptionsItemSelected(menuItem);
TestRxSchedulers.drainAllTasks();
Assert.assertEquals(1, wordsRecyclerView.getAdapter().getItemCount());
Assert.assertEquals(
R.id.word_editor_view_type_editing_row, wordsRecyclerView.getAdapter().getItemViewType(0));
} |
public String getQuery() throws Exception {
return getQuery(weatherConfiguration.getLocation());
} | @Test
public void testBoxedStationQuery() throws Exception {
WeatherConfiguration weatherConfiguration = new WeatherConfiguration();
weatherConfiguration.setLon("4");
weatherConfiguration.setLat("52");
weatherConfiguration.setRightLon("6");
weatherConfiguration.setTopLat("54");
weatherConfiguration.setZoom(8);
weatherConfiguration.setUnits(WeatherUnits.METRIC);
weatherConfiguration.setAppid(APPID);
weatherConfiguration.setWeatherApi(WeatherApi.Station);
WeatherQuery weatherQuery = new WeatherQuery(weatherConfiguration);
weatherConfiguration.setGeoLocationProvider(geoLocationProvider);
String query = weatherQuery.getQuery();
assertThat(query, is(
"http://api.openweathermap.org/data/2.5/box/station?bbox=4,52,6,54,8&cluster=yes&lang=en&units=metric&APPID=9162755b2efa555823cfe0451d7fff38"));
} |