focal_method | test_case
---|---
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the number of days since 1970-01-01 00:00:00 UTC/GMT.")
public int stringToDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
// NB: We do not perform a null check here, preferring to throw an exception as
// there is no sentinel value for a "null" Date.
try {
final DateTimeFormatter formatter = formatters.get(formatPattern);
return ((int)LocalDate.parse(formattedDate, formatter).toEpochDay());
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldThrowOnEmptyString() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> udf.stringToDate("", "yyyy-MM-dd")
);
// Then:
assertThat(e.getMessage(), containsString("Failed to parse date '' with formatter 'yyyy-MM-dd'"));
} |
@Override
public boolean isSimilar(PiMeterCellConfig onosMeter, PiMeterCellConfig deviceMeter) {
final PiMeterBand onosCommittedBand = onosMeter.committedBand();
final PiMeterBand onosPeakBand = onosMeter.peakBand();
final PiMeterBand deviceCommittedBand = deviceMeter.committedBand();
final PiMeterBand devicePeakBand = deviceMeter.peakBand();
// Fail fast: this can easily happen if we send a write very
// close to a read; the read can still return the default config
if (deviceCommittedBand == null || devicePeakBand == null) {
return false;
}
final long onosCir = onosCommittedBand.rate();
final long onosCburst = onosCommittedBand.burst();
final long onosPir = onosPeakBand.rate();
final long onosPburst = onosPeakBand.burst();
final long deviceCir = deviceCommittedBand.rate();
final long deviceCburst = deviceCommittedBand.burst();
final long devicePir = devicePeakBand.rate();
final long devicePburst = devicePeakBand.burst();
return isRateSimilar(onosCir, deviceCir) && isRateSimilar(onosPir, devicePir) &&
isBurstSimilar(onosCburst, deviceCburst) && isBurstSimilar(onosPburst, devicePburst);
} | @Test
public void testWrongIsRateSimilar() {
PiMeterBand onosMeterBand;
PiMeterBand deviceMeterBand;
PiMeterCellConfig onosMeter;
PiMeterCellConfig deviceMeter;
for (Map.Entry<Long, Long> entry : WRONG_RATES.entrySet()) {
onosMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, entry.getKey(), 0);
deviceMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, entry.getValue(), 0);
onosMeter = PiMeterCellConfig.builder()
.withMeterCellId(meterCellId)
.withMeterBand(onosMeterBand)
.withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
.build();
deviceMeter = PiMeterCellConfig.builder()
.withMeterCellId(meterCellId)
.withMeterBand(deviceMeterBand)
.withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
.build();
assertFalse(meterProgrammable.isSimilar(onosMeter, deviceMeter));
}
} |
public void retrieveDocuments() throws DocumentRetrieverException {
boolean first = true;
String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster);
MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route);
documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams);
session = documentAccess.createSyncSession(new SyncParameters.Builder().build());
int trace = params.traceLevel;
if (trace > 0) {
session.setTraceLevel(trace);
}
Iterator<String> iter = params.documentIds;
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println('[');
}
while (iter.hasNext()) {
if (params.jsonOutput && !params.printIdsOnly) {
if (!first) {
System.out.println(',');
} else {
first = false;
}
}
String docid = iter.next();
Message msg = createDocumentRequest(docid);
Reply reply = session.syncSend(msg);
printReply(reply);
}
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println(']');
}
} | @Test
void testDocumentNotFound() throws DocumentRetrieverException {
ClientParameters params = createParameters()
.setDocumentIds(asIterator(DOC_ID_1))
.setPrintIdsOnly(true)
.build();
when(mockedSession.syncSend(any())).thenReturn(new GetDocumentReply(null));
DocumentRetriever documentRetriever = createDocumentRetriever(params);
documentRetriever.retrieveDocuments();
verify(mockedSession, times(1)).syncSend(any());
assertEquals(outContent.toString(), "Document not found.\n");
} |
@Override
public void delete(DataAdapterDto nativeEntity) {
dataAdapterService.deleteAndPostEventImmutable(nativeEntity.id());
} | @Test
@MongoDBFixtures("LookupDataAdapterFacadeTest.json")
public void delete() {
final Optional<DataAdapterDto> dataAdapterDto = dataAdapterService.get("5adf24a04b900a0fdb4e52c8");
assertThat(dataAdapterService.findAll()).hasSize(1);
dataAdapterDto.ifPresent(facade::delete);
assertThat(dataAdapterService.findAll()).isEmpty();
assertThat(dataAdapterService.get("5adf24a04b900a0fdb4e52c8")).isEmpty();
} |
public static String randomStringWithoutStr(final int length, final String elemData) {
String baseStr = BASE_CHAR_NUMBER;
baseStr = StrUtil.removeAll(baseStr, elemData.toCharArray());
return randomString(baseStr, length);
} | @Test
@Disabled
public void randomStringWithoutStrTest() {
for (int i = 0; i < 100; i++) {
final String s = RandomUtil.randomStringWithoutStr(8, "0IPOL");
System.out.println(s);
for (char c : "0IPOL".toCharArray()) {
assertFalse(s.contains((String.valueOf(c).toLowerCase(Locale.ROOT))));
}
}
} |
public static TreeMap<Integer, List<BufferIndexAndChannel>>
getBuffersByConsumptionPriorityInOrder(
List<Integer> nextBufferIndexToConsume,
TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionToAllBuffers,
int expectedSize) {
if (expectedSize <= 0) {
return new TreeMap<>();
}
PriorityQueue<BufferConsumptionPriorityIterator> heap = new PriorityQueue<>();
subpartitionToAllBuffers.forEach(
(subpartitionId, buffers) -> {
if (!buffers.isEmpty()) {
heap.add(
new BufferConsumptionPriorityIterator(
buffers, nextBufferIndexToConsume.get(subpartitionId)));
}
});
TreeMap<Integer, List<BufferIndexAndChannel>> subpartitionToHighPriorityBuffers =
new TreeMap<>();
for (int i = 0; i < expectedSize; i++) {
if (heap.isEmpty()) {
break;
}
BufferConsumptionPriorityIterator bufferConsumptionPriorityIterator = heap.poll();
BufferIndexAndChannel bufferIndexAndChannel = bufferConsumptionPriorityIterator.next();
subpartitionToHighPriorityBuffers
.computeIfAbsent(bufferIndexAndChannel.getChannel(), k -> new ArrayList<>())
.add(bufferIndexAndChannel);
// if this iterator has a next element, re-add it.
if (bufferConsumptionPriorityIterator.hasNext()) {
heap.add(bufferConsumptionPriorityIterator);
}
}
// The TreeMap ensures that the keys are sorted by subpartitionId
// ascending. Within the same subpartition, the larger the bufferIndex,
// the higher the consumption priority; reverse the values so that buffers are ordered
// by (subpartitionId, bufferIndex) ascending.
subpartitionToHighPriorityBuffers.values().forEach(Collections::reverse);
return subpartitionToHighPriorityBuffers;
} | @Test
void testGetBuffersByConsumptionPriorityInOrder() {
final int subpartition1 = 0;
final int subpartition2 = 1;
final int progress1 = 10;
final int progress2 = 20;
TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionBuffers = new TreeMap<>();
List<BufferIndexAndChannel> subpartitionBuffers1 =
createBufferIndexAndChannelsList(
subpartition1, progress1, progress1 + 2, progress1 + 6);
List<BufferIndexAndChannel> subpartitionBuffers2 =
createBufferIndexAndChannelsList(
subpartition2, progress2 + 1, progress2 + 2, progress2 + 5);
subpartitionBuffers.put(subpartition1, new ArrayDeque<>(subpartitionBuffers1));
subpartitionBuffers.put(subpartition2, new ArrayDeque<>(subpartitionBuffers2));
TreeMap<Integer, List<BufferIndexAndChannel>> buffersByConsumptionPriorityInOrder =
HsSpillingStrategyUtils.getBuffersByConsumptionPriorityInOrder(
Arrays.asList(progress1, progress2), subpartitionBuffers, 5);
assertThat(buffersByConsumptionPriorityInOrder).hasSize(2);
assertThat(buffersByConsumptionPriorityInOrder.get(subpartition1))
.isEqualTo(subpartitionBuffers1.subList(1, 3));
assertThat(buffersByConsumptionPriorityInOrder.get(subpartition2))
.isEqualTo(subpartitionBuffers2.subList(0, 3));
} |
public static ClassLoader getCallerClassLoader(Class<?> caller) {
return caller.getClassLoader();
} | @Test
void testGetCallerClassLoader() {
assertThat(
ClassUtils.getCallerClassLoader(ClassUtilsTest.class),
sameInstance(ClassUtilsTest.class.getClassLoader()));
} |
public CompletableFuture<Long> getMinOffsetFromFileAsync() {
int length = MessageFormatUtil.QUEUE_OFFSET_POSITION + Long.BYTES;
if (this.fileSegmentTable.isEmpty() ||
this.getCommitOffset() - this.getMinOffset() < length) {
return CompletableFuture.completedFuture(GET_OFFSET_ERROR);
}
return this.readAsync(this.getMinOffset(), length)
.thenApply(buffer -> {
firstOffset.set(MessageFormatUtil.getQueueOffset(buffer));
return firstOffset.get();
});
} | @Test
public void getMinOffsetFromFileAsyncTest() {
String filePath = MessageStoreUtil.toFilePath(queue);
FlatCommitLogFile flatFile = flatFileFactory.createFlatFileForCommitLog(filePath);
// append some messages
for (int i = 6; i < 9; i++) {
ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer();
byteBuffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, i);
Assert.assertEquals(AppendResult.SUCCESS, flatFile.append(byteBuffer, i));
}
Assert.assertEquals(-1L, flatFile.getMinOffsetFromFileAsync().join().longValue());
// append some messages
for (int i = 9; i < 30; i++) {
if (i == 20) {
flatFile.commitAsync().join();
flatFile.rollingNewFile(flatFile.getAppendOffset());
}
ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer();
byteBuffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, i);
Assert.assertEquals(AppendResult.SUCCESS, flatFile.append(byteBuffer, i));
}
flatFile.commitAsync().join();
Assert.assertEquals(6L, flatFile.getMinOffsetFromFile());
Assert.assertEquals(6L, flatFile.getMinOffsetFromFileAsync().join().longValue());
// recalculate min offset here
flatFile.destroyExpiredFile(20L);
Assert.assertEquals(20L, flatFile.getMinOffsetFromFile());
Assert.assertEquals(20L, flatFile.getMinOffsetFromFileAsync().join().longValue());
// clean expired file again
flatFile.destroyExpiredFile(20L);
Assert.assertEquals(20L, flatFile.getMinOffsetFromFile());
Assert.assertEquals(20L, flatFile.getMinOffsetFromFileAsync().join().longValue());
} |
protected abstract void modifyDataSourceProperties(RoutineLoadDataSourceProperties dataSourceProperties)
throws DdlException; | @Test
public void testModifyDataSourceProperties() throws Exception {
KafkaRoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob();
ConnectContext connectContext = UtFrameUtils.createDefaultCtx();
// Alter data source custom properties
String groupId = "group1";
String clientId = "client1";
String defaultOffsets = "OFFSET_BEGINNING";
String originStmt = "alter routine load for db.job1 " +
"FROM KAFKA (" +
" \"property.group.id\" = \"" + groupId + "\"," +
" \"property.client.id\" = \"" + clientId + "\"," +
" \"property.kafka_default_offsets\" = \"" + defaultOffsets + "\"" +
")";
routineLoadJob.setOrigStmt(new OriginStatement(originStmt, 0));
AlterRoutineLoadStmt stmt = (AlterRoutineLoadStmt) UtFrameUtils.parseStmtWithNewParser(originStmt, connectContext);
routineLoadJob.modifyJob(stmt.getRoutineLoadDesc(), stmt.getAnalyzedJobProperties(),
stmt.getDataSourceProperties(), new OriginStatement(originStmt, 0), true);
routineLoadJob.convertCustomProperties(true);
Map<String, String> properties = routineLoadJob.getConvertedCustomProperties();
Assert.assertEquals(groupId, properties.get("group.id"));
Assert.assertEquals(clientId, properties.get("client.id"));
Assert.assertEquals(-2L,
(long) Deencapsulation.getField(routineLoadJob, "kafkaDefaultOffSet"));
} |
@Override public Status unwrap() {
return status;
} | @Test void unwrap() {
assertThat(response.unwrap()).isSameAs(status);
} |
@Override
public void onThrowing(final TargetAdviceObject target, final TargetAdviceMethod method, final Object[] args, final Throwable throwable, final String pluginType) {
Span span = (Span) target.getAttachment();
span.setStatus(StatusCode.ERROR).recordException(throwable);
span.end();
} | @Test
void assertExceptionHandle() {
TargetAdviceObjectFixture adviceObjectFixture = new TargetAdviceObjectFixture();
OpenTelemetrySQLParserEngineAdvice advice = new OpenTelemetrySQLParserEngineAdvice();
advice.beforeMethod(adviceObjectFixture, null, new Object[]{SQL, true}, "OpenTelemetry");
advice.onThrowing(adviceObjectFixture, null, new Object[]{SQL, true}, new IOException(""), "OpenTelemetry");
List<SpanData> spanItems = testExporter.getFinishedSpanItems();
assertCommonData(spanItems);
assertThat(spanItems.iterator().next().getStatus().getStatusCode(), is(StatusCode.ERROR));
} |
public static String toHexColor(final Color color)
{
return "#" + colorToHexCode(color);
} | @Test
public void toHexColor()
{
COLOR_HEXSTRING_MAP.forEach((color, hex) ->
{
assertEquals("#" + hex, ColorUtil.toHexColor(color));
});
} |
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof Timestamped)) {
return false;
}
@SuppressWarnings("unchecked")
Timestamped<T> that = (Timestamped<T>) obj;
return Objects.equals(this.timestamp, that.timestamp);
} | @Test
public final void testEquals() {
Timestamped<String> a = new Timestamped<>("a", TS_1_1);
Timestamped<String> b = new Timestamped<>("b", TS_1_1);
assertTrue("value does not impact equality",
a.equals(b));
new EqualsTester()
.addEqualityGroup(new Timestamped<>("a", TS_1_1),
new Timestamped<>("b", TS_1_1),
new Timestamped<>("c", TS_1_1))
.addEqualityGroup(new Timestamped<>("a", TS_1_2),
new Timestamped<>("b", TS_1_2),
new Timestamped<>("c", TS_1_2))
.addEqualityGroup(new Timestamped<>("a", TS_2_1),
new Timestamped<>("b", TS_2_1),
new Timestamped<>("c", TS_2_1))
.testEquals();
} |
public CompletableFuture<Void> deleteStoredData(final UUID accountUuid) {
final ExternalServiceCredentials credentials = storageServiceCredentialsGenerator.generateForUuid(accountUuid);
final HttpRequest request = HttpRequest.newBuilder()
.uri(deleteUri)
.DELETE()
.header(HttpHeaders.AUTHORIZATION, basicAuthHeader(credentials))
.build();
return httpClient.sendAsync(request, HttpResponse.BodyHandlers.ofString()).thenApply(response -> {
if (HttpUtils.isSuccessfulResponse(response.statusCode())) {
return null;
}
throw new SecureStorageException("Failed to delete storage service data: " + response.statusCode());
});
} | @Test
void deleteStoredData() {
final String username = RandomStringUtils.randomAlphabetic(16);
final String password = RandomStringUtils.randomAlphanumeric(32);
when(credentialsGenerator.generateForUuid(accountUuid)).thenReturn(
new ExternalServiceCredentials(username, password));
wireMock.stubFor(delete(urlEqualTo(SecureStorageClient.DELETE_PATH))
.withBasicAuth(username, password)
.willReturn(aResponse().withStatus(202)));
// We're happy as long as this doesn't throw an exception
secureStorageClient.deleteStoredData(accountUuid).join();
} |
public String create(final String secret, final String bucket, String region, final String key, final String method, final long expiry) {
if(StringUtils.isBlank(region)) {
// Only for AWS
switch(session.getSignatureVersion()) {
case AWS4HMACSHA256:
// Region is required for AWS4-HMAC-SHA256 signature
region = S3LocationFeature.DEFAULT_REGION.getIdentifier();
}
}
final Host bookmark = session.getHost();
return new RestS3Service(new AWSCredentials(StringUtils.strip(bookmark.getCredentials().getUsername()), StringUtils.strip(secret))) {
@Override
public String getEndpoint() {
if(S3Session.isAwsHostname(bookmark.getHostname())) {
return bookmark.getProtocol().getDefaultHostname();
}
return bookmark.getHostname();
}
@Override
protected void initializeProxy(final HttpClientBuilder httpClientBuilder) {
//
}
}.createSignedUrlUsingSignatureVersion(
session.getSignatureVersion().toString(),
region, method, bucket, key, null, null, expiry / 1000, false, true,
new HostPreferences(bookmark).getBoolean("s3.bucket.virtualhost.disable"));
} | @Test
public void testCreateEuWest() throws Exception {
final Calendar expiry = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
expiry.add(Calendar.MILLISECOND, (int) TimeUnit.DAYS.toMillis(7));
final String url = new S3PresignedUrlProvider(session).create(PROPERTIES.get("s3.secret"),
"test-eu-west-1-cyberduck", "eu-west-1", "f", "GET", expiry.getTimeInMillis());
assertNotNull(url);
assertEquals("test-eu-west-1-cyberduck.s3.amazonaws.com", URI.create(url).getHost());
final HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
assertEquals(404, connection.getResponseCode());
} |
public static <T, R> R unaryCall(Invoker<?> invoker, MethodDescriptor methodDescriptor, T request) {
return (R) call(invoker, methodDescriptor, new Object[] {request});
} | @Test
void unaryCall() {
when(invoker.invoke(any(Invocation.class))).thenReturn(result);
Object ret = StubInvocationUtil.unaryCall(invoker, method, request);
Assertions.assertEquals(response, ret);
} |
public boolean hasConfigRepo(String configRepoId) {
return this.getConfigRepo(configRepoId) != null;
} | @Test
public void shouldReturnTrueIfContainsConfigRepoWithTheSpecifiedId() {
ConfigRepoConfig repo = ConfigRepoConfig.createConfigRepoConfig(git("http://git1"), "myplugin", "id");
repos.add(repo);
assertThat(repos.hasConfigRepo(repo.getId()), is(true));
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testBank32nhLAD() {
test(Loss.lad(), "bank32nh", Bank32nh.formula, Bank32nh.data, 0.0909);
} |
public ObjectMapper getObjectMapper() {
return mapObjectMapper;
} | @Test
public void test() {
Assertions.assertThrows(JsonMappingException.class, () -> {
String JSON =
"{'id': 124,\n" +
" 'obj':[ 'com.sun.org.apache.xalan.internal.xsltc.trax.TemplatesImpl',\n" +
" {\n" +
" 'transletBytecodes' : [ 'AAIAZQ==' ],\n" +
" 'transletName' : 'a.b',\n" +
" 'outputProperties' : { }\n" +
" }\n" +
" ]\n" +
"}";
JSON = JSON.replace("'", "\"");
JsonJacksonCodec codec = new JsonJacksonCodec();
codec.getObjectMapper().readValue(JSON, Bean1599.class);
});
} |
@Override
// TODO(yimin) integrate this method with load() method
public void cacheData(String ufsPath, long length, long pos, boolean isAsync)
throws IOException {
List<CompletableFuture<Void>> futures = new ArrayList<>();
// TODO(yimin) Implement synchronous data caching.
alluxio.grpc.FileInfo fi = getGrpcFileInfo(ufsPath, -1);
String fileId = new AlluxioURI(ufsPath).hash();
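// Iterate over every page index covered by [pos, pos + length), bounded by the actual file length.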
for (long i = pos / mPageSize;
i <= Math.min(pos + length, fi.getLength()) / mPageSize; ++i) {
PageId pageId = new PageId(fileId, i);
// TODO(yimin) As an optimization, data does not need to load on a page basis.
// Can implement a bulk load mechanism and load a couple of pages at the same time,
// to improve the performance.
if (mCacheManager.hasPageUnsafe(pageId)) {
continue;
}
long loadPos = i * mPageSize;
long loadLength = Math.min(mPageSize, fi.getLength() - loadPos);
if (loadLength == 0) {
continue;
}
if (!mLoadingPages.addIfAbsent(pageId)) {
continue;
}
futures.add(CompletableFuture.runAsync(() -> {
try {
if (mCacheManager.hasPageUnsafe(pageId)) {
return;
}
LOG.debug("Preloading {} pos: {} length: {} started", ufsPath, loadPos, loadLength);
loadPages(ufsPath, Collections.singletonList(pageId), fi.getLength());
LOG.debug("Preloading {} pos: {} length: {} finished", ufsPath, loadPos, loadLength);
} catch (Exception e) {
LOG.info("Preloading failed for {} page: {}", ufsPath, pageId, e);
} finally {
mLoadingPages.remove(pageId);
}
}, mCacheDataExecutor));
if (!isAsync) {
try {
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
} | @Test
public void testCacheData() throws Exception {
int numPages = 10;
long length = mPageSize * numPages;
String ufsPath = mTestFolder.newFile("test").getAbsolutePath();
byte[] buffer = BufferUtils.getIncreasingByteArray((int) length);
BufferUtils.writeBufferToFile(ufsPath, buffer);
mWorker.cacheData(ufsPath, length, 0, false);
List<PageId> cachedPages =
mCacheManager.getCachedPageIdsByFileId(new AlluxioURI(ufsPath).hash(), length);
assertEquals(numPages, cachedPages.size());
int start = 0;
for (PageId pageId : cachedPages) {
byte[] buff = new byte[(int) mPageSize];
mCacheManager.get(pageId, (int) mPageSize, buff, 0);
assertTrue(BufferUtils.equalIncreasingByteArray(start, (int) mPageSize, buff));
start += mPageSize;
}
} |
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final byte[] payload = rawMessage.getPayload();
final Map<String, Object> event;
try {
event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT);
} catch (IOException e) {
LOG.error("Couldn't decode raw message {}", rawMessage);
return null;
}
return parseEvent(event);
} | @Test
public void decodeReturnsNullIfPayloadCouldNotBeDecoded() throws Exception {
assertThat(codec.decode(new RawMessage(new byte[0]))).isNull();
} |
@Override
public KeyValueIterator<K, V> reverseAll() {
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.reverseAll();
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
} | @Test
public void shouldSupportReverseAllAcrossMultipleStores() {
final KeyValueStore<String, String> cache = newStoreInstance();
stubProviderTwo.addStore(storeName, cache);
stubOneUnderlying.put("a", "a");
stubOneUnderlying.put("b", "b");
stubOneUnderlying.put("z", "z");
cache.put("c", "c");
cache.put("d", "d");
cache.put("x", "x");
final List<KeyValue<String, String>> results = toList(theStore.reverseAll());
assertTrue(results.contains(new KeyValue<>("a", "a")));
assertTrue(results.contains(new KeyValue<>("b", "b")));
assertTrue(results.contains(new KeyValue<>("c", "c")));
assertTrue(results.contains(new KeyValue<>("d", "d")));
assertTrue(results.contains(new KeyValue<>("x", "x")));
assertTrue(results.contains(new KeyValue<>("z", "z")));
assertEquals(6, results.size());
} |
@Override
public void execute(ComputationStep.Context context) {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
.buildFor(List.of(duplicationFormula)))
.visit(treeRootHolder.getRoot());
} | @Test
public void compute_and_aggregate_duplicated_lines() {
addDuplicatedBlock(FILE_1_REF, 2);
addDuplicatedBlock(FILE_3_REF, 10);
addDuplicatedBlock(FILE_4_REF, 12);
setNewLines(FILE_1, FILE_2, FILE_3, FILE_4);
underTest.execute(new TestComputationStepContext());
assertRawMeasureValue(FILE_1_REF, NEW_DUPLICATED_LINES_KEY, 2);
assertRawMeasureValue(FILE_2_REF, NEW_DUPLICATED_LINES_KEY, 0);
assertRawMeasureValue(FILE_3_REF, NEW_DUPLICATED_LINES_KEY, 9);
assertRawMeasureValue(FILE_4_REF, NEW_DUPLICATED_LINES_KEY, 11);
assertRawMeasureValue(DIRECTORY_REF, NEW_DUPLICATED_LINES_KEY, 2);
assertNoRawMeasure(DIRECTORY_2_REF, NEW_DUPLICATED_LINES_KEY);
assertRawMeasureValue(ROOT_REF, NEW_DUPLICATED_LINES_KEY, 22);
} |
@Override
public RestLiRequestData extractRequestData(RoutingResult routingResult, DataMap data)
{
ResourceMethodDescriptor resourceMethodDescriptor = routingResult.getResourceMethod();
if (data == null)
{
data = new DataMap();
}
DynamicRecordTemplate template = new DynamicRecordTemplate(data, resourceMethodDescriptor.getRequestDataSchema());
ValidationResult result =
ValidateDataAgainstSchema.validate(data, template.schema(), getValidationOptions());
if (!result.isValid())
{
throw new RoutingException("Parameters of method '" + resourceMethodDescriptor.getActionName()
+ "' failed validation with error '" + result.getMessages() + "'", HttpStatus.S_400_BAD_REQUEST.getCode());
}
return new RestLiRequestDataImpl.Builder().entity(template).build();
} | @Test(dataProvider = "failureData")
public void testExtractRequestDataFailure(String entity, List<Parameter<?>> params, String errorRegEx)
throws IOException
{
RecordDataSchema dataSchema = DynamicRecordMetadata.buildSchema("testAction", params);
RestRequest request = RestLiArgumentBuilderTestHelper.getMockRequest(false, entity);
ResourceMethodDescriptor descriptor = RestLiArgumentBuilderTestHelper.getMockResourceMethodDescriptor(null, null, "testAction", dataSchema);
RoutingResult routingResult = RestLiArgumentBuilderTestHelper.getMockRoutingResult(descriptor, 1, null, 1);
RestLiArgumentBuilder argumentBuilder = new ActionArgumentBuilder();
try
{
argumentBuilder.extractRequestData(routingResult, DataMapUtils.readMapWithExceptions(request));
fail("Expected RoutingException");
}
catch (RoutingException e)
{
assertTrue(e.getMessage().matches(errorRegEx));
}
verify(request, descriptor, routingResult);
} |
public V get(final int keyPartA, final int keyPartB)
{
return unmapNullValue(getMapping(keyPartA, keyPartB));
} | @Test
void shouldReturnNullWhenNotFoundItem()
{
final int keyPartA = 3;
final int keyPartB = 7;
assertNull(map.get(keyPartA, keyPartB));
} |
public static ResourceProfile generateDefaultSlotResourceProfile(
WorkerResourceSpec workerResourceSpec, int numSlotsPerWorker) {
final ResourceProfile.Builder resourceProfileBuilder =
ResourceProfile.newBuilder()
.setCpuCores(workerResourceSpec.getCpuCores().divide(numSlotsPerWorker))
.setTaskHeapMemory(
workerResourceSpec.getTaskHeapSize().divide(numSlotsPerWorker))
.setTaskOffHeapMemory(
workerResourceSpec.getTaskOffHeapSize().divide(numSlotsPerWorker))
.setManagedMemory(
workerResourceSpec.getManagedMemSize().divide(numSlotsPerWorker))
.setNetworkMemory(
workerResourceSpec.getNetworkMemSize().divide(numSlotsPerWorker));
workerResourceSpec
.getExtendedResources()
.forEach(
(name, resource) ->
resourceProfileBuilder.setExtendedResource(
resource.divide(numSlotsPerWorker)));
return resourceProfileBuilder.build();
} | @Test
void testGenerateDefaultSlotConsistentWithTaskExecutorResourceUtils() {
final int numSlots = 5;
final TaskExecutorResourceSpec taskExecutorResourceSpec =
new TaskExecutorResourceSpec(
new CPUResource(1.0),
MemorySize.parse("1m"),
MemorySize.parse("2m"),
MemorySize.parse("3m"),
MemorySize.parse("4m"),
Collections.singleton(
new ExternalResource(EXTERNAL_RESOURCE_NAME, numSlots)));
final ResourceProfile resourceProfileFromTaskExecutorResourceUtils =
TaskExecutorResourceUtils.generateDefaultSlotResourceProfile(
taskExecutorResourceSpec, numSlots);
final ResourceProfile totalResourceProfile =
TaskExecutorResourceUtils.generateTotalAvailableResourceProfile(
taskExecutorResourceSpec);
final WorkerResourceSpec workerResourceSpec =
WorkerResourceSpec.fromTotalResourceProfile(totalResourceProfile, numSlots);
assertThat(
SlotManagerUtils.generateDefaultSlotResourceProfile(
totalResourceProfile, numSlots))
.isEqualTo(resourceProfileFromTaskExecutorResourceUtils);
assertThat(
SlotManagerUtils.generateDefaultSlotResourceProfile(
workerResourceSpec, numSlots))
.isEqualTo(resourceProfileFromTaskExecutorResourceUtils);
} |
public <I> I newFlyweight(Class<I> implementationParent, String templateFileName, Object... args) {
Template template = Template.fromFile(implementationParent, templateFileName);
return newFlyweight(implementationParent, templateFileName, template, args);
} | @Test
public void shouldBeAbleToMoveFlyweights() {
Example writer = (Example) newFlyweight();
Example reader = (Example) newFlyweight();
StubFlyweight writeCursor = (StubFlyweight) writer;
StubFlyweight readCursor = (StubFlyweight) reader;
writeCursor.moveTo(startAddress + EXAMPLE_SIZE_IN_BYTES);
readCursor.moveTo(startAddress + EXAMPLE_SIZE_IN_BYTES);
writer.setFoo(5);
assertEquals(5, reader.getFoo());
writer.setBar(6L);
assertEquals(6L, reader.getBar());
} |
@Override
public String convertTo(SortedSet<Path> value) {
if (value == null) {
throw new ParameterException("String list of Paths must not be null.");
}
return value.stream().map(Path::toString).collect(Collectors.joining(","));
} | @Test
public void testConvertToEmpty() {
assertEquals("", converter.convertTo(new TreeSet<>()));
} |
public boolean execute(final File clusterDir)
{
if (!clusterDir.exists() || !clusterDir.isDirectory())
{
throw new IllegalArgumentException("invalid cluster directory: " + clusterDir.getAbsolutePath());
}
final RecordingLog.Entry entry = ClusterTool.findLatestValidSnapshot(clusterDir);
if (null == entry)
{
throw new ClusterException("no valid snapshot found");
}
final long recordingId = entry.recordingId;
final ClusterNodeControlProperties properties = ClusterTool.loadControlProperties(clusterDir);
final RecordingSignalCapture recordingSignalCapture = new RecordingSignalCapture();
try (Aeron aeron = Aeron.connect(new Aeron.Context().aeronDirectoryName(properties.aeronDirectoryName));
AeronArchive archive = AeronArchive.connect(new AeronArchive.Context()
.controlRequestChannel(archiveLocalRequestChannel)
.controlRequestStreamId(archiveLocalRequestStreamId)
.controlResponseChannel(IPC_CHANNEL)
.recordingSignalConsumer(recordingSignalCapture)
.aeron(aeron)))
{
final SnapshotReader snapshotReader = new SnapshotReader();
replayLocalSnapshotRecording(aeron, archive, recordingId, snapshotReader);
final long targetNextServiceSessionId = max(
max(snapshotReader.nextServiceSessionId, snapshotReader.maxClusterSessionId + 1),
snapshotReader.logServiceSessionId + 1 + snapshotReader.pendingServiceMessageCount);
final long targetLogServiceSessionId =
targetNextServiceSessionId - 1 - snapshotReader.pendingServiceMessageCount;
if (targetNextServiceSessionId != snapshotReader.nextServiceSessionId ||
targetLogServiceSessionId != snapshotReader.logServiceSessionId ||
0 != snapshotReader.pendingServiceMessageCount &&
(targetLogServiceSessionId + 1 != snapshotReader.minClusterSessionId ||
targetNextServiceSessionId - 1 != snapshotReader.maxClusterSessionId))
{
final long tempRecordingId = createNewSnapshotRecording(
aeron, archive, recordingId, targetLogServiceSessionId, targetNextServiceSessionId);
final long stopPosition = awaitRecordingStopPosition(archive, recordingId);
final long newStopPosition = awaitRecordingStopPosition(archive, tempRecordingId);
if (stopPosition != newStopPosition)
{
throw new ClusterException("new snapshot recording incomplete: expectedStopPosition=" +
stopPosition + ", actualStopPosition=" + newStopPosition);
}
recordingSignalCapture.reset();
archive.truncateRecording(recordingId, 0);
recordingSignalCapture.awaitSignalForRecordingId(archive, recordingId, RecordingSignal.DELETE);
final long replicationId = archive.replicate(
tempRecordingId, recordingId, archive.context().controlRequestStreamId(), IPC_CHANNEL, null);
recordingSignalCapture.reset();
recordingSignalCapture.awaitSignalForCorrelationId(archive, replicationId, RecordingSignal.SYNC);
final long replicatedStopPosition = recordingSignalCapture.position();
if (stopPosition != replicatedStopPosition)
{
throw new ClusterException("incomplete replication of the new recording: expectedStopPosition=" +
stopPosition + ", replicatedStopPosition=" + replicatedStopPosition);
}
recordingSignalCapture.reset();
archive.purgeRecording(tempRecordingId);
recordingSignalCapture.awaitSignalForRecordingId(archive, tempRecordingId, RecordingSignal.DELETE);
return true;
}
}
return false;
} | @Test
void executeThrowsIllegalArgumentExceptionIfClusterDirIsNotADirectory(
final @TempDir File tempDir) throws IOException
{
final File clusterDir = new File(tempDir, "file.txt");
assertTrue(clusterDir.createNewFile());
final IllegalArgumentException exception = assertThrowsExactly(
IllegalArgumentException.class,
() -> new ConsensusModuleSnapshotPendingServiceMessagesPatch().execute(clusterDir));
assertEquals("invalid cluster directory: " + clusterDir.getAbsolutePath(), exception.getMessage());
} |
@Override
public ConsumerRunningInfo getConsumerRunningInfo(String consumerGroup, String clientId,
boolean jstack) throws RemotingException,
MQClientException, InterruptedException {
return defaultMQAdminExtImpl.getConsumerRunningInfo(consumerGroup, clientId, jstack);
} | @Test
public void testGetConsumerRunningInfo() throws RemotingException, MQClientException, InterruptedException {
ConsumerRunningInfo consumerRunningInfo = defaultMQAdminExt.getConsumerRunningInfo("consumer-group", "cid_123", false);
assertThat(consumerRunningInfo.getJstack()).isEqualTo("test");
} |
public static <InputT> KeyByBuilder<InputT> of(PCollection<InputT> input) {
return named(null).of(input);
} | @Test
public void testBuild_Windowing() {
final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings());
final PCollection<Triple<String, Long, Long>> result =
TopPerKey.of(dataset)
.keyBy(s -> s)
.valueBy(s -> 1L)
.scoreBy(s -> 1L)
.windowBy(FixedWindows.of(org.joda.time.Duration.standardHours(1)))
.triggeredBy(DefaultTrigger.of())
.accumulationMode(AccumulationMode.DISCARDING_FIRED_PANES)
.output();
final TopPerKey tpk = (TopPerKey) TestUtils.getProducer(result);
assertTrue(tpk.getWindow().isPresent());
@SuppressWarnings("unchecked")
final WindowDesc<?> windowDesc = WindowDesc.of((Window) tpk.getWindow().get());
assertEquals(
FixedWindows.of(org.joda.time.Duration.standardHours(1)), windowDesc.getWindowFn());
assertEquals(DefaultTrigger.of(), windowDesc.getTrigger());
assertEquals(AccumulationMode.DISCARDING_FIRED_PANES, windowDesc.getAccumulationMode());
} |
public static boolean canFail(LogicalType inputType, LogicalType targetType) {
return Preconditions.checkNotNull(
resolve(inputType, targetType), "Cast rule cannot be resolved")
.canFail(inputType, targetType);
} | @Test
void testCanFail() {
assertThat(CastRuleProvider.canFail(TINYINT, INT)).isFalse();
assertThat(CastRuleProvider.canFail(STRING_TYPE, TIME().getLogicalType())).isTrue();
assertThat(CastRuleProvider.canFail(STRING_TYPE, STRING_TYPE)).isFalse();
LogicalType inputType = ROW(TINYINT(), STRING()).getLogicalType();
assertThat(CastRuleProvider.canFail(inputType, ROW(INT(), TIME()).getLogicalType()))
.isTrue();
assertThat(CastRuleProvider.canFail(inputType, ROW(INT(), STRING()).getLogicalType()))
.isFalse();
} |
@Nullable
public Function<DnsNameResolverBuilder, DnsAddressResolverGroup> dnsAddressResolverGroupProvider() {
return dnsAddressResolverGroupProvider;
} | @Test
void dnsAddressResolverGroupProvider() {
assertThat(builder.build().dnsAddressResolverGroupProvider()).isNull();
Function<DnsNameResolverBuilder, DnsAddressResolverGroup> provider = RoundRobinDnsAddressResolverGroup::new;
builder.dnsAddressResolverGroupProvider(provider);
assertThat(builder.build().dnsAddressResolverGroupProvider()).isEqualTo(provider);
} |
public synchronized boolean hasEndOfData() {
if (hasEndOfBlock()) {
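// END_OF_DATA is only valid if it immediately follows the END_OF_BLOCK byte.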
int potentialEndOfDataIndex = endOfBlockIndex + 1;
if (potentialEndOfDataIndex < availableByteCount
&& buffer[potentialEndOfDataIndex] == MllpProtocolConstants.END_OF_DATA) {
return true;
}
}
return false;
} | @Test
public void testHasEndOfData() {
assertFalse(instance.hasEndOfData(), "Unexpected initial value");
// Test just the END_OF_DATA
instance.write(MllpProtocolConstants.END_OF_DATA);
assertFalse(instance.hasEndOfData());
instance.reset();
assertFalse(instance.hasEndOfData());
// Test just the terminators
instance.write(MllpProtocolConstants.END_OF_BLOCK);
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.END_OF_DATA);
assertFalse(instance.hasEndOfData(), "Need a START_OF_BLOCK before the END_OF_DATA");
instance.reset();
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.START_OF_BLOCK);
assertFalse(instance.hasEndOfData());
instance.write(TEST_HL7_MESSAGE.getBytes());
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.END_OF_BLOCK);
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.END_OF_DATA);
assertTrue(instance.hasEndOfData());
instance.reset();
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.START_OF_BLOCK);
assertFalse(instance.hasEndOfData());
instance.write(TEST_HL7_MESSAGE.getBytes());
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.END_OF_BLOCK);
assertFalse(instance.hasEndOfData());
instance.write("BLAH".getBytes());
assertFalse(instance.hasEndOfData());
instance.write(MllpProtocolConstants.END_OF_DATA);
assertFalse(instance.hasEndOfData());
} |
@Override
public SmsSendRespDTO sendSms(Long sendLogId, String mobile,
String apiTemplateId, List<KeyValue<String, Object>> templateParams) throws Throwable {
// Build the request
SendSmsRequest request = new SendSmsRequest();
request.setSmsSdkAppId(getSdkAppId());
request.setPhoneNumberSet(new String[]{mobile});
request.setSignName(properties.getSignature());
request.setTemplateId(apiTemplateId);
request.setTemplateParamSet(ArrayUtils.toArray(templateParams, e -> String.valueOf(e.getValue())));
request.setSessionContext(JsonUtils.toJsonString(new SessionContext().setLogId(sendLogId)));
// Execute the request
SendSmsResponse response = client.SendSms(request);
SendStatus status = response.getSendStatusSet()[0];
return new SmsSendRespDTO().setSuccess(Objects.equals(status.getCode(), API_CODE_SUCCESS)).setSerialNo(status.getSerialNo())
.setApiRequestId(response.getRequestId()).setApiCode(status.getCode()).setApiMsg(status.getMessage());
} | @Test
public void testDoSendSms_fail() throws Throwable {
// Prepare parameters
Long sendLogId = randomLongId();
String mobile = randomString();
String apiTemplateId = randomString();
List<KeyValue<String, Object>> templateParams = Lists.newArrayList(
new KeyValue<>("1", 1234), new KeyValue<>("2", "login"));
String requestId = randomString();
String serialNo = randomString();
// Mock the method
SendSmsResponse response = randomPojo(SendSmsResponse.class, o -> {
o.setRequestId(requestId);
SendStatus[] sendStatuses = new SendStatus[1];
o.setSendStatusSet(sendStatuses);
SendStatus sendStatus = new SendStatus();
sendStatuses[0] = sendStatus;
sendStatus.setCode("ERROR");
sendStatus.setMessage("send success");
sendStatus.setSerialNo(serialNo);
});
when(client.SendSms(argThat(request -> {
assertEquals(mobile, request.getPhoneNumberSet()[0]);
assertEquals(properties.getSignature(), request.getSignName());
assertEquals(apiTemplateId, request.getTemplateId());
assertEquals(toJsonString(ArrayUtils.toArray(new ArrayList<>(MapUtils.convertMap(templateParams).values()), String::valueOf)),
toJsonString(request.getTemplateParamSet()));
assertEquals(sendLogId, ReflectUtil.getFieldValue(JsonUtils.parseObject(request.getSessionContext(), TencentSmsClient.SessionContext.class), "logId"));
return true;
}))).thenReturn(response);
// Invoke
SmsSendRespDTO result = smsClient.sendSms(sendLogId, mobile, apiTemplateId, templateParams);
// Assert
assertFalse(result.getSuccess());
assertEquals(response.getRequestId(), result.getApiRequestId());
assertEquals(response.getSendStatusSet()[0].getCode(), result.getApiCode());
assertEquals(response.getSendStatusSet()[0].getMessage(), result.getApiMsg());
assertEquals(response.getSendStatusSet()[0].getSerialNo(), result.getSerialNo());
} |
public static <P, R> FuncRt<P, R> uncheck(Func<P, R> expression) {
return uncheck(expression, RuntimeException::new);
} | @Test
public void functionTest() {
Func1<String, String> afunc = (funcParam) -> {
if (funcParam.length() > 5) {
throw new Exception("这是受检查异常需要屌用处显示处理");
}
return funcParam.toUpperCase();
};
//afunc.apply("hello world"); 直接调用需要处理异常
try {
// This line originally had to throw a checked exception; now it only throws a runtime exception
CheckedUtil.uncheck(afunc).call("hello world");
} catch (Exception re) {
assertTrue(re instanceof RuntimeException);
}
} |
@Override
public void persist(final String key, final String value) {
try {
if (isExisted(key)) {
update(key, value);
return;
}
String tempPrefix = "";
String parent = SEPARATOR;
String[] paths = Arrays.stream(key.split(SEPARATOR)).filter(each -> !Strings.isNullOrEmpty(each)).toArray(String[]::new);
// Create key level directory recursively.
for (int i = 0; i < paths.length - 1; i++) {
String tempKey = tempPrefix + SEPARATOR + paths[i];
if (!isExisted(tempKey)) {
insert(tempKey, "", parent);
}
tempPrefix = tempKey;
parent = tempKey;
}
insert(key, value, parent);
} catch (final SQLException ex) {
log.error("Persist {} data to key: {} failed", getType(), key, ex);
}
} | @Test
void assertPersistFailureDuringInsert() throws SQLException {
when(mockJdbcConnection.prepareStatement(repositorySQL.getSelectByKeySQL())).thenReturn(mockPreparedStatement);
when(mockPreparedStatement.executeQuery()).thenReturn(mockResultSet);
when(mockResultSet.next()).thenReturn(false);
when(mockJdbcConnection.prepareStatement(repositorySQL.getInsertSQL())).thenReturn(mockPreparedStatement);
repository.persist("key", "value");
verify(mockPreparedStatementForPersist, times(0)).executeUpdate();
} |
@Override
public Map<String, Object> entries() {
return threadLocal.get();
} | @Test
public void testEntries() {
// Test getting all entries
contextCore.put("key1", "value1");
contextCore.put("key2", "value2");
contextCore.put("key3", "value3");
assertEquals(3, contextCore.entries().size());
assertTrue(contextCore.entries().containsKey("key1"));
assertTrue(contextCore.entries().containsKey("key2"));
assertTrue(contextCore.entries().containsKey("key3"));
contextCore.remove("key1");
contextCore.remove("key2");
contextCore.remove("key3");
assertNull(contextCore.get("key1"));
assertNull(contextCore.get("key2"));
assertNull(contextCore.get("key3"));
} |
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
// It is possible that the object itself implements java.io.Serializable, but its underlying structure does not.
// In this case we fall back to the JSON marshaling strategy, which does not use Java serialization.
}
}
return jsonClone(object);
} | @Test
public void should_clone_collection_of_non_serializable_object() {
List<NonSerializableObject> original = new ArrayList<>();
original.add(new NonSerializableObject("value"));
List<NonSerializableObject> cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
@Override
public void abort(OutputBufferId bufferId)
{
checkState(!Thread.holdsLock(this), "Can not abort while holding a lock on this");
requireNonNull(bufferId, "bufferId is null");
getBuffer(bufferId).destroy();
checkFlushComplete();
} | @Test
public void testAbort()
{
BroadcastOutputBuffer bufferedBuffer = createBroadcastBuffer(
createInitialEmptyOutputBuffers(BROADCAST)
.withBuffer(FIRST, BROADCAST_PARTITION_ID)
.withBuffer(SECOND, BROADCAST_PARTITION_ID)
.withNoMoreBufferIds(),
sizeOfPages(10));
// fill the buffer
for (int i = 0; i < 10; i++) {
addPage(bufferedBuffer, createPage(i));
}
bufferedBuffer.setNoMorePages();
assertBufferResultEquals(TYPES, getBufferResult(bufferedBuffer, FIRST, 0, sizeOfPages(1), NO_WAIT), bufferResult(0, createPage(0)));
bufferedBuffer.abort(FIRST);
assertQueueClosed(bufferedBuffer, FIRST, 0);
assertBufferResultEquals(TYPES, getBufferResult(bufferedBuffer, FIRST, 1, sizeOfPages(1), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 0, true));
assertBufferResultEquals(TYPES, getBufferResult(bufferedBuffer, SECOND, 0, sizeOfPages(1), NO_WAIT), bufferResult(0, createPage(0)));
bufferedBuffer.abort(SECOND);
assertQueueClosed(bufferedBuffer, SECOND, 0);
assertFinished(bufferedBuffer);
assertBufferResultEquals(TYPES, getBufferResult(bufferedBuffer, SECOND, 1, sizeOfPages(1), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 0, true));
} |
@Override
public void commit() {
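// Retry the commit with exponential backoff; only CommitFailedException is considered retryable.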
Tasks.foreach(ops)
.retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
.exponentialBackoff(
base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
2.0 /* exponential */)
.onlyRetryOn(CommitFailedException.class)
.run(
item -> {
TableMetadata updated = internalApply();
ops.commit(base, updated);
});
LOG.info("Committed snapshot changes");
if (cleanExpiredFiles) {
cleanExpiredSnapshots();
}
} | @TestTemplate
public void testExpireSnapshotsWhenGarbageCollectionDisabled() {
table.updateProperties().set(TableProperties.GC_ENABLED, "false").commit();
table.newAppend().appendFile(FILE_A).commit();
assertThatThrownBy(() -> table.expireSnapshots())
.isInstanceOf(ValidationException.class)
.hasMessageStartingWith("Cannot expire snapshots: GC is disabled");
} |
@Override
public byte[] serialize(final String topic, final List<?> data) {
if (data == null) {
return null;
}
try {
final StringWriter stringWriter = new StringWriter();
final CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat);
csvPrinter.printRecord(() -> new FieldIterator(data, schema));
final String result = stringWriter.toString();
return result.substring(0, result.length() - 2).getBytes(StandardCharsets.UTF_8);
} catch (final Exception e) {
throw new SerializationException("Error serializing CSV message", e);
}
} | @Test
public void shouldSerializeReallyLargeDecimalWithoutScientificNotation() {
// Given:
givenSingleColumnSerializer(SqlTypes.decimal(10, 3));
final List<?> values = Collections.singletonList(new BigDecimal("10000000000.000"));
// When:
final byte[] bytes = serializer.serialize("", values);
// Then:
assertThat(new String(bytes, StandardCharsets.UTF_8), is("10000000000.000"));
} |
public ProjectList searchProjects(String gitlabUrl, String personalAccessToken, @Nullable String projectName,
@Nullable Integer pageNumber, @Nullable Integer pageSize) {
String url = format("%s/projects?archived=false&simple=true&membership=true&order_by=name&sort=asc&search=%s%s%s",
gitlabUrl,
projectName == null ? "" : urlEncode(projectName),
pageNumber == null ? "" : format("&page=%d", pageNumber),
pageSize == null ? "" : format("&per_page=%d", pageSize)
);
LOG.debug("get projects : [{}]", url);
Request request = new Request.Builder()
.addHeader(PRIVATE_TOKEN, personalAccessToken)
.url(url)
.get()
.build();
try (Response response = client.newCall(request).execute()) {
Headers headers = response.headers();
checkResponseIsSuccessful(response, "Could not get projects from GitLab instance");
List<Project> projectList = Project.parseJsonArray(response.body().string());
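// GitLab returns pagination details in the X-Page, X-Per-Page and X-Total response headers.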
int returnedPageNumber = parseAndGetIntegerHeader(headers.get("X-Page"));
int returnedPageSize = parseAndGetIntegerHeader(headers.get("X-Per-Page"));
String xtotal = headers.get("X-Total");
Integer totalProjects = Strings.isEmpty(xtotal) ? null : parseAndGetIntegerHeader(xtotal);
return new ProjectList(projectList, returnedPageNumber, returnedPageSize, totalProjects);
} catch (JsonSyntaxException e) {
throw new IllegalArgumentException("Could not parse GitLab answer to search projects. Got a non-json payload as result.");
} catch (IOException e) {
logException(url, e);
throw new IllegalStateException(e.getMessage(), e);
}
} | @Test
public void should_throw_IllegalArgumentException_when_token_insufficient_scope() {
MockResponse response = new MockResponse()
.setResponseCode(403)
.setBody("{\"error\":\"insufficient_scope\"," +
"\"error_description\":\"The request requires higher privileges than provided by the access token.\"," +
"\"scope\":\"api read_api\"}");
server.enqueue(response);
assertThatThrownBy(() -> underTest.searchProjects(gitlabUrl, "pat", "example", 1, 2))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Your GitLab token has insufficient scope");
} |
@ProcessElement
public void processElement(OutputReceiver<PartitionMetadata> receiver) {
PartitionMetadataDao partitionMetadataDao = daoFactory.getPartitionMetadataDao();
if (!partitionMetadataDao.tableExists()) {
daoFactory.getPartitionMetadataAdminDao().createPartitionMetadataTable();
createFakeParentPartition();
}
final PartitionMetadata initialPartition =
Optional.ofNullable(partitionMetadataDao.getPartition(InitialPartition.PARTITION_TOKEN))
.map(mapperFactory.partitionMetadataMapper()::from)
.orElseThrow(
() -> new IllegalStateException("Initial partition not found in metadata table."));
receiver.output(initialPartition);
} | @Test
public void testInitialize() {
when(daoFactory.getPartitionMetadataDao()).thenReturn(partitionMetadataDao);
when(partitionMetadataDao.tableExists()).thenReturn(false);
when(daoFactory.getPartitionMetadataAdminDao()).thenReturn(partitionMetadataAdminDao);
doNothing().when(partitionMetadataAdminDao).createPartitionMetadataTable();
when(partitionMetadataDao.insert(any())).thenReturn(Timestamp.ofTimeMicroseconds(1L));
when(partitionMetadataDao.getPartition(InitialPartition.PARTITION_TOKEN))
.thenReturn(Struct.newBuilder().build());
when(mapperFactory.partitionMetadataMapper()).thenReturn(partitionMetadataMapper);
when(partitionMetadataMapper.from(any())).thenReturn(mock(PartitionMetadata.class));
initializeDoFn.processElement(receiver);
verify(daoFactory, times(2)).getPartitionMetadataDao();
verify(daoFactory, times(1)).getPartitionMetadataAdminDao();
verify(partitionMetadataDao, times(1)).insert(any());
verify(partitionMetadataDao, times(1)).getPartition(InitialPartition.PARTITION_TOKEN);
verify(partitionMetadataDao, times(1)).tableExists();
verify(mapperFactory, times(1)).partitionMetadataMapper();
verify(partitionMetadataMapper, times(1)).from(any());
} |
public String doLayout(ILoggingEvent event) {
StringBuilder buf = new StringBuilder();
startNewTableIfLimitReached(buf);
boolean odd = true;
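// Alternate the CSS row class between odd and even based on the running row counter.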
if (((counter++) & 1) == 0) {
odd = false;
}
String level = event.getLevel().toString().toLowerCase();
buf.append(LINE_SEPARATOR);
buf.append("<tr class=\"");
buf.append(level);
if (odd) {
buf.append(" odd\">");
} else {
buf.append(" even\">");
}
buf.append(LINE_SEPARATOR);
Converter<ILoggingEvent> c = head;
while (c != null) {
appendEventToBuffer(buf, c, event);
c = c.getNext();
}
buf.append("</tr>");
buf.append(LINE_SEPARATOR);
if (event.getThrowableProxy() != null) {
throwableRenderer.render(buf, event);
}
return buf.toString();
} | @Test
@Disabled
public void rawLimit() throws Exception {
StringBuilder sb = new StringBuilder();
String header = layout.getFileHeader();
assertTrue(header.startsWith(
"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">"));
sb.append(header);
sb.append(layout.getPresentationHeader());
for (int i = 0; i < CoreConstants.TABLE_ROW_LIMIT * 3; i++) {
sb.append(layout.doLayout(
new LoggingEvent(this.getClass().getName(), root, Level.DEBUG, "test message" + i, null, null)));
}
sb.append(layout.getPresentationFooter());
sb.append(layout.getFileFooter());
// check that the output adheres to xhtml-strict.dtd
parseOutput(sb.toString());
} |
@Override
public void close() throws BlockStoreException {
try {
buffer.force();
buffer = null; // Allow it to be GCd and the underlying file mapping to go away.
fileLock.release();
randomAccessFile.close();
blockCache.clear();
} catch (IOException e) {
throw new BlockStoreException(e);
}
} | @Test(expected = BlockStoreException.class)
public void twoStores_sequentially_shrink() throws Exception {
SPVBlockStore store = new SPVBlockStore(TESTNET, blockStoreFile, 20, true);
store.close();
store = new SPVBlockStore(TESTNET, blockStoreFile, 10, true);
} |
@Override
public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor(
List<ExecutionAttemptID> executionAttemptIds) {
final Map<ExecutionVertexID, ExecutionAttemptID> vertexIdToExecutionId = new HashMap<>();
executionAttemptIds.forEach(
executionId ->
vertexIdToExecutionId.put(executionId.getExecutionVertexId(), executionId));
checkState(
vertexIdToExecutionId.size() == executionAttemptIds.size(),
"SlotSharingExecutionSlotAllocator does not support one execution vertex to have multiple concurrent executions");
final List<ExecutionVertexID> vertexIds =
executionAttemptIds.stream()
.map(ExecutionAttemptID::getExecutionVertexId)
.collect(Collectors.toList());
return allocateSlotsForVertices(vertexIds).stream()
.collect(
Collectors.toMap(
vertexAssignment ->
vertexIdToExecutionId.get(
vertexAssignment.getExecutionVertexId()),
vertexAssignment ->
new ExecutionSlotAssignment(
vertexIdToExecutionId.get(
vertexAssignment.getExecutionVertexId()),
vertexAssignment.getLogicalSlotFuture())));
} | @Test
void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture()
throws ExecutionException, InterruptedException {
AllocationContext context = AllocationContext.newBuilder().addGroup(EV1).build();
ExecutionSlotAssignment assignment1 =
getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1);
ExecutionSlotAssignment assignment2 =
getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1);
assertThat(assignment1.getLogicalSlotFuture().get())
.isSameAs(assignment2.getLogicalSlotFuture().get());
} |
public static String getNativeDataTypeSimpleName( ValueMetaInterface v ) {
try {
return v.getType() != ValueMetaInterface.TYPE_BINARY ? v.getNativeDataTypeClass().getSimpleName() : "Binary";
} catch ( KettleValueException e ) {
LogChannelInterface log = new LogChannel( v );
log.logDebug( BaseMessages.getString( PKG, "FieldHelper.Log.UnknownNativeDataTypeSimpleName" ) );
return "Object";
}
} | @Test
public void getNativeDataTypeSimpleName_Timestamp() {
ValueMetaTimestamp v = new ValueMetaTimestamp();
assertEquals( "Timestamp", FieldHelper.getNativeDataTypeSimpleName( v ) );
} |
public static void main(String[] args) {
// DB seeding
LOGGER.info("Db seeding: " + "1 user: {\"ignite1771\", amount = 1000.0}, "
+ "2 products: {\"computer\": price = 800.0, \"car\": price = 20000.0}");
Db.getInstance().seedUser(TEST_USER_1, 1000.0);
Db.getInstance().seedItem(ITEM_COMPUTER, 800.0);
Db.getInstance().seedItem(ITEM_CAR, 20000.0);
final var applicationServices = new ApplicationServicesImpl();
ReceiptViewModel receipt;
LOGGER.info(LOGGER_STRING, TEST_USER_2, ITEM_TV);
receipt = applicationServices.loggedInUserPurchase(TEST_USER_2, ITEM_TV);
receipt.show();
MaintenanceLock.getInstance().setLock(false);
LOGGER.info(LOGGER_STRING, TEST_USER_2, ITEM_TV);
receipt = applicationServices.loggedInUserPurchase(TEST_USER_2, ITEM_TV);
receipt.show();
LOGGER.info(LOGGER_STRING, TEST_USER_1, ITEM_TV);
receipt = applicationServices.loggedInUserPurchase(TEST_USER_1, ITEM_TV);
receipt.show();
LOGGER.info(LOGGER_STRING, TEST_USER_1, ITEM_CAR);
receipt = applicationServices.loggedInUserPurchase(TEST_USER_1, ITEM_CAR);
receipt.show();
LOGGER.info(LOGGER_STRING, TEST_USER_1, ITEM_COMPUTER);
receipt = applicationServices.loggedInUserPurchase(TEST_USER_1, ITEM_COMPUTER);
receipt.show();
} | @Test
void shouldExecuteWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@Override
public boolean deleteAll(JobID jobId) {
return delete(BlobUtils.getStorageLocationPath(basePath, jobId));
} | @Test
void testDeleteAll() throws IOException {
final Path temporaryFile = createTemporaryFileWithContent("delete");
final JobID jobId = new JobID();
assertThat(testInstance.put(temporaryFile.toFile(), jobId, new PermanentBlobKey()))
.isTrue();
assertThat(testInstance.put(temporaryFile.toFile(), jobId, new PermanentBlobKey()))
.isTrue();
assertThat(getPath(jobId)).isDirectory().exists();
assertThat(getPath(jobId).toFile().listFiles()).hasSize(2);
assertThat(testInstance.deleteAll(jobId)).isTrue();
assertThat(getPath(jobId)).doesNotExist();
} |
public boolean isJspOrStrutsCounter() {
return JSP_COUNTER_NAME.equals(name) || STRUTS_COUNTER_NAME.equals(name);
} | @Test
public void testJspOrStrutsCounter() {
assertFalse("jspOrStrutsCounter", new Counter("http", null).isJspOrStrutsCounter());
assertTrue("jspOrStrutsCounter", new Counter("jsp", null).isJspOrStrutsCounter());
assertTrue("jspOrStrutsCounter", new Counter("struts", null).isJspOrStrutsCounter());
} |
@Override
public boolean isConnected() {
return channel.isConnected();
} | @Test
void isConnectedTest() {
Assertions.assertFalse(header.isConnected());
} |
@Override
synchronized public void registerConfigChangeWatcher(ConfigChangeWatcher watcher) {
startListening(new WatcherHolder(watcher), new ConfigChangeCallback() {
@Override
public synchronized void onSingleValueChanged(final WatcherHolder holder, final ConfigTable.ConfigItem configItem) {
notifySingleValue(holder.getWatcher(), configItem);
}
@Override
public synchronized void onGroupValuesChanged(final WatcherHolder holder,
final GroupConfigTable.GroupConfigItems groupConfigItems) {
notifyGroupValues((GroupConfigChangeWatcher) holder.getWatcher(), groupConfigItems);
}
});
} | @Test
public void testInit() {
final String[] newValue = new String[1];
register.registerConfigChangeWatcher(
new ConfigChangeWatcher("MockModule", new FetchingConfigWatcherRegisterTest.MockProvider(), "prop2") {
@Override
public void notify(ConfigChangeEvent value) {
newValue[0] = value.getNewValue();
}
@Override
public String value() {
return null;
}
});
assertEquals("abc2", newValue[0]);
} |
@Override
public void completeInitialLoad() {
data = data.copyWithNewLoadingComplete(true);
data.log.info("Completed initial ACL load process.");
initialLoadFuture.complete(null);
} | @Test
public void testCompleteInitialLoad() {
StandardAuthorizer authorizer = new StandardAuthorizer();
authorizer.configure(Collections.singletonMap(SUPER_USERS_CONFIG, "User:superman"));
Map<Endpoint, ? extends CompletionStage<Void>> futures = authorizer.
start(new AuthorizerTestServerInfo(Collections.singleton(PLAINTEXT)));
assertEquals(Collections.singleton(PLAINTEXT), futures.keySet());
assertFalse(futures.get(PLAINTEXT).toCompletableFuture().isDone());
authorizer.completeInitialLoad();
assertTrue(futures.get(PLAINTEXT).toCompletableFuture().isDone());
assertFalse(futures.get(PLAINTEXT).toCompletableFuture().isCompletedExceptionally());
} |
@Override
public boolean isValidHeader(final int readableBytes) {
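// During the startup phase the message type byte is not present yet, so only the payload length field is required.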
return readableBytes >= (startupPhase ? 0 : MESSAGE_TYPE_LENGTH) + PAYLOAD_LENGTH;
} | @Test
void assertIsInvalidHeader() {
assertTrue(new PostgreSQLPacketCodecEngine().isValidHeader(4));
} |
public double length() {
double result = 0;
for (LineSegment segment : this.segments) {
result += segment.length();
}
return result;
} | @Test
public void lengthTest() {
Point point1 = new Point(0, 0);
Point point2 = new Point(1, 0);
Point point3 = new Point(1, 1);
Point point4 = new Point(3, 1);
LineString lineString = new LineString();
lineString.segments.add(new LineSegment(point1, point2));
lineString.segments.add(new LineSegment(point2, point3));
lineString.segments.add(new LineSegment(point3, point4));
Assert.assertEquals(4, lineString.length(), 0.0001);
Assert.assertEquals(3, lineString.segments.size());
} |
public void setup(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to setup internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final Map<String, Map<String, String>> streamsSideTopicConfigs = topicConfigs.values().stream()
.collect(Collectors.toMap(
InternalTopicConfig::name,
topicConfig -> topicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention)
));
final Set<String> createdTopics = new HashSet<>();
final Set<String> topicStillToCreate = new HashSet<>(topicConfigs.keySet());
while (!topicStillToCreate.isEmpty()) {
final Set<NewTopic> newTopics = topicStillToCreate.stream()
.map(topicName -> new NewTopic(
topicName,
topicConfigs.get(topicName).numberOfPartitions(),
Optional.of(replicationFactor)
).configs(streamsSideTopicConfigs.get(topicName))
).collect(Collectors.toSet());
log.info("Going to create internal topics: {}", newTopics);
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
processCreateTopicResults(createTopicsResult, topicStillToCreate, createdTopics, deadline);
maybeSleep(Collections.singletonList(topicStillToCreate), deadline, "created");
}
log.info("Completed setup of internal topics {}.", topicConfigs.keySet());
} | @Test
public void shouldOnlyRetryNotSuccessfulFuturesDuringSetup() {
final AdminClient admin = mock(AdminClient.class);
final StreamsConfig streamsConfig = new StreamsConfig(config);
final InternalTopicManager topicManager = new InternalTopicManager(time, admin, streamsConfig);
final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFailFuture = new KafkaFutureImpl<>();
createTopicFailFuture.completeExceptionally(new TopicExistsException("exists"));
final KafkaFutureImpl<TopicMetadataAndConfig> createTopicSuccessfulFuture = new KafkaFutureImpl<>();
createTopicSuccessfulFuture.complete(
new TopicMetadataAndConfig(Uuid.randomUuid(), 1, 1, new Config(Collections.emptyList()))
);
final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1);
final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 1);
final NewTopic newTopic1 = newTopic(topic1, internalTopicConfig1, streamsConfig);
final NewTopic newTopic2 = newTopic(topic2, internalTopicConfig2, streamsConfig);
when(admin.createTopics(mkSet(newTopic1, newTopic2)))
.thenAnswer(answer -> new MockCreateTopicsResult(mkMap(
mkEntry(topic1, createTopicSuccessfulFuture),
mkEntry(topic2, createTopicFailFuture)
)));
when(admin.createTopics(mkSet(newTopic2)))
.thenAnswer(answer -> new MockCreateTopicsResult(mkMap(
mkEntry(topic2, createTopicSuccessfulFuture)
)));
topicManager.setup(mkMap(
mkEntry(topic1, internalTopicConfig1),
mkEntry(topic2, internalTopicConfig2)
));
} |
public int base() { return this.alphabet.alphabetChars.length; } | @Test
void base62_codec_test_cases_pass() {
var b62 = Base62.codec();
assertEquals(62, b62.base());
verifyRoundtrip(b62, "Hello World!", "T8dgcjRGkZ3aysdN");
verifyRoundtrip(b62, "\0\0Hello World!", "00T8dgcjRGkZ3aysdN");
verifyRoundtrip(b62, "", "");
verifyRoundtrip(b62, unhex("00"), "0");
verifyRoundtrip(b62, unhex("0000"), "00");
verifyRoundtrip(b62, unhex("00000000ffffffff"), "00004gfFC3");
verifyRoundtrip(b62, unhex("ffffffff00000000"), "LygHZwPV2MC");
} |
@Override
public double score(int[] truth, int[] prediction) {
return of(truth, prediction);
} | @Test
public void testMeasure() {
System.out.println("FDR");
int[] truth = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
int[] prediction = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
FDR instance = new FDR();
double expResult = 0.07407;
double result = instance.score(truth, prediction);
assertEquals(expResult, result, 1E-5);
} |
@SuppressWarnings("unchecked")
public <T extends Expression> T rewrite(final T expression, final C context) {
return (T) rewriter.process(expression, context);
} | @Test
public void shouldRewriteLogicalBinaryExpression() {
// Given:
final LogicalBinaryExpression parsed = parseExpression("true OR false");
when(processor.apply(parsed.getLeft(), context)).thenReturn(expr1);
when(processor.apply(parsed.getRight(), context)).thenReturn(expr2);
// When:
final Expression rewritten = expressionRewriter.rewrite(parsed, context);
// Then:
assertThat(
rewritten,
equalTo(
new LogicalBinaryExpression(parsed.getLocation(), parsed.getType(), expr1, expr2))
);
} |
@Override
public JobStatus getJobStatus() {
return JobStatus.CANCELLING;
} | @Test
void testStateDoesNotExposeGloballyTerminalExecutionGraph() throws Exception {
try (MockStateWithExecutionGraphContext ctx = new MockStateWithExecutionGraphContext()) {
StateTrackingMockExecutionGraph meg = new StateTrackingMockExecutionGraph();
Canceling canceling = createCancelingState(ctx, meg);
// ideally we'd delay the async call to #onGloballyTerminalState instead, but the
// context does not support that
ctx.setExpectFinished(eg -> {});
meg.completeTerminationFuture(JobStatus.CANCELED);
// this is just a sanity check for the test
assertThat(meg.getState()).isEqualTo(JobStatus.CANCELED);
assertThat(canceling.getJobStatus()).isEqualTo(JobStatus.CANCELLING);
assertThat(canceling.getJob().getState()).isEqualTo(JobStatus.CANCELLING);
assertThat(canceling.getJob().getStatusTimestamp(JobStatus.CANCELED)).isZero();
}
} |
@Override
public PackageRevision responseMessageForLatestRevisionSince(String responseBody) {
return toPackageRevision(responseBody);
} | @Test
public void shouldBuildNullPackageRevisionFromLatestRevisionSinceWhenEmptyResponse() throws Exception {
assertThat(messageHandler.responseMessageForLatestRevisionSince(""), nullValue());
assertThat(messageHandler.responseMessageForLatestRevisionSince(null), nullValue());
assertThat(messageHandler.responseMessageForLatestRevisionSince("{}"), nullValue());
} |
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if (!(obj instanceof Rotation)) {
return false;
}
Rotation other = (Rotation) obj;
if (noRotation(this)) {
return noRotation(other);
}
if (noRotation(other)) {
return noRotation(this);
}
if (Float.floatToIntBits(this.px) != Float.floatToIntBits(other.px)) {
return false;
}
if (Float.floatToIntBits(this.py) != Float.floatToIntBits(other.py)) {
return false;
}
if (Double.doubleToLongBits(this.radians) != Double.doubleToLongBits(other.radians)) {
return false;
}
return true;
} | @Test
public void equalsTest() {
Rotation Rotation1 = new Rotation(1, 2, 1);
Rotation Rotation2 = new Rotation(1, 2, 1);
Rotation Rotation3 = new Rotation(1, 1, 1);
Rotation Rotation4 = new Rotation(2, 2, 4);
TestUtils.equalsTest(Rotation1, Rotation2);
TestUtils.notEqualsTest(Rotation1, Rotation3);
TestUtils.notEqualsTest(Rotation1, Rotation4);
TestUtils.notEqualsTest(Rotation1, new Object());
TestUtils.notEqualsTest(Rotation1, null);
} |
@Override
public Long sendSingleMailToAdmin(String mail, Long userId,
String templateCode, Map<String, Object> templateParams) {
// If mail is empty, load the email address of the user with the given userId
if (StrUtil.isEmpty(mail)) {
AdminUserDO user = adminUserService.getUser(userId);
if (user != null) {
mail = user.getEmail();
}
}
// Send the mail
return sendSingleMail(mail, userId, UserTypeEnum.ADMIN.getValue(), templateCode, templateParams);
} | @Test
public void testSendSingleMailToAdmin() {
// Prepare parameters
Long userId = randomLongId();
String templateCode = RandomUtils.randomString();
Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
.put("op", "login").build();
// Mock adminUserService behavior
AdminUserDO user = randomPojo(AdminUserDO.class, o -> o.setMobile("15601691300"));
when(adminUserService.getUser(eq(userId))).thenReturn(user);
// Mock mailTemplateService behavior
MailTemplateDO template = randomPojo(MailTemplateDO.class, o -> {
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setContent("验证码为{code}, 操作为{op}");
o.setParams(Lists.newArrayList("code", "op"));
});
when(mailTemplateService.getMailTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
String title = RandomUtils.randomString();
when(mailTemplateService.formatMailTemplateContent(eq(template.getTitle()), eq(templateParams)))
.thenReturn(title);
String content = RandomUtils.randomString();
when(mailTemplateService.formatMailTemplateContent(eq(template.getContent()), eq(templateParams)))
.thenReturn(content);
// Mock mailAccountService behavior
MailAccountDO account = randomPojo(MailAccountDO.class);
when(mailAccountService.getMailAccountFromCache(eq(template.getAccountId()))).thenReturn(account);
// Mock mailLogService behavior
Long mailLogId = randomLongId();
when(mailLogService.createMailLog(eq(userId), eq(UserTypeEnum.ADMIN.getValue()), eq(user.getEmail()),
eq(account), eq(template), eq(content), eq(templateParams), eq(true))).thenReturn(mailLogId);
// Invoke
Long resultMailLogId = mailSendService.sendSingleMailToAdmin(null, userId, templateCode, templateParams);
// Assert the result
assertEquals(mailLogId, resultMailLogId);
// Verify the expected interactions
verify(mailProducer).sendMailSendMessage(eq(mailLogId), eq(user.getEmail()),
eq(account.getId()), eq(template.getNickname()), eq(title), eq(content));
} |
public final void isFinite() {
if (actual == null || actual.isNaN() || actual.isInfinite()) {
failWithActual(simpleFact("expected to be finite"));
}
} | @Test
public void isFinite() {
assertThat(1.23f).isFinite();
assertThat(Float.MAX_VALUE).isFinite();
assertThat(-1.0 * Float.MIN_VALUE).isFinite();
assertThatIsFiniteFails(Float.POSITIVE_INFINITY);
assertThatIsFiniteFails(Float.NEGATIVE_INFINITY);
assertThatIsFiniteFails(Float.NaN);
assertThatIsFiniteFails(null);
} |
@Override
public final void collect(T record) {
collect(record, TimestampAssigner.NO_TIMESTAMP);
} | @Test
void testNoTimestampValue() {
final CollectingDataOutput<Integer> dataOutput = new CollectingDataOutput<>();
final SourceOutputWithWatermarks<Integer> out =
createWithSameOutputs(
dataOutput, new RecordTimestampAssigner<>(), new NoWatermarksGenerator<>());
out.collect(17);
final Object event = dataOutput.events.get(0);
assertThat(event).isInstanceOf(StreamRecord.class);
assertThat(((StreamRecord<?>) event).getTimestamp())
.isEqualTo(TimestampAssigner.NO_TIMESTAMP);
} |
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
} | @Test
void assertGetBinaryProtocolValueWithMySQLTypeLong() {
assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.LONG), instanceOf(MySQLInt4BinaryProtocolValue.class));
} |
@Override
public void onRuleSubscribe(final RuleData ruleData) {
LOG.info("subscribe rule data for rule[id: {}, selectorId: {}, name: {}]", ruleData.getId(), ruleData.getSelectorId(), ruleData.getName());
subscribeDataHandler(ruleData, DataEventTypeEnum.UPDATE);
} | @Test
public void testOnRuleSubscribe() {
baseDataCache.cleanRuleData();
RuleData ruleData = RuleData.builder().id("1").selectorId(mockSelectorId1).enabled(true).pluginName(mockPluginName1).sort(1).build();
commonPluginDataSubscriber.onRuleSubscribe(ruleData);
assertNotNull(baseDataCache.obtainRuleData(ruleData.getSelectorId()));
assertEquals(Lists.newArrayList(ruleData), baseDataCache.obtainRuleData(ruleData.getSelectorId()));
} |
@Override
public String name() {
return name;
} | @Test
public void testSetNamespaceOwnership() throws TException {
setNamespaceOwnershipAndVerify(
"set_individual_ownership_on_default_owner",
ImmutableMap.of(),
ImmutableMap.of(
HiveCatalog.HMS_DB_OWNER,
"some_individual_owner",
HiveCatalog.HMS_DB_OWNER_TYPE,
PrincipalType.USER.name()),
System.getProperty("user.name"),
PrincipalType.USER,
"some_individual_owner",
PrincipalType.USER);
setNamespaceOwnershipAndVerify(
"set_group_ownership_on_default_owner",
ImmutableMap.of(),
ImmutableMap.of(
HiveCatalog.HMS_DB_OWNER,
"some_group_owner",
HiveCatalog.HMS_DB_OWNER_TYPE,
PrincipalType.GROUP.name()),
System.getProperty("user.name"),
PrincipalType.USER,
"some_group_owner",
PrincipalType.GROUP);
setNamespaceOwnershipAndVerify(
"change_individual_to_group_ownership",
ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_owner"),
ImmutableMap.of(
HiveCatalog.HMS_DB_OWNER,
"some_group_owner",
HiveCatalog.HMS_DB_OWNER_TYPE,
PrincipalType.GROUP.name()),
"some_owner",
PrincipalType.USER,
"some_group_owner",
PrincipalType.GROUP);
setNamespaceOwnershipAndVerify(
"change_group_to_individual_ownership",
ImmutableMap.of(
HiveCatalog.HMS_DB_OWNER,
"some_group_owner",
HiveCatalog.HMS_DB_OWNER_TYPE,
PrincipalType.GROUP.name()),
ImmutableMap.of(
HiveCatalog.HMS_DB_OWNER,
"some_individual_owner",
HiveCatalog.HMS_DB_OWNER_TYPE,
PrincipalType.USER.name()),
"some_group_owner",
PrincipalType.GROUP,
"some_individual_owner",
PrincipalType.USER);
assertThatThrownBy(
() ->
setNamespaceOwnershipAndVerify(
"set_owner_without_setting_owner_type",
ImmutableMap.of(),
ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_individual_owner"),
System.getProperty("user.name"),
PrincipalType.USER,
"no_post_setting_expectation_due_to_exception_thrown",
null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
String.format(
"Setting %s and %s has to be performed together or not at all",
HiveCatalog.HMS_DB_OWNER_TYPE, HiveCatalog.HMS_DB_OWNER));
assertThatThrownBy(
() ->
setNamespaceOwnershipAndVerify(
"set_owner_type_without_setting_owner",
ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_owner"),
ImmutableMap.of(HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()),
"some_owner",
PrincipalType.USER,
"no_post_setting_expectation_due_to_exception_thrown",
null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
String.format(
"Setting %s and %s has to be performed together or not at all",
HiveCatalog.HMS_DB_OWNER_TYPE, HiveCatalog.HMS_DB_OWNER));
assertThatThrownBy(
() ->
setNamespaceOwnershipAndVerify(
"set_invalid_owner_type",
ImmutableMap.of(),
ImmutableMap.of(
HiveCatalog.HMS_DB_OWNER, "iceberg",
HiveCatalog.HMS_DB_OWNER_TYPE, "invalidOwnerType"),
System.getProperty("user.name"),
PrincipalType.USER,
"no_post_setting_expectation_due_to_exception_thrown",
null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"No enum constant org.apache.hadoop.hive.metastore.api.PrincipalType.invalidOwnerType");
} |
@Deprecated
public static org.apache.rocketmq.common.message.Message convertToRocketMessage(
ObjectMapper objectMapper, String charset,
String destination, org.springframework.messaging.Message message) {
Object payloadObj = message.getPayload();
byte[] payloads;
if (payloadObj instanceof String) {
payloads = ((String) payloadObj).getBytes(Charset.forName(charset));
} else if (payloadObj instanceof byte[]) {
payloads = (byte[]) message.getPayload();
} else {
try {
String jsonObj = objectMapper.writeValueAsString(payloadObj);
payloads = jsonObj.getBytes(Charset.forName(charset));
} catch (Exception e) {
throw new RuntimeException("convert to RocketMQ message failed.", e);
}
}
return getAndWrapMessage(destination, message.getHeaders(), payloads);
} | @Test
public void testConvertToRocketMessageWithMessageConvert() {
Message msgWithStringPayload = MessageBuilder.withPayload("test body")
.setHeader("test", 1)
.setHeader(RocketMQHeaders.TAGS, "tags")
.setHeader(RocketMQHeaders.KEYS, "my_keys")
.build();
RocketMQMessageConverter messageConverter = new RocketMQMessageConverter();
org.apache.rocketmq.common.message.Message rocketMsg = RocketMQUtil.convertToRocketMessage(messageConverter.getMessageConverter(),
"UTF-8", "test-topic", msgWithStringPayload);
assertEquals("1", rocketMsg.getProperty("test"));
assertNull(rocketMsg.getProperty(RocketMQHeaders.TAGS));
assertEquals("my_keys", rocketMsg.getProperty(RocketMQHeaders.KEYS));
Message msgWithBytesPayload = MessageBuilder.withPayload("123".getBytes()).build();
org.apache.rocketmq.common.message.Message rocketMsgWithObj = RocketMQUtil.convertToRocketMessage(messageConverter.getMessageConverter(),
"UTF-8", "test-topic", msgWithBytesPayload);
assertEquals("123", new String(rocketMsgWithObj.getBody()));
} |
@Override
public void addSAJSListener(SAJSListener listener) {
} | @Test
public void addSAJSListener() {
mSensorsAPI.addSAJSListener(new SAJSListener() {
@Override
public void onReceiveJSMessage(WeakReference<View> view, String message) {
Assert.fail();
}
});
} |
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
return context.getSecretSourceResolver().resolve(toInterpolate);
} | @Test
public void resolve_defaultValueLimit() {
assertThat(resolve("${FOO:-default:-other}"), equalTo("default:-other"));
} |
public static Result<Void> success() {
return new Result<Void>()
.setCode(Result.SUCCESS_CODE);
} | @Test
public void success() {
Assert.isTrue(Result.SUCCESS_CODE.equals(Results.success().getCode()));
} |
@Secured(resource = Commons.NACOS_CORE_CONTEXT_V2 + "/loader", action = ActionTypes.WRITE)
@GetMapping("/smartReloadCluster")
public ResponseEntity<String> smartReload(HttpServletRequest request,
@RequestParam(value = "loaderFactor", required = false) String loaderFactorStr,
@RequestParam(value = "force", required = false) String force) {
LOGGER.info("Smart reload request receive,requestIp={}", getRemoteIp(request));
Map<String, Object> serverLoadMetrics = getServerLoadMetrics();
Object avgString = serverLoadMetrics.get("avg");
List<ServerLoaderMetrics> details = (List<ServerLoaderMetrics>) serverLoadMetrics.get("detail");
int avg = Integer.parseInt(avgString.toString());
float loaderFactor =
StringUtils.isBlank(loaderFactorStr) ? RemoteUtils.LOADER_FACTOR : Float.parseFloat(loaderFactorStr);
int overLimitCount = (int) (avg * (1 + loaderFactor));
int lowLimitCount = (int) (avg * (1 - loaderFactor));
List<ServerLoaderMetrics> overLimitServer = new ArrayList<>();
List<ServerLoaderMetrics> lowLimitServer = new ArrayList<>();
for (ServerLoaderMetrics metrics : details) {
int sdkCount = Integer.parseInt(metrics.getMetric().get(SDK_CONNECTION_COUNT_METRIC));
if (sdkCount > overLimitCount) {
overLimitServer.add(metrics);
}
if (sdkCount < lowLimitCount) {
lowLimitServer.add(metrics);
}
}
// Sort descending by sdkConCount
overLimitServer.sort((o1, o2) -> {
Integer sdkCount1 = Integer.valueOf(o1.getMetric().get(SDK_CONNECTION_COUNT_METRIC));
Integer sdkCount2 = Integer.valueOf(o2.getMetric().get(SDK_CONNECTION_COUNT_METRIC));
return sdkCount1.compareTo(sdkCount2) * -1;
});
LOGGER.info("Over load limit server list ={}", overLimitServer);
// Sort ascending by sdkConCount
lowLimitServer.sort((o1, o2) -> {
Integer sdkCount1 = Integer.valueOf(o1.getMetric().get(SDK_CONNECTION_COUNT_METRIC));
Integer sdkCount2 = Integer.valueOf(o2.getMetric().get(SDK_CONNECTION_COUNT_METRIC));
return sdkCount1.compareTo(sdkCount2);
});
LOGGER.info("Low load limit server list ={}", lowLimitServer);
AtomicBoolean result = new AtomicBoolean(true);
for (int i = 0; i < overLimitServer.size() && i < lowLimitServer.size(); i++) {
ServerReloadRequest serverLoaderInfoRequest = new ServerReloadRequest();
serverLoaderInfoRequest.setReloadCount(overLimitCount);
serverLoaderInfoRequest.setReloadServer(lowLimitServer.get(i).address);
Member member = serverMemberManager.find(overLimitServer.get(i).address);
LOGGER.info("Reload task submit ,fromServer ={},toServer={}, ", overLimitServer.get(i).address,
lowLimitServer.get(i).address);
if (serverMemberManager.getSelf().equals(member)) {
try {
serverReloaderRequestHandler.handle(serverLoaderInfoRequest, new RequestMeta());
} catch (NacosException e) {
LOGGER.error("Fail to loader self server", e);
result.set(false);
}
} else {
try {
clusterRpcClientProxy.asyncRequest(member, serverLoaderInfoRequest, new RequestCallBack() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public long getTimeout() {
return 100L;
}
@Override
public void onResponse(Response response) {
if (response == null || !response.isSuccess()) {
LOGGER.error("Fail to loader member={},response={}", member.getAddress(), response);
result.set(false);
}
}
@Override
public void onException(Throwable e) {
LOGGER.error("Fail to loader member={}", member.getAddress(), e);
result.set(false);
}
});
} catch (NacosException e) {
LOGGER.error("Fail to loader member={}", member.getAddress(), e);
result.set(false);
}
}
}
return ResponseEntity.ok().body(result.get() ? SUCCESS_RESULT : FAIL_RESULT);
} | @Test
void testSmartReload() throws NacosException {
EnvUtil.setEnvironment(new MockEnvironment());
Member member = new Member();
member.setIp("1.1.1.1");
member.setPort(8848);
ServerAbilities serverAbilities = new ServerAbilities();
ServerRemoteAbility serverRemoteAbility = new ServerRemoteAbility();
serverRemoteAbility.setSupportRemoteConnection(true);
serverAbilities.setRemoteAbility(serverRemoteAbility);
member.setAbilities(serverAbilities);
Mockito.when(serverMemberManager.allMembersWithoutSelf()).thenReturn(Collections.singletonList(member));
Map<String, String> metrics = new HashMap<>();
metrics.put("conCount", "1");
metrics.put("sdkConCount", "1");
ServerLoaderInfoResponse serverLoaderInfoResponse = new ServerLoaderInfoResponse();
serverLoaderInfoResponse.setLoaderMetrics(metrics);
Mockito.when(serverLoaderInfoRequestHandler.handle(Mockito.any(), Mockito.any())).thenReturn(serverLoaderInfoResponse);
Mockito.when(serverMemberManager.getSelf()).thenReturn(member);
MockHttpServletRequest httpServletRequest = new MockHttpServletRequest();
ResponseEntity<String> result = serverLoaderController.smartReload(httpServletRequest, "1", null);
assertEquals("Ok", result.getBody());
} |
public final Sensor taskLevelSensor(final String threadId,
final String taskId,
final String sensorSuffix,
final RecordingLevel recordingLevel,
final Sensor... parents) {
final String sensorPrefix = taskSensorPrefix(threadId, taskId);
synchronized (taskLevelSensors) {
return getSensors(taskLevelSensors, sensorSuffix, sensorPrefix, recordingLevel, parents);
}
} | @Test
public void shouldGetNewTaskLevelSensor() {
final Metrics metrics = mock(Metrics.class);
final RecordingLevel recordingLevel = RecordingLevel.INFO;
setupGetNewSensorTest(metrics, recordingLevel);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);
final Sensor actualSensor = streamsMetrics.taskLevelSensor(
THREAD_ID1,
TASK_ID1,
SENSOR_NAME_1,
recordingLevel
);
assertThat(actualSensor, is(equalToObject(sensor)));
} |
@Override
public void processElement(StreamRecord<MergeOnReadInputSplit> element) {
splits.add(element.getValue());
enqueueProcessSplits();
} | @Test
public void testCheckpoint() throws Exception {
// Received emitted splits: split1, split2, split3, split4; a checkpoint request is triggered
// while reading records from split1.
TestData.writeData(TestData.DATA_SET_INSERT, conf);
long timestamp = 0;
try (OneInputStreamOperatorTestHarness<MergeOnReadInputSplit, RowData> harness = createReader()) {
harness.setup();
harness.open();
SteppingMailboxProcessor processor = createLocalMailbox(harness);
StreamReadMonitoringFunction func = TestUtils.getMonitorFunc(conf);
List<MergeOnReadInputSplit> splits = generateSplits(func);
assertThat("Should have 4 splits", splits.size(), is(4));
for (MergeOnReadInputSplit split : splits) {
harness.processElement(split, ++timestamp);
}
// Trigger snapshot state; it will start to work once all records from split0 are read.
processor.getMainMailboxExecutor()
.execute(() -> harness.snapshot(1, 3), "Trigger snapshot");
assertTrue(processor.runMailboxStep(), "Should have processed the split0");
assertTrue(processor.runMailboxStep(), "Should have processed the snapshot state action");
assertThat(TestData.rowDataToString(harness.extractOutputValues()),
is(getSplitExpected(Collections.singletonList(splits.get(0)), EXPECTED)));
// Read records from split1.
assertTrue(processor.runMailboxStep(), "Should have processed the split1");
// Read records from split2.
assertTrue(processor.runMailboxStep(), "Should have processed the split2");
// Read records from split3.
assertTrue(processor.runMailboxStep(), "Should have processed the split3");
// Assert the output has expected elements.
TestData.assertRowDataEquals(harness.extractOutputValues(), TestData.DATA_SET_INSERT);
}
} |
static <T, W extends BoundedWindow>
ThrowingFunction<KV<T, Iterable<W>>, KV<T, KV<Iterable<W>, Iterable<KV<W, Iterable<W>>>>>>
createMapFunctionForPTransform(String ptransformId, PTransform ptransform)
throws IOException {
RunnerApi.FunctionSpec payload =
RunnerApi.FunctionSpec.parseFrom(ptransform.getSpec().getPayload());
WindowFn<?, W> windowFn =
(WindowFn<?, W>) WindowingStrategyTranslation.windowFnFromProto(payload);
return WindowMergingFnRunner.<T, W>create(windowFn)::mergeWindows;
} | @Test
public void testWindowMergingWithNonMergingWindowFn() throws Exception {
ThrowingFunction<
KV<Object, Iterable<BoundedWindow>>,
KV<
Object,
KV<Iterable<BoundedWindow>, Iterable<KV<BoundedWindow, Iterable<BoundedWindow>>>>>>
mapFunction =
WindowMergingFnRunner.createMapFunctionForPTransform(
"ptransformId", createMergeTransformForWindowFn(new GlobalWindows()));
KV<Object, Iterable<BoundedWindow>> input =
KV.of(
"abc",
ImmutableList.of(new IntervalWindow(Instant.now(), Duration.standardMinutes(1))));
assertEquals(
KV.of(input.getKey(), KV.of(input.getValue(), Collections.emptyList())),
mapFunction.apply(input));
} |
@Override
public void removeSelector(final SelectorData selectorData) {
super.getWasmExtern(REMOVE_SELECTOR_METHOD_NAME)
.ifPresent(handlerPlugin -> callWASI(selectorData, handlerPlugin));
} | @Test
public void removeSelectorTest() {
pluginDataHandler.removeSelector(selectorData);
testWasmPluginDataHandler.handlerSelector(selectorData);
testWasmPluginDataHandler.removeSelector(selectorData);
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final List<Path> deleted = new ArrayList<Path>();
for(Map.Entry<Path, TransferStatus> entry : files.entrySet()) {
boolean skip = false;
final Path file = entry.getKey();
for(Path d : deleted) {
if(file.isChild(d)) {
skip = true;
break;
}
}
if(skip) {
continue;
}
deleted.add(file);
callback.delete(file);
try {
final TransferStatus status = entry.getValue();
session.getClient().execute(this.toRequest(file, status), new VoidResponseHandler());
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Cannot delete {0}", e, file);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, file);
}
}
} | @Test(expected = NotfoundException.class)
public void testDeleteNotFound() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new DAVDeleteFeature(session).delete(Collections.singletonMap(test, new TransferStatus()), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public <R> RFuture<R> evalAsync(Mode mode, String luaScript, ReturnType returnType, List<Object> keys, Object... values) {
String key = getKey(keys);
return evalAsync(key, mode, luaScript, returnType, keys, values);
} | @Test
public void testEvalAsync() {
RScript script = redisson.getScript(StringCodec.INSTANCE);
RFuture<List<Object>> res = script.evalAsync(RScript.Mode.READ_ONLY, "return {'1','2','3.3333','foo',nil,'bar'}", RScript.ReturnType.MULTI, Collections.emptyList());
assertThat(res.toCompletableFuture().join()).containsExactly("1", "2", "3.3333", "foo");
} |
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"})
void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas,
MigrationDecisionCallback callback) {
assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: "
+ Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas);
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas));
logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas));
}
initState(oldReplicas);
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
// fix cyclic partition replica movements
if (fixCycle(oldReplicas, newReplicas)) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId,
Arrays.toString(newReplicas));
}
}
int currentIndex = 0;
while (currentIndex < oldReplicas.length) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex,
Arrays.toString(state));
}
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
if (newReplicas[currentIndex] == null) {
if (state[currentIndex] != null) {
// replica owner is removed and no one will own this replica
logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1);
state[currentIndex] = null;
}
currentIndex++;
continue;
}
if (state[currentIndex] == null) {
int i = getReplicaIndex(state, newReplicas[currentIndex]);
if (i == -1) {
// fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner
logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (i > currentIndex) {
// SHIFT UP replica from i to currentIndex, copy data from partition owner
logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId,
state[i], i, currentIndex);
callback.migrate(null, -1, -1, state[i], i, currentIndex);
state[currentIndex] = state[i];
state[i] = null;
continue;
}
throw new AssertionError("partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas)
+ ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas));
}
if (newReplicas[currentIndex].equals(state[currentIndex])) {
// no change, no action needed
currentIndex++;
continue;
}
if (getReplicaIndex(newReplicas, state[currentIndex]) == -1
&& getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
// MOVE partition replica from its old owner to new owner
logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
int newIndex = getReplicaIndex(newReplicas, state[currentIndex]);
assert newIndex > currentIndex : "partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: "
+ Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state)
+ ", FINAL: " + Arrays.toString(newReplicas);
if (state[newIndex] == null) {
// it is a SHIFT DOWN
logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId,
state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex);
state[newIndex] = state[currentIndex];
} else {
logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
}
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex);
}
assert Arrays.equals(state, newReplicas)
: "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas)
+ " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas);
} | @Test
public void test_SHIFT_UP() throws UnknownHostException {
final PartitionReplica[] oldReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
null,
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
new PartitionReplica(new Address("localhost", 5704), uuids[3]),
null,
null,
null,
};
final PartitionReplica[] newReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
new PartitionReplica(new Address("localhost", 5704), uuids[3]),
null,
null,
null,
null,
};
migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
verify(callback).migrate(null, -1, -1, new PartitionReplica(new Address("localhost", 5703), uuids[2]), 2, 1);
verify(callback).migrate(null, -1, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), 3, 2);
} |
@Override
public boolean equals(Object o)
{
return o instanceof COSInteger && ((COSInteger)o).intValue() == intValue();
} | @Test
void testEquals()
{
// Consistency
for (int i = -1000; i < 3000; i += 200)
{
COSInteger test1 = COSInteger.get(i);
COSInteger test2 = COSInteger.get(i);
COSInteger test3 = COSInteger.get(i);
// Reflexive (x == x)
assertEquals(test1, test1);
// Symmetric is preserved ( x==y then y===x)
assertEquals(test2, test1);
assertEquals(test1, test2);
// Transitive (if x==y && y==z then x===z)
assertEquals(test1, test2);
assertEquals(test2, test3);
assertEquals(test1, test3);
COSInteger test4 = COSInteger.get(i + 1);
assertNotEquals(test4, test1);
}
} |
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
return Optional.ofNullable(HANDLERS.get(step.getClass()))
.map(h -> h.handle(this, schema, step))
.orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
} | @Test
public void shouldResolveSchemaForStreamFilter() {
// Given:
final StreamFilter<?> step = new StreamFilter<>(
PROPERTIES,
streamSource,
mock(Expression.class)
);
// When:
final LogicalSchema result = resolver.resolve(step, SCHEMA);
// Then:
assertThat(result, is(SCHEMA));
} |
public AlertResult send(String content) {
try {
return checkSendMsgResult(HttpUtils.post(feiShuParams.getWebhook(), content, proxyConfig));
} catch (Exception e) {
e.printStackTrace();
logger.error("send fei shu alert msg exception : {}", e.getMessage(), e);
AlertResult alertResult = new AlertResult();
alertResult.setSuccess(false);
alertResult.setMessage("send fei shu alert fail.");
return alertResult;
}
} | @Ignore
@Test
public void testSend() {
FeiShuAlert feiShuAlert = new FeiShuAlert();
AlertConfig alertConfig = new AlertConfig();
alertConfig.setType(FeiShuConstants.TYPE);
alertConfig.setParam(feiShuConfig);
feiShuAlert.setConfig(alertConfig);
AlertResult alertResult =
feiShuAlert.send(AlertBaseConstant.ALERT_TEMPLATE_TITLE, AlertBaseConstant.ALERT_TEMPLATE_MSG);
Assert.assertEquals(true, alertResult.getSuccess());
} |
static boolean applyTags(RuleDto rule, Set<String> tags) {
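// Validate the requested tags, drop any that are already system tags, and report whether the rule's tags changed.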
for (String tag : tags) {
RuleTagFormat.validate(tag);
}
Set<String> initialTags = rule.getTags();
final Set<String> systemTags = rule.getSystemTags();
Set<String> withoutSystemTags = Sets.filter(tags, input -> input != null && !systemTags.contains(input));
rule.setTags(withoutSystemTags);
return withoutSystemTags.size() != initialTags.size() || !withoutSystemTags.containsAll(initialTags);
} | @Test
public void applyTags() {
RuleDto rule = new RuleDto().setTags(Sets.newHashSet("performance"));
boolean changed = RuleTagHelper.applyTags(rule, Sets.newHashSet("java8", "security"));
assertThat(rule.getTags()).containsOnly("java8", "security");
assertThat(changed).isTrue();
} |
public static String artifactToString(Artifact artifact) {
StringBuilder buffer = new StringBuilder(128);
buffer.append(artifact.getGroupId());
buffer.append(':').append(artifact.getArtifactId());
buffer.append(':').append(artifact.getExtension());
if (artifact.getClassifier().length() > 0) {
buffer.append(':').append(artifact.getClassifier());
}
buffer.append(':').append(artifact.getVersion());
return buffer.toString();
} | @Test
public void artifactToString() {
Artifact testArtifact = new DefaultArtifact("org.apache.storm:storm-core:1.0.0");
String ret = AetherUtils.artifactToString(testArtifact);
assertEquals("org.apache.storm:storm-core:jar:1.0.0", ret);
} |
@SuppressWarnings({"checkstyle:NPathComplexity", "checkstyle:CyclomaticComplexity"})
@Override
public void shutdown() {
log.info("ksqlDB shutdown called");
try {
pullQueryMetrics.ifPresent(PullQueryExecutorMetrics::close);
} catch (final Exception e) {
log.error("Exception while waiting for pull query metrics to close", e);
}
try {
scalablePushQueryMetrics.ifPresent(ScalablePushQueryMetrics::close);
} catch (final Exception e) {
log.error("Exception while waiting for scalable push query metrics to close", e);
}
localCommands.ifPresent(lc -> {
try {
lc.close();
} catch (final Exception e) {
log.error("Exception while closing local commands", e);
}
});
try {
ksqlEngine.close();
} catch (final Exception e) {
log.error("Exception while waiting for Ksql Engine to close", e);
}
try {
commandRunner.close();
} catch (final Exception e) {
log.error("Exception while waiting for CommandRunner thread to complete", e);
}
try {
serviceContext.close();
} catch (final Exception e) {
log.error("Exception while closing services", e);
}
try {
securityExtension.close();
} catch (final Exception e) {
log.error("Exception while closing security extension", e);
}
if (apiServer != null) {
apiServer.stop();
apiServer = null;
}
if (vertx != null) {
try {
final CountDownLatch latch = new CountDownLatch(1);
vertx.close(ar -> latch.countDown());
latch.await();
} catch (InterruptedException e) {
log.error("Exception while closing vertx", e);
}
}
if (oldApiWebsocketExecutor != null) {
oldApiWebsocketExecutor.shutdown();
}
shutdownAdditionalAgents();
log.info("ksqlDB shutdown complete");
} | @Test
public void shouldCloseSecurityExtensionOnClose() {
// When:
app.shutdown();
// Then:
verify(securityExtension).close();
} |
public Set<Map.Entry<String, JsonElement>> entrySet() {
return members.entrySet();
} | @Test
public void testEntrySet() {
JsonObject o = new JsonObject();
assertThat(o.entrySet()).hasSize(0);
o.addProperty("b", true);
Set<?> expectedEntries = Collections.singleton(new SimpleEntry<>("b", new JsonPrimitive(true)));
assertThat(o.entrySet()).isEqualTo(expectedEntries);
assertThat(o.entrySet()).hasSize(1);
o.addProperty("a", false);
// Insertion order should be preserved by entrySet()
List<?> expectedEntriesList =
Arrays.asList(
new SimpleEntry<>("b", new JsonPrimitive(true)),
new SimpleEntry<>("a", new JsonPrimitive(false)));
assertThat(new ArrayList<>(o.entrySet())).isEqualTo(expectedEntriesList);
Iterator<Entry<String, JsonElement>> iterator = o.entrySet().iterator();
// Test behavior of Entry.setValue
for (int i = 0; i < o.size(); i++) {
Entry<String, JsonElement> entry = iterator.next();
entry.setValue(new JsonPrimitive(i));
assertThat(entry.getValue()).isEqualTo(new JsonPrimitive(i));
}
expectedEntriesList =
Arrays.asList(
new SimpleEntry<>("b", new JsonPrimitive(0)),
new SimpleEntry<>("a", new JsonPrimitive(1)));
assertThat(new ArrayList<>(o.entrySet())).isEqualTo(expectedEntriesList);
Entry<String, JsonElement> entry = o.entrySet().iterator().next();
// null value is not permitted, only JsonNull is supported
// This intentionally deviates from the behavior of the other JsonObject methods which
// implicitly convert null -> JsonNull, to match more closely the contract of Map.Entry
var e = assertThrows(NullPointerException.class, () -> entry.setValue(null));
assertThat(e).hasMessageThat().isEqualTo("value == null");
assertThat(entry.getValue()).isNotNull();
o.addProperty("key1", 1);
o.addProperty("key2", 2);
Deque<?> expectedEntriesQueue =
new ArrayDeque<>(
Arrays.asList(
new SimpleEntry<>("b", new JsonPrimitive(0)),
new SimpleEntry<>("a", new JsonPrimitive(1)),
new SimpleEntry<>("key1", new JsonPrimitive(1)),
new SimpleEntry<>("key2", new JsonPrimitive(2))));
// Note: Must wrap in ArrayList because Deque implementations do not implement `equals`
assertThat(new ArrayList<>(o.entrySet())).isEqualTo(new ArrayList<>(expectedEntriesQueue));
iterator = o.entrySet().iterator();
// Remove entries one by one
for (int i = o.size(); i >= 1; i--) {
assertThat(iterator.hasNext()).isTrue();
assertThat(iterator.next()).isEqualTo(expectedEntriesQueue.getFirst());
iterator.remove();
expectedEntriesQueue.removeFirst();
assertThat(o.size()).isEqualTo(i - 1);
assertThat(new ArrayList<>(o.entrySet())).isEqualTo(new ArrayList<>(expectedEntriesQueue));
}
} |
public void removeFromLastWhen(final Predicate<T> predicate) {
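// Walk the segments from the tail, removing trailing elements that match the predicate until a segment removes nothing.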
Segment<T> lastSeg = getLast();
while (true) {
if (lastSeg == null) {
this.firstOffset = this.size = 0;
return;
}
int removed = lastSeg.removeFromLastWhen(predicate);
if (removed == 0) {
break;
}
this.size -= removed;
if (lastSeg.isEmpty()) {
RecycleUtil.recycle(this.segments.pollLast());
lastSeg = getLast();
}
}
} | @Test
public void testRemoveFromLastWhen() {
fillList();
// Remove elements greater than or equal to 150.
this.list.removeFromLastWhen(x -> x >= 150);
assertEquals(150, this.list.size());
assertFalse(this.list.isEmpty());
for (int i = 0; i < 150; i++) {
assertEquals(i, (int) this.list.get(i));
}
try {
this.list.get(151);
fail();
} catch (IndexOutOfBoundsException e) {
}
assertEquals(150 / SegmentList.SEGMENT_SIZE + 1, this.list.segmentSize());
// Remove elements greater than or equal to 32.
this.list.removeFromLastWhen(x -> x >= 32);
assertEquals(32, this.list.size());
assertFalse(this.list.isEmpty());
for (int i = 0; i < 32; i++) {
assertEquals(i, (int) this.list.get(i));
}
try {
this.list.get(32);
fail();
} catch (IndexOutOfBoundsException e) {
}
assertEquals(1, this.list.segmentSize());
// Add elements again.
fillList();
assertEquals(1032, this.list.size());
for (int i = 0; i < 1032; i++) {
if (i < 32) {
assertEquals(i, (int) this.list.get(i));
} else {
assertEquals(i - 32, (int) this.list.get(i));
}
}
} |
public Map<String, Parameter> generateMergedWorkflowParams(
WorkflowInstance instance, RunRequest request) {
Workflow workflow = instance.getRuntimeWorkflow();
Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
Map<String, ParamDefinition> defaultWorkflowParams =
defaultParamManager.getDefaultWorkflowParams();
// merge workflow params for start
if (request.isFreshRun()) {
// merge default workflow params
ParamsMergeHelper.mergeParams(
allParamDefs,
defaultWorkflowParams,
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, request));
// merge defined workflow params
if (workflow.getParams() != null) {
ParamsMergeHelper.mergeParams(
allParamDefs,
workflow.getParams(),
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, request));
}
}
// merge workflow params from previous instance for restart
if (!request.isFreshRun() && instance.getParams() != null) {
Map<String, ParamDefinition> previousParamDefs =
instance.getParams().entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toDefinition()));
// remove reserved params, which should be injected again by the system.
for (String paramName : Constants.RESERVED_PARAM_NAMES) {
previousParamDefs.remove(paramName);
}
ParamsMergeHelper.mergeParams(
allParamDefs,
previousParamDefs,
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM, false));
}
// merge run params
if (request.getRunParams() != null) {
ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
ParamsMergeHelper.mergeParams(
allParamDefs,
request.getRunParams(),
ParamsMergeHelper.MergeContext.workflowCreate(source, request));
}
// merge user provided restart run params
getUserRestartParam(request)
.ifPresent(
userRestartParams -> {
ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
ParamsMergeHelper.mergeParams(
allParamDefs,
userRestartParams,
ParamsMergeHelper.MergeContext.workflowCreate(source, request));
});
// cleanup any placeholder params and convert to params
return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
} | @Test
public void testSubRestartConfigRunUnchangedParamMerge() {
Map<String, Object> meta =
Collections.singletonMap(Constants.METADATA_SOURCE_KEY, "SUBWORKFLOW");
LongParameter param =
LongParameter.builder()
.name("TARGET_RUN_DATE")
.value(1000L)
.evaluatedResult(1000L)
.evaluatedTime(123L)
.mode(ParamMode.MUTABLE_ON_START)
.meta(meta)
.build();
Map<String, Object> restartMeta =
Collections.singletonMap(Constants.METADATA_SOURCE_KEY, "DEFINITION");
Map<String, ParamDefinition> restartParams =
singletonMap(
"TARGET_RUN_DATE",
param.toDefinition().asLongParamDef().toBuilder()
.mode(ParamMode.MUTABLE)
.meta(restartMeta)
.build());
RunRequest request =
RunRequest.builder()
.initiator(new SubworkflowInitiator())
.currentPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
.runParams(restartParams)
.restartConfig(
RestartConfig.builder().addRestartNode("sample-wf-map-params", 1, "foo").build())
.build();
Map<String, Parameter> instanceParams = new LinkedHashMap<>();
instanceParams.put("TARGET_RUN_DATE", param);
workflowInstance.setParams(instanceParams);
Map<String, Parameter> workflowParams =
paramsManager.generateMergedWorkflowParams(workflowInstance, request);
Assert.assertFalse(workflowParams.isEmpty());
Assert.assertEquals(
Long.valueOf(1000L), workflowParams.get("TARGET_RUN_DATE").asLongParam().getValue());
Assert.assertEquals(ParamSource.SUBWORKFLOW, workflowParams.get("TARGET_RUN_DATE").getSource());
Assert.assertEquals(
ParamMode.MUTABLE_ON_START, workflowParams.get("TARGET_RUN_DATE").getMode());
} |
@Override
public int getPrecision(final int column) {
Preconditions.checkArgument(1 == column);
return 0;
} | @Test
void assertGetPrecision() throws SQLException {
assertThat(actualMetaData.getPrecision(1), is(0));
} |
public static HoodieRecordMerger loadRecordMerger(String mergerClass) {
try {
HoodieRecordMerger recordMerger = (HoodieRecordMerger) INSTANCE_CACHE.get(mergerClass);
if (null == recordMerger) {
synchronized (HoodieRecordMerger.class) {
recordMerger = (HoodieRecordMerger) INSTANCE_CACHE.get(mergerClass);
if (null == recordMerger) {
recordMerger = (HoodieRecordMerger) ReflectionUtils.loadClass(mergerClass,
new Object[] {});
INSTANCE_CACHE.put(mergerClass, recordMerger);
}
}
}
return recordMerger;
} catch (HoodieException e) {
throw new HoodieException("Unable to instantiate hoodie merge class ", e);
}
} | @Test
void loadHoodieMergeWithWrongMerger() {
String mergeClassName = "wrong.package.MergerName";
assertThrows(HoodieException.class, () -> HoodieRecordUtils.loadRecordMerger(mergeClassName));
} |
@Override
public ResultSet getTablePrivileges(final String catalog, final String schemaPattern, final String tableNamePattern) throws SQLException {
return createDatabaseMetaDataResultSet(getDatabaseMetaData().getTablePrivileges(getActualCatalog(catalog), getActualSchema(schemaPattern), getActualTableNamePattern(tableNamePattern)));
} | @Test
void assertGetTablePrivileges() throws SQLException {
when(databaseMetaData.getTablePrivileges("test", null, null)).thenReturn(resultSet);
assertThat(shardingSphereDatabaseMetaData.getTablePrivileges("test", null, null), instanceOf(DatabaseMetaDataResultSet.class));
} |
public JobRecord getJobRecord(long jobId) {
return jobRecords.get().get(jobId);
} | @Test
public void test_getJobRecordFromClient() {
HazelcastInstance client = createHazelcastClient();
Pipeline p = Pipeline.create();
p.readFrom(Sources.streamFromProcessor("source", ProcessorMetaSupplier.of(() -> new NoOutputSourceP())))
.withoutTimestamps()
.writeTo(Sinks.logger());
Job job = instance.getJet().newJob(p, new JobConfig()
.setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
.setSnapshotIntervalMillis(100));
JobRepository jobRepository = new JobRepository(client);
assertTrueEventually(() -> assertNotNull(jobRepository.getJobRecord(job.getId())));
client.shutdown();
} |
@VisibleForTesting
@Nonnull
Map<String, Object> prepareContextForPaginatedResponse(@Nonnull List<RuleDao> rules) {
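// Index the rules by title so pipeline usages grouped by rule name can be mapped back to rule ids.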
final Map<String, RuleDao> ruleTitleMap = rules
.stream()
.collect(Collectors.toMap(RuleDao::title, dao -> dao));
final Map<String, List<PipelineCompactSource>> result = new HashMap<>();
rules.forEach(r -> result.put(r.id(), new ArrayList<>()));
pipelineServiceHelper.groupByRuleName(
pipelineService::loadAll, ruleTitleMap.keySet())
.forEach((ruleTitle, pipelineDaos) -> {
result.put(
ruleTitleMap.get(ruleTitle).id(),
pipelineDaos.stream()
.map(dao -> PipelineCompactSource.builder()
.id(dao.id())
.title(dao.title())
.build())
.toList()
);
});
return Map.of("used_in_pipelines", result);
} | @Test
public void prepareContextForPaginatedResponse_returnsEmptyRuleMapIfRulesNotUsedByPipelines() {
final List<RuleDao> rules = List.of(
ruleDao("rule-1", "Rule 1"),
ruleDao("rule-2", "Rule 2")
);
assertThat(underTest.prepareContextForPaginatedResponse(rules))
.isEqualTo(Map.of("used_in_pipelines", Map.of(
"rule-1", List.of(),
"rule-2", List.of()
)));
} |
public Array getArray(String name) {
Array a = arrayMap.get(name);
if (a == null) {
validateArray(name);
a = new Array(configDefinition, name);
arrayMap.put(name, a);
}
return a;
} | @Test
public void require_that_definition_is_passed_to_childarray() {
ConfigPayloadBuilder.Array nestedArray = builderWithDef.getArray("myarray");
nestedArray.append("1337");
} |
public static IssueChangeContextBuilder newBuilder() {
return new IssueChangeContextBuilder();
} | @Test
public void test_equal() {
context = IssueChangeContext.newBuilder()
.setUserUuid(USER_UUID)
.setDate(NOW)
.setExternalUser(EXTERNAL_USER)
.setWebhookSource(WEBHOOK_SOURCE)
.build();
IssueChangeContext equalContext = IssueChangeContext.newBuilder()
.setUserUuid(USER_UUID)
.setDate(NOW)
.setExternalUser(EXTERNAL_USER)
.setWebhookSource(WEBHOOK_SOURCE)
.build();
IssueChangeContext notEqualContext = IssueChangeContext.newBuilder().setUserUuid("other_user_uuid").setDate(NOW).build();
assertThat(context).isEqualTo(context)
.isEqualTo(equalContext)
.isNotEqualTo(notEqualContext)
.isNotEqualTo(null)
.isNotEqualTo(new Object());
} |
private CoordinatorResult<ShareGroupHeartbeatResponseData, CoordinatorRecord> shareGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String rackId,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the share group.
    boolean createIfNotExists = memberEpoch == 0;
    final ShareGroup group = getOrMaybeCreatePersistedShareGroup(groupId, createIfNotExists);
    throwIfShareGroupIsFull(group, memberId);

    // Get or create the member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    ShareGroupMember member = getOrMaybeSubscribeShareGroupMember(
        group,
        memberId,
        memberEpoch,
        createIfNotExists
    );

    // 1. Create or update the member. If the member is new or has changed, a ShareGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ShareGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ShareGroupMetadataValue record to the partition.
    ShareGroupMember updatedMember = new ShareGroupMember.Builder(member)
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        Map<String, Integer> subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newShareGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newShareGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + shareGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        targetAssignmentEpoch,
        targetAssignment,
        records
    );

    scheduleShareGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ShareGroupHeartbeatResponseData response = new ShareGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(shareGroupHeartbeatIntervalMs);

    // The assignment is only provided in the following cases:
    // 1. The member just joined or rejoined to group (epoch equals to zero);
    // 2. The member's assignment has been updated.
    if (memberEpoch == 0 || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createShareGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
} | @Test
public void testShareGroupMemberIdGeneration() {
    MockPartitionAssignor assignor = new MockPartitionAssignor("share");
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withShareGroupAssignor(assignor)
        .withMetadataImage(MetadataImage.EMPTY)
        .build();

    assignor.prepareGroupAssignment(new GroupAssignment(
        Collections.emptyMap()
    ));

    CoordinatorResult<ShareGroupHeartbeatResponseData, CoordinatorRecord> result = context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()
            .setGroupId("group-foo")
            .setMemberEpoch(0)
            .setSubscribedTopicNames(Arrays.asList("foo", "bar")));

    // Verify that a member id was generated for the new member.
    String memberId = result.response().memberId();
    assertNotNull(memberId);
    assertNotEquals("", memberId);

    // The response should get a bumped epoch and should not
    // contain any assignment because we did not provide
    // topics metadata.
    assertEquals(
        new ShareGroupHeartbeatResponseData()
            .setMemberId(memberId)
            .setMemberEpoch(1)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ShareGroupHeartbeatResponseData.Assignment()),
        result.response()
    );
} |
@Override
public Ring<T> createRing(Map<T, Integer> pointsMap) {
    return _ringFactory.createRing(pointsMap);
} | @Test(groups = { "small", "back-end" })
public void testPointsCleanUpLarge()
    throws URISyntaxException
{
    Map<String, Integer> pointsMp = buildPointsMap(19);
    PointBasedConsistentHashRingFactory<String> ringFactory = new PointBasedConsistentHashRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
    Ring<String> ring = ringFactory.createRing(pointsMp);
    assertNotNull(ring.get(1000));

    pointsMp.remove("http://test.linkedin.com:10001");
    pointsMp.remove("http://test.linkedin.com:10003");
    pointsMp.remove("http://test.linkedin.com:10006");
    ring = ringFactory.createRing(pointsMp);
    assertNotNull(ring.get(1000));
    // factory should keep all the points
    Map<String, List<Point<String>>> pointsMap = ringFactory.getPointsMap();
    assertEquals(pointsMap.size(), 19);

    pointsMp.remove("http://test.linkedin.com:10009");
    ring = ringFactory.createRing(pointsMp);
    assertNotNull(ring.get(1000));
    // factory should clean up and build new points
    pointsMap = ringFactory.getPointsMap();
    assertEquals(pointsMap.size(), 15);
} |
@Override
public Instant getWatermarkThatGuaranteesFiring(BoundedWindow window) {
    return BoundedWindow.TIMESTAMP_MAX_VALUE;
} | @Test
public void testFireDeadline() throws Exception {
    assertEquals(
        BoundedWindow.TIMESTAMP_MAX_VALUE,
        underTest.getWatermarkThatGuaranteesFiring(
            new IntervalWindow(new Instant(0), new Instant(10))));
} |