focal_method | test_case |
---|---|
@Override
public CeWorker create(int ordinal) {
String uuid = uuidFactory.create();
CeWorkerImpl ceWorker = new CeWorkerImpl(ordinal, uuid, queue, taskProcessorRepository, ceWorkerController, executionListeners);
ceWorkers = Stream.concat(ceWorkers.stream(), Stream.of(ceWorker)).collect(Collectors.toSet());
return ceWorker;
} | @Test
public void ceworker_created_by_factory_must_contain_uuid() {
CeWorker ceWorker = underTest.create(randomOrdinal);
assertThat(ceWorker.getUUID()).isNotEmpty();
} |
@Override
public boolean contains(Object o) {
throw new UnsupportedOperationException("LazySet does not support contains requests");
} | @Test(expected = UnsupportedOperationException.class)
@SuppressWarnings("ResultOfMethodCallIgnored")
public void testContains_throwsException() {
set.contains(null);
} |
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
// Get the mime4j configuration, or use a default one
MimeConfig config =
new MimeConfig.Builder().setMaxLineLen(100000).setMaxHeaderLen(100000).build();
config = context.get(MimeConfig.class, config);
Detector localDetector = context.get(Detector.class);
if (localDetector == null) {
//lazily load this if necessary
if (detector == null) {
EmbeddedDocumentUtil embeddedDocumentUtil = new EmbeddedDocumentUtil(context);
detector = embeddedDocumentUtil.getDetector();
}
localDetector = detector;
}
MimeStreamParser parser =
new MimeStreamParser(config, null, new DefaultBodyDescriptorBuilder());
XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
MailContentHandler mch = new MailContentHandler(xhtml, localDetector, metadata, context,
config.isStrictParsing(), extractAllAlternatives);
parser.setContentHandler(mch);
parser.setContentDecoding(true);
parser.setNoRecurse();
xhtml.startDocument();
TikaInputStream tstream = TikaInputStream.get(stream);
try {
parser.parse(tstream);
} catch (IOException e) {
tstream.throwIfCauseOf(e);
throw new TikaException("Failed to parse an email message", e);
} catch (MimeException e) {
// Unwrap the exception in case it was not thrown by mime4j
Throwable cause = e.getCause();
if (cause instanceof TikaException) {
throw (TikaException) cause;
} else if (cause instanceof SAXException) {
throw (SAXException) cause;
} else {
throw new TikaException("Failed to parse an email message", e);
}
}
xhtml.endDocument();
} | @Test
public void testUnusualFromAddress() throws Exception {
Metadata metadata = new Metadata();
InputStream stream = getStream("test-documents/testRFC822_oddfrom");
ContentHandler handler = mock(DefaultHandler.class);
EXTRACT_ALL_ALTERNATIVES_PARSER.parse(stream, handler, metadata, new ParseContext());
assertEquals("Saved by Windows Internet Explorer 7",
metadata.get(TikaCoreProperties.CREATOR));
assertEquals("Air Permit Programs | Air & Radiation | US EPA",
metadata.get(TikaCoreProperties.TITLE));
assertEquals("Air Permit Programs | Air & Radiation | US EPA",
metadata.get(TikaCoreProperties.SUBJECT));
} |
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
} | @Test
void testGetArray5() {
assertThrows(IndexOutOfBoundsException.class, () -> {
CollectionUtils.get(new int[] {}, -1);
});
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testPuma8nhQuantile() {
test(Loss.quantile(0.5), "puma8nh", Puma8NH.formula, Puma8NH.data, 3.2486);
} |
public Path getSourceFileListing() {
return sourceFileListing;
} | @Test
public void testSourceListing() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
new Path("hdfs://localhost:8020/source/first"),
new Path("hdfs://localhost:8020/target/"));
Assert.assertEquals(new Path("hdfs://localhost:8020/source/first"),
builder.build().getSourceFileListing());
} |
@Override
public int calculateNewCapacity(int minNewCapacity, int maxCapacity) {
checkPositiveOrZero(minNewCapacity, "minNewCapacity");
if (minNewCapacity > maxCapacity) {
throw new IllegalArgumentException(String.format(
"minNewCapacity: %d (expected: not greater than maxCapacity(%d)",
minNewCapacity, maxCapacity));
}
final int threshold = CALCULATE_THRESHOLD; // 4 MiB page
if (minNewCapacity == threshold) {
return threshold;
}
// If over threshold, do not double but just increase by threshold.
if (minNewCapacity > threshold) {
int newCapacity = minNewCapacity / threshold * threshold;
if (newCapacity > maxCapacity - threshold) {
newCapacity = maxCapacity;
} else {
newCapacity += threshold;
}
return newCapacity;
}
// 64 <= newCapacity is a power of 2 <= threshold
final int newCapacity = MathUtil.findNextPositivePowerOfTwo(Math.max(minNewCapacity, 64));
return Math.min(newCapacity, maxCapacity);
} | @Test
public void testCalculateNewCapacity() {
testCalculateNewCapacity(true);
testCalculateNewCapacity(false);
} |
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new StatisticsFilter(columns));
} | @Test
public void testEqNonNull() {
assertTrue(canDrop(eq(intColumn, 9), columnMetas));
assertFalse(canDrop(eq(intColumn, 10), columnMetas));
assertFalse(canDrop(eq(intColumn, 100), columnMetas));
assertTrue(canDrop(eq(intColumn, 101), columnMetas));
// drop columns of all nulls when looking for non-null value
assertTrue(canDrop(eq(intColumn, 0), nullColumnMetas));
assertTrue(canDrop(eq(missingColumn, fromString("any")), columnMetas));
assertFalse(canDrop(eq(intColumn, 50), missingMinMaxColumnMetas));
assertFalse(canDrop(eq(doubleColumn, 50.0), missingMinMaxColumnMetas));
} |
@SneakyThrows({InterruptedException.class, ExecutionException.class})
@Override
public void persistEphemeral(final String key, final String value) {
buildParentPath(key);
long leaseId = client.getLeaseClient().grant(etcdProps.getValue(EtcdPropertyKey.TIME_TO_LIVE_SECONDS)).get().getID();
client.getLeaseClient().keepAlive(leaseId, Observers.observer(response -> {
}));
client.getKVClient().put(ByteSequence.from(key, StandardCharsets.UTF_8), ByteSequence.from(value, StandardCharsets.UTF_8), PutOption.newBuilder().withLeaseId(leaseId).build()).get();
} | @Test
@SuppressWarnings("unchecked")
void assertPersistEphemeral() {
repository.persistEphemeral("key1", "value1");
verify(lease).grant(anyLong());
verify(lease).keepAlive(anyLong(), any(StreamObserver.class));
verify(kv).put(any(ByteSequence.class), any(ByteSequence.class), any(PutOption.class));
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
try {
final String prefix = this.createPrefix(directory);
if(log.isDebugEnabled()) {
log.debug(String.format("List with prefix %s", prefix));
}
final Path bucket = containerService.getContainer(directory);
final AttributedList<Path> objects = new AttributedList<>();
String priorLastKey = null;
String priorLastVersionId = null;
long revision = 0L;
String lastKey = null;
boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
do {
final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER),
new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"),
priorLastKey, priorLastVersionId, false);
// Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first.
for(BaseVersionOrDeleteMarker marker : chunk.getItems()) {
final String key = URIEncoder.decode(marker.getKey());
if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip placeholder key %s", key));
}
hasDirectoryPlaceholder = true;
continue;
}
final PathAttributes attr = new PathAttributes();
attr.setVersionId(marker.getVersionId());
if(!StringUtils.equals(lastKey, key)) {
// Reset revision for next file
revision = 0L;
}
attr.setRevision(++revision);
attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
if(marker.isDeleteMarker()) {
attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
}
attr.setModificationDate(marker.getLastModified().getTime());
attr.setRegion(bucket.attributes().getRegion());
if(marker instanceof S3Version) {
final S3Version object = (S3Version) marker;
attr.setSize(object.getSize());
if(StringUtils.isNotBlank(object.getEtag())) {
attr.setETag(StringUtils.remove(object.getEtag(), "\""));
// The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
// using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
// not the MD5 of the object data.
attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
}
if(StringUtils.isNotBlank(object.getStorageClass())) {
attr.setStorageClass(object.getStorageClass());
}
}
final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(),
PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
if(metadata) {
f.withAttributes(attributes.find(f));
}
objects.add(f);
lastKey = key;
}
final String[] prefixes = chunk.getCommonPrefixes();
final List<Future<Path>> folders = new ArrayList<>();
for(String common : prefixes) {
if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) {
continue;
}
folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
}
for(Future<Path> f : folders) {
try {
objects.add(Uninterruptibles.getUninterruptibly(f));
}
catch(ExecutionException e) {
log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
for(Throwable cause : ExceptionUtils.getThrowableList(e)) {
Throwables.throwIfInstanceOf(cause, BackgroundException.class);
}
throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e));
}
}
priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
priorLastVersionId = chunk.getNextVersionIdMarker();
listener.chunk(directory, objects);
}
while(priorLastKey != null);
if(!hasDirectoryPlaceholder && objects.isEmpty()) {
// Only for AWS
if(S3Session.isAwsHostname(session.getHost().getHostname())) {
if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
if(log.isWarnEnabled()) {
log.warn(String.format("No placeholder found for directory %s", directory));
}
throw new NotfoundException(directory.getAbsolute());
}
}
else {
// Handle missing prefix for directory placeholders in Minio
final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(),
String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()),
String.valueOf(Path.DELIMITER), 1, null, null, false);
if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
throw new NotfoundException(directory.getAbsolute());
}
}
}
return objects;
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
finally {
// Cancel future tasks
pool.shutdown(false);
}
} | @Test
public void testEnableVersioningExistingFiles() throws Exception {
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final Path bucket = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl)
.mkdir(new Path(new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
assertTrue(new S3FindFeature(session, acl).find(bucket));
final Path file = new S3TouchFeature(session, acl).touch(new Path(bucket, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final S3WriteFeature feature = new S3WriteFeature(session, acl);
{
final byte[] content = RandomUtils.nextBytes(1024);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
final HttpResponseOutputStream<StorageObject> out = feature.write(file, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
file.withAttributes(new S3AttributesAdapter(session.getHost()).toAttributes(out.getStatus()));
assertEquals(content.length, new S3AttributesFinderFeature(session, acl).find(file).getSize());
final PathAttributes attr = new S3AttributesFinderFeature(session, acl).find(file);
assertEquals(content.length, attr.getSize());
assertNull(new S3AttributesFinderFeature(session, acl).find(file).getVersionId());
}
assertNull(new S3AttributesFinderFeature(session, acl).find(file).getVersionId());
session.getFeature(Versioning.class).setConfiguration(bucket, new DisabledPasswordCallback(),
new VersioningConfiguration(true));
{
final byte[] content = RandomUtils.nextBytes(256);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
final HttpResponseOutputStream<StorageObject> out = feature.write(file, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
file.withAttributes(new S3AttributesAdapter(session.getHost()).toAttributes(out.getStatus()));
assertEquals(content.length, new S3AttributesFinderFeature(session, acl).find(file).getSize());
final PathAttributes attr = new S3AttributesFinderFeature(session, acl).find(file);
assertEquals(content.length, attr.getSize());
assertNotNull(attr.getVersionId());
}
final AttributedList<Path> list = new S3VersionedObjectListService(session, acl).list(bucket, new DisabledListProgressListener()).filter(
new Filter<Path>() {
@Override
public boolean accept(final Path f) {
return new SimplePathPredicate(file).test(f);
}
@Override
public Pattern toPattern() {
return null;
}
}
);
assertEquals(2, list.size());
final AttributedList<Path> versions = new S3VersioningFeature(session, acl).list(file, new DisabledListProgressListener());
assertEquals(1, versions.size());
assertEquals(versions.get(0), list.get(1));
new S3DefaultDeleteFeature(session).delete(Arrays.asList(
new Path(file).withAttributes(new PathAttributes().withVersionId("null")),
new Path(file).withAttributes(new DefaultAttributesFinderFeature(session).find(file)), bucket), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public synchronized void cleanupAll() {
LOG.info("Attempting to cleanup Cassandra manager.");
boolean producedError = false;
// First, delete the database if it was not given as a static argument
if (!usingStaticDatabase) {
try {
executeStatement(String.format("DROP KEYSPACE IF EXISTS %s", this.keyspaceName));
} catch (Exception e) {
LOG.error("Failed to drop Cassandra keyspace {}.", keyspaceName, e);
// Only bubble exception if the cause is not timeout or does not exist
if (!ExceptionUtils.containsType(e, DriverTimeoutException.class)
&& !ExceptionUtils.containsMessage(e, "does not exist")) {
producedError = true;
}
}
}
// Next, try to close the Cassandra client connection
try {
cassandraClient.close();
} catch (Exception e) {
LOG.error("Failed to delete Cassandra client.", e);
producedError = true;
}
// Throw Exception at the end if there were any errors
if (producedError) {
throw new CassandraResourceManagerException(
"Failed to delete resources. Check above for errors.");
}
super.cleanupAll();
LOG.info("Cassandra manager successfully cleaned up.");
} | @Test
public void testCleanupAllShouldThrowErrorWhenCassandraClientFailsToDropDatabase() {
doThrow(RuntimeException.class).when(cassandraClient).execute(any(SimpleStatement.class));
assertThrows(CassandraResourceManagerException.class, () -> testManager.cleanupAll());
} |
@VisibleForTesting
void setIsPartialBufferCleanupRequired() {
isPartialBufferCleanupRequired = true;
} | @TestTemplate
void testSkipPartialDataStartWithFullRecord() throws Exception {
final BufferWritingResultPartition writer = createResultPartition();
final PipelinedApproximateSubpartition subpartition =
getPipelinedApproximateSubpartition(writer);
writer.emitRecord(toByteBuffer(0, 1, 2, 3, 42), 0);
writer.emitRecord(toByteBuffer(8, 9), 0);
subpartition.setIsPartialBufferCleanupRequired();
assertContent(requireNonNull(subpartition.pollBuffer()).buffer(), null, 0, 1, 2, 3);
assertContent(requireNonNull(subpartition.pollBuffer()).buffer(), null, 42, 8, 9);
} |
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
final ArchiveEventCode eventCode,
final MutableDirectBuffer buffer,
final int offset,
final StringBuilder builder)
{
int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
HEADER_DECODER.wrap(buffer, offset + encodedLength);
encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
switch (eventCode)
{
case CMD_IN_CONNECT:
CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendConnect(builder);
break;
case CMD_IN_CLOSE_SESSION:
CLOSE_SESSION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendCloseSession(builder);
break;
case CMD_IN_START_RECORDING:
START_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording(builder);
break;
case CMD_IN_STOP_RECORDING:
STOP_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecording(builder);
break;
case CMD_IN_REPLAY:
REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplay(builder);
break;
case CMD_IN_STOP_REPLAY:
STOP_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplay(builder);
break;
case CMD_IN_LIST_RECORDINGS:
LIST_RECORDINGS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordings(builder);
break;
case CMD_IN_LIST_RECORDINGS_FOR_URI:
LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingsForUri(builder);
break;
case CMD_IN_LIST_RECORDING:
LIST_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecording(builder);
break;
case CMD_IN_EXTEND_RECORDING:
EXTEND_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording(builder);
break;
case CMD_IN_RECORDING_POSITION:
RECORDING_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendRecordingPosition(builder);
break;
case CMD_IN_TRUNCATE_RECORDING:
TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTruncateRecording(builder);
break;
case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingSubscription(builder);
break;
case CMD_IN_STOP_POSITION:
STOP_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopPosition(builder);
break;
case CMD_IN_FIND_LAST_MATCHING_RECORD:
FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendFindLastMatchingRecord(builder);
break;
case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingSubscriptions(builder);
break;
case CMD_IN_START_BOUNDED_REPLAY:
BOUNDED_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartBoundedReplay(builder);
break;
case CMD_IN_STOP_ALL_REPLAYS:
STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopAllReplays(builder);
break;
case CMD_IN_REPLICATE:
REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate(builder);
break;
case CMD_IN_STOP_REPLICATION:
STOP_REPLICATION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplication(builder);
break;
case CMD_IN_START_POSITION:
START_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartPosition(builder);
break;
case CMD_IN_DETACH_SEGMENTS:
DETACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDetachSegments(builder);
break;
case CMD_IN_DELETE_DETACHED_SEGMENTS:
DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDeleteDetachedSegments(builder);
break;
case CMD_IN_PURGE_SEGMENTS:
PURGE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeSegments(builder);
break;
case CMD_IN_ATTACH_SEGMENTS:
ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAttachSegments(builder);
break;
case CMD_IN_MIGRATE_SEGMENTS:
MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendMigrateSegments(builder);
break;
case CMD_IN_AUTH_CONNECT:
AUTH_CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAuthConnect(builder);
break;
case CMD_IN_KEEP_ALIVE:
KEEP_ALIVE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendKeepAlive(builder);
break;
case CMD_IN_TAGGED_REPLICATE:
TAGGED_REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTaggedReplicate(builder);
break;
case CMD_IN_START_RECORDING2:
START_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording2(builder);
break;
case CMD_IN_EXTEND_RECORDING2:
EXTEND_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording2(builder);
break;
case CMD_IN_STOP_RECORDING_BY_IDENTITY:
STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingByIdentity(builder);
break;
case CMD_IN_PURGE_RECORDING:
PURGE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeRecording(builder);
break;
case CMD_IN_REPLICATE2:
REPLICATE_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate2(builder);
break;
case CMD_IN_REQUEST_REPLAY_TOKEN:
REPLAY_TOKEN_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplayToken(builder);
break;
default:
builder.append(": unknown command");
}
} | @Test
void controlRequestExtendRecording()
{
internalEncodeLogHeader(buffer, 0, 12, 32, () -> 10_000_000_000L);
final ExtendRecordingRequestEncoder requestEncoder = new ExtendRecordingRequestEncoder();
requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
.controlSessionId(9)
.correlationId(78)
.recordingId(1010101)
.streamId(43)
.sourceLocation(SourceLocation.LOCAL)
.channel("extend me");
dissectControlRequest(CMD_IN_EXTEND_RECORDING, buffer, 0, builder);
assertEquals("[10.000000000] " + CONTEXT + ": " + CMD_IN_EXTEND_RECORDING.name() + " [12/32]:" +
" controlSessionId=9" +
" correlationId=78" +
" recordingId=1010101" +
" streamId=43" +
" sourceLocation=" + SourceLocation.LOCAL +
" channel=extend me",
builder.toString());
} |
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
// Reference comparison is intentional: getOrDefault() and replace() return the
// same String instance only when no rename and no colon-stripping occurred
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
} | @Test
public void testNewPersonalBest()
{
final String NEW_PB = "Fight duration: <col=ff0000>3:01</col> (new personal best).";
final String NEW_PB_PRECISE = "Fight duration: <col=ff0000>3:01.40</col> (new personal best).";
// This sets lastBoss
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Kree'arra kill count is: <col=ff0000>4</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", NEW_PB, null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "kree'arra", 181.0);
// Precise times
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", NEW_PB_PRECISE, null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "kree'arra", 181.4);
} |
@Override
public List<PartitionInfo> getPartitions(Table table, List<String> partitionNames) {
Map<String, Partition> partitionMap = Maps.newHashMap();
IcebergTable icebergTable = (IcebergTable) table;
PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils.
createMetadataTableInstance(icebergTable.getNativeTable(), org.apache.iceberg.MetadataTableType.PARTITIONS);
if (icebergTable.isUnPartitioned()) {
try (CloseableIterable<FileScanTask> tasks = partitionsTable.newScan().planFiles()) {
for (FileScanTask task : tasks) {
// partitionsTable Table schema :
// record_count,
// file_count,
// total_data_file_size_in_bytes,
// position_delete_record_count,
// position_delete_file_count,
// equality_delete_record_count,
// equality_delete_file_count,
// last_updated_at,
// last_updated_snapshot_id
CloseableIterable<StructLike> rows = task.asDataTask().rows();
for (StructLike row : rows) {
// Get the last updated time of the table according to the table schema
long lastUpdated = -1;
try {
lastUpdated = row.get(7, Long.class);
} catch (NullPointerException e) {
LOG.error("The table [{}] snapshot [{}] has been expired",
icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), e);
}
Partition partition = new Partition(lastUpdated);
return ImmutableList.of(partition);
}
}
// for empty table, use -1 as last updated time
return ImmutableList.of(new Partition(-1));
} catch (IOException e) {
throw new StarRocksConnectorException("Failed to get partitions for table: " + table.getName(), e);
}
} else {
// For partition table, we need to get all partitions from PartitionsTable.
try (CloseableIterable<FileScanTask> tasks = partitionsTable.newScan().planFiles()) {
for (FileScanTask task : tasks) {
// partitionsTable Table schema :
// partition,
// spec_id,
// record_count,
// file_count,
// total_data_file_size_in_bytes,
// position_delete_record_count,
// position_delete_file_count,
// equality_delete_record_count,
// equality_delete_file_count,
// last_updated_at,
// last_updated_snapshot_id
CloseableIterable<StructLike> rows = task.asDataTask().rows();
for (StructLike row : rows) {
// Get the partition data/spec id/last updated time according to the table schema
StructProjection partitionData = row.get(0, StructProjection.class);
int specId = row.get(1, Integer.class);
PartitionSpec spec = icebergTable.getNativeTable().specs().get(specId);
String partitionName =
PartitionUtil.convertIcebergPartitionToPartitionName(spec, partitionData);
long lastUpdated = -1;
try {
lastUpdated = row.get(9, Long.class);
} catch (NullPointerException e) {
LOG.error("The table [{}.{}] snapshot [{}] has been expired",
icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), partitionName, e);
}
Partition partition = new Partition(lastUpdated);
partitionMap.put(partitionName, partition);
}
}
} catch (IOException e) {
throw new StarRocksConnectorException("Failed to get partitions for table: " + table.getName(), e);
}
}
ImmutableList.Builder<PartitionInfo> partitions = ImmutableList.builder();
partitionNames.forEach(partitionName -> partitions.add(partitionMap.get(partitionName)));
return partitions.build();
} | @Test
public void testGetPartitions1() {
mockedNativeTableB.newAppend().appendFile(FILE_B_1).appendFile(FILE_B_2).commit();
IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG);
CachingIcebergCatalog cachingIcebergCatalog = new CachingIcebergCatalog(CATALOG_NAME, icebergHiveCatalog,
DEFAULT_CATALOG_PROPERTIES, Executors.newSingleThreadExecutor());
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, cachingIcebergCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME,
"resource_name", "db",
"table", "", Lists.newArrayList(), mockedNativeTableB, Maps.newHashMap());
List<PartitionInfo> partitions = metadata.getPartitions(icebergTable, ImmutableList.of("k2=2", "k2=3"));
Assert.assertEquals(2, partitions.size());
} |
Map<String, List<Phase2Context>> groupedByResourceId(List<Phase2Context> contexts) {
Map<String, List<Phase2Context>> groupedContexts = new HashMap<>(DEFAULT_RESOURCE_SIZE);
contexts.forEach(context -> {
if (StringUtils.isBlank(context.resourceId)) {
LOGGER.warn("resourceId is empty, resource:{}", context);
return;
}
List<Phase2Context> group = groupedContexts.computeIfAbsent(context.resourceId, key -> new LinkedList<>());
group.add(context);
});
return groupedContexts;
} | @Test
void groupedByResourceId() {
List<AsyncWorker.Phase2Context> contexts = getRandomContexts();
Map<String, List<AsyncWorker.Phase2Context>> groupedContexts = worker.groupedByResourceId(contexts);
groupedContexts.forEach((resourceId, group) -> group.forEach(context -> {
String message = "each context in the group should have the same resourceId";
assertEquals(resourceId, context.resourceId, message);
}));
} |
public boolean similarTo(ClusterStateBundle other) {
if (!baselineState.getClusterState().similarToIgnoringInitProgress(other.baselineState.getClusterState())) {
return false;
}
if (clusterFeedIsBlocked() != other.clusterFeedIsBlocked()) {
return false;
}
if (clusterFeedIsBlocked() && !feedBlock.similarTo(other.feedBlock)) {
return false;
}
// Distribution configs must match exactly for bundles to be similar.
// It may be the case that they are both null, in which case they are also considered equal.
if (!Objects.equals(distributionConfig, other.distributionConfig)) {
return false;
}
// FIXME we currently treat mismatching bucket space sets as unchanged to avoid breaking some tests
return derivedBucketSpaceStates.entrySet().stream()
.allMatch(entry -> other.derivedBucketSpaceStates.getOrDefault(entry.getKey(), entry.getValue())
.getClusterState().similarToIgnoringInitProgress(entry.getValue().getClusterState()));
} | @Test
void similarity_test_considers_all_bucket_spaces() {
ClusterStateBundle bundle = createTestBundle(false);
ClusterStateBundle unchangedBundle = createTestBundle(false);
assertTrue(bundle.similarTo(unchangedBundle));
assertTrue(unchangedBundle.similarTo(bundle));
ClusterStateBundle changedBundle = createTestBundle(true);
assertFalse(bundle.similarTo(changedBundle));
assertFalse(changedBundle.similarTo(bundle));
} |
public static String[] parseUri(String uri) {
return doParseUri(uri, false);
} | @Test
public void testParseUriSlashAndQuery() {
String[] out1 = CamelURIParser.parseUri("file:/absolute?recursive=true");
assertEquals("file", out1[0]);
assertEquals("/absolute", out1[1]);
assertEquals("recursive=true", out1[2]);
String[] out2 = CamelURIParser.parseUri("file:///absolute?recursive=true");
assertEquals("file", out2[0]);
assertEquals("/absolute", out2[1]);
assertEquals("recursive=true", out2[2]);
String[] out3 = CamelURIParser.parseUri("file://relative?recursive=true");
assertEquals("file", out3[0]);
assertEquals("relative", out3[1]);
assertEquals("recursive=true", out3[2]);
String[] out4 = CamelURIParser.parseUri("file:relative?recursive=true");
assertEquals("file", out4[0]);
assertEquals("relative", out4[1]);
assertEquals("recursive=true", out4[2]);
} |
public Map<String, Integer> getNotAnalysedFilesByLanguage() {
return ImmutableMap.copyOf(notAnalysedFilesByLanguage);
} | @Test
public void stores_not_analysed_c_file_count_in_sq_community_edition() {
when(sonarRuntime.getEdition()).thenReturn(SonarEdition.COMMUNITY);
InputComponentStoreTester underTest = new InputComponentStoreTester(sonarRuntime);
String mod1Key = "mod1";
underTest.addFile(mod1Key, "src/main/java/Foo.java", "java");
underTest.addFile(mod1Key, "src/main/c/file1.c");
underTest.addFile(mod1Key, "src/main/c/file2.c");
String mod2Key = "mod2";
underTest.addFile(mod2Key, "src/main/groovy/Foo.groovy", "groovy");
underTest.addFile(mod2Key, "src/main/c/file3.c");
assertThat(underTest.getNotAnalysedFilesByLanguage()).hasSize(1);
assertThat(underTest.getNotAnalysedFilesByLanguage()).containsEntry("C", 3);
} |
public boolean after(DateTimeStamp other) {
return compareTo(other) > 0;
} | @Test
void malformedDateTimeStampThrowsException() {
final DateTimeStamp onlyDate = new DateTimeStamp("2021-09-01T11:12:13.111-0100");
final DateTimeStamp onlyTime = new DateTimeStamp(1.0D);
assertThrows(IllegalStateException.class, () -> onlyDate.after(onlyTime));
} |
public static Object mapToObject(Map<String, String> map, Class<?> clazz) {
if (CollectionUtils.isEmpty(map)) {
return null;
}
try {
Object instance = clazz.newInstance();
Field[] fields = instance.getClass().getDeclaredFields();
for (Field field : fields) {
int modifiers = field.getModifiers();
if (Modifier.isStatic(modifiers) || Modifier.isFinal(modifiers)) {
continue;
}
boolean accessible = field.isAccessible();
field.setAccessible(true);
Class<?> type = field.getType();
if (type == Date.class) {
if (!StringUtils.isEmpty(map.get(field.getName()))) {
field.set(instance, new Date(Long.valueOf(map.get(field.getName()))));
}
} else if (type == Long.class) {
if (!StringUtils.isEmpty(map.get(field.getName()))) {
field.set(instance, Long.valueOf(map.get(field.getName())));
}
} else if (type == Integer.class) {
if (!StringUtils.isEmpty(map.get(field.getName()))) {
field.set(instance, Integer.valueOf(map.get(field.getName())));
}
} else if (type == Double.class) {
if (!StringUtils.isEmpty(map.get(field.getName()))) {
field.set(instance, Double.valueOf(map.get(field.getName())));
}
} else if (type == String.class) {
if (!StringUtils.isEmpty(map.get(field.getName()))) {
field.set(instance, map.get(field.getName()));
}
}
field.setAccessible(accessible);
}
return instance;
} catch (IllegalAccessException e) {
throw new NotSupportYetException(
"map to " + clazz.toString() + " failed:" + e.getMessage(), e);
} catch (InstantiationException e) {
throw new NotSupportYetException(
"map to " + clazz.toString() + " failed:" + e.getMessage(), e);
}
} | @Test
public void testMapToObject() {
// null map
BranchDO branchDO =
(BranchDO) BeanUtils.mapToObject(null, BranchDO.class);
Assertions.assertNull(branchDO);
Map<String, String> map = new HashMap<>();
Date date = new Date();
map.put("xid", "192.166.166.11:9010:12423424234234");
map.put("transactionId", "12423424234234");
map.put("status", "2");
map.put("test", "22.22");
map.put("gmtCreate", String.valueOf(date.getTime()));
map.put("msg", "test");
map.put("testByte", "1");
branchDO = (BranchDO) BeanUtils.mapToObject(map, BranchDO.class);
Assertions.assertEquals(map.get("xid"), branchDO.getXid());
Assertions.assertEquals(Long.valueOf(map.get("transactionId")), branchDO.getTransactionId());
Assertions.assertEquals(Integer.valueOf(map.get("status")), branchDO.getStatus());
Assertions.assertEquals(Double.valueOf(map.get("test")), branchDO.getTest());
Assertions.assertEquals(new Date(date.getTime()), branchDO.getGmtCreate());
map = new HashMap<>();
map.put("xid", null);
map.put("transactionId", null);
map.put("status", null);
map.put("test", null);
map.put("gmtCreate", null);
branchDO = (BranchDO) BeanUtils.mapToObject(map, BranchDO.class);
Assertions.assertNull(branchDO.getXid());
Assertions.assertNull(branchDO.getTransactionId());
Assertions.assertNull(branchDO.getStatus());
Assertions.assertNull(branchDO.getTest());
Assertions.assertNull(branchDO.getGmtCreate());
// InstantiationException
Assertions.assertThrows(NotSupportYetException.class, () -> {
Map<String, String> map1 = new HashMap<>();
map1.put("xid", "1");
BeanUtils.mapToObject(map1, DefaultValues.class);
});
// IllegalAccessException
Assertions.assertThrows(NotSupportYetException.class, () -> {
Map<String, String> map1 = new HashMap<>();
map1.put("xid", "1");
BeanUtils.mapToObject(map1, RpcStatus.class);
});
} |
public TopicConfig setMultiThreadingEnabled(boolean multiThreadingEnabled) {
if (this.globalOrderingEnabled && multiThreadingEnabled) {
throw new IllegalArgumentException("Multi-threading can not be enabled when global ordering is used.");
}
this.multiThreadingEnabled = multiThreadingEnabled;
return this;
} | @Test
public void testSetMultiThreadingEnabled() {
TopicConfig topicConfig = new TopicConfig().setGlobalOrderingEnabled(false);
topicConfig.setMultiThreadingEnabled(true);
assertTrue(topicConfig.isMultiThreadingEnabled());
try {
topicConfig.setGlobalOrderingEnabled(true);
assertTrue("global-ordering must be disabled when multi-threading is enabled", false);
} catch (IllegalArgumentException e) {
// anticipated..
}
assertFalse(topicConfig.isGlobalOrderingEnabled());
} |
public static ChronoZonedDateTimeByInstantComparator getInstance() {
return INSTANCE;
} | @Test
void should_have_one_instance() {
assertThat(comparator).isSameAs(ChronoZonedDateTimeByInstantComparator.getInstance());
} |
public static List<FieldSchema> convert(Schema schema) {
return schema.columns().stream()
.map(col -> new FieldSchema(col.name(), convertToTypeString(col.type()), col.doc()))
.collect(Collectors.toList());
} | @Test
public void testNotSupportedTypes() {
for (FieldSchema notSupportedField : getNotSupportedFieldSchemas()) {
assertThatThrownBy(
() -> HiveSchemaUtil.convert(
Lists.newArrayList(Collections.singletonList(notSupportedField))))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Unsupported Hive type");
}
} |
public void instanceDeregistered(String serviceName, String groupName) {
String key = NamingUtils.getGroupedName(serviceName, groupName);
synchronized (registeredInstances) {
InstanceRedoData redoData = registeredInstances.get(key);
if (null != redoData) {
redoData.unregistered();
}
}
} | @Test
void testInstanceDeregistered() {
ConcurrentMap<String, InstanceRedoData> registeredInstances = getInstanceRedoDataMap();
redoService.cacheInstanceForRedo(SERVICE, GROUP, new Instance());
redoService.instanceDeregistered(SERVICE, GROUP);
InstanceRedoData actual = registeredInstances.entrySet().iterator().next().getValue();
assertFalse(actual.isRegistered());
assertTrue(actual.isUnregistering());
} |
@Override
public boolean validate(Path path, ResourceContext context) {
// explicitly call a method not depending on LinkResourceService
return validate(path);
} | @Test
public void testMoreThanLatency() {
sut = new LatencyConstraint(Duration.of(3, ChronoUnit.NANOS));
assertThat(sut.validate(path, resourceContext), is(false));
} |
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
Map<String, String> mdcContextMap = getMdcContextMap();
return super.schedule(ContextPropagator.decorateRunnable(contextPropagators, () -> {
try {
setMDCContext(mdcContextMap);
command.run();
} finally {
MDC.clear();
}
}), delay, unit);
} | @Test
public void testScheduleRunnableWithDelayPropagatesContext() {
TestThreadLocalContextHolder.put("ValueShouldCrossThreadBoundary");
final ScheduledFuture<?> schedule = schedulerService.schedule(() -> {
TestThreadLocalContextHolder.get().orElseThrow(() -> new RuntimeException("Found No Context"));
}, 100, TimeUnit.MILLISECONDS);
try{
await().atMost(200, TimeUnit.MILLISECONDS).until(matches(() -> schedule.get()));
} catch (Exception exception) {
Assertions.fail("Must not throw an exception");
}
} |
protected <G> G doGet(String token, HttpUrl url, Function<String, G> handler) {
Request request = prepareRequestWithBearerToken(token, GET, url, null);
return doCall(request, handler);
} | @Test
public void validate_handler_call_on_empty_body() {
server.enqueue(new MockResponse().setResponseCode(200)
.setBody(""));
assertThat(underTest.doGet("token", server.url("/"), Function.identity()))
.isEmpty();
} |
public void onPeriodicEmit() {
updateCombinedWatermark();
} | @Test
void singleDeferredWatermark() {
TestingWatermarkOutput underlyingWatermarkOutput = createTestingWatermarkOutput();
WatermarkOutputMultiplexer multiplexer =
new WatermarkOutputMultiplexer(underlyingWatermarkOutput);
WatermarkOutput watermarkOutput = createDeferredOutput(multiplexer);
watermarkOutput.emitWatermark(new Watermark(0));
multiplexer.onPeriodicEmit();
assertThat(underlyingWatermarkOutput.lastWatermark()).isEqualTo(new Watermark(0));
assertThat(underlyingWatermarkOutput.isIdle()).isFalse();
} |
public static Finder expandedFinder(String... queries) {
var finder = identSum();
for (String query : queries) {
finder = finder.or(Finder.contains(query));
}
return finder;
} | @Test
void expandedFinderTest() {
var res = expandedFinder("It was", "kingdom").find(text());
assertEquals(3, res.size());
assertEquals("It was many and many a year ago,", res.get(0));
assertEquals("In a kingdom by the sea,", res.get(1));
assertEquals("In this kingdom by the sea;", res.get(2));
} |
@Override
protected FlumeConfiguration getFlumeConfiguration() {
return flumeConfiguration;
} | @Test
public void testPolling() throws Exception {
es.awaitEvent();
es.reset();
FlumeConfiguration fc = cp.getFlumeConfiguration();
Assert.assertTrue(fc.getConfigurationErrors().isEmpty());
AgentConfiguration ac = fc.getConfigurationFor(AGENT_NAME);
Assert.assertNull(ac);
addData();
es.awaitEvent();
es.reset();
verifyProperties(cp);
} |
@Override
public long lastHeartbeat() {
return lastHeartbeatMillis;
} | @Test
public void lastHeartbeat_whenNoHeartbeat() {
long lastHeartbeat = failureDetector.lastHeartbeat();
assertEquals(PhiAccrualFailureDetector.NO_HEARTBEAT_TIMESTAMP, lastHeartbeat);
} |
public static void addTotalSstFilesSizeMetric(final StreamsMetricsImpl streamsMetrics,
final RocksDBMetricContext metricContext,
final Gauge<BigInteger> valueProvider) {
addMutableMetric(
streamsMetrics,
metricContext,
valueProvider,
TOTAL_SST_FILES_SIZE,
TOTAL_SST_FILE_SIZE_DESCRIPTION
);
} | @Test
public void shouldAddTotalSstFilesSizeMetric() {
final String name = "total-sst-files-size";
final String description = "Total size in bytes of all SST files";
runAndVerifyMutableMetric(
name,
description,
() -> RocksDBMetrics.addTotalSstFilesSizeMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
);
} |
@Override
public void addToQueue(Runnable r) {
r.run();
} | @Test
public void addToQueue_executes_Runnable_synchronously() {
Set<String> s = new HashSet<>();
underTest.addToQueue(() -> s.add("done"));
assertThat(s).containsOnly("done");
} |
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyWithOneNonIterableDoesNotGiveWarning() {
expectFailureWhenTestingThat(asList(1, 2, 3, 4)).containsExactly(1);
assertFailureValue("unexpected (3)", "2, 3, 4");
} |
public static int fromLogical(Schema schema, java.util.Date value) {
if (!(LOGICAL_NAME.equals(schema.name())))
throw new DataException("Requested conversion of Date object but the schema does not match.");
Calendar calendar = Calendar.getInstance(UTC);
calendar.setTime(value);
if (calendar.get(Calendar.HOUR_OF_DAY) != 0 || calendar.get(Calendar.MINUTE) != 0 ||
calendar.get(Calendar.SECOND) != 0 || calendar.get(Calendar.MILLISECOND) != 0) {
throw new DataException("Kafka Connect Date type should not have any time fields set to non-zero values.");
}
long unixMillis = calendar.getTimeInMillis();
return (int) (unixMillis / MILLIS_PER_DAY);
} | @Test
public void testFromLogicalInvalidHasTimeComponents() {
assertThrows(DataException.class,
() -> Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TIME_COMPONENT.getTime()));
} |
@Override
public Result invoke(Invoker<?> invoker, Invocation inv) throws RpcException {
String accessLogKey = invoker.getUrl().getParameter(Constants.ACCESS_LOG_KEY);
boolean isFixedPath = invoker.getUrl().getParameter(ACCESS_LOG_FIXED_PATH_KEY, true);
if (StringUtils.isEmpty(accessLogKey) || "false".equalsIgnoreCase(accessLogKey)) {
// Notice that disable accesslog of one service may cause the whole application to stop collecting
// accesslog.
// It's recommended to use application level configuration to enable or disable accesslog if dynamically
// configuration is needed .
if (future != null && !future.isCancelled()) {
future.cancel(true);
logger.info("Access log task cancelled ...");
}
return invoker.invoke(inv);
}
if (scheduled.compareAndSet(false, true)) {
future = inv.getModuleModel()
.getApplicationModel()
.getFrameworkModel()
.getBeanFactory()
.getBean(FrameworkExecutorRepository.class)
.getSharedScheduledExecutor()
.scheduleWithFixedDelay(
new AccesslogRefreshTask(isFixedPath),
LOG_OUTPUT_INTERVAL,
LOG_OUTPUT_INTERVAL,
TimeUnit.MILLISECONDS);
logger.info("Access log task started ...");
}
Optional<AccessLogData> optionalAccessLogData = Optional.empty();
try {
optionalAccessLogData = Optional.of(buildAccessLogData(invoker, inv));
} catch (Throwable t) {
logger.warn(
CONFIG_FILTER_VALIDATION_EXCEPTION,
"",
"",
"Exception in AccessLogFilter of service(" + invoker + " -> " + inv + ")",
t);
}
try {
return invoker.invoke(inv);
} finally {
String finalAccessLogKey = accessLogKey;
optionalAccessLogData.ifPresent(logData -> {
logData.setOutTime(new Date());
log(finalAccessLogKey, logData, isFixedPath);
});
}
} | @Test
@SuppressWarnings("unchecked")
public void testDefault() throws NoSuchFieldException, IllegalAccessException {
URL url = URL.valueOf("test://test:11/test?accesslog=true&group=dubbo&version=1.1");
Invoker<AccessLogFilterTest> invoker = new MyInvoker<AccessLogFilterTest>(url);
Invocation invocation = new MockInvocation();
Field field = AccessLogFilter.class.getDeclaredField("logEntries");
field.setAccessible(true);
assertTrue(((Map) field.get(accessLogFilter)).isEmpty());
accessLogFilter.invoke(invoker, invocation);
Map<String, Queue<AccessLogData>> logs = (Map<String, Queue<AccessLogData>>) field.get(accessLogFilter);
assertFalse(logs.isEmpty());
assertFalse(logs.get("true").isEmpty());
AccessLogData log = logs.get("true").iterator().next();
assertEquals("org.apache.dubbo.rpc.support.DemoService", log.getServiceName());
} |
public void setColumn(int index, SGDVector vector) {
if (index < 0 || index >= dim2) {
throw new IllegalArgumentException("Invalid column index, must be [0,"+dim2+"), received " + index);
}
if (vector.size() == dim1) {
if (vector instanceof DenseVector) {
for (int i = 0; i < dim1; i++) {
values[i][index] = vector.get(i);
}
} else {
for (VectorTuple t : vector) {
values[t.index][index] = t.value;
}
}
} else {
throw new IllegalArgumentException("Vector size mismatch, expected " + dim1 + " found " + vector.size());
}
} | @Test
public void setColumnTest() {
//create a 2x3 matrix
DenseMatrix a = new DenseMatrix(new double[][] {new double[] {1.0, 2.0, 3.0}, new double[] {4.0, 5.0, 6.0}});
assertEquals(2, a.getDimension1Size());
assertEquals(3, a.getDimension2Size());
a.setColumn(2, new DenseVector(new double[] {7.0, 8.0}));
assertEquals(7.0, a.get(0,2));
assertEquals(8.0, a.get(1,2));
double[][] d = a.toArray();
assertArrayEquals(new double[] {1.0, 2.0, 7.0}, d[0]);
assertArrayEquals(new double[] {4.0, 5.0, 8.0}, d[1]);
} |
@Override
public void route(final RouteContext routeContext, final SingleRule singleRule) {
for (String each : singleRule.getDataSourceNames()) {
routeContext.getRouteUnits().add(new RouteUnit(new RouteMapper(each, each), Collections.emptyList()));
}
} | @Test
void assertRoute() throws SQLException {
SingleRule singleRule = new SingleRule(new SingleRuleConfiguration(), DefaultDatabase.LOGIC_NAME, new H2DatabaseType(), createDataSourceMap(), Collections.emptyList());
RouteContext routeContext = new RouteContext();
SingleDatabaseBroadcastRouteEngine engine = new SingleDatabaseBroadcastRouteEngine();
engine.route(routeContext, singleRule);
List<RouteUnit> routeUnits = new ArrayList<>(routeContext.getRouteUnits());
assertThat(routeContext.getRouteUnits().size(), is(2));
assertThat(routeUnits.get(0).getDataSourceMapper().getActualName(), is("ds_0"));
assertTrue(routeUnits.get(0).getTableMappers().isEmpty());
assertThat(routeUnits.get(1).getDataSourceMapper().getActualName(), is("ds_1"));
assertTrue(routeUnits.get(1).getTableMappers().isEmpty());
} |
@Override
@SuppressWarnings("unchecked")
public int run() throws IOException {
Preconditions.checkArgument(
input != null && output != null, "Both input and output parquet file paths are required.");
Preconditions.checkArgument(codec != null, "The codec cannot be null");
Path inPath = new Path(input);
Path outPath = new Path(output);
CompressionCodecName codecName = Codecs.parquetCodec(codec);
ParquetMetadata metaData = ParquetFileReader.readFooter(getConf(), inPath, NO_FILTER);
MessageType schema = metaData.getFileMetaData().getSchema();
ParquetFileWriter writer = new ParquetFileWriter(getConf(), schema, outPath, ParquetFileWriter.Mode.CREATE);
writer.start();
try (TransParquetFileReader reader = new TransParquetFileReader(
HadoopInputFile.fromPath(inPath, getConf()),
HadoopReadOptions.builder(getConf()).build())) {
compressionConverter.processBlocks(
reader, writer, metaData, schema, metaData.getFileMetaData().getCreatedBy(), codecName);
} finally {
writer.end(metaData.getFileMetaData().getKeyValueMetaData());
}
return 0;
} | @Test
public void testTransCompressionCommand_ZSTD() throws IOException {
TransCompressionCommand command = new TransCompressionCommand(createLogger());
command.input = parquetFile().getAbsolutePath();
File output = new File(getTempFolder(), getClass().getSimpleName() + ".converted-1.ZSTD.parquet");
command.output = output.getAbsolutePath();
command.codec = "ZSTD";
command.setConf(new Configuration());
Assert.assertEquals(0, command.run());
Assert.assertTrue(output.exists());
} |
public static DateTime parseDate(CharSequence dateString) {
dateString = normalize(dateString);
return parse(dateString, DatePattern.NORM_DATE_FORMAT);
} | @Test
public void parseDateTest() {
final String dateStr = "2018-4-10";
final Date date = DateUtil.parseDate(dateStr);
final String format = DateUtil.format(date, DatePattern.NORM_DATE_PATTERN);
assertEquals("2018-04-10", format);
} |
public static Date parseHttpDate(CharSequence txt) {
return parseHttpDate(txt, 0, txt.length());
} | @Test
public void testParseMidnight() {
assertEquals(new Date(784080000000L), parseHttpDate("Sunday, 06 Nov 1994 00:00:00 GMT"));
} |
@Override
public String execute(CommandContext commandContext, String[] args) {
if (ArrayUtils.isEmpty(args)) {
return "Please input method name, eg: \r\ninvoke xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
+ "invoke XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
+ "invoke com.xxx.XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})";
}
Channel channel = commandContext.getRemote();
String service = channel.attr(ChangeTelnet.SERVICE_KEY) != null
? channel.attr(ChangeTelnet.SERVICE_KEY).get()
: null;
String message = args[0];
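// Split input of the form "Service.method(arg1, arg2, ...)" into the service name, method name and raw JSON argument list.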
int i = message.indexOf("(");
if (i < 0 || !message.endsWith(")")) {
return "Invalid parameters, format: service.method(args)";
}
String method = message.substring(0, i).trim();
String param = message.substring(i + 1, message.length() - 1).trim();
i = method.lastIndexOf(".");
if (i >= 0) {
service = method.substring(0, i).trim();
method = method.substring(i + 1).trim();
}
if (StringUtils.isEmpty(service)) {
return "If you want to invoke like [invoke sayHello(\"xxxx\")], please execute cd command first,"
+ " or you can execute it like [invoke IHelloService.sayHello(\"xxxx\")]";
}
List<Object> list;
try {
list = JsonUtils.toJavaList("[" + param + "]", Object.class);
} catch (Throwable t) {
return "Invalid json argument, cause: " + t.getMessage();
}
StringBuilder buf = new StringBuilder();
Method invokeMethod = null;
ProviderModel selectedProvider = null;
if (isInvokedSelectCommand(channel)) {
selectedProvider = channel.attr(INVOKE_METHOD_PROVIDER_KEY).get();
invokeMethod = channel.attr(SelectTelnet.SELECT_METHOD_KEY).get();
} else {
for (ProviderModel provider : frameworkModel.getServiceRepository().allProviderModels()) {
if (!isServiceMatch(service, provider)) {
continue;
}
selectedProvider = provider;
List<Method> methodList = findSameSignatureMethod(provider.getAllMethods(), method, list);
if (CollectionUtils.isEmpty(methodList)) {
break;
}
if (methodList.size() == 1) {
invokeMethod = methodList.get(0);
} else {
List<Method> matchMethods = findMatchMethods(methodList, list);
if (CollectionUtils.isEmpty(matchMethods)) {
break;
}
if (matchMethods.size() == 1) {
invokeMethod = matchMethods.get(0);
} else { // overridden methods exist
channel.attr(INVOKE_METHOD_PROVIDER_KEY).set(provider);
channel.attr(INVOKE_METHOD_LIST_KEY).set(matchMethods);
channel.attr(INVOKE_MESSAGE_KEY).set(message);
printSelectMessage(buf, matchMethods);
return buf.toString();
}
}
break;
}
}
if (!StringUtils.isEmpty(service)) {
buf.append("Use default service ").append(service).append('.');
}
if (selectedProvider == null) {
buf.append("\r\nNo such service ").append(service);
return buf.toString();
}
if (invokeMethod == null) {
buf.append("\r\nNo such method ")
.append(method)
.append(" in service ")
.append(service);
return buf.toString();
}
try {
Object[] array =
realize(list.toArray(), invokeMethod.getParameterTypes(), invokeMethod.getGenericParameterTypes());
long start = System.currentTimeMillis();
AppResponse result = new AppResponse();
try {
Object o = invokeMethod.invoke(selectedProvider.getServiceInstance(), array);
boolean setValueDone = false;
if (RpcContext.getServerAttachment().isAsyncStarted()) {
AsyncContext asyncContext = RpcContext.getServerAttachment().getAsyncContext();
if (asyncContext instanceof AsyncContextImpl) {
CompletableFuture<Object> internalFuture =
((AsyncContextImpl) asyncContext).getInternalFuture();
result.setValue(internalFuture.get());
setValueDone = true;
}
}
if (!setValueDone) {
result.setValue(o);
}
} catch (Throwable t) {
result.setException(t);
if (t instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
} finally {
RpcContext.removeContext();
}
long end = System.currentTimeMillis();
buf.append("\r\nresult: ");
buf.append(JsonUtils.toJson(result.recreate()));
buf.append("\r\nelapsed: ");
buf.append(end - start);
buf.append(" ms.");
} catch (Throwable t) {
return "Failed to invoke method " + invokeMethod.getName() + ", cause: " + StringUtils.toString(t);
}
return buf.toString();
} | @Test
void testInvokeOverriddenMethodBySelect() throws RemotingException {
defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(DemoService.class.getName());
defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).set(null);
defaultAttributeMap.attr(SelectTelnet.SELECT_METHOD_KEY).set(null);
defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_PROVIDER_KEY).set(null);
defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY).set(null);
defaultAttributeMap.attr(InvokeTelnet.INVOKE_MESSAGE_KEY).set(null);
given(mockChannel.attr(ChangeTelnet.SERVICE_KEY))
.willReturn(defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY));
given(mockChannel.attr(SelectTelnet.SELECT_KEY)).willReturn(defaultAttributeMap.attr(SelectTelnet.SELECT_KEY));
given(mockChannel.attr(SelectTelnet.SELECT_METHOD_KEY))
.willReturn(defaultAttributeMap.attr(SelectTelnet.SELECT_METHOD_KEY));
given(mockChannel.attr(InvokeTelnet.INVOKE_METHOD_PROVIDER_KEY))
.willReturn(defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_PROVIDER_KEY));
given(mockChannel.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY))
.willReturn(defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY));
given(mockChannel.attr(InvokeTelnet.INVOKE_MESSAGE_KEY))
.willReturn(defaultAttributeMap.attr(InvokeTelnet.INVOKE_MESSAGE_KEY));
registerProvider(DemoService.class.getName(), new DemoServiceImpl(), DemoService.class);
String param = "{\"name\":\"Dubbo\",\"age\":8}";
String result = invoke.execute(mockCommandContext, new String[] {"getPerson(" + param + ")"});
assertTrue(
result.contains("Please use the select command to select the method you want to invoke. eg: select 1"));
result = select.execute(mockCommandContext, new String[] {"1"});
// result dependent on method order.
assertTrue(result.contains("result: 8") || result.contains("result: \"Dubbo\""));
result = select.execute(mockCommandContext, new String[] {"2"});
assertTrue(result.contains("result: 8") || result.contains("result: \"Dubbo\""));
defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).remove();
defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).remove();
defaultAttributeMap.attr(SelectTelnet.SELECT_METHOD_KEY).remove();
defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_PROVIDER_KEY).remove();
defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY).remove();
defaultAttributeMap.attr(InvokeTelnet.INVOKE_MESSAGE_KEY).remove();
} |
@NonNull
public String processShownotes() {
String shownotes = rawShownotes;
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>";
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
Document document = Jsoup.parse(shownotes);
cleanCss(document);
document.head().appendElement("style").attr("type", "text/css").text(webviewStyle);
addTimecodes(document);
return document.toString();
} | @Test
public void testProcessShownotesAndInvalidTimecode() {
final String[] timeStrs = new String[] {"2:1", "0:0", "000", "00", "00:000"};
StringBuilder shownotes = new StringBuilder("<p> Some test text with timecodes ");
for (String timeStr : timeStrs) {
shownotes.append(timeStr).append(" ");
}
shownotes.append("here.</p>");
ShownotesCleaner t = new ShownotesCleaner(context, shownotes.toString(), Integer.MAX_VALUE);
String res = t.processShownotes();
checkLinkCorrect(res, new long[0], new String[0]);
} |
@Override
Function<Request.Builder, Request.Builder> addVerbToBuilder() {
return Request.Builder::get;
} | @Test
public void addVerbToBuilder_shouldReturnNonNullResult() {
assertThat(getRequest.addVerbToBuilder()).isNotNull();
} |
@PUT
@Path("{id}")
@ApiOperation("Update view")
@AuditEvent(type = ViewsAuditEventTypes.VIEW_UPDATE)
public ViewDTO update(@ApiParam(name = "id") @PathParam("id") @NotEmpty String id,
@ApiParam @Valid ViewDTO dto,
@Context SearchUser searchUser) {
final ViewDTO updatedDTO = dto.toBuilder().id(id).build();
if (!searchUser.canUpdateView(updatedDTO)) {
throw new ForbiddenException("Not allowed to edit " + summarize(updatedDTO) + ".");
}
validateIntegrity(updatedDTO, searchUser, false);
var result = dbService.update(updatedDTO);
recentActivityService.update(result.id(), result.type().equals(ViewDTO.Type.DASHBOARD) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH, searchUser);
return result;
} | @Test
public void updatesDashboardSuccessfullyIfInvisibleFilterWasPresentBefore() {
final ViewService viewService = mockViewService(TEST_DASHBOARD_VIEW);
final var dto = ViewDTO.builder().searchId("1").title("2").state(new HashMap<>()).build();
when(viewService.update(any())).thenReturn(dto);
final ViewsResource viewsResource = createViewsResource(
viewService,
mock(StartPageService.class),
mock(RecentActivityService.class),
mock(ClusterEventBus.class),
referencedFiltersHelperWithIDs(Collections.singleton("<<Hidden filter, but not added by this update>>")),
searchFilterVisibilityChecker(Collections.singletonList("<<Hidden filter, but not added by this update>>")),
EMPTY_VIEW_RESOLVERS,
SEARCH
);
viewsResource.update(VIEW_ID, TEST_DASHBOARD_VIEW, SEARCH_USER);
verify(viewService).update(TEST_DASHBOARD_VIEW);
} |
@Override
public long getTimeout() {
return timeoutMills;
} | @Test
void testAbstractPushCallBack() {
AbstractRequestCallBack callBack = new AbstractRequestCallBack() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void onResponse(Response response) {
testValue = true;
}
@Override
public void onException(Throwable e) {
testValue = false;
}
};
assertEquals(3000L, callBack.getTimeout());
assertFalse(testValue);
callBack.onResponse(new ErrorResponse());
assertTrue(testValue);
callBack.onException(new RuntimeException("test"));
assertFalse(testValue);
} |
@ProtoFactory
public static MediaType fromString(String tree) {
if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType();
Matcher matcher = TREE_PATTERN.matcher(tree);
return parseSingleMediaType(tree, matcher, false);
} | @Test(expected = EncodingException.class)
public void testUnQuotedParamWithSpaces() {
MediaType mediaType = MediaType.fromString("application/json ; charset= UTF-8");
} |
public static DataType getDataTypeFromColumn(EtlJobConfig.EtlColumn column, boolean regardDistinctColumnAsBinary) {
DataType dataType = DataTypes.StringType;
switch (column.columnType) {
case "BOOLEAN":
dataType = DataTypes.StringType;
break;
case "TINYINT":
dataType = DataTypes.ByteType;
break;
case "SMALLINT":
dataType = DataTypes.ShortType;
break;
case "INT":
dataType = DataTypes.IntegerType;
break;
case "DATETIME":
dataType = DataTypes.TimestampType;
break;
case "BIGINT":
dataType = DataTypes.LongType;
break;
case "LARGEINT":
dataType = DataTypes.StringType;
break;
case "FLOAT":
dataType = DataTypes.FloatType;
break;
case "DOUBLE":
dataType = DataTypes.DoubleType;
break;
case "DATE":
dataType = DataTypes.DateType;
break;
case "CHAR":
case "VARCHAR":
case "OBJECT":
case "PERCENTILE":
dataType = DataTypes.StringType;
break;
case "HLL":
case "BITMAP":
dataType = regardDistinctColumnAsBinary ? DataTypes.BinaryType : DataTypes.StringType;
break;
case "DECIMALV2":
case "DECIMAL32":
case "DECIMAL64":
case "DECIMAL128":
dataType = DecimalType.apply(column.precision, column.scale);
break;
default:
throw new RuntimeException("Reason: invalid column type:" + column);
}
return dataType;
} | @Test
public void testGetDataTypeFromColumn() {
DppUtils dppUtils = new DppUtils();
try {
EtlJobConfig.EtlColumn column = new EtlJobConfig.EtlColumn();
column.columnType = "VARCHAR";
DataType stringResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.StringType, stringResult);
column.columnType = "CHAR";
DataType charResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.StringType, charResult);
column.columnType = "HLL";
DataType hllResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.StringType, hllResult);
column.columnType = "OBJECT";
DataType objectResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.StringType, objectResult);
column.columnType = "BOOLEAN";
DataType booleanResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.StringType, booleanResult);
column.columnType = "TINYINT";
DataType tinyResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.ByteType, tinyResult);
column.columnType = "SMALLINT";
DataType smallResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.ShortType, smallResult);
column.columnType = "INT";
DataType integerResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.IntegerType, integerResult);
column.columnType = "BIGINT";
DataType longResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.LongType, longResult);
column.columnType = "DATETIME";
DataType datetimeResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.TimestampType, datetimeResult);
column.columnType = "FLOAT";
DataType floatResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.FloatType, floatResult);
column.columnType = "DOUBLE";
DataType doubleResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.DoubleType, doubleResult);
column.columnType = "DATE";
DataType dateResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DataTypes.DateType, dateResult);
column.columnType = "DECIMAL32";
column.precision = 7;
column.scale = 2;
DataType decimalResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DecimalType.apply(7, 2), decimalResult);
column.columnType = "DECIMAL64";
column.precision = 15;
column.scale = 3;
decimalResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DecimalType.apply(15, 3), decimalResult);
column.columnType = "DECIMAL128";
column.precision = 34;
column.scale = 4;
decimalResult = dppUtils.getDataTypeFromColumn(column, false);
Assert.assertEquals(DecimalType.apply(34, 4), decimalResult);
} catch (Exception e) {
Assert.assertTrue(false);
}
} |
@Udf(description = "Converts a string representation of a time in the given format"
+ " into the TIME value.")
public Time parseTime(
@UdfParameter(
description = "The string representation of a time.") final String formattedTime,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (formattedTime == null || formatPattern == null) {
return null;
}
try {
final TemporalAccessor ta = formatters.get(formatPattern).parse(formattedTime);
final Optional<ChronoField> dateField = Arrays.stream(ChronoField.values())
.filter(ChronoField::isDateBased)
.filter(ta::isSupported)
.findFirst();
if (dateField.isPresent()) {
throw new KsqlFunctionException("Time format contains date field.");
}
return new Time(TimeUnit.NANOSECONDS.toMillis(LocalTime.from(ta).toNanoOfDay()));
} catch (ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to parse time '" + formattedTime
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldBeThreadSafeAndWorkWithManyDifferentFormatters() {
IntStream.range(0, 10_000)
.parallel()
.forEach(idx -> {
try {
final String sourceDate = "000105X" + idx;
final String pattern = "HHmmss'X" + idx + "'";
final Time result = udf.parseTime(sourceDate, pattern);
assertThat(result.getTime(), is(65000L));
} catch (final Exception e) {
fail(e.getMessage());
}
});
} |
@PUT
@Path("/{connector}/topics/reset")
@Operation(summary = "Reset the list of topics actively used by the specified connector")
public Response resetConnectorActiveTopics(final @PathParam("connector") String connector, final @Context HttpHeaders headers) {
if (isTopicTrackingDisabled) {
throw new ConnectRestException(Response.Status.FORBIDDEN.getStatusCode(),
"Topic tracking is disabled.");
}
if (isTopicTrackingResetDisabled) {
throw new ConnectRestException(Response.Status.FORBIDDEN.getStatusCode(),
"Topic tracking reset is disabled.");
}
herder.resetConnectorActiveTopics(connector);
return Response.accepted().build();
} | @Test
public void testResetConnectorActiveTopics() {
HttpHeaders headers = mock(HttpHeaders.class);
connectorsResource = new ConnectorsResource(herder, serverConfig, restClient, REQUEST_TIMEOUT);
Response response = connectorsResource.resetConnectorActiveTopics(CONNECTOR_NAME, headers);
verify(herder).resetConnectorActiveTopics(CONNECTOR_NAME);
assertEquals(Response.Status.ACCEPTED.getStatusCode(), response.getStatus());
} |
@VisibleForTesting
public Path getAllocationFile(Configuration conf)
throws UnsupportedFileSystemException {
String allocFilePath = conf.get(FairSchedulerConfiguration.ALLOCATION_FILE,
FairSchedulerConfiguration.DEFAULT_ALLOCATION_FILE);
Path allocPath = new Path(allocFilePath);
String allocPathScheme = allocPath.toUri().getScheme();
if(allocPathScheme != null && !allocPathScheme.matches(SUPPORTED_FS_REGEX)){
throw new UnsupportedFileSystemException("Allocation file "
+ allocFilePath + " uses an unsupported filesystem");
} else if (!allocPath.isAbsolute()) {
URL url = Thread.currentThread().getContextClassLoader()
.getResource(allocFilePath);
if (url == null) {
LOG.warn(allocFilePath + " not found on the classpath.");
allocPath = null;
} else if (!url.getProtocol().equalsIgnoreCase("file")) {
throw new RuntimeException("Allocation file " + url
+ " found on the classpath is not on the local filesystem.");
} else {
allocPath = new Path(url.getProtocol(), null, url.getPath());
}
} else if (allocPath.isAbsoluteAndSchemeAuthorityNull()){
allocPath = new Path("file", null, allocFilePath);
}
return allocPath;
} | @Test
public void testGetAllocationFileFromFileSystem()
throws IOException, URISyntaxException {
File baseDir =
new File(TEST_DIR + Path.SEPARATOR + "getAllocHDFS").getAbsoluteFile();
FileUtil.fullyDelete(baseDir);
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
MiniDFSCluster hdfsCluster = builder.build();
String fsAllocPath = "hdfs://localhost:" + hdfsCluster.getNameNodePort()
+ Path.SEPARATOR + TEST_FAIRSCHED_XML;
URL fschedURL = Thread.currentThread().getContextClassLoader()
.getResource(TEST_FAIRSCHED_XML);
FileSystem fs = FileSystem.get(conf);
fs.copyFromLocalFile(new Path(fschedURL.toURI()), new Path(fsAllocPath));
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, fsAllocPath);
AllocationFileLoaderService allocLoader =
new AllocationFileLoaderService(scheduler);
Path allocationFile = allocLoader.getAllocationFile(conf);
assertEquals(fsAllocPath, allocationFile.toString());
assertTrue(fs.exists(allocationFile));
hdfsCluster.shutdown(true);
} |
@Override
public int read() throws IOException {
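// Single-byte read implemented on top of read(byte[]): returns -1 on EOF, otherwise the byte as an unsigned int.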
final int tmp = read(z);
return tmp == -1 ? -1 : (0xFF & z[0]);
} | @Test
public void testRepeat() throws Exception {
final Configuration conf = new Configuration();
Arrays.fill(loc, "");
Arrays.fill(start, 0L);
Arrays.fill(len, BLOCK);
final ByteArrayOutputStream out = fillVerif();
final FileQueue q =
new FileQueue(new CombineFileSplit(paths, start, len, loc), conf);
final byte[] verif = out.toByteArray();
final byte[] check = new byte[2 * NFILES * BLOCK];
q.read(check, 0, NFILES * BLOCK);
assertArrayEquals(verif, Arrays.copyOf(check, NFILES * BLOCK));
final byte[] verif2 = new byte[2 * NFILES * BLOCK];
System.arraycopy(verif, 0, verif2, 0, verif.length);
System.arraycopy(verif, 0, verif2, verif.length, verif.length);
q.read(check, 0, 2 * NFILES * BLOCK);
assertArrayEquals(verif2, check);
} |
public void writeTokens(Object... tokens) throws IOException
{
for (Object token : tokens)
{
writeObject(token);
}
output.write("\n".getBytes(StandardCharsets.US_ASCII));
} | @Test
void testPDFBox4750() throws IOException
{
String filename = "PDFBOX-4750.pdf";
File file = new File("target/pdfs", filename);
try (PDDocument doc = Loader.loadPDF(file))
{
PDFRenderer r = new PDFRenderer(doc);
for (int i = 0; i < doc.getNumberOfPages(); ++i)
{
BufferedImage bim1 = r.renderImageWithDPI(i, 96);
ImageIO.write(bim1, "png", new File(testDirIn, filename + "-" + (i + 1) + ".png"));
PDPage page = doc.getPage(i);
PDStream newContent = new PDStream(doc);
try (OutputStream os = newContent.createOutputStream(COSName.FLATE_DECODE))
{
PDFStreamParser parser = new PDFStreamParser(page);
ContentStreamWriter tokenWriter = new ContentStreamWriter(os);
tokenWriter.writeTokens(parser.parse());
}
page.setContents(newContent);
}
doc.save(new File(testDirIn, filename));
}
if (!TestPDFToImage.doTestFile(new File(testDirIn, filename), testDirIn.getAbsolutePath(),
testDirOut.getAbsolutePath()))
{
fail("Rendering failed or is not identical, see in " + testDirOut);
}
} |
public static String getTimestamp() throws IOException {
return loadProperties().getProperty(TIMESTAMP);
} | @Test
public void testGetTimestamp() throws IOException {
assertEquals(getTimestamp(), ("2017-01-31 01:21:09.843 UTC"));
} |
void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
// compute the observed stream time at the end of the restore batch, in order to speed up
// restore by not bothering to read from/write to segments which will have expired by the
// time the restoration process is complete.
long endOfBatchStreamTime = observedStreamTime;
for (final ConsumerRecord<byte[], byte[]> record : records) {
endOfBatchStreamTime = Math.max(endOfBatchStreamTime, record.timestamp());
}
final VersionedStoreClient<?> restoreClient = restoreWriteBuffer.getClient();
// note: there is increased risk for hitting an out-of-memory during this restore loop,
// compared to for non-versioned key-value stores, because this versioned store
// implementation stores multiple records (for the same key) together in a single RocksDB
// "segment" entry -- restoring a single changelog entry could require loading multiple
// records into memory. how high this memory amplification will be is very much dependent
// on the specific workload and the value of the "segment interval" parameter.
synchronized (position) {
for (final ConsumerRecord<byte[], byte[]> record : records) {
if (record.timestamp() < observedStreamTime - gracePeriod) {
// record is older than grace period and was therefore never written to the store
continue;
}
// advance observed stream time as usual, for use in deciding whether records have
// exceeded the store's grace period and should be dropped.
observedStreamTime = Math.max(observedStreamTime, record.timestamp());
ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
record,
consistencyEnabled,
position
);
// put records to write buffer
doPut(
restoreClient,
endOfBatchStreamTime,
new Bytes(record.key()),
record.value(),
record.timestamp()
);
}
try {
restoreWriteBuffer.flush();
} catch (final RocksDBException e) {
throw new ProcessorStateException("Error restoring batch to store " + name, e);
}
}
} | @Test
public void shouldRestoreMultipleBatches() {
final List<DataRecord> records = new ArrayList<>();
records.add(new DataRecord("k", null, SEGMENT_INTERVAL - 20));
records.add(new DataRecord("k", "vn10", SEGMENT_INTERVAL - 10));
records.add(new DataRecord("k", null, SEGMENT_INTERVAL - 1));
final List<DataRecord> moreRecords = new ArrayList<>();
moreRecords.add(new DataRecord("k", null, SEGMENT_INTERVAL + 1));
moreRecords.add(new DataRecord("k", "vp10", SEGMENT_INTERVAL + 10));
moreRecords.add(new DataRecord("k", null, SEGMENT_INTERVAL + 20));
store.restoreBatch(getChangelogRecords(records));
store.restoreBatch(getChangelogRecords(moreRecords));
verifyGetNullFromStore("k");
verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL + 30);
verifyTimestampedGetValueFromStore("k", SEGMENT_INTERVAL + 15, "vp10", SEGMENT_INTERVAL + 10, SEGMENT_INTERVAL + 20);
verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL + 5);
verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL + 2);
verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL);
verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL - 1);
verifyTimestampedGetValueFromStore("k", SEGMENT_INTERVAL - 5, "vn10", SEGMENT_INTERVAL - 10, SEGMENT_INTERVAL - 1);
verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL - 15);
} |
@ScalarOperator(CAST)
@LiteralParameters("x")
@SqlType("varchar(x)")
public static Slice castToVarchar(@SqlType(StandardTypes.REAL) long value)
{
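// REAL values are passed as the float's raw bits widened to a long; recover the float before formatting it as text.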
return utf8Slice(String.valueOf(intBitsToFloat((int) value)));
} | @Test
public void testCastToVarchar()
{
assertFunction("CAST(REAL'754.1985' as VARCHAR)", VARCHAR, "754.1985");
assertFunction("CAST(REAL'-754.2008' as VARCHAR)", VARCHAR, "-754.2008");
assertFunction("CAST(REAL'Infinity' as VARCHAR)", VARCHAR, "Infinity");
assertFunction("CAST(REAL'0.0' / REAL'0.0' as VARCHAR)", VARCHAR, "NaN");
} |
@Override
public ValidationTaskResult validateImpl(Map<String, String> optionsMap) {
if (!ValidationUtils.isHdfsScheme(mPath)) {
mMsg.append(String.format(
"UFS path %s is not HDFS. Skipping validation for HDFS properties.%n", mPath));
return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(),
mMsg.toString(), mAdvice.toString());
}
ValidationTaskResult loadConfig = loadHdfsConfig();
if (loadConfig.getState() != ValidationUtils.State.OK) {
// If failed to load config files, abort
return loadConfig;
}
// no conflicts between these two
ValidationTaskResult last = checkConflicts();
if (last.getState() == ValidationUtils.State.OK) {
last = checkNameservice();
}
return last;
} | @Test
public void inconsistentConf() {
String hdfsSite = Paths.get(sTestDir.toPath().toString(), "hdfs-site.xml").toString();
ValidationTestUtils.writeXML(hdfsSite, ImmutableMap.of("key1", "value2"));
String coreSite = Paths.get(sTestDir.toPath().toString(), "core-site.xml").toString();
ValidationTestUtils.writeXML(coreSite, ImmutableMap.of("key1", "value1"));
CONF.set(PropertyKey.UNDERFS_HDFS_CONFIGURATION,
hdfsSite + HdfsConfValidationTask.SEPARATOR + coreSite);
HdfsConfValidationTask task =
new HdfsConfValidationTask("hdfs://namenode:9000/alluxio", CONF);
ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
assertEquals(ValidationUtils.State.FAILED, result.getState());
assertThat(result.getResult(), containsString("key1"));
assertThat(result.getResult(), containsString("value1 in core-site.xml"));
assertThat(result.getResult(), containsString("value2 in hdfs-site.xml"));
assertThat(result.getAdvice(), containsString("fix the inconsistency"));
} |
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to the user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
} | @Test
public void testRunReturnDifferentRequestId() throws IOException {
DataflowPipelineOptions options = buildPipelineOptions();
Dataflow mockDataflowClient = options.getDataflowClient();
Dataflow.Projects.Locations.Jobs.Create mockRequest =
mock(Dataflow.Projects.Locations.Jobs.Create.class);
when(mockDataflowClient
.projects()
.locations()
.jobs()
.create(eq(PROJECT_ID), eq(REGION_ID), any(Job.class)))
.thenReturn(mockRequest);
Job resultJob = new Job();
resultJob.setId("newid");
// Return a different request id.
resultJob.setClientRequestId("different_request_id");
when(mockRequest.execute()).thenReturn(resultJob);
Pipeline p = buildDataflowPipeline(options);
try {
p.run();
fail("Expected DataflowJobAlreadyExistsException");
} catch (DataflowJobAlreadyExistsException expected) {
assertThat(
expected.getMessage(),
containsString(
"If you want to submit a second job, try again by setting a "
+ "different name using --jobName."));
assertEquals(expected.getJob().getJobId(), resultJob.getId());
}
} |
@Override public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
if ( dbMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
}
if ( rsMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
}
try {
return rsMetaData.getColumnLabel( index );
} catch ( Exception e ) {
throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
}
} | @Test
public void testGetLegacyColumnNameNoAliasText() throws Exception {
assertEquals( "NoAliasText", new MariaDBDatabaseMeta().getLegacyColumnName( mock( DatabaseMetaData.class ), getResultSetMetaData(), 6 ) );
} |
public String stringify(boolean value) {
throw new UnsupportedOperationException(
"stringify(boolean) was called on a non-boolean stringifier: " + toString());
} | @Test
public void testDateStringifier() {
PrimitiveStringifier stringifier = DATE_STRINGIFIER;
assertEquals("1970-01-01", stringifier.stringify(0));
Calendar cal = Calendar.getInstance(UTC);
cal.clear();
cal.set(2017, Calendar.DECEMBER, 14);
assertEquals("2017-12-14", stringifier.stringify((int) MILLISECONDS.toDays(cal.getTimeInMillis())));
cal.clear();
cal.set(1583, Calendar.AUGUST, 3);
assertEquals("1583-08-03", stringifier.stringify((int) MILLISECONDS.toDays(cal.getTimeInMillis())));
checkThrowingUnsupportedException(stringifier, Integer.TYPE);
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void integerLiteral() {
String inputExpression = "10";
BaseNode number = parse( inputExpression );
assertThat( number).isInstanceOf(NumberNode.class);
assertThat( number.getResultType()).isEqualTo(BuiltInType.NUMBER);
assertLocation( inputExpression, number );
} |
@CanDistro
@DeleteMapping("/metadata/batch")
@TpsControl(pointName = "NamingInstanceMetadataUpdate", name = "HttpNamingInstanceMetadataBatchUpdate")
@Secured(action = ActionTypes.WRITE)
@ExtractorManager.Extractor(httpExtractor = NamingInstanceMetadataBatchHttpParamExtractor.class)
public ObjectNode batchDeleteInstanceMetadata(HttpServletRequest request) throws Exception {
final String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID,
Constants.DEFAULT_NAMESPACE_ID);
String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME);
String consistencyType = WebUtils.optional(request, "consistencyType", StringUtils.EMPTY);
String instances = WebUtils.optional(request, "instances", StringUtils.EMPTY);
List<Instance> targetInstances = parseBatchInstances(instances);
String metadata = WebUtils.required(request, METADATA);
Map<String, String> targetMetadata = UtilsAndCommons.parseMetadata(metadata);
InstanceOperationInfo instanceOperationInfo = buildOperationInfo(serviceName, consistencyType, targetInstances);
List<String> operatedInstances = getInstanceOperator().batchDeleteMetadata(namespaceId, instanceOperationInfo,
targetMetadata);
ObjectNode result = JacksonUtils.createEmptyJsonNode();
ArrayNode ipArray = JacksonUtils.createEmptyArrayNode();
for (String ip : operatedInstances) {
ipArray.add(ip);
}
result.replace("updated", ipArray);
return result;
} | @Test
void testBatchDeleteInstanceMetadata() throws Exception {
mockRequestParameter("metadata", "{}");
when(instanceServiceV2.batchDeleteMetadata(eq(Constants.DEFAULT_NAMESPACE_ID), any(), anyMap())).thenReturn(
Collections.singletonList("1.1.1.1:3306:unknown:DEFAULT:ephemeral"));
ObjectNode actual = instanceController.batchDeleteInstanceMetadata(request);
assertEquals("1.1.1.1:3306:unknown:DEFAULT:ephemeral", actual.get("updated").get(0).textValue());
} |
public RecordAppendResult append(String topic,
int partition,
long timestamp,
byte[] key,
byte[] value,
Header[] headers,
AppendCallbacks callbacks,
long maxTimeToBlock,
boolean abortOnNewBatch,
long nowMs,
Cluster cluster) throws InterruptedException {
TopicInfo topicInfo = topicInfoMap.computeIfAbsent(topic, k -> new TopicInfo(createBuiltInPartitioner(logContext, k, batchSize)));
// We keep track of the number of appending threads to make sure we do not miss batches in
// abortIncompleteBatches().
appendsInProgress.incrementAndGet();
ByteBuffer buffer = null;
if (headers == null) headers = Record.EMPTY_HEADERS;
try {
// Loop to retry in case we encounter partitioner's race conditions.
while (true) {
// If the message doesn't have any partition affinity, we pick a partition based on broker
// availability and performance. Note that here we peek the current partition before we hold the
// deque lock, so we'll need to make sure that it's not changed while we were waiting for the
// deque lock.
final BuiltInPartitioner.StickyPartitionInfo partitionInfo;
final int effectivePartition;
if (partition == RecordMetadata.UNKNOWN_PARTITION) {
partitionInfo = topicInfo.builtInPartitioner.peekCurrentPartitionInfo(cluster);
effectivePartition = partitionInfo.partition();
} else {
partitionInfo = null;
effectivePartition = partition;
}
// Now that we know the effective partition, let the caller know.
setPartition(callbacks, effectivePartition);
// check if we have an in-progress batch
Deque<ProducerBatch> dq = topicInfo.batches.computeIfAbsent(effectivePartition, k -> new ArrayDeque<>());
synchronized (dq) {
// After taking the lock, validate that the partition hasn't changed and retry.
if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
continue;
RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callbacks, dq, nowMs);
if (appendResult != null) {
// If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
boolean enableSwitch = allBatchesFull(dq);
topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
return appendResult;
}
}
// we don't have an in-progress record batch try to allocate a new batch
if (abortOnNewBatch) {
// Return a result that will cause another call to append.
return new RecordAppendResult(null, false, false, true, 0);
}
if (buffer == null) {
byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression.type(), key, value, headers));
log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, topic, effectivePartition, maxTimeToBlock);
// This call may block if we exhausted buffer space.
buffer = free.allocate(size, maxTimeToBlock);
// Update the current time in case the buffer allocation blocked above.
// NOTE: getting time may be expensive, so calling it under a lock
// should be avoided.
nowMs = time.milliseconds();
}
synchronized (dq) {
// After taking the lock, validate that the partition hasn't changed and retry.
if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
continue;
RecordAppendResult appendResult = appendNewBatch(topic, effectivePartition, dq, timestamp, key, value, headers, callbacks, buffer, nowMs);
// Set buffer to null, so that deallocate doesn't return it back to free pool, since it's used in the batch.
if (appendResult.newBatchCreated)
buffer = null;
// If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
boolean enableSwitch = allBatchesFull(dq);
topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
return appendResult;
}
}
} finally {
free.deallocate(buffer);
appendsInProgress.decrementAndGet();
}
} | @Test
public void testIdempotenceWithOldMagic() {
// Simulate talking to an older broker, ie. one which supports a lower magic.
ApiVersions apiVersions = new ApiVersions();
int batchSize = 1025;
int deliveryTimeoutMs = 3200;
int lingerMs = 10;
long retryBackoffMs = 100L;
long totalSize = 10 * batchSize;
String metricGrpName = "producer-metrics";
apiVersions.update("foobar", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
TransactionManager transactionManager = new TransactionManager(new LogContext(), null, 0, retryBackoffMs, apiVersions);
RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD,
Compression.NONE, lingerMs, retryBackoffMs, retryBackoffMs, deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager,
new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
assertThrows(UnsupportedVersionException.class,
() -> accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster));
} |
public void resetPositionsIfNeeded() {
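// Collect the partitions whose positions need resetting and, if any exist, issue the ListOffsets requests asynchronously.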
Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();
if (offsetResetTimestamps.isEmpty())
return;
resetPositionsAsync(offsetResetTimestamps);
} | @Test
public void testSeekWithInFlightReset() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
// Send the ListOffsets request to reset the position
offsetFetcher.resetPositionsIfNeeded();
consumerClient.pollNoWakeup();
assertFalse(subscriptions.hasValidPosition(tp0));
assertTrue(client.hasInFlightRequests());
// Now we get a seek from the user
subscriptions.seek(tp0, 237);
// The response returns and is discarded
client.respond(listOffsetResponse(Errors.NONE, 1L, 5L));
consumerClient.pollNoWakeup();
assertFalse(client.hasPendingResponses());
assertFalse(client.hasInFlightRequests());
assertEquals(237L, subscriptions.position(tp0).offset);
} |
public static PemAuthIdentity clusterOperator(Secret secret) {
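// Builds the cluster operator's TLS identity from its certificates Secret, using the "cluster-operator" key/cert field prefix.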
return new PemAuthIdentity(secret, "cluster-operator");
} | @Test
public void testSecretWithMissingCertChainThrowsException() {
Secret secretWithMissingClusterOperatorKey = new SecretBuilder()
.withNewMetadata()
.withName(KafkaResources.clusterOperatorCertsSecretName(CLUSTER))
.withNamespace(NAMESPACE)
.endMetadata()
.withData(map("cluster-operator.key", "key"))
.build();
Exception e = assertThrows(RuntimeException.class, () -> PemAuthIdentity.clusterOperator(secretWithMissingClusterOperatorKey));
assertThat(e.getMessage(), is("The Secret testns/testcluster-cluster-operator-certs is missing the field cluster-operator.crt"));
} |
public Connection findAllowedConnection(String entityId) {
return connectionRepository.findAllowedByEntityId(entityId);
} | @Test
void findAllowedConnection() {
when(connectionRepositoryMock.findAllowedByEntityId(anyString())).thenReturn(new Connection());
Connection result = connectionServiceMock.findAllowedConnection("entityId");
verify(connectionRepositoryMock, times(1)).findAllowedByEntityId(anyString());
assertNotNull(result);
} |
public static String format(double amount, boolean isUseTraditional) {
return format(amount, isUseTraditional, false);
} | @Test
public void formatTest() {
String f0 = NumberChineseFormatter.format(5000_8000, false);
assertEquals("五千万零八千", f0);
String f1 = NumberChineseFormatter.format(1_0889.72356, false);
assertEquals("一万零八百八十九点七二", f1);
f1 = NumberChineseFormatter.format(12653, false);
assertEquals("一万二千六百五十三", f1);
f1 = NumberChineseFormatter.format(215.6387, false);
assertEquals("二百一十五点六四", f1);
f1 = NumberChineseFormatter.format(1024, false);
assertEquals("一千零二十四", f1);
f1 = NumberChineseFormatter.format(100350089, false);
assertEquals("一亿零三十五万零八十九", f1);
f1 = NumberChineseFormatter.format(1200, false);
assertEquals("一千二百", f1);
f1 = NumberChineseFormatter.format(12, false);
assertEquals("一十二", f1);
f1 = NumberChineseFormatter.format(0.05, false);
assertEquals("零点零五", f1);
} |
@Override
public int readUnsignedShort() throws EOFException {
return readShort() & 0xffff;
} | @Test
public void testReadUnsignedShort() throws Exception {
byte[] bytes1 = {0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, -1, -1};
in.init(bytes1, bytes1.length - 4);
int unsigned = in.readUnsignedShort();
assertEquals(0xFFFF, unsigned);
} |
public CaseInsensitiveString getAncestorName() {
if (cachedAncestorName == null) {
String stringPath = path();
if (stringPath == null) {
return null;
}
int index = stringPath.indexOf(DELIMITER);
cachedAncestorName = index == -1 ? path : new CaseInsensitiveString(stringPath.substring(0, index));
}
return cachedAncestorName;
} | @Test
public void shouldUnderstandAncestorName() {
PathFromAncestor path = new PathFromAncestor(new CaseInsensitiveString("grand-parent/parent/child"));
assertThat(path.getAncestorName(), is(new CaseInsensitiveString("grand-parent")));
} |
public String toXmlPartial(Object domainObject) {
bombIf(!isAnnotationPresent(domainObject.getClass(), ConfigTag.class), () -> "Object " + domainObject + " does not have a ConfigTag");
Element element = elementFor(domainObject.getClass());
write(domainObject, element, configCache, registry);
if (isAnnotationPresent(domainObject.getClass(), ConfigCollection.class) && domainObject instanceof Collection) {
for (Object item : (Collection<?>) domainObject) {
if (isAnnotationPresent(item.getClass(), ConfigCollection.class) && item instanceof Collection) {
new ExplicitCollectionXmlFieldWithValue(domainObject.getClass(), null, (Collection<?>) item, configCache, registry).populate(element);
continue;
}
Element childElement = elementFor(item.getClass());
element.addContent(childElement);
write(item, childElement, configCache, registry);
}
}
try (ByteArrayOutputStream output = new ByteArrayOutputStream(32 * 1024)) {
XmlUtils.writeXml(element, output);
// FIXME the lack of charset here looks rather suspicious. But unclear how to fix without possible regressions.
// Related to similar issue in GoConfigMigration?
return output.toString();
} catch (IOException e) {
throw bomb("Unable to write xml to String");
}
} | @Test
public void shouldNotSaveUserNameAndPasswordWhenBothAreEmpty() {
MailHost mailHost = new MailHost("hostname", 24, "", "", null, true, false, "from@te.com", "to@te.com", new GoCipher());
mailHost.ensureEncrypted();
String s = xmlWriter.toXmlPartial(mailHost);
assertThat(s, is(
"<mailhost hostname=\"hostname\" port=\"24\" "
+ "from=\"from@te.com\" admin=\"to@te.com\" />"));
} |
@Override
public String getName() {
return _name;
} | @Test
public void testStringSplitTransformFunction() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("split(%s, 'ab')", STRING_ALPHANUM_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getName(), "split");
String[][] expectedValues = new String[NUM_ROWS][];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = StringUtils.splitByWholeSeparator(_stringAlphaNumericSVValues[i], "ab");
}
testTransformFunctionMV(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(
String.format("split(%s, 'ab', %s)", STRING_ALPHANUM_SV_COLUMN, INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getName(), "split");
expectedValues = new String[NUM_ROWS][];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = StringUtils.splitByWholeSeparator(_stringAlphaNumericSVValues[i], "ab", _intSVValues[i]);
}
testTransformFunctionMV(transformFunction, expectedValues);
} |
@Override
public int actionStop(String appName) throws IOException, YarnException {
int result = EXIT_SUCCESS;
try {
Service service = new Service();
service.setName(appName);
service.setState(ServiceState.STOPPED);
String buffer = jsonSerDeser.toJson(service);
ClientResponse response = getApiClient(getServicePath(appName))
.put(ClientResponse.class, buffer);
result = processResponse(response);
} catch (Exception e) {
LOG.error("Fail to stop application: ", e);
result = EXIT_EXCEPTION_THROWN;
}
return result;
} | @Test
void testBadStop() {
String appName = "unknown_app";
try {
int result = badAsc.actionStop(appName);
assertEquals(EXIT_EXCEPTION_THROWN, result);
} catch (IOException | YarnException e) {
fail();
}
} |
public void revokePublisherAgreement(UserData user, UserData admin) {
checkApiUrl();
checkEclipseData(user);
var eclipseToken = admin == null ? checkEclipseToken(user) : checkEclipseToken(admin);
var headers = new HttpHeaders();
headers.setBearerAuth(eclipseToken.accessToken);
var request = new HttpEntity<>(headers);
var urlTemplate = eclipseApiUrl + "openvsx/publisher_agreement/{personId}";
var uriVariables = Map.of("personId", user.getEclipsePersonId());
try {
var requestCallback = restTemplate.httpEntityCallback(request);
restTemplate.execute(urlTemplate, HttpMethod.DELETE, requestCallback, null, uriVariables);
} catch (RestClientException exc) {
var url = UriComponentsBuilder.fromUriString(urlTemplate).build(uriVariables);
logger.error("Delete request failed with URL: " + url, exc);
throw new ErrorResultException("Request for revoking publisher agreement failed: " + exc.getMessage(),
HttpStatus.INTERNAL_SERVER_ERROR);
}
} | @Test
public void testRevokePublisherAgreement() {
var user = mockUser();
user.setEclipsePersonId("test");
eclipse.revokePublisherAgreement(user, null);
} |
@Override
public String getDataSource() {
return DataSourceConstant.DERBY;
} | @Test
void testGetDataSource() {
String dataSource = groupCapacityMapperByDerby.getDataSource();
assertEquals(DataSourceConstant.DERBY, dataSource);
} |
public static ClientDetailsEntity parse(String jsonString) {
JsonElement jsonEl = parser.parse(jsonString);
return parse(jsonEl);
} | @Test
public void testParse() {
String json = " {\n" +
" \"application_type\": \"web\",\n" +
" \"redirect_uris\":\n" +
" [\"https://client.example.org/callback\",\n" +
" \"https://client.example.org/callback2\"],\n" +
" \"client_name\": \"My Example\",\n" +
" \"client_name#ja-Jpan-JP\":\n" +
" \"クライアント名\",\n" +
" \"response_types\": [\"code\", \"token\"],\n" +
" \"grant_types\": [\"authorization_code\", \"implicit\"],\n" +
" \"logo_uri\": \"https://client.example.org/logo.png\",\n" +
" \"subject_type\": \"pairwise\",\n" +
" \"sector_identifier_uri\":\n" +
" \"https://other.example.net/file_of_redirect_uris.json\",\n" +
" \"token_endpoint_auth_method\": \"client_secret_basic\",\n" +
" \"jwks_uri\": \"https://client.example.org/my_public_keys.jwks\",\n" +
" \"userinfo_encrypted_response_alg\": \"RSA1_5\",\n" +
" \"userinfo_encrypted_response_enc\": \"A128CBC-HS256\",\n" +
" \"contacts\": [\"ve7jtb@example.org\", \"mary@example.org\"],\n" +
" \"request_uris\":\n" +
" [\"https://client.example.org/rf.txt#qpXaRLh_n93TTR9F252ValdatUQvQiJi5BDub2BeznA\"]\n" +
" }";
ClientDetailsEntity c = ClientDetailsEntityJsonProcessor.parse(json);
assertEquals(ClientDetailsEntity.AppType.WEB, c.getApplicationType());
assertEquals(ImmutableSet.of("https://client.example.org/callback", "https://client.example.org/callback2"), c.getRedirectUris());
assertEquals("My Example", c.getClientName());
assertEquals(ImmutableSet.of("code", "token"), c.getResponseTypes());
assertEquals(ImmutableSet.of("authorization_code", "implicit"), c.getGrantTypes());
assertEquals("https://client.example.org/logo.png", c.getLogoUri());
assertEquals(ClientDetailsEntity.SubjectType.PAIRWISE, c.getSubjectType());
assertEquals("https://other.example.net/file_of_redirect_uris.json", c.getSectorIdentifierUri());
assertEquals(ClientDetailsEntity.AuthMethod.SECRET_BASIC, c.getTokenEndpointAuthMethod());
assertEquals("https://client.example.org/my_public_keys.jwks", c.getJwksUri());
assertEquals(JWEAlgorithm.RSA1_5, c.getUserInfoEncryptedResponseAlg());
assertEquals(EncryptionMethod.A128CBC_HS256, c.getUserInfoEncryptedResponseEnc());
assertEquals(ImmutableSet.of("ve7jtb@example.org", "mary@example.org"), c.getContacts());
assertEquals(ImmutableSet.of("https://client.example.org/rf.txt#qpXaRLh_n93TTR9F252ValdatUQvQiJi5BDub2BeznA"), c.getRequestUris());
} |
static String getPIDFromOS() {
String pid;
// following is not always reliable as is (for example, see issue 3 on solaris 10
// or http://blog.igorminar.com/2007/03/how-java-application-can-discover-its.html)
// Author: Santhosh Kumar T, https://github.com/santhosh-tekuri/jlibs, licence LGPL
// Author getpids.exe: Daniel Scheibli, http://www.scheibli.com/projects/getpids/index.html, licence GPL
final String[] cmd;
File tempFile = null;
Process process = null;
try {
try {
if (!System.getProperty("os.name").toLowerCase(Locale.ENGLISH)
.contains("windows")) {
cmd = new String[] { "/bin/sh", "-c", "echo $$ $PPID" };
} else {
// getpids.exe is taken from http://www.scheibli.com/projects/getpids/index.html (GPL)
tempFile = File.createTempFile("getpids", ".exe");
// extract the embedded getpids.exe file from the jar and save it to above file
extractGetPid(tempFile);
cmd = new String[] { tempFile.getAbsolutePath() };
}
process = Runtime.getRuntime().exec(cmd);
final String processOutput = InputOutput.pumpToString(process.getInputStream(),
Charset.defaultCharset());
final StringTokenizer stok = new StringTokenizer(processOutput);
stok.nextToken(); // this is pid of the process we spanned
pid = stok.nextToken();
// waitFor is necessary on Windows Server 2003
// (otherwise the temporary getpidsxxx.exe file is not deleted)
process.waitFor();
} finally {
if (process != null) {
// avoid http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6462165
process.getInputStream().close();
process.getOutputStream().close();
process.getErrorStream().close();
process.destroy();
}
if (tempFile != null && !tempFile.delete()) {
tempFile.deleteOnExit();
}
}
} catch (final InterruptedException | IOException e) {
pid = e.toString();
}
return pid;
} | @Test
public void testGetPIDFromOS() {
assertNotNull("getPIDFromOS", PID.getPIDFromOS());
} |
@Override
public Set<DeviceId> getDevicesOf(NodeId nodeId) {
checkPermission(CLUSTER_READ);
checkNotNull(nodeId, NODE_ID_NULL);
return store.getDevices(nodeId);
} | @Test
public void getDevicesOf() {
mgr.setRole(NID_LOCAL, DEV_MASTER, MASTER);
mgr.setRole(NID_LOCAL, DEV_OTHER, STANDBY);
assertEquals("should be one device:", 1, mgr.getDevicesOf(NID_LOCAL).size());
//hand both devices to NID_LOCAL
mgr.setRole(NID_LOCAL, DEV_OTHER, MASTER);
assertEquals("should be two devices:", 2, mgr.getDevicesOf(NID_LOCAL).size());
} |
protected static SimpleDateFormat getLog4j2Appender() {
Optional<Appender> log4j2xmlAppender =
configuration.getAppenders().values().stream()
.filter( a -> a.getName().equalsIgnoreCase( log4J2Appender ) ).findFirst();
if ( log4j2xmlAppender.isPresent() ) {
ArrayList<String> matchesArray = new ArrayList<>();
String dateFormatFromLog4j2xml = log4j2xmlAppender.get().getLayout().getContentFormat().get( "format" );
Pattern pattern = Pattern.compile( "(\\{(.*?)})" );
Matcher matcher = pattern.matcher( dateFormatFromLog4j2xml );
while ( matcher.find() ) {
matchesArray.add( matcher.group( 2 ) );
}
if ( !matchesArray.isEmpty() ) {
return processMatches( matchesArray );
}
}
return new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss" );
} | @Test
public void testGetLog4j2UsingAppender6() {
// Testing dd/MMM/yyyy HH:mm:ss,SSS pattern
KettleLogLayout.log4J2Appender = "pdi-execution-appender-test-6";
Assert.assertEquals( "dd/MMM/yyyy HH:mm:ss,SSS",
KettleLogLayout.getLog4j2Appender().toPattern() );
} |
public void submitLoggingTask(Collection<Member> connectedMembers, Collection<Member> allMembers) {
if (delayPeriodSeconds <= 0) {
return;
}
if ((submittedLoggingTask != null && !submittedLoggingTask.isDone())) {
submittedLoggingTask.cancel(true);
}
submittedLoggingTask = executor.schedule(()
-> logger.info(connectivityLog(connectedMembers, allMembers)),
delayPeriodSeconds, SECONDS);
} | @Test
void assertLogMessageIsCorrect() {
List<Member> mockMembers = createMockMembers(2);
HazelcastProperties hzProps = createMockProperties(1);
ClientConnectivityLogger clientConnectivityLogger = new ClientConnectivityLogger(loggingService, executor, hzProps);
clientConnectivityLogger.submitLoggingTask(List.of(mockMembers.get(0)), mockMembers);
String expected = format(System.lineSeparator()
+ System.lineSeparator()
+ "Client Connectivity [2] {"
+ System.lineSeparator()
+ "\t%s - connected"
+ System.lineSeparator()
+ "\t%s - disconnected"
+ System.lineSeparator()
+ "}"
+ System.lineSeparator(),
mockMembers.get(0),
mockMembers.get(1)
);
ArgumentCaptor<Runnable> runnableCaptor = ArgumentCaptor.forClass(Runnable.class);
verify(executor).schedule(runnableCaptor.capture(), anyLong(), any());
runnableCaptor.getValue().run();
verify(logger).info(expected);
} |
public void upgradeApp(Service service) throws IOException,
SolrServerException {
Collection<SolrInputDocument> docs = new HashSet<SolrInputDocument>();
SolrClient solr = getSolrClient();
if (service!=null) {
String name = service.getName();
String app = "";
SolrQuery query = new SolrQuery();
query.setQuery("id:" + name);
query.setFilterQueries("type_s:AppEntry");
query.setRows(1);
QueryResponse response;
try {
response = solr.query(query);
Iterator<SolrDocument> appList = response.getResults().listIterator();
while (appList.hasNext()) {
SolrDocument d = appList.next();
app = d.get("app_s").toString();
}
} catch (SolrServerException | IOException e) {
LOG.error("Error in finding deployed application: " + name, e);
}
// Register deployed application instance with AppList
SolrInputDocument request = new SolrInputDocument();
request.addField("type_s", "AppEntry");
request.addField("id", name);
request.addField("name_s", name);
request.addField("app_s", app);
request.addField("yarnfile_s", OBJECT_MAPPER.writeValueAsString(service));
docs.add(request);
}
try {
commitSolrChanges(solr, docs);
} catch (IOException e) {
throw new IOException("Unable to register docker instance "
+ "with application entry.", e);
}
} | @Test
void testUpgradeApp() throws Exception {
Application example = new Application();
String expected = "2.0";
String actual = "";
example.setOrganization("jenkins-ci.org");
example.setVersion("1.0");
example.setName("jenkins");
example.setDescription("World leading open source automation system.");
example.setIcon("/css/img/feather.png");
spy.register(example);
spy.deployApp("test", example);
example.setVersion("2.0");
spy.upgradeApp(example);
List<AppEntry> appEntries = spy.listAppEntries();
actual = appEntries.get(appEntries.size() - 1).getYarnfile().getVersion();
assertEquals(expected, actual);
} |
@Override
public ObjectNode encode(LispAsAddress address, CodecContext context) {
checkNotNull(address, "LispAsAddress cannot be null");
final ObjectNode result = context.mapper().createObjectNode()
.put(AS_NUMBER, address.getAsNumber());
if (address.getAddress() != null) {
final JsonCodec<MappingAddress> addressCodec =
context.codec(MappingAddress.class);
ObjectNode addressNode = addressCodec.encode(address.getAddress(), context);
result.set(ADDRESS, addressNode);
}
return result;
} | @Test
public void testLispAsAddressEncode() {
LispAsAddress address = new LispAsAddress.Builder()
.withAsNumber(AS_NUMBER)
.withAddress(MappingAddresses.ipv4MappingAddress(IPV4_PREFIX))
.build();
ObjectNode addressJson = asAddressCodec.encode(address, context);
assertThat("errors in encoding AS address JSON",
addressJson, LispAsAddressJsonMatcher.matchesAsAddress(address));
} |
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt adminExt = new DefaultMQAdminExt(rpcHook);
adminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
String topic = commandLine.getOptionValue('t').trim();
if (commandLine.hasOption('c')) {
String clusterName = commandLine.getOptionValue('c').trim();
adminExt.start();
deleteTopic(adminExt, clusterName, topic);
return;
}
ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
adminExt.shutdown();
}
} | @Test
public void testExecute() {
DeleteTopicSubCommand cmd = new DeleteTopicSubCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-t unit-test", "-c default-cluster"};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
assertThat(commandLine.getOptionValue("c").trim()).isEqualTo("default-cluster");
} |
public final void topoSort(final CaseInsensitiveString root, final PipelineDependencyState pipelineDependencyState) throws Exception {
Hashtable<CaseInsensitiveString, CycleState> state = new Hashtable<>();
Stack<CaseInsensitiveString> visiting = new Stack<>();
if (!state.containsKey(root)) {
tsort(root, pipelineDependencyState, state, visiting);
} else if (state.get(root) == CycleState.VISITING) {
throw ExceptionUtils.bomb("Unexpected node in visiting state: " + root);
}
assertHasVisitedAllNodesInTree(state);
} | @Test
public void shouldThrowExceptionWhenCycleDependencyFound() {
when(state.getDependencyMaterials(new CaseInsensitiveString("a"))).thenReturn(new Node(new Node.DependencyNode(new CaseInsensitiveString("b"), new CaseInsensitiveString("stage"))));
when(state.getDependencyMaterials(new CaseInsensitiveString("b"))).thenReturn(new Node(new Node.DependencyNode(new CaseInsensitiveString("c"), new CaseInsensitiveString("stage"))));
when(state.getDependencyMaterials(new CaseInsensitiveString("c"))).thenReturn(new Node(new Node.DependencyNode(new CaseInsensitiveString("a"), new CaseInsensitiveString("stage"))));
when(state.hasPipeline(new CaseInsensitiveString("a"))).thenReturn(true);
when(state.hasPipeline(new CaseInsensitiveString("b"))).thenReturn(true);
when(state.hasPipeline(new CaseInsensitiveString("c"))).thenReturn(true);
try {
project.topoSort(new CaseInsensitiveString("a"), state);
} catch (Exception e) {
assertThat(e.getMessage(), is("Circular dependency: a <- c <- b <- a"));
}
} |
public ProducerAppendInfo prepareUpdate(long producerId, AppendOrigin origin) {
ProducerStateEntry currentEntry = lastEntry(producerId).orElse(ProducerStateEntry.empty(producerId));
return new ProducerAppendInfo(topicPartition, producerId, currentEntry, origin, verificationStateEntry(producerId));
} | @Test
public void updateProducerTransactionState() {
int coordinatorEpoch = 15;
long offset = 9L;
appendClientEntry(stateManager, producerId, epoch, defaultSequence, offset, false);
ProducerAppendInfo appendInfo = stateManager.prepareUpdate(producerId, AppendOrigin.CLIENT);
appendInfo.appendDataBatch(epoch, 1, 5, time.milliseconds(),
new LogOffsetMetadata(16L), 20L, true);
verifyLastEntryWithTxnData(appendInfo.toEntry(), 1, 5,
16L, 20L, OptionalLong.of(16), appendInfo);
appendInfo.appendDataBatch(epoch, 6, 10, time.milliseconds(),
new LogOffsetMetadata(26L), 30L, true);
verifyLastEntryWithTxnData(appendInfo.toEntry(), 1, 10,
16L, 30L, OptionalLong.of(16), appendInfo);
EndTransactionMarker endTxnMarker = new EndTransactionMarker(ControlRecordType.COMMIT, coordinatorEpoch);
CompletedTxn completedTxn = appendInfo.appendEndTxnMarker(endTxnMarker, epoch, 40L, time.milliseconds())
.orElseThrow(() -> new RuntimeException("The transaction should be completed"));
assertEquals(producerId, completedTxn.producerId);
assertEquals(16L, completedTxn.firstOffset);
assertEquals(40L, completedTxn.lastOffset);
assertFalse(completedTxn.isAborted);
ProducerStateEntry lastEntry = appendInfo.toEntry();
// verify that appending the transaction marker doesn't affect the metadata of the cached record batches.
verifyLastEntryWithTxnData(lastEntry, 1, 10,
16L, 30L, OptionalLong.empty(), appendInfo);
assertEquals(OptionalLong.empty(), lastEntry.currentTxnFirstOffset());
} |
@Override
public void process(Exchange exchange) throws Exception {
JsonElement json = getBodyAsJsonElement(exchange);
String operation = exchange.getIn().getHeader(CouchDbConstants.HEADER_METHOD, String.class);
if (ObjectHelper.isEmpty(operation)) {
Response<DocumentResult> save = saveJsonElement(json);
if (save == null) {
throw new CouchDbException("Could not save document [unknown reason]", exchange);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Document saved [_id={}, _rev={}]", save.getResult().getId(), save.getResult().getRev());
}
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, save.getResult().getRev());
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, save.getResult().getId());
} else {
if (operation.equalsIgnoreCase(CouchDbOperations.DELETE.toString())) {
Response<DocumentResult> delete = deleteJsonElement(json);
if (delete == null) {
throw new CouchDbException("Could not delete document [unknown reason]", exchange);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Document saved [_id={}, _rev={}]", delete.getResult().getId(), delete.getResult().getRev());
}
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, delete.getResult().getRev());
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, delete.getResult().getId());
}
if (operation.equalsIgnoreCase(CouchDbOperations.GET.toString())) {
String docId = exchange.getIn().getHeader(CouchDbConstants.HEADER_DOC_ID, String.class);
if (docId == null) {
throw new CouchDbException("Could not get document, document id is missing", exchange);
}
Object response = getElement(docId);
if (LOG.isTraceEnabled()) {
LOG.trace("Document retrieved [_id={}]", docId);
}
exchange.getIn().setBody(response);
}
}
} | @Test
void testDocumentHeadersAreSet() throws Exception {
String id = UUID.randomUUID().toString();
String rev = UUID.randomUUID().toString();
Document doc = new Document.Builder()
.add("_rev", rev)
.id(id)
.build();
DocumentResult documentResult = mock(DocumentResult.class, Answers.RETURNS_DEEP_STUBS);
when(msg.getMandatoryBody()).thenReturn(doc);
when(client.update(doc)).thenReturn(response);
when(response.getResult()).thenReturn(documentResult);
when(response.getResult().getId()).thenReturn(id);
when(response.getResult().getRev()).thenReturn(rev);
producer.process(exchange);
verify(msg).setHeader(CouchDbConstants.HEADER_DOC_ID, id);
verify(msg).setHeader(CouchDbConstants.HEADER_DOC_REV, rev);
} |
public static Builder newChangesetBuilder() {
return new Builder();
} | @Test
public void fail_with_NPE_when_building_without_date() {
assertThatThrownBy(() -> {
Changeset.newChangesetBuilder()
.setAuthor("john")
.setRevision("rev-1")
.build();
})
.isInstanceOf(NullPointerException.class)
.hasMessage("Date cannot be null");
} |
@SuppressWarnings("unchecked")
private void publishContainerKilledEvent(
ContainerEvent event) {
if (publishNMContainerEvents) {
ContainerKillEvent killEvent = (ContainerKillEvent) event;
ContainerId containerId = killEvent.getContainerID();
ContainerEntity entity = createContainerEntity(containerId);
Map<String, Object> entityInfo = new HashMap<String, Object>();
entityInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO,
killEvent.getDiagnostic());
entityInfo.put(ContainerMetricsConstants.EXIT_STATUS_INFO,
killEvent.getContainerExitStatus());
entity.setInfo(entityInfo);
Container container = context.getContainers().get(containerId);
if (container != null) {
TimelineEvent tEvent = new TimelineEvent();
tEvent.setId(ContainerMetricsConstants.KILLED_EVENT_TYPE);
tEvent.setTimestamp(event.getTimestamp());
entity.addEvent(tEvent);
dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity,
containerId.getApplicationAttemptId().getApplicationId()));
}
}
} | @Test
public void testPublishContainerKilledEvent() {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
ContainerEvent containerEvent =
new ContainerKillEvent(cId, 1, "test kill");
publisher.createTimelineClient(appId);
publisher.publishContainerEvent(containerEvent);
publisher.stopTimelineClient(appId);
dispatcher.await();
ContainerEntity cEntity = new ContainerEntity();
cEntity.setId(cId.toString());
TimelineEntity[] lastPublishedEntities =
timelineClient.getLastPublishedEntities();
Assert.assertNotNull(lastPublishedEntities);
Assert.assertEquals(1, lastPublishedEntities.length);
TimelineEntity entity = lastPublishedEntities[0];
Assert.assertEquals(cEntity, entity);
NavigableSet<TimelineEvent> events = entity.getEvents();
Assert.assertEquals(1, events.size());
Assert.assertEquals(ContainerMetricsConstants.KILLED_EVENT_TYPE,
events.iterator().next().getId());
Map<String, Object> info = entity.getInfo();
Assert.assertTrue(
info.containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO));
Assert.assertEquals("test kill",
info.get(ContainerMetricsConstants.DIAGNOSTICS_INFO));
Assert.assertTrue(
info.containsKey(ContainerMetricsConstants.EXIT_STATUS_INFO));
Assert.assertEquals(1,
info.get(ContainerMetricsConstants.EXIT_STATUS_INFO));
} |
public MutableRecordBatch nextBatch() {
int remaining = buffer.remaining();
Integer batchSize = nextBatchSize();
if (batchSize == null || remaining < batchSize)
return null;
byte magic = buffer.get(buffer.position() + MAGIC_OFFSET);
ByteBuffer batchSlice = buffer.slice();
batchSlice.limit(batchSize);
buffer.position(buffer.position() + batchSize);
if (magic > RecordBatch.MAGIC_VALUE_V1)
return new DefaultRecordBatch(batchSlice);
else
return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice);
} | @Test
public void iteratorRaisesOnTooLargeRecords() {
ByteBuffer buffer = ByteBuffer.allocate(1024);
MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 0L);
builder.append(15L, "a".getBytes(), "1".getBytes());
builder.close();
builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 2L);
builder.append(30L, "c".getBytes(), "3".getBytes());
builder.append(40L, "d".getBytes(), "4".getBytes());
builder.close();
buffer.flip();
ByteBufferLogInputStream logInputStream = new ByteBufferLogInputStream(buffer, 60);
assertNotNull(logInputStream.nextBatch());
assertThrows(CorruptRecordException.class, logInputStream::nextBatch);
} |
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
ruleUuids.add("non-existing-uuid");
}
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
} | @Test
public void fail_if_date_is_not_formatted_correctly() {
assertThatThrownBy(() -> underTest.create(new SearchRequest()
.setCreatedAfter("unknown-date")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("'unknown-date' cannot be parsed as either a date or date+time");
} |
@Override
public TreeEntry<K, V> get(Object key) {
return nodes.get(key);
} | @Test
public void putTest() {
final ForestMap<String, String> map = new LinkedForestMap<>(false);
TreeEntry<String, String> treeEntry = new LinkedForestMap.TreeEntryNode<>(null, "a", "aaa");
assertNull(map.put("a", treeEntry));
assertNotEquals(map.get("a"), treeEntry);
assertEquals(map.get("a").getKey(), treeEntry.getKey());
assertEquals(map.get("a").getValue(), treeEntry.getValue());
treeEntry = new LinkedForestMap.TreeEntryNode<>(null, "a", "aaaa");
assertNotNull(map.put("a", treeEntry));
assertNotEquals(map.get("a"), treeEntry);
assertEquals(map.get("a").getKey(), treeEntry.getKey());
assertEquals(map.get("a").getValue(), treeEntry.getValue());
} |
public static String resolveMainClass(
@Nullable String configuredMainClass, ProjectProperties projectProperties)
throws MainClassInferenceException, IOException {
if (configuredMainClass != null) {
if (isValidJavaClass(configuredMainClass)) {
return configuredMainClass;
}
throw new MainClassInferenceException(
HelpfulSuggestions.forMainClassNotFound(
"'mainClass' configured in "
+ projectProperties.getPluginName()
+ " is not a valid Java class: "
+ configuredMainClass,
projectProperties.getPluginName()));
}
projectProperties.log(
LogEvent.info(
"Searching for main class... Add a 'mainClass' configuration to '"
+ projectProperties.getPluginName()
+ "' to improve build speed."));
String mainClassFromJarPlugin = projectProperties.getMainClassFromJarPlugin();
if (mainClassFromJarPlugin != null && isValidJavaClass(mainClassFromJarPlugin)) {
return mainClassFromJarPlugin;
}
if (mainClassFromJarPlugin != null) {
projectProperties.log(
LogEvent.warn(
"'mainClass' configured in "
+ projectProperties.getJarPluginName()
+ " is not a valid Java class: "
+ mainClassFromJarPlugin));
}
projectProperties.log(
LogEvent.info(
"Could not find a valid main class from "
+ projectProperties.getJarPluginName()
+ "; looking into all class files to infer main class."));
MainClassFinder.Result mainClassFinderResult =
MainClassFinder.find(projectProperties.getClassFiles(), projectProperties::log);
switch (mainClassFinderResult.getType()) {
case MAIN_CLASS_FOUND:
return mainClassFinderResult.getFoundMainClass();
case MAIN_CLASS_NOT_FOUND:
throw new MainClassInferenceException(
HelpfulSuggestions.forMainClassNotFound(
"Main class was not found", projectProperties.getPluginName()));
case MULTIPLE_MAIN_CLASSES:
throw new MainClassInferenceException(
HelpfulSuggestions.forMainClassNotFound(
"Multiple valid main classes were found: "
+ String.join(", ", mainClassFinderResult.getFoundMainClasses()),
projectProperties.getPluginName()));
default:
throw new IllegalStateException("Cannot reach here");
}
} | @Test
public void testResolveMainClass_noneInferredWithInvalidMainClassFromJarPlugin()
throws IOException {
Mockito.when(mockProjectProperties.getMainClassFromJarPlugin()).thenReturn("${start-class}");
Mockito.when(mockProjectProperties.getClassFiles())
.thenReturn(ImmutableList.of(Paths.get("ignored")));
try {
MainClassResolver.resolveMainClass(null, mockProjectProperties);
Assert.fail();
} catch (MainClassInferenceException ex) {
MatcherAssert.assertThat(
ex.getMessage(), CoreMatchers.containsString("Main class was not found"));
String info1 =
"Searching for main class... Add a 'mainClass' configuration to 'jib-plugin' to "
+ "improve build speed.";
String info2 =
"Could not find a valid main class from jar-plugin; looking into all class files to "
+ "infer main class.";
String warn =
"'mainClass' configured in jar-plugin is not a valid Java class: ${start-class}";
Mockito.verify(mockProjectProperties).log(LogEvent.info(info1));
Mockito.verify(mockProjectProperties).log(LogEvent.info(info2));
Mockito.verify(mockProjectProperties).log(LogEvent.warn(warn));
}
} |
public static String serializeRecordToJsonExpandingValue(ObjectMapper mapper, Record<GenericObject> record,
boolean flatten)
throws JsonProcessingException {
JsonRecord jsonRecord = new JsonRecord();
GenericObject value = record.getValue();
if (value != null) {
jsonRecord.setPayload(toJsonSerializable(record.getSchema(), value.getNativeObject()));
}
record.getKey().ifPresent(jsonRecord::setKey);
record.getTopicName().ifPresent(jsonRecord::setTopicName);
record.getEventTime().ifPresent(jsonRecord::setEventTime);
record.getProperties().forEach(jsonRecord::addProperty);
if (flatten) {
JsonNode jsonNode = mapper.convertValue(jsonRecord, JsonNode.class);
return JsonFlattener.flatten(new JacksonJsonValue(jsonNode));
} else {
return mapper.writeValueAsString(jsonRecord);
}
} | @Test
public void testPrimitiveSerializeRecordToJsonExpandingValue() throws Exception {
GenericObject genericObject = new GenericObject() {
@Override
public SchemaType getSchemaType() {
return SchemaType.STRING;
}
@Override
public Object getNativeObject() {
return "message-value";
}
};
Map<String, String> properties = new HashMap<>();
properties.put("prop-key", "prop-value");
Record<GenericObject> genericObjectRecord = new Record<GenericObject>() {
@Override
public Optional<String> getTopicName() {
return Optional.of("data-ks1.table1");
}
@Override
public org.apache.pulsar.client.api.Schema getSchema() {
return Schema.STRING;
}
@Override
public Optional<String> getKey() {
return Optional.of("message-key");
}
@Override
public GenericObject getValue() {
return genericObject;
}
@Override
public Map<String, String> getProperties() {
return properties;
}
@Override
public Optional<Long> getEventTime() {
return Optional.of(1648502845803L);
}
};
ObjectMapper objectMapper = new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL);
String json = Utils.serializeRecordToJsonExpandingValue(objectMapper, genericObjectRecord, false);
assertEquals(json, "{\"topicName\":\"data-ks1.table1\",\"key\":\"message-key\",\"payload\":\"message-value\","
+ "\"properties\":{\"prop-key\":\"prop-value\"},\"eventTime\":1648502845803}");
} |
public KafkaClientSupplier getKafkaClientSupplier() {
return getConfiguredInstance(StreamsConfig.DEFAULT_CLIENT_SUPPLIER_CONFIG,
KafkaClientSupplier.class);
} | @Test
public void shouldReturnDefaultClientSupplier() {
final KafkaClientSupplier supplier = streamsConfig.getKafkaClientSupplier();
assertInstanceOf(DefaultKafkaClientSupplier.class, supplier);
} |
public Request setExtras(Map<String, Object> extras) {
this.extras.putAll(extras);
return this;
} | @Test
public void testSetExtras() {
Request request = new Request();
Map<String, Object> extras = Collections.singletonMap("a", "1");
request.setExtras(extras);
request.putExtra("b", "2");
assertThat(request.<String>getExtra("a")).isEqualTo("1");
assertThat(request.<String>getExtra("b")).isEqualTo("2");
} |
@Override
public Optional<P> authenticate(C credentials) throws AuthenticationException {
try (Timer.Context context = gets.time()) {
return cache.get(credentials);
} catch (CompletionException e) {
final Throwable cause = e.getCause();
if (cause instanceof InvalidCredentialsException) {
return Optional.empty();
}
if (cause instanceof AuthenticationException) {
throw (AuthenticationException) cause;
}
if (cause == null) {
throw new AuthenticationException(e);
}
throw new AuthenticationException(cause);
}
} | @Test
void shouldPropagateRuntimeException() throws AuthenticationException {
final RuntimeException e = new NullPointerException();
when(underlying.authenticate(anyString())).thenThrow(e);
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> cached.authenticate("credentials"))
.isEqualTo(e);
} |
@Override
public Path move(final Path source, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
final Path copy = proxy.copy(source, renamed, status, connectionCallback, new DisabledStreamListener());
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
return copy;
} | @Test
public void testMoveWithDelimiter() throws Exception {
final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path placeholder = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory));
final Path test = new Path(placeholder, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
new GoogleStorageTouchFeature(session).touch(test, new TransferStatus());
final Path renamed = new Path(placeholder, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
new GoogleStorageMoveFeature(session).move(test, renamed, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new GoogleStorageFindFeature(session).find(test));
assertTrue(new GoogleStorageFindFeature(session).find(renamed));
new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(renamed), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static MetricName name(Class<?> klass, String... names) {
return name(klass.getName(), names);
} | @Test
public void concatenatesStringsToFormADottedName() throws Exception {
assertThat(name("one", "two", "three"))
.isEqualTo(MetricName.build("one.two.three"));
} |
void submitConsumeRequest(ConsumeRequest consumeRequest) {
try {
consumeRequestCache.put(consumeRequest);
} catch (InterruptedException ignore) {
}
} | @Test
public void testSubmitConsumeRequest() throws Exception {
byte[] body = new byte[] {'1', '2', '3'};
MessageExt consumedMsg = new MessageExt();
consumedMsg.setMsgId("NewMsgId");
consumedMsg.setBody(body);
consumedMsg.putUserProperty(NonStandardKeys.MESSAGE_DESTINATION, "TOPIC");
consumedMsg.setTopic("HELLO_QUEUE");
when(consumeRequest.getMessageExt()).thenReturn(consumedMsg);
localMessageCache.submitConsumeRequest(consumeRequest);
assertThat(localMessageCache.poll()).isEqualTo(consumedMsg);
} |
@Override
public AppResponse process(Flow flow, AppRequest body) {
digidClient.remoteLog("1218", getAppDetails());
if (!appSession.getWithBsn()) {
digidClient.remoteLog("1345", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
return new NokResponse("no_bsn_on_account");
}
appSession.setActivationMethod(ActivationMethod.RDA);
// Options: kiosk, upgrade_rda_widchecker, app?
// For logging in iapi /confirm from rda server
appSession.setRdaAction("app");
return new OkResponse();
} | @Test
void processWithBsn(){
mockedAppSession.setWithBsn(true);
AppResponse appResponse = rdaChosen.process(mockedFlow, mockedAbstractAppRequest);
verify(digidClientMock, times(1)).remoteLog("1218", Map.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId(), lowerUnderscore(APP_CODE), "2B5A2", lowerUnderscore(DEVICE_NAME), mockedAppAuthenticator.getDeviceName()));
assertTrue(appResponse instanceof OkResponse);
assertEquals(ActivationMethod.RDA, mockedAppSession.getActivationMethod());
} |