focal_method | test_case
---|---|
public static <EventT> Write<EventT> write() {
return new AutoValue_JmsIO_Write.Builder<EventT>().build();
} | @Test
public void testWriteMessageWithCFProviderFn() throws Exception {
ArrayList<String> data = new ArrayList<>();
for (int i = 0; i < 100; i++) {
data.add("Message " + i);
}
pipeline
.apply(Create.of(data))
.apply(
JmsIO.<String>write()
.withConnectionFactoryProviderFn(
toSerializableFunction(commonJms::createConnectionFactory))
.withValueMapper(new TextMessageMapper())
.withRetryConfiguration(retryConfiguration)
.withQueue(QUEUE)
.withUsername(USERNAME)
.withPassword(PASSWORD));
pipeline.run();
assertQueueContainsMessages(100);
} |
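// Returns the element at the given index from the container, dispatching on its runtime
// type (Map, List, Object[], Iterator, Collection, Enumeration, or primitive array).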
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
} | @Test
void testGetEnumeration2() {
assertThrows(IndexOutOfBoundsException.class, () -> {
CollectionUtils.get(asEnumeration(Collections.emptyIterator()), -1);
});
} |
public List<ContainerEndpoint> read(ApplicationId applicationId) {
var optionalData = curator.getData(containerEndpointsPath(applicationId));
return optionalData.map(SlimeUtils::jsonToSlime)
.map(ContainerEndpointSerializer::endpointListFromSlime)
.orElseGet(List::of);
} | @Test
public void readingNonExistingEntry() {
final var cache = new ContainerEndpointsCache(Path.createRoot(), new MockCurator());
final var endpoints = cache.read(ApplicationId.defaultId());
assertTrue(endpoints.isEmpty());
} |
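// Resolves attributes by listing the parent folder and filtering for the target file; placeholders
// are queried by their base name, and children of the shared-drives root use the team-drives listing.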
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(new DefaultPathContainerService().isContainer(file)) {
return PathAttributes.EMPTY;
}
final Path query;
if(file.isPlaceholder()) {
query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
}
else {
query = file;
}
final AttributedList<Path> list;
if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
}
else {
list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
}
final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
if(null == found) {
throw new NotfoundException(file.getAbsolute());
}
return found.attributes();
} | @Test
public void testFindDirectory() throws Exception {
final Path file = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
new DriveDirectoryFeature(session, fileid).mkdir(file, new TransferStatus());
final PathAttributes attributes = new DriveAttributesFinderFeature(session, fileid).find(file);
assertNotNull(attributes);
assertEquals(-1L, attributes.getSize());
assertNotEquals(-1L, attributes.getCreationDate());
assertNotEquals(-1L, attributes.getModificationDate());
assertNotNull(attributes.getFileId());
new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
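// Batch deregistration is implemented as re-registering only the instances that should remain,
// computed under the redo-service lock so the registered set stays consistent.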
@Override
public void batchDeregisterService(String serviceName, String groupName, List<Instance> instances)
throws NacosException {
synchronized (redoService.getRegisteredInstances()) {
List<Instance> retainInstance = getRetainInstance(serviceName, groupName, instances);
batchRegisterService(serviceName, groupName, retainInstance);
}
} | @Test
void testBatchDeregisterService() throws NacosException {
try {
List<Instance> instanceList = new ArrayList<>();
instance.setHealthy(true);
instanceList.add(instance);
instanceList.add(new Instance());
client.batchRegisterService(SERVICE_NAME, GROUP_NAME, instanceList);
} catch (Exception ignored) {
}
response = new BatchInstanceResponse();
when(this.rpcClient.request(any())).thenReturn(response);
List<Instance> instanceList = new ArrayList<>();
instance.setHealthy(true);
instanceList.add(instance);
client.batchDeregisterService(SERVICE_NAME, GROUP_NAME, instanceList);
verify(this.rpcClient, times(1)).request(argThat(request -> {
if (request instanceof BatchInstanceRequest) {
BatchInstanceRequest request1 = (BatchInstanceRequest) request;
request1.setRequestId("1");
return request1.getInstances().size() == 1 && request1.getType()
.equals(NamingRemoteConstants.BATCH_REGISTER_INSTANCE);
}
return false;
}));
} |
@Override
public void abortTransaction() throws ProducerFencedException {
verifyNotClosed();
verifyNotFenced();
verifyTransactionsInitialized();
verifyTransactionInFlight();
if (this.abortTransactionException != null) {
throw this.abortTransactionException;
}
flush();
this.uncommittedSends.clear();
this.uncommittedConsumerGroupOffsets.clear();
this.transactionCommitted = false;
this.transactionAborted = true;
this.transactionInFlight = false;
} | @Test
public void shouldThrowOnAbortIfTransactionsNotInitialized() {
buildMockProducer(true);
assertThrows(IllegalStateException.class, () -> producer.abortTransaction());
} |
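// Drains up to maxRecords from the completed fetch. A deserialization failure is cached so the
// same record is retried on the next call, letting the caller either retry or seek past it.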
<K, V> List<ConsumerRecord<K, V>> fetchRecords(FetchConfig fetchConfig,
Deserializers<K, V> deserializers,
int maxRecords) {
// Error when fetching the next record before deserialization.
if (corruptLastRecord)
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", cachedRecordException);
if (isConsumed)
return Collections.emptyList();
List<ConsumerRecord<K, V>> records = new ArrayList<>();
try {
for (int i = 0; i < maxRecords; i++) {
// Only move to next record if there was no exception in the last fetch. Otherwise, we should
// use the last record to do deserialization again.
if (cachedRecordException == null) {
corruptLastRecord = true;
lastRecord = nextFetchedRecord(fetchConfig);
corruptLastRecord = false;
}
if (lastRecord == null)
break;
Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
TimestampType timestampType = currentBatch.timestampType();
ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord);
records.add(record);
recordsRead++;
bytesRead += lastRecord.sizeInBytes();
nextFetchOffset = lastRecord.offset() + 1;
// In some cases, the deserialization may have thrown an exception and the retry may succeed,
// we allow user to move forward in this case.
cachedRecordException = null;
}
} catch (SerializationException se) {
cachedRecordException = se;
if (records.isEmpty())
throw se;
} catch (KafkaException e) {
cachedRecordException = e;
if (records.isEmpty())
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", e);
}
return records;
} | @Test
public void testSimple() {
long fetchOffset = 5;
int startingOffset = 10;
int numRecords = 11; // Records for 10-20
FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
.setRecords(newRecords(startingOffset, numRecords, fetchOffset));
Deserializers<String, String> deserializers = newStringDeserializers();
FetchConfig fetchConfig = newFetchConfig(IsolationLevel.READ_UNCOMMITTED, true);
CompletedFetch completedFetch = newCompletedFetch(fetchOffset, partitionData);
List<ConsumerRecord<String, String>> records = completedFetch.fetchRecords(fetchConfig, deserializers, 10);
assertEquals(10, records.size());
ConsumerRecord<String, String> record = records.get(0);
assertEquals(10, record.offset());
records = completedFetch.fetchRecords(fetchConfig, deserializers, 10);
assertEquals(1, records.size());
record = records.get(0);
assertEquals(20, record.offset());
records = completedFetch.fetchRecords(fetchConfig, deserializers, 10);
assertEquals(0, records.size());
} |
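// Removes every sensor registered under the given thread's prefix, popping the tracked
// deque while holding the threadLevelSensors lock.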
public final void removeAllThreadLevelSensors(final String threadId) {
final String key = threadSensorPrefix(threadId);
synchronized (threadLevelSensors) {
final Deque<String> sensors = threadLevelSensors.remove(key);
while (sensors != null && !sensors.isEmpty()) {
metrics.removeSensor(sensors.pop());
}
}
} | @Test
public void shouldRemoveThreadLevelSensors() {
final Metrics metrics = mock(Metrics.class);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);
addSensorsOnAllLevels(metrics, streamsMetrics);
setupRemoveSensorsTest(metrics, THREAD_ID1);
streamsMetrics.removeAllThreadLevelSensors(THREAD_ID1);
} |
@Override public void callExtensionPoint( LogChannelInterface log, Object object ) throws KettleException {
AbstractMeta meta;
if ( object instanceof Trans ) {
meta = ( (Trans) object ).getTransMeta();
} else if ( object instanceof JobExecutionExtension ) {
meta = ( (JobExecutionExtension) object ).job.getJobMeta();
} else {
meta = (AbstractMeta) object;
}
if ( meta.getMetastoreLocatorOsgi() == null ) {
meta.setMetastoreLocatorOsgi( metastoreLocatorOsgi );
}
} | @Test
public void testCallExtensionPointWithTransMeta() throws Exception {
MetastoreLocatorOsgi metastoreLocator = new MetastoreLocatorImpl();
LogChannelInterface logChannelInterface = mock( LogChannelInterface.class );
TransMeta mockTransMeta = mock( TransMeta.class );
Collection<MetastoreLocator> metastoreLocators = new ArrayList<>();
metastoreLocators.add( (MetastoreLocator) metastoreLocator );
try ( MockedStatic<PluginServiceLoader> pluginServiceLoaderMockedStatic = Mockito.mockStatic( PluginServiceLoader.class ) ) {
pluginServiceLoaderMockedStatic.when( () -> PluginServiceLoader.loadServices( MetastoreLocator.class ) )
.thenReturn( metastoreLocators );
MetastoreLocatorExtensionPoint metastoreLocatorExtensionPoint =
new MetastoreLocatorExtensionPoint();
metastoreLocatorExtensionPoint.callExtensionPoint( logChannelInterface, mockTransMeta );
verify( mockTransMeta ).setMetastoreLocatorOsgi( eq( metastoreLocator ) );
}
} |
public static DataCacheInfo analyzeDataCacheInfo(Map<String, String> properties) throws AnalysisException {
boolean enableDataCache = analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_DATACACHE_ENABLE, true);
boolean enableAsyncWriteBack =
analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_ENABLE_ASYNC_WRITE_BACK, false);
if (enableAsyncWriteBack) {
throw new AnalysisException("enable_async_write_back is disabled since version 3.1.4");
}
return new DataCacheInfo(enableDataCache, enableAsyncWriteBack);
} | @Test
public void testAnalyzeDataCacheInfo() {
Map<String, String> properties = new HashMap<>();
properties.put(PropertyAnalyzer.PROPERTIES_DATACACHE_ENABLE, "true");
properties.put(PropertyAnalyzer.PROPERTIES_ENABLE_ASYNC_WRITE_BACK, "true");
try {
PropertyAnalyzer.analyzeDataCacheInfo(properties);
Assert.fail();
} catch (AnalysisException e) {
Assert.assertEquals("enable_async_write_back is disabled since version 3.1.4", e.getMessage());
}
} |
@Override
public Optional<String> getContentHash() {
return Optional.ofNullable(mContentHash);
} | @Test
public void close() throws Exception {
mStream.close();
Mockito.verify(mMockS3Client, never())
.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
Mockito.verify(mMockS3Client, never())
.completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
assertTrue(mStream.getContentHash().isPresent());
assertEquals("putTag", mStream.getContentHash().get());
} |
@Override
public int getColumnLength(final Object value) {
throw new UnsupportedSQLOperationException("PostgreSQLStringArrayBinaryProtocolValue.getColumnLength()");
} | @Test
void assertGetColumnLength() {
assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().getColumnLength("val"));
} |
static void checkNearCacheNativeMemoryConfig(InMemoryFormat inMemoryFormat, NativeMemoryConfig nativeMemoryConfig,
boolean isEnterprise) {
if (!isEnterprise) {
return;
}
if (inMemoryFormat != NATIVE) {
return;
}
if (nativeMemoryConfig != null && nativeMemoryConfig.isEnabled()) {
return;
}
throw new InvalidConfigurationException("Enable native memory config to use NATIVE in-memory-format for Near Cache");
} | @Test(expected = InvalidConfigurationException.class)
public void checkNearCacheNativeMemoryConfig_shouldThrowExceptionWithoutNativeMemoryConfig_NATIVE_onEE() {
checkNearCacheNativeMemoryConfig(NATIVE, null, true);
} |
@Override
public SendResult send(
Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
msg.setTopic(withNamespace(msg.getTopic()));
if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
return sendByAccumulator(msg, null, null);
} else {
return sendDirect(msg, null, null);
}
} | @Test
public void testSendMessage_NoRoute() throws RemotingException, InterruptedException, MQBrokerException {
when(mQClientAPIImpl.getNameServerAddressList()).thenReturn(Collections.singletonList("127.0.0.1:9876"));
try {
producer.send(message);
failBecauseExceptionWasNotThrown(MQClientException.class);
} catch (MQClientException e) {
assertThat(e).hasMessageContaining("No route info of this topic");
}
} |
@Override
protected void doExecute() {
if (vpls == null) {
vpls = get(Vpls.class);
}
if (interfaceService == null) {
interfaceService = get(InterfaceService.class);
}
VplsCommandEnum enumCommand = VplsCommandEnum.enumFromString(command);
if (enumCommand != null) {
switch (enumCommand) {
case ADD_IFACE:
addIface(vplsName, optArg);
break;
case CREATE:
create(vplsName);
break;
case DELETE:
delete(vplsName);
break;
case LIST:
list();
break;
case REMOVE_IFACE:
removeIface(vplsName, optArg);
break;
case SET_ENCAP:
setEncap(vplsName, optArg);
break;
case SHOW:
show(vplsName);
break;
case CLEAN:
cleanVpls();
break;
default:
print(VPLS_COMMAND_NOT_FOUND, command);
}
} else {
print(VPLS_COMMAND_NOT_FOUND, command);
}
} | @Test
public void testClean() {
((TestVpls) vplsCommand.vpls).initSampleData();
vplsCommand.command = VplsCommandEnum.CLEAN.toString();
vplsCommand.doExecute();
Collection<VplsData> vplss = vplsCommand.vpls.getAllVpls();
assertEquals(0, vplss.size());
} |
@NonNull
public String keyFor(@NonNull String id) {
return id;
} | @Test
public void testKeyForCaseSensitiveEmailAddress() {
IdStrategy idStrategy = new IdStrategy.CaseSensitiveEmailAddress();
assertThat(idStrategy.keyFor("john.smith@acme.org"), is("john.smith@acme.org"));
assertThat(idStrategy.keyFor("John.Smith@acme.org"), is("John.Smith@acme.org"));
assertThat(idStrategy.keyFor("John.Smith@ACME.org"), is("John.Smith@acme.org"));
assertThat(idStrategy.keyFor("John.Smith@acme.ORG"), is("John.Smith@acme.org"));
assertThat(idStrategy.keyFor("john.smith"), is("john.smith"));
assertThat(idStrategy.keyFor("John.Smith"), is("John.Smith"));
assertThat(idStrategy.keyFor("john@smith@acme.org"), is("john@smith@acme.org"));
assertThat(idStrategy.keyFor("John@Smith@acme.org"), is("John@Smith@acme.org"));
} |
public String message() {
return message;
} | @Test
public void shouldNotFailWhenNoMessageSet() {
HttpLocalizedOperationResult result = new HttpLocalizedOperationResult();
assertThat(result.message(), is(nullValue()));
} |
public static boolean isNegative(Slice decimal)
{
return (getRawInt(decimal, SIGN_INT_INDEX) & SIGN_INT_MASK) != 0;
} | @Test
public void testIsNegative()
{
assertEquals(isNegative(MIN_DECIMAL), true);
assertEquals(isNegative(MAX_DECIMAL), false);
assertEquals(isNegative(unscaledDecimal(0)), false);
} |
@Override
public boolean test(Pair<Point, Point> pair) {
return testVertical(pair) && testHorizontal(pair);
} | @Test
public void testVertSeparation() {
Point p1 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1000.0)).build();
Point p2 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1000.0)).build();
Point p3 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1500.0)).build();
Point p4 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).altitude(Distance.ofFeet(1501.0)).build();
Point p5 = (new PointBuilder()).time(EPOCH).latLong(0.0, 0.0).build();
double MAX_HORIZ_SEPARATION_IN_FT = 1000;
double MAX_VERT_SEPARATION = 500;
CylindricalFilter filter = new CylindricalFilter(MAX_HORIZ_SEPARATION_IN_FT, MAX_VERT_SEPARATION);
assertTrue(filter.test(Pair.of(p1, p1)), "A point is in the same cylinder with itself");
assertTrue(filter.test(Pair.of(p1, p2)), "A point is in the same cylinder with itself");
assertTrue(filter.test(Pair.of(p1, p3)), "These points are 500ft apart");
assertFalse(filter.test(Pair.of(p1, p4)), "These points are 501ft apart");
assertFalse(filter.test(Pair.of(p1, p5)), "Missing altitude data should be rejected");
} |
public static void toJson(Metadata metadata, Writer writer) throws IOException {
if (metadata == null) {
writer.write("null");
return;
}
long max = TikaConfig.getMaxJsonStringFieldLength();
try (JsonGenerator jsonGenerator = new JsonFactory()
.setStreamReadConstraints(StreamReadConstraints
.builder()
.maxStringLength(TikaConfig.getMaxJsonStringFieldLength())
.build())
.createGenerator(CloseShieldWriter.wrap(writer))) {
if (PRETTY_PRINT) {
jsonGenerator.useDefaultPrettyPrinter();
}
writeMetadataObject(metadata, jsonGenerator, PRETTY_PRINT);
}
} | @Test
public void testNull() {
StringWriter writer = new StringWriter();
boolean ex = false;
try {
JsonMetadata.toJson(null, writer);
} catch (IOException e) {
ex = true;
}
assertFalse(ex);
assertEquals("null", writer.toString());
} |
@Override
public int hashCode() {
return operands.hashCode();
} | @Test
void requireThatHashCodeIsImplemented() {
assertEquals(new Disjunction().hashCode(), new Disjunction().hashCode());
} |
public JobStatsExtended enrich(JobStats jobStats) {
JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
if (lock.tryLock()) {
setFirstRelevantJobStats(latestJobStats);
setJobStatsExtended(latestJobStats);
setPreviousJobStats(latestJobStats);
lock.unlock();
}
return jobStatsExtended;
} | @Test
void estimatedTimeProcessingIsCalculated5() {
JobStats firstJobStats = getJobStats(now().minusSeconds(3610), 10L, 0L, 0L, 100L);
JobStats secondJobStats = getJobStats(now().minusSeconds(3600), 5L, 4L, 0L, 101L);
JobStats thirdJobStats = getJobStats(now(), 4L, 4L, 0L, 102L);
JobStatsExtended jobStatsExtended = enrich(firstJobStats, secondJobStats, thirdJobStats);
assertThat(jobStatsExtended.getEstimation().isProcessingDone()).isFalse();
assertThat(jobStatsExtended.getEstimation().isEstimatedProcessingFinishedInstantAvailable()).isTrue();
assertThat(Duration.between(now(), jobStatsExtended.getEstimation().getEstimatedProcessingFinishedAt()).toSeconds()).isCloseTo(80L, Offset.offset(2L));
} |
public void moveAllChildrenTo(final FilePath target) throws IOException, InterruptedException {
if (this.channel != target.channel) {
throw new IOException("pullUpTo target must be on the same host");
}
act(new MoveAllChildrenTo(target));
} | @Issue("JENKINS-16846")
@Test public void moveAllChildrenTo() throws IOException, InterruptedException {
File tmp = temp.getRoot();
final String dirname = "sub";
final File top = new File(tmp, "test");
final File sub = new File(top, dirname);
final File subsub = new File(sub, dirname);
subsub.mkdirs();
final File subFile1 = new File(sub.getAbsolutePath() + "/file1.txt");
subFile1.createNewFile();
final File subFile2 = new File(subsub.getAbsolutePath() + "/file2.txt");
subFile2.createNewFile();
final FilePath src = new FilePath(sub);
final FilePath dst = new FilePath(top);
// test conflict subdir
src.moveAllChildrenTo(dst);
} |
public String toMysqlDataTypeString() {
return "unknown";
} | @Test
public void testMysqlDataType() {
Object[][] testCases = new Object[][] {
{ScalarType.createType(PrimitiveType.BOOLEAN), "tinyint"},
{ScalarType.createType(PrimitiveType.LARGEINT), "bigint unsigned"},
{ScalarType.createDecimalV3NarrowestType(18, 4), "decimal"},
{new ArrayType(Type.INT), "array"},
{new MapType(Type.INT, Type.INT), "map"},
{new StructType(Lists.newArrayList(Type.INT)), "struct"},
};
for (Object[] tc : testCases) {
Type type = (Type) tc[0];
String name = (String) tc[1];
Assert.assertEquals(name, type.toMysqlDataTypeString());
}
} |
public static Sessions withGapDuration(Duration gapDuration) {
return new Sessions(gapDuration);
} | @Test
public void testInvalidOutputAtEarliest() throws Exception {
try {
WindowFnTestUtils.validateGetOutputTimestamps(
Sessions.withGapDuration(Duration.millis(10)),
TimestampCombiner.EARLIEST,
ImmutableList.of(
(List<Long>) ImmutableList.of(1L, 3L),
(List<Long>) ImmutableList.of(0L, 5L, 10L, 15L, 20L)));
} catch (AssertionError exc) {
assertThat(
exc.getMessage(),
// These are the non-volatile pieces of the error message that a timestamp
// was not greater than what it should be.
allOf(containsString("a value greater than"), containsString("was less than")));
}
} |
private long advisoryPartitionSize(long defaultValue) {
return confParser
.longConf()
.option(SparkWriteOptions.ADVISORY_PARTITION_SIZE)
.sessionConf(SparkSQLProperties.ADVISORY_PARTITION_SIZE)
.tableProperty(TableProperties.SPARK_WRITE_ADVISORY_PARTITION_SIZE_BYTES)
.defaultValue(defaultValue)
.parse();
} | @TestTemplate
public void testAdvisoryPartitionSize() {
Table table = validationCatalog.loadTable(tableIdent);
SparkWriteConf writeConf = new SparkWriteConf(spark, table, ImmutableMap.of());
long value1 = writeConf.writeRequirements().advisoryPartitionSize();
assertThat(value1).isGreaterThan(64L * 1024 * 1024).isLessThan(2L * 1024 * 1024 * 1024);
spark.conf().set(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES().key(), "2GB");
long value2 = writeConf.writeRequirements().advisoryPartitionSize();
assertThat(value2).isEqualTo(2L * 1024 * 1024 * 1024);
spark.conf().set(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES().key(), "10MB");
long value3 = writeConf.writeRequirements().advisoryPartitionSize();
assertThat(value3).isGreaterThan(10L * 1024 * 1024);
} |
public static boolean checkLiteralOverflowInDecimalStyle(BigDecimal value, ScalarType scalarType) {
int realPrecision = getRealPrecision(value);
int realScale = getRealScale(value);
BigInteger underlyingInt = value.setScale(scalarType.getScalarScale(), RoundingMode.HALF_UP).unscaledValue();
BigInteger maxDecimal = BigInteger.TEN.pow(scalarType.decimalPrecision());
BigInteger minDecimal = BigInteger.TEN.pow(scalarType.decimalPrecision()).negate();
if (underlyingInt.compareTo(minDecimal) <= 0 || underlyingInt.compareTo(maxDecimal) >= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Typed decimal literal({}) is overflow, value='{}' (precision={}, scale={})",
scalarType, value.toPlainString(), realPrecision, realScale);
}
return false;
}
return true;
} | @Test
public void testCheckLiteralOverflowInDecimalStyleFail() throws AnalysisException {
BigDecimal decimal32Values[] = {
new BigDecimal("100000.0000"),
new BigDecimal("99999.99995"),
new BigDecimal("-100000.0000"),
new BigDecimal("-99999.99995"),
};
ScalarType decimal32p9s4 = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL32, 9, 4);
for (BigDecimal dec32 : decimal32Values) {
Assert.assertFalse(DecimalLiteral.checkLiteralOverflowInDecimalStyle(dec32, decimal32p9s4));
}
BigDecimal decimal64Values[] = {
new BigDecimal("1000000000000.000000"),
new BigDecimal("999999999999.9999995"),
new BigDecimal("-1000000000000.000000"),
new BigDecimal("-999999999999.9999995"),
};
ScalarType decimal64p18s6 = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL64, 18, 6);
for (BigDecimal dec64 : decimal64Values) {
Assert.assertFalse(DecimalLiteral.checkLiteralOverflowInDecimalStyle(dec64, decimal64p18s6));
}
BigDecimal decimal128Values[] = {
new BigDecimal("1000000000000000000000000000.00000000000"),
new BigDecimal("999999999999999999999999999.999999999995"),
new BigDecimal("-1000000000000000000000000000.00000000000"),
new BigDecimal("-999999999999999999999999999.999999999995"),
};
ScalarType decimal128p38s11 = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 38, 11);
for (BigDecimal dec128 : decimal128Values) {
Assert.assertFalse(DecimalLiteral.checkLiteralOverflowInDecimalStyle(dec128, decimal128p38s11));
}
} |
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
map.put(
MetricCollectors.RESOURCE_LABEL_PREFIX
+ StreamsConfig.APPLICATION_ID_CONFIG,
applicationId
);
// Streams client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
} | @Test
public void shouldSetStreamsConfigTopicUnprefixedProperties() {
final KsqlConfig ksqlConfig = new KsqlConfig(
Collections.singletonMap(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, 2));
final Object result = ksqlConfig.getKsqlStreamConfigProps().get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG);
assertThat(result, equalTo(2));
} |
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
return new KeyBoundCursor<Tuple>(key, 0, options) {
private RedisClient client;
@Override
protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
}
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
ListScanResult<Tuple> res = syncFuture(f);
client = res.getRedisClient();
return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
}
}.open();
} | @Test
public void testZScan() {
connection.zAdd("key".getBytes(), 1, "value1".getBytes());
connection.zAdd("key".getBytes(), 2, "value2".getBytes());
Cursor<RedisZSetCommands.Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
assertThat(t.hasNext()).isTrue();
assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
assertThat(t.hasNext()).isTrue();
assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
} |
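// Interprets the bytecode with a simple stack machine: LITERAL pushes the following
// operand, and every other instruction pops its arguments from the stack.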
public void execute(int[] bytecode) {
for (var i = 0; i < bytecode.length; i++) {
Instruction instruction = Instruction.getInstruction(bytecode[i]);
switch (instruction) {
case LITERAL:
// Read the next byte from the bytecode.
int value = bytecode[++i];
// Push the value onto the stack
stack.push(value);
break;
case SET_AGILITY:
var amount = stack.pop();
var wizard = stack.pop();
setAgility(wizard, amount);
break;
case SET_WISDOM:
amount = stack.pop();
wizard = stack.pop();
setWisdom(wizard, amount);
break;
case SET_HEALTH:
amount = stack.pop();
wizard = stack.pop();
setHealth(wizard, amount);
break;
case GET_HEALTH:
wizard = stack.pop();
stack.push(getHealth(wizard));
break;
case GET_AGILITY:
wizard = stack.pop();
stack.push(getAgility(wizard));
break;
case GET_WISDOM:
wizard = stack.pop();
stack.push(getWisdom(wizard));
break;
case ADD:
var a = stack.pop();
var b = stack.pop();
stack.push(a + b);
break;
case DIVIDE:
a = stack.pop();
b = stack.pop();
stack.push(b / a);
break;
case PLAY_SOUND:
wizard = stack.pop();
getWizards()[wizard].playSound();
break;
case SPAWN_PARTICLES:
wizard = stack.pop();
getWizards()[wizard].spawnParticles();
break;
default:
throw new IllegalArgumentException("Invalid instruction value");
}
LOGGER.info("Executed " + instruction.name() + ", Stack contains " + getStack());
}
} | @Test
void testInvalidInstruction() {
var bytecode = new int[1];
bytecode[0] = 999;
var vm = new VirtualMachine();
assertThrows(IllegalArgumentException.class, () -> vm.execute(bytecode));
} |
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
} | @Test
public void shouldFormatArray() {
final SqlArray array = SqlTypes.array(SqlTypes.BOOLEAN);
assertThat(ExpressionFormatter.formatExpression(new Type(array)), equalTo("ARRAY<BOOLEAN>"));
} |
@Override
public Collection<String> getJdbcUrlPrefixes() {
return Collections.singletonList(String.format("jdbc:%s:", getType().toLowerCase()));
} | @Test
void assertGetJdbcUrlPrefixes() {
assertThat(TypedSPILoader.getService(DatabaseType.class, "MariaDB").getJdbcUrlPrefixes(), is(Collections.singletonList("jdbc:mariadb:")));
} |
@Override
public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
String operator = predicate.getOperator().toUpperCase();
if (!isSupportedOperator(operator)) {
return Optional.empty();
}
ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft();
ConditionValue conditionValue = new ConditionValue(valueExpression, params);
if (conditionValue.isNull()) {
return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
Optional<Comparable<?>> value = conditionValue.getValue();
if (value.isPresent()) {
return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
if (ExpressionConditionUtils.isNowExpression(valueExpression)) {
return generate(timestampServiceRule.getTimestamp(), column, operator, -1);
}
return Optional.empty();
} | @SuppressWarnings("unchecked")
@Test
void assertGenerateConditionValueWithGreaterThanOperator() {
BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), ">", null);
Optional<ShardingConditionValue> shardingConditionValue = generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class));
assertTrue(shardingConditionValue.isPresent());
assertTrue(Range.greaterThan(1).encloses(((RangeShardingConditionValue<Integer>) shardingConditionValue.get()).getValueRange()));
assertTrue(shardingConditionValue.get().getParameterMarkerIndexes().isEmpty());
} |
@Override
public boolean start() throws IOException {
LOG.info("Starting reader using {}", initCheckpoint);
try {
shardReadersPool = createShardReadersPool();
shardReadersPool.start();
} catch (TransientKinesisException e) {
throw new IOException(e);
}
return advance();
} | @Test
public void startReturnsTrueIfSomeDataAvailable() throws IOException {
when(shardReadersPool.nextRecord())
.thenReturn(CustomOptional.of(a))
.thenReturn(CustomOptional.absent());
assertThat(reader.start()).isTrue();
} |
public float[][] getValues()
{
float[][] retval = new float[3][3];
retval[0][0] = single[0];
retval[0][1] = single[1];
retval[0][2] = single[2];
retval[1][0] = single[3];
retval[1][1] = single[4];
retval[1][2] = single[5];
retval[2][0] = single[6];
retval[2][1] = single[7];
retval[2][2] = single[8];
return retval;
} | @Test
void testGetValues()
{
Matrix m = new Matrix(2, 4, 4, 2, 15, 30);
float[][] values = m.getValues();
assertEquals(2, values[0][0], 0);
assertEquals(4, values[0][1], 0);
assertEquals(0, values[0][2], 0);
assertEquals(4, values[1][0], 0);
assertEquals(2, values[1][1], 0);
assertEquals(0, values[1][2], 0);
assertEquals(15, values[2][0], 0);
assertEquals(30, values[2][1], 0);
assertEquals(1, values[2][2], 0);
} |
public static <X> TypeInformation<X> getForObject(X value) {
return new TypeExtractor().privateGetForObject(value);
} | @Test
void testRow() {
Row row = new Row(2);
row.setField(0, "string");
row.setField(1, 15);
TypeInformation<Row> rowInfo = TypeExtractor.getForObject(row);
assertThat(rowInfo.getClass()).isEqualTo(RowTypeInfo.class);
assertThat(rowInfo.getArity()).isEqualTo(2);
assertThat(rowInfo)
.isEqualTo(
new RowTypeInfo(
BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO));
Row nullRow = new Row(2);
TypeInformation<Row> genericRowInfo = TypeExtractor.getForObject(nullRow);
assertThat(new GenericTypeInfo<>(Row.class)).isEqualTo(genericRowInfo);
} |
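// Self-healing check for built-in roles: a missing or invalid role is rebuilt as read-only with
// the expected permissions; the IllegalArgumentException exists only to jump into the fix path.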
@Nullable
public String ensureBuiltinRole(String roleName, String description, Set<String> expectedPermissions) {
Role previousRole = null;
try {
previousRole = roleService.load(roleName);
if (!previousRole.isReadOnly() || !expectedPermissions.equals(previousRole.getPermissions())) {
final String msg = "Invalid role '" + roleName + "', fixing it.";
LOG.debug(msg);
throw new IllegalArgumentException(msg); // jump to fix code
}
} catch (NotFoundException | IllegalArgumentException | NoSuchElementException ignored) {
LOG.info("{} role is missing or invalid, re-adding it as a built-in role.", roleName);
final RoleImpl fixedRole = new RoleImpl();
// copy the mongodb id over, in order to update the role instead of creating a new one
if (previousRole != null) {
fixedRole._id = previousRole.getId();
}
fixedRole.setReadOnly(true);
fixedRole.setName(roleName);
fixedRole.setDescription(description);
fixedRole.setPermissions(expectedPermissions);
try {
final Role savedRole = roleService.save(fixedRole);
return savedRole.getId();
} catch (DuplicateKeyException | ValidationException e) {
LOG.error("Unable to save fixed '" + roleName + "' role, please restart Graylog to fix this.", e);
}
}
if (previousRole == null) {
LOG.error("Unable to access fixed '" + roleName + "' role, please restart Graylog to fix this.");
return null;
}
return previousRole.getId();
} | @Test
public void ensureBuiltinRoleWithoutReadOnly() throws Exception {
final Role existingRole = mock(Role.class);
when(existingRole.getId()).thenReturn("new-id");
when(existingRole.isReadOnly()).thenReturn(false); // The existing role is not read-only, so it must be fixed
when(roleService.load("test-role")).thenReturn(existingRole);
when(roleService.save(any(Role.class))).thenReturn(existingRole);
assertThat(migrationHelpers.ensureBuiltinRole("test-role", "description", ImmutableSet.of("a", "b")))
.isEqualTo("new-id");
final ArgumentCaptor<Role> roleArg = ArgumentCaptor.forClass(Role.class);
verify(roleService, times(1)).save(roleArg.capture());
assertThat(roleArg.getValue()).satisfies(role -> {
assertThat(role.getName()).describedAs("role name").isEqualTo("test-role");
assertThat(role.getDescription()).describedAs("role description").isEqualTo("description");
assertThat(role.isReadOnly()).describedAs("role is read-only").isTrue();
assertThat(role.getPermissions()).describedAs("role permissions").containsOnly("a", "b");
});
} |
public static List<String> getTokens() {
final List<String> tokens = new ArrayList<>(Arrays.asList(SqlBaseLexer.ruleNames));
return tokens.stream().filter(pattern.asPredicate().negate()).collect(Collectors.toList());
} | @Test
public void shouldNeedBackQuotes() {
List<String> tokens = GrammarTokenExporter.getTokens();
assertEquals(expectedTokens, tokens);
} |
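// Returns the smallest per-resource usage percentage (memory, CPU, then other resources),
// throwing if "used" exceeds the totals; resources with a zero total are skipped.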
public double calculateMinPercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
if (LOG.isTraceEnabled()) {
LOG.trace("Calculating min percentage used by. Total Mem: {} Used Mem: {}"
+ " Total Normalized Resources: {} Used Normalized Resources: {}", totalMemoryMb, usedMemoryMb,
toNormalizedMap(), used.toNormalizedMap());
}
double min = 1.0;
if (usedMemoryMb > totalMemoryMb) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalMemoryMb != 0.0) {
min = Math.min(min, usedMemoryMb / totalMemoryMb);
}
double totalCpu = getTotalCpu();
if (used.getTotalCpu() > totalCpu) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalCpu != 0.0) {
min = Math.min(min, used.getTotalCpu() / totalCpu);
}
if (used.otherResources.length > otherResources.length) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
for (int i = 0; i < otherResources.length; i++) {
if (otherResources[i] == 0.0) {
//Skip any resources where the total is 0, the percent used for this resource isn't meaningful.
//We fall back to prioritizing by cpu, memory and any other resources by ignoring this value
continue;
}
if (i >= used.otherResources.length) {
//Resources missing from used are using none of that resource
return 0;
}
if (used.otherResources[i] > otherResources[i]) {
String info = String.format("%s, %f > %f", getResourceNameForResourceIndex(i), used.otherResources[i], otherResources[i]);
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb, info);
}
min = Math.min(min, used.otherResources[i] / otherResources[i]);
}
return min * 100.0;
} | @Test
public void testCalculateMinWithOnlyCpu() {
NormalizedResources resources = new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 2)));
NormalizedResources usedResources = new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 1)));
double min = resources.calculateMinPercentageUsedBy(usedResources, 0, 0);
assertThat(min, is(50.0));
} |
public String getPlugin() {
return plugin;
} | @Test
public void thePluginCanBeChanged() {
doReturn(pluginManager).when(xmppServer).getPluginManager();
final SystemProperty<Long> longProperty = SystemProperty.Builder.ofType(Long.class)
.setKey("a-plugin-property")
.setDefaultValue(42L)
.setPlugin("TestPluginName")
.setDynamic(false)
.build();
assertThat(longProperty.getPlugin(), is("TestPluginName"));
} |
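// Destroys an AutoCloseable data source asynchronously on a throwaway single-thread
// executor; non-closeable data sources are ignored.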
public void asyncDestroy() {
if (!(dataSource instanceof AutoCloseable)) {
return;
}
ExecutorService executor = Executors.newSingleThreadExecutor();
executor.execute(this::graceDestroy);
executor.shutdown();
} | @Test
void assertAsyncDestroyWithAutoCloseableDataSource() throws SQLException {
MockedDataSource dataSource = new MockedDataSource();
try (Connection ignored = dataSource.getConnection()) {
new DataSourcePoolDestroyer(dataSource).asyncDestroy();
}
Awaitility.await().atMost(1L, TimeUnit.SECONDS).pollInterval(10L, TimeUnit.MILLISECONDS).until(dataSource::isClosed);
assertTrue(dataSource.isClosed());
} |
public void executeTask(final Runnable r) {
if (!isStopped()) {
this.scheduledExecutorService.execute(r);
} else {
logger.warn("PullMessageServiceScheduledThread has shutdown");
}
} | @Test
public void testExecuteTask() {
Runnable runnable = mock(Runnable.class);
pullMessageService.executeTask(runnable);
pullMessageService.makeStop();
pullMessageService.executeTask(runnable);
verify(executorService, times(1)).execute(any(Runnable.class));
} |
@Override
public void deleteFileConfig(Long id) {
// Verify the config exists
FileConfigDO config = validateFileConfigExists(id);
if (Boolean.TRUE.equals(config.getMaster())) {
throw exception(FILE_CONFIG_DELETE_FAIL_MASTER);
}
// Delete it
fileConfigMapper.deleteById(id);
// Clear the cache
clearCache(id, null);
} | @Test
public void testDeleteFileConfig_success() {
// Mock data
FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
fileConfigMapper.insert(dbFileConfig);// @Sql: insert an existing record first
// Prepare parameters
Long id = dbFileConfig.getId();
// Invoke
fileConfigService.deleteFileConfig(id);
// Verify the record no longer exists
assertNull(fileConfigMapper.selectById(id));
// Verify the cache
assertNull(fileConfigService.getClientCache().getIfPresent(id));
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
for(Path file : files.keySet()) {
callback.delete(file);
try {
if(file.isFile() || file.isSymbolicLink()) {
if(!session.getClient().deleteFile(file.getAbsolute())) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
}
else if(file.isDirectory()) {
// Change working directory to parent
if(!session.getClient().changeWorkingDirectory(file.getParent().getAbsolute())) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
if(!session.getClient().removeDirectory(file.getAbsolute())) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
}
}
catch(IOException e) {
throw new FTPExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
} | @Test
public void testDeleteNotFound() throws Exception {
final Path test = new Path(new FTPWorkdirService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
try {
new FTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
fail();
}
catch(NotfoundException | AccessDeniedException e) {
//
}
} |
public static Counter getOpenConnectionsCounter() {
return getOrCreateCounter(MetricsConstants.OPEN_CONNECTIONS);
} | @Test
public void testJsonStructure() throws Exception {
File jsonReportFile = File.createTempFile("TestMetrics", ".json");
String jsonFile = jsonReportFile.getAbsolutePath();
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METRICS_REPORTERS, "json");
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METRICS_JSON_FILE_LOCATION, jsonFile);
MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.METRICS_JSON_FILE_INTERVAL,
REPORT_INTERVAL, TimeUnit.SECONDS);
initializeMetrics(conf);
Counter openConnections = Metrics.getOpenConnectionsCounter();
openConnections.inc();
Thread.sleep(REPORT_INTERVAL * 1000 + REPORT_INTERVAL * 1000 / 2);
String json = new String(MetricsTestUtils.getFileData(jsonFile, 200, 10));
MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, "buffers.direct.capacity");
MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, "memory.heap.used");
MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, "threads.count");
MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, "classLoading.loaded");
MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER,
MetricsConstants.OPEN_CONNECTIONS, 1);
} |
@Override
public boolean eval(Object arg) {
QueryableEntry entry = (QueryableEntry) arg;
Data keyData = entry.getKeyData();
return (key == null || key.equals(keyData)) && predicate.apply((Map.Entry) arg);
} | @Test
public void testEval_givenFilterDoesNotContainKey_whenPredicateIsMatching_thenReturnTrue() {
//given
Predicate<Object, Object> predicate = Predicates.alwaysTrue();
QueryEventFilter filter = new QueryEventFilter(null, predicate, true);
//when
Data key2 = serializationService.toData("key");
QueryableEntry entry = mockEntryWithKeyData(key2);
//then
boolean result = filter.eval(entry);
assertTrue(result);
} |
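// Builds a BigQuery job id from BIGQUERY_JOB_TEMPLATE by substituting type, job name (dashes
// stripped) and step uuid; the {RANDOM} token is replaced, or removed when no random part is given.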
static String createJobIdPrefix(
String jobName, String stepUuid, JobType type, @Nullable String random) {
jobName = jobName.replaceAll("-", "");
String result =
BIGQUERY_JOB_TEMPLATE
.replaceFirst("\\{TYPE}", type.toString())
.replaceFirst("\\{JOB_ID}", jobName)
.replaceFirst("\\{STEP}", stepUuid);
if (random != null) {
return result.replaceFirst("\\{RANDOM}", random);
} else {
return result.replaceFirst("_\\{RANDOM}", "");
}
} | @Test
public void testMatchesBigQueryJobTemplate() {
assertThat(
BigQueryResourceNaming.createJobIdPrefix(
"beamapp-job-test", "abcd", JobType.EXPORT, "RANDOME"),
matchesPattern(BQ_JOB_PATTERN_REGEXP));
assertThat(
BigQueryResourceNaming.createJobIdPrefix("beamapp-job-test", "abcd", JobType.COPY),
matchesPattern(BQ_JOB_PATTERN_REGEXP));
} |
@Override
public void showPreviewForKey(Keyboard.Key key, CharSequence label, Point previewPosition) {
mPreviewIcon.setVisibility(View.GONE);
mPreviewText.setVisibility(View.VISIBLE);
mPreviewIcon.setImageDrawable(null);
mPreviewText.setTextColor(mPreviewPopupTheme.getPreviewKeyTextColor());
mPreviewText.setText(label);
if (label.length() > 1 && key.getCodesCount() < 2) {
mPreviewText.setTextSize(
TypedValue.COMPLEX_UNIT_PX, mPreviewPopupTheme.getPreviewLabelTextSize());
} else {
mPreviewText.setTextSize(
TypedValue.COMPLEX_UNIT_PX, mPreviewPopupTheme.getPreviewKeyTextSize());
}
mPreviewText.measure(
View.MeasureSpec.makeMeasureSpec(0, View.MeasureSpec.UNSPECIFIED),
View.MeasureSpec.makeMeasureSpec(0, View.MeasureSpec.UNSPECIFIED));
showPopup(
key, mPreviewText.getMeasuredWidth(), mPreviewText.getMeasuredHeight(), previewPosition);
} | @Test
public void testPreviewLayoutCorrectlyForNoneLabel() {
PreviewPopupTheme theme = new PreviewPopupTheme();
theme.setPreviewKeyBackground(
ContextCompat.getDrawable(getApplicationContext(), blacktheme_preview_background));
theme.setPreviewKeyTextSize(1);
final KeyPreviewPopupWindow underTest =
new KeyPreviewPopupWindow(
getApplicationContext(), new View(getApplicationContext()), theme);
PopupWindow createdPopupWindow =
Shadows.shadowOf((Application) ApplicationProvider.getApplicationContext())
.getLatestPopupWindow();
Assert.assertNull(createdPopupWindow);
Keyboard.Key key = Mockito.mock(Keyboard.Key.class);
Mockito.doReturn((int) 'y').when(key).getPrimaryCode();
Mockito.doReturn(1).when(key).getCodesCount();
key.width = 10;
key.height = 20;
underTest.showPreviewForKey(key, "y", new Point(1, 1));
createdPopupWindow =
Shadows.shadowOf((Application) ApplicationProvider.getApplicationContext())
.getLatestPopupWindow();
Assert.assertNotNull(createdPopupWindow);
} |
public Set<String> assembleAllWatchKeys(String appId, String clusterName, String namespace,
String dataCenter) {
Multimap<String, String> watchedKeysMap =
assembleAllWatchKeys(appId, clusterName, Sets.newHashSet(namespace), dataCenter);
return Sets.newHashSet(watchedKeysMap.get(namespace));
} | @Test
public void testAssembleAllWatchKeysWithOneNamespaceAndDefaultCluster() throws Exception {
Set<String> watchKeys =
watchKeysUtil.assembleAllWatchKeys(someAppId, defaultCluster, someNamespace, null);
Set<String> clusters = Sets.newHashSet(defaultCluster);
assertEquals(clusters.size(), watchKeys.size());
assertWatchKeys(someAppId, clusters, someNamespace, watchKeys);
} |
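// Performs the job on the current thread, mapping failures to a failed job state: deletion
// mid-processing is ignored, a stopped server re-interrupts the thread, and other errors are recorded.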
@Override
public void run() {
try {
backgroundJobServer.getJobSteward().notifyThreadOccupied();
MDCMapper.loadMDCContextFromJob(job);
performJob();
} catch (Exception e) {
if (isJobDeletedWhileProcessing(e)) {
// nothing to do anymore as Job is deleted
return;
} else if (isJobServerStopped(e)) {
updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
Thread.currentThread().interrupt();
} else if (isJobNotFoundException(e)) {
updateJobStateToFailedAndRunJobFilters("Job method not found", e);
} else {
updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
}
} finally {
backgroundJobServer.getJobSteward().notifyThreadIdle();
MDC.clear();
}
} | @Test
@DisplayName("InvocationTargetException is unwrapped and the actual error is stored instead")
void invocationTargetExceptionUnwrapped() throws Exception {
var job = anEnqueuedJob().build();
var runner = mock(BackgroundJobRunner.class);
doThrow(new InvocationTargetException(new RuntimeException("test error"))).when(runner).run(job);
when(backgroundJobServer.getBackgroundJobRunner(job)).thenReturn(runner);
var backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
backgroundJobPerformer.run();
var lastFailure = job.getLastJobStateOfType(FailedState.class);
assertThat(lastFailure.isPresent()).isTrue();
assertThat(lastFailure.get().getExceptionMessage()).isEqualTo("test error");
assertThat(lastFailure.get().getException()).isInstanceOf(RuntimeException.class);
assertThat(lastFailure.get().getException().getMessage()).isEqualTo("test error");
} |
@Override
@Transactional(rollbackFor = Exception.class)
public void updateSpu(ProductSpuSaveReqVO updateReqVO) {
// Verify the SPU exists
validateSpuExists(updateReqVO.getId());
// Verify the category and brand
validateCategory(updateReqVO.getCategoryId());
brandService.validateProductBrand(updateReqVO.getBrandId());
// Verify the SKUs
List<ProductSkuSaveReqVO> skuSaveReqList = updateReqVO.getSkus();
productSkuService.validateSkuList(skuSaveReqList, updateReqVO.getSpecType());
// Update the SPU
ProductSpuDO updateObj = BeanUtils.toBean(updateReqVO, ProductSpuDO.class);
initSpuFromSkus(updateObj, skuSaveReqList);
productSpuMapper.updateById(updateObj);
// Batch update the SKUs
productSkuService.updateSkuList(updateObj.getId(), updateReqVO.getSkus());
} | @Test
public void testUpdateSpu_success() {
// Prepare parameters
ProductSpuDO createReqVO = randomPojo(ProductSpuDO.class,o->{
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setDeliveryTemplateId(generateId());
o.setSort(RandomUtil.randomInt(1,100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
o.setPrice(generaInt()); // constrain to a positive integer
o.setMarketPrice(generaInt()); // constrain to a positive integer
o.setCostPrice(generaInt()); // constrain to a positive integer
o.setStock(generaInt()); // constrain to a positive integer
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setSalesCount(generaInt()); // constrain to a positive integer
o.setBrowseCount(generaInt()); // constrain to a positive integer
});
productSpuMapper.insert(createReqVO);
// Prepare parameters
ProductSkuSaveReqVO skuCreateOrUpdateReqVO = randomPojo(ProductSkuSaveReqVO.class, o->{
// constrain to positive integers
o.setCostPrice(generaInt());
o.setPrice(generaInt());
o.setMarketPrice(generaInt());
o.setStock(generaInt());
o.setFirstBrokeragePrice(generaInt());
o.setSecondBrokeragePrice(generaInt());
// limit to two decimal places
o.setWeight(RandomUtil.randomDouble(10,2, RoundingMode.HALF_UP));
o.setVolume(RandomUtil.randomDouble(10,2, RoundingMode.HALF_UP));
});
// Prepare parameters
ProductSpuSaveReqVO reqVO = randomPojo(ProductSpuSaveReqVO.class, o -> {
o.setId(createReqVO.getId()); // set the ID to update
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setSort(RandomUtil.randomInt(1,100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
o.setGiveIntegral(generaInt()); // constrain to a positive integer
o.setSalesCount(generaInt()); // constrain to a positive integer
o.setBrowseCount(generaInt()); // constrain to a positive integer
o.setSkus(newArrayList(skuCreateOrUpdateReqVO,skuCreateOrUpdateReqVO,skuCreateOrUpdateReqVO));
});
when(categoryService.getCategoryLevel(eq(reqVO.getCategoryId()))).thenReturn(2);
// Invoke
productSpuService.updateSpu(reqVO);
// Verify the update was applied correctly
ProductSpuDO spu = productSpuMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, spu);
} |
@Override
public HollowProducer.ReadState restore(long versionDesired, HollowConsumer.BlobRetriever blobRetriever) {
return super.restore(versionDesired, blobRetriever);
} | @Test
public void testRestoreToNonExact() {
HollowProducer producer = createProducer(tmpFolder, schema);
long version = testPublishV1(producer, 2, 7);
producer = createProducer(tmpFolder, schema);
producer.restore(version + 1, blobRetriever);
Assert.assertNotNull(lastRestoreStatus);
assertEquals(Status.SUCCESS, lastRestoreStatus.getStatus());
assertEquals("Should have reached correct version", version,
lastRestoreStatus.getVersionReached());
assertEquals("Should have correct desired version", version + 1,
lastRestoreStatus.getDesiredVersion());
assertEquals(producer.getCycleCountWithPrimaryStatus(), 0); // no cycle run
} |
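// Picks partitions eligible for compaction: a non-default priority bypasses both the minimum
// score and the next-compaction-time checks; excluded tables and null scores are always skipped.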
@Override
@NotNull
public List<PartitionStatistics> select(@NotNull Collection<PartitionStatistics> statistics,
@NotNull Set<Long> excludeTables) {
double minScore = Config.lake_compaction_score_selector_min_score;
long now = System.currentTimeMillis();
return statistics.stream()
.filter(p -> p.getCompactionScore() != null)
.filter(p -> !excludeTables.contains(p.getPartition().getTableId()))
// When manual compaction is triggered, we just skip min score and time check
.filter(p -> (p.getPriority() != PartitionStatistics.CompactionPriority.DEFAULT
|| (p.getNextCompactionTime() <= now && p.getCompactionScore().getMax() >= minScore)))
.collect(Collectors.toList());
} | @Test
public void test() {
List<PartitionStatistics> statisticsList = new ArrayList<>();
PartitionStatistics statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 3));
statistics.setCompactionScore(Quantiles.compute(Collections.singleton(0.0)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 4));
statistics.setCompactionScore(Quantiles.compute(Collections.singleton(0.99)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 5));
statistics.setCompactionScore(Quantiles.compute(Collections.singleton(1.0)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 6));
statistics.setCompactionScore(Quantiles.compute(Collections.singleton(1.1)));
statisticsList.add(statistics);
List<PartitionStatistics> targetList = selector.select(statisticsList, new HashSet<Long>());
Assert.assertEquals(2, targetList.size());
Assert.assertEquals(5, targetList.get(0).getPartition().getPartitionId());
Assert.assertEquals(6, targetList.get(1).getPartition().getPartitionId());
} |
@Override
public HttpResponse get() throws InterruptedException, ExecutionException {
try {
final Object result = process(0, null);
if (result instanceof Throwable) {
throw new ExecutionException((Throwable) result);
}
return (HttpResponse) result;
} finally {
isDone = true;
}
} | @Test(expected = InterruptedException.class)
public void errGet() throws ExecutionException, InterruptedException, TimeoutException {
get(new InterruptedException(), false);
} |
public static RunResponse from(WorkflowInstance instance, int state) {
return RunResponse.builder()
.workflowId(instance.getWorkflowId())
.workflowVersionId(instance.getWorkflowVersionId())
.workflowInstanceId(instance.getWorkflowInstanceId())
.workflowRunId(instance.getWorkflowRunId())
.workflowUuid(instance.getWorkflowUuid())
.status(Status.fromCode(state))
.timelineEvent(instance.getInitiator().getTimelineEvent())
.build();
} | @Test
public void testBuildFromEvent() {
RunResponse res = RunResponse.from(stepInstance, TimelineLogEvent.info("bar"));
Assert.assertEquals(RunResponse.Status.STEP_ATTEMPT_CREATED, res.getStatus());
} |
@Override
public ExportResult<VideosContainerResource> export(
UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation)
throws CopyExceptionWithFailureReason {
Preconditions.checkNotNull(authData);
KoofrClient koofrClient = koofrClientFactory.create(authData);
KoofrMediaExport export = new KoofrMediaExport(koofrClient, monitor);
try {
export.export();
List<VideoAlbum> exportAlbums = export.getVideoAlbums();
List<VideoModel> exportVideos = export.getVideos();
VideosContainerResource containerResource =
new VideosContainerResource(exportAlbums, exportVideos);
return new ExportResult<>(ExportResult.ResultType.END, containerResource, null);
} catch (IOException e) {
return new ExportResult<>(e);
}
} | @Test
public void testExport() throws Exception {
when(client.getRootPath()).thenReturn("/Data transfer");
when(client.listRecursive("/Data transfer")).thenReturn(Fixtures.listRecursiveItems);
when(client.fileLink("/Data transfer/Album 2 :heart:/Video 1.mp4"))
.thenReturn("https://app-1.koofr.net/content/files/get/Video+1.mp4?base=TESTBASE");
when(client.fileLink("/Data transfer/Videos/Video 2.mp4"))
.thenReturn("https://app-1.koofr.net/content/files/get/Video+2.mp4?base=TESTBASE");
UUID jobId = UUID.randomUUID();
ExportResult<VideosContainerResource> result =
exporter.export(jobId, authData, Optional.empty());
assertEquals(ExportResult.ResultType.END, result.getType());
assertNull(result.getContinuationData());
VideosContainerResource exportedData = result.getExportedData();
List<VideoAlbum> expectedAlbums =
ImmutableList.of(
new VideoAlbum("/Album 2 :heart:", "Album 2 ❤️", "Album 2 description ❤️"),
new VideoAlbum("/Videos", "Videos", null));
assertEquals(expectedAlbums, exportedData.getAlbums());
List<VideoModel> expectedVideos =
ImmutableList.of(
new VideoModel(
"Video 1.mp4",
"https://app-1.koofr.net/content/files/get/Video+1.mp4?base=TESTBASE",
null,
"video/mp4",
"/Album 2 :heart:/Video 1.mp4",
"/Album 2 :heart:",
false,
Date.from(Instant.parse("2020-09-04T12:40:57.741Z"))),
new VideoModel(
"Video 2.mp4",
"https://app-1.koofr.net/content/files/get/Video+2.mp4?base=TESTBASE",
"Video 3 description",
"video/mp4",
"/Videos/Video 2.mp4",
"/Videos",
false,
Date.from(Instant.parse("2020-09-04T12:41:06.949Z"))));
assertEquals(expectedVideos, exportedData.getVideos());
} |
@Override
public void describe(SensorDescriptor descriptor) {
descriptor
.name("Xoo Measure Sensor")
.onlyOnLanguages(Xoo.KEY, Xoo2.KEY);
} | @Test
public void testDescriptor() {
sensor.describe(new DefaultSensorDescriptor());
} |
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
if ( super.init( smi, sdi ) ) {
logError( BaseMessages.getString( PKG, "MissingTransStep.Log.CannotRunTrans" ) );
}
return false;
} | @Test
public void testInit() {
StepMetaInterface stepMetaInterface = new AbstractStepMeta() {
@Override
public void setDefault() { }
@Override
public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int copyNr,
TransMeta transMeta,
Trans trans ) {
return null;
}
};
StepMeta stepMeta = new StepMeta();
stepMeta.setName( "TestMetaStep" );
StepDataInterface stepDataInterface = mock( StepDataInterface.class );
Trans trans = new Trans();
LogChannel log = mock( LogChannel.class );
doAnswer( new Answer<Void>() {
public Void answer( InvocationOnMock invocation ) {
return null;
}
} ).when( log ).logError( anyString() );
trans.setLog( log );
TransMeta transMeta = new TransMeta();
transMeta.addStep( stepMeta );
MissingTransStep step = createAndInitStep( stepMetaInterface, stepDataInterface );
assertFalse( step.init( stepMetaInterface, stepDataInterface ) );
} |
public boolean isTimedOut() {
return timedOut.get();
} | @Test
public void testShellCommandTimeout() throws Throwable {
Assume.assumeFalse(WINDOWS);
String rootDir = rootTestDir.getAbsolutePath();
File shellFile = new File(rootDir, "timeout.sh");
String timeoutCommand = "sleep 4; echo \"hello\"";
Shell.ShellCommandExecutor shexc;
try (PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile))) {
writer.println(timeoutCommand);
writer.close();
}
FileUtil.setExecutable(shellFile, true);
shexc = new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
null, null, 100);
try {
shexc.execute();
} catch (Exception e) {
// When the command times out, an exception is thrown.
}
shellFile.delete();
assertTrue("Script did not timeout" , shexc.isTimedOut());
} |
public T getRecordingProxy()
{
return _templateProxy;
} | @Test(expectedExceptions = NullPointerException.class)
public void testSimpleSetDisallowNullDefault()
{
makeOne().getRecordingProxy().setFooRequired(null);
} |
public boolean isAfterFlink114() {
return flinkInterpreter.getFlinkVersion().isAfterFlink114();
} | @Test
void testStreamIPyFlink() throws InterpreterException, IOException {
if (!flinkInnerInterpreter.getFlinkVersion().isAfterFlink114()) {
IPyFlinkInterpreterTest.testStreamPyFlink(interpreter, flinkScalaInterpreter);
}
} |
public String views(Namespace ns) {
return SLASH.join("v1", prefix, "namespaces", RESTUtil.encodeNamespace(ns), "views");
} | @Test
public void viewsWithSlash() {
Namespace ns = Namespace.of("n/s");
assertThat(withPrefix.views(ns)).isEqualTo("v1/ws/catalog/namespaces/n%2Fs/views");
assertThat(withoutPrefix.views(ns)).isEqualTo("v1/namespaces/n%2Fs/views");
} |
public boolean setRuleDescriptionContextKey(DefaultIssue issue, @Nullable String previousContextKey) {
String currentContextKey = issue.getRuleDescriptionContextKey().orElse(null);
issue.setRuleDescriptionContextKey(previousContextKey);
if (!Objects.equals(currentContextKey, previousContextKey)) {
issue.setRuleDescriptionContextKey(currentContextKey);
issue.setChanged(true);
return true;
}
return false;
} | @Test
void setRuleDescriptionContextKey_setContextKeyIfValuesAreDifferent() {
issue.setRuleDescriptionContextKey(DEFAULT_RULE_DESCRIPTION_CONTEXT_KEY);
boolean updated = underTest.setRuleDescriptionContextKey(issue, "hibernate");
assertThat(updated).isTrue();
assertThat(issue.getRuleDescriptionContextKey()).contains(DEFAULT_RULE_DESCRIPTION_CONTEXT_KEY);
} |
@Override
public DescriptiveUrlBag toUrl(final Path file) {
return new DescriptiveUrlBag(Collections.singletonList(
new DescriptiveUrl(URI.create(String.format("https://webmail.freenet.de/web/?goTo=share&path=/%s#cloud",
URIEncoder.encode(PathRelativizer.relativize(PathNormalizer.normalize(host.getDefaultPath(), true), file.isFile() ?
file.getParent().getAbsolute() : file.getAbsolute())))), DescriptiveUrl.Type.http)
));
} | @Test
public void testToUrlFile() {
final FreenetUrlProvider provider = new FreenetUrlProvider(new Host(new FreenetProtocol(), "dav.freenet.de", 443, "/webdav"));
final DescriptiveUrlBag urls = provider.toUrl(new Path("/webdav/d/f", EnumSet.of(Path.Type.file)));
assertEquals(1, urls.size());
final DescriptiveUrl url = urls.find(DescriptiveUrl.Type.http);
assertNotEquals(DescriptiveUrl.EMPTY, url);
assertEquals(DescriptiveUrl.Type.http, url.getType());
assertEquals("https://webmail.freenet.de/web/?goTo=share&path=/d#cloud", url.getUrl());
} |
public static <T, ComparatorT extends Comparator<T> & Serializable>
Combine.Globally<T, List<T>> of(int count, ComparatorT compareFn) {
return Combine.globally(new TopCombineFn<>(count, compareFn));
} | @Test
public void testCountConstraint() {
p.enableAbandonedNodeEnforcement(false);
PCollection<String> input =
p.apply(Create.of(Arrays.asList(COLLECTION)).withCoder(StringUtf8Coder.of()));
expectedEx.expect(IllegalArgumentException.class);
expectedEx.expectMessage(Matchers.containsString(">= 0"));
input.apply(Top.of(-1, new OrderByLength()));
} |
public static Method getMethodByName(Class<?> clazz, String methodName) {
if (Objects.nonNull(clazz) && Objects.nonNull(methodName)) {
Method method = Arrays.stream(clazz.getMethods())
.filter(m -> Objects.equals(m.getName(), methodName))
.findFirst().orElse(null);
if (method != null) {
return method;
}
return Arrays.stream(clazz.getDeclaredMethods())
.filter(m -> Objects.equals(m.getName(), methodName))
.findFirst().orElse(null);
}
return null;
} | @Test
public void getMethodByNameTest() {
// private method
Method runStateLessThan = ReflectUtil.getMethodByName(ThreadPoolExecutor.class, "runStateLessThan");
Assert.assertNotNull(runStateLessThan);
// public method
Method field = ReflectUtil.getMethodByName(TestClass.class, "setPrivateField");
Assert.assertNotNull(field);
// parameters
Method privateField = ReflectUtil.getMethodByName(TestClass.class, "setPrivateField", String.class);
Assert.assertNotNull(privateField);
} |
static void extractSchemaWithComplexTypeHandling(
Descriptors.FieldDescriptor fieldSchema,
List<String> fieldsToUnnest,
String delimiter,
String path,
Schema pinotSchema,
@Nullable Map<String, FieldSpec.FieldType> fieldTypeMap,
@Nullable TimeUnit timeUnit) {
Descriptors.FieldDescriptor.Type fieldType = fieldSchema.getType();
if (fieldSchema.isRepeated()) {
if (isPrimitiveType(fieldType)) {
addFieldToPinotSchema(pinotSchema, valueOf(fieldType), path, false, fieldTypeMap, timeUnit);
} else if (fieldsToUnnest.contains(path) && !fieldSchema.isMapField()) {
for (Descriptors.FieldDescriptor innerField : fieldSchema.getMessageType().getFields()) {
extractSchemaWithComplexTypeHandling(innerField, fieldsToUnnest, delimiter,
String.join(delimiter, path, innerField.getName()), pinotSchema, fieldTypeMap, timeUnit);
}
} else if (!fieldSchema.isMapField()) {
addFieldToPinotSchema(pinotSchema, FieldSpec.DataType.STRING, path, true, fieldTypeMap, timeUnit);
}
// Ignores Map type since it's not supported when complex type handling is enabled
} else if (fieldType == Descriptors.FieldDescriptor.Type.MESSAGE) {
for (Descriptors.FieldDescriptor innerField : fieldSchema.getMessageType().getFields()) {
extractSchemaWithComplexTypeHandling(innerField, fieldsToUnnest, delimiter,
String.join(delimiter, path, innerField.getName()), pinotSchema, fieldTypeMap, timeUnit);
}
} else {
FieldSpec.DataType dataType = valueOf(fieldType);
addFieldToPinotSchema(pinotSchema, dataType, path, true, fieldTypeMap, timeUnit);
}
} | @Test(dataProvider = "scalarCases")
public void testExtractSchemaWithComplexTypeHandling(
String fieldName, FieldSpec.DataType type, boolean isSingleValue) {
Descriptors.FieldDescriptor desc = ComplexTypes.TestMessage.getDescriptor().findFieldByName(fieldName);
Schema schema = new Schema();
ProtoBufSchemaUtils.extractSchemaWithComplexTypeHandling(
desc,
Collections.emptyList(),
".",
desc.getName(),
schema,
new HashMap<>(),
TimeUnit.SECONDS);
Schema expectedSchema;
if (isSingleValue) {
expectedSchema = new Schema.SchemaBuilder()
.addSingleValueDimension(fieldName, type)
.build();
} else {
expectedSchema = new Schema.SchemaBuilder()
.addMultiValueDimension(fieldName, type)
.build();
}
assertEquals(expectedSchema, schema);
} |
@SuppressWarnings("unchecked")
public Mono<RateLimiterResponse> isAllowed(final String id, final RateLimiterHandle limiterHandle) {
double replenishRate = limiterHandle.getReplenishRate();
double burstCapacity = limiterHandle.getBurstCapacity();
double requestCount = limiterHandle.getRequestCount();
RateLimiterAlgorithm<?> rateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance(limiterHandle.getAlgorithmName());
RedisScript<?> script = rateLimiterAlgorithm.getScript();
List<String> keys = rateLimiterAlgorithm.getKeys(id);
List<String> scriptArgs = Stream.of(replenishRate, burstCapacity, Instant.now().getEpochSecond(), requestCount).map(String::valueOf).collect(Collectors.toList());
Flux<List<Long>> resultFlux = Singleton.INST.get(ReactiveRedisTemplate.class).execute(script, keys, scriptArgs);
return resultFlux.onErrorResume(throwable -> Flux.just(Arrays.asList(1L, -1L)))
.reduce(new ArrayList<Long>(), (longs, l) -> {
longs.addAll(l);
return longs;
}).map(results -> {
boolean allowed = results.get(0) == 1L;
Long tokensLeft = results.get(1);
return new RateLimiterResponse(allowed, tokensLeft, keys);
})
.doOnError(throwable -> {
rateLimiterAlgorithm.callback(rateLimiterAlgorithm.getScript(), keys, scriptArgs);
LOG.error("Error occurred while judging if user is allowed by RedisRateLimiter:{}", throwable.getMessage());
});
} | @Test
public void notAllowedTest() {
isAllowedPreInit(0L, 0L, false);
rateLimiterHandle.setAlgorithmName("tokenBucket");
Mono<RateLimiterResponse> responseMono = redisRateLimiter.isAllowed(DEFAULT_TEST_ID, rateLimiterHandle);
StepVerifier.create(responseMono).assertNext(r -> {
assertEquals(0, r.getTokensRemaining());
assertFalse(r.isAllowed());
}).verifyComplete();
} |
static List<Integer> getTargetTpcPorts(List<Integer> tpcPorts, ClientTpcConfig tpcConfig) {
List<Integer> targetTpcPorts;
int tpcConnectionCount = tpcConfig.getConnectionCount();
if (tpcConnectionCount == 0 || tpcConnectionCount >= tpcPorts.size()) {
// zero means connect to all.
targetTpcPorts = tpcPorts;
} else {
// we make a copy of the tpc ports because items are removed.
List<Integer> tpcPortsCopy = new LinkedList<>(tpcPorts);
targetTpcPorts = new ArrayList<>(tpcConnectionCount);
ThreadLocalRandom threadLocalRandom = ThreadLocalRandom.current();
for (int k = 0; k < tpcConnectionCount; k++) {
int index = threadLocalRandom.nextInt(tpcPortsCopy.size());
targetTpcPorts.add(tpcPortsCopy.remove(index));
}
}
return targetTpcPorts;
} | @Test
public void testGetTargetTpcPorts_whenConnectToAll() {
ClientTpcConfig config = new ClientTpcConfig();
List<Integer> tpcPorts = asList(1, 2, 3);
// when larger than the number of tpc ports, return the full set.
config.setConnectionCount(tpcPorts.size() + 1);
assertEquals(tpcPorts, getTargetTpcPorts(tpcPorts, config));
// when equal to the number of tpc ports, return the full set.
config.setConnectionCount(tpcPorts.size());
assertEquals(tpcPorts, getTargetTpcPorts(tpcPorts, config));
// When 0, return the full set.
config.setConnectionCount(0);
assertEquals(tpcPorts, getTargetTpcPorts(tpcPorts, config));
} |
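// Hedged companion sketch (test name hypothetical): when connectionCount is positive
// and smaller than the number of TPC ports, a random subset of exactly that size is
// chosen without replacement, so only the size and membership can be asserted.
@Test
public void testGetTargetTpcPorts_whenConnectToSubset() {
    ClientTpcConfig config = new ClientTpcConfig();
    List<Integer> tpcPorts = asList(1, 2, 3, 4);
    config.setConnectionCount(2);
    List<Integer> subset = getTargetTpcPorts(tpcPorts, config);
    assertEquals(2, subset.size());
    assertTrue(tpcPorts.containsAll(subset));
}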
@Override
public void deregisterService(String serviceName, String groupName, Instance instance) throws NacosException {
getExecuteClientProxy(instance).deregisterService(serviceName, groupName, instance);
} | @Test
void testDeregisterPersistentServiceHttp() throws NacosException, NoSuchFieldException, IllegalAccessException {
NamingHttpClientProxy mockHttpClient = Mockito.mock(NamingHttpClientProxy.class);
Field mockHttpClientField = NamingClientProxyDelegate.class.getDeclaredField("httpClientProxy");
mockHttpClientField.setAccessible(true);
mockHttpClientField.set(delegate, mockHttpClient);
String serviceName = "service1";
String groupName = "group1";
Instance instance = new Instance();
instance.setServiceName(serviceName);
instance.setClusterName(groupName);
instance.setIp("1.1.1.1");
instance.setPort(1);
// use http
instance.setEphemeral(false);
delegate.deregisterService(serviceName, groupName, instance);
verify(mockHttpClient, times(1)).deregisterService(serviceName, groupName, instance);
} |
public boolean statsHaveChanged() {
if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
return false;
}
for (ContentNodeStats contentNodeStats : aggregatedStats.getStats()) {
int nodeIndex = contentNodeStats.getNodeIndex();
boolean currValue = mayHaveMergesPendingInGlobalSpace(nodeIndex);
Boolean prevValue = prevMayHaveMergesPendingInGlobalSpace(nodeIndex);
if (prevValue != null) {
if (prevValue != currValue) {
return true;
}
} else {
return true;
}
}
return false;
} | @Test
void stats_have_not_changed_if_not_all_distributors_are_updated() {
Fixture f = Fixture.empty();
assertFalse(f.statsHaveChanged());
} |
static Optional<ExecutorService> lookupExecutorServiceRef(
CamelContext camelContext, String name, Object source, String executorServiceRef) {
ExecutorServiceManager manager = camelContext.getExecutorServiceManager();
ObjectHelper.notNull(manager, ESM_NAME);
ObjectHelper.notNull(executorServiceRef, "executorServiceRef");
// lookup in registry first and use existing thread pool if exists,
// or create a new thread pool, assuming that the executor service ref is a thread pool ID
return lookupByNameAndType(camelContext, executorServiceRef, ExecutorService.class)
.or(() -> Optional.ofNullable(manager.newThreadPool(source, name, executorServiceRef)));
} | @Test
void testLookupExecutorServiceRefWithNullRef() {
String name = "ThreadPool";
Object source = new Object();
when(camelContext.getExecutorServiceManager()).thenReturn(manager);
Exception ex = assertThrows(IllegalArgumentException.class,
() -> DynamicRouterRecipientListHelper.lookupExecutorServiceRef(camelContext, name, source, null));
assertEquals("executorServiceRef must be specified", ex.getMessage());
} |
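// Hedged usage sketch: resolution is registry-first; only when no ExecutorService is
// bound under the given ref does the manager create a new pool, treating the ref as
// a thread-pool profile id. The ref name below is hypothetical.
Optional<ExecutorService> pool = DynamicRouterRecipientListHelper
        .lookupExecutorServiceRef(camelContext, "ThreadPool", new Object(), "myPoolProfile");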
public void setBaseResource(Resource baseResource) {
handler.setBaseResource(baseResource);
} | @Test
void setsBaseResourceStringList(@TempDir Path tempDir) throws Exception {
String wooResource = Files.createDirectory(tempDir.resolve("dir-1")).toString();
String fooResource = Files.createDirectory(tempDir.resolve("dir-2")).toString();
final String[] testResources = new String[]{wooResource, fooResource};
environment.setBaseResource(testResources);
assertThat(handler.getBaseResource()).isExactlyInstanceOf(ResourceCollection.class);
assertThat(((ResourceCollection) handler.getBaseResource()).getResources())
.contains(Resource.newResource(wooResource), Resource.newResource(fooResource));
} |
@Override
public BuiltInScalarFunctionImplementation specialize(BoundVariables boundVariables, int arity, FunctionAndTypeManager functionAndTypeManager)
{
ImmutableList.Builder<ScalarFunctionImplementationChoice> implementationChoices = ImmutableList.builder();
for (PolymorphicScalarFunctionChoice choice : choices) {
implementationChoices.add(getScalarFunctionImplementationChoice(boundVariables, functionAndTypeManager, choice));
}
return new BuiltInScalarFunctionImplementation(implementationChoices.build());
} | @Test
public void testTypeParameters()
throws Throwable
{
Signature signature = SignatureBuilder.builder()
.name("foo")
.kind(SCALAR)
.typeVariableConstraints(comparableWithVariadicBound("V", VARCHAR))
.returnType(parseTypeSignature("V"))
.argumentTypes(parseTypeSignature("V"))
.build();
SqlScalarFunction function = SqlScalarFunction.builder(TestMethods.class)
.signature(signature)
.deterministic(true)
.calledOnNullInput(false)
.choice(choice -> choice
.implementation(methodsGroup -> methodsGroup.methods("varcharToVarchar")))
.build();
BuiltInScalarFunctionImplementation functionImplementation = function.specialize(BOUND_VARIABLES, 1, FUNCTION_AND_TYPE_MANAGER);
Slice slice = (Slice) functionImplementation.getMethodHandle().invoke(INPUT_SLICE);
assertEquals(slice, VARCHAR_TO_VARCHAR_RETURN_VALUE);
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void add1() {
String inputExpression = "y + 5 * 3";
BaseNode infix = parse( inputExpression, mapOf(entry("y", BuiltInType.NUMBER)) );
assertThat( infix).isInstanceOf(InfixOpNode.class);
assertThat( infix.getResultType()).isEqualTo(BuiltInType.NUMBER);
assertThat( infix.getText()).isEqualTo(inputExpression);
InfixOpNode add = (InfixOpNode) infix;
assertThat( add.getLeft()).isInstanceOf(NameRefNode.class);
assertThat( add.getLeft().getText()).isEqualTo("y");
assertThat( add.getOperator()).isEqualTo(InfixOperator.ADD);
assertThat( add.getRight()).isInstanceOf(InfixOpNode.class);
assertThat( add.getRight().getText()).isEqualTo( "5 * 3");
InfixOpNode mult = (InfixOpNode) add.getRight();
assertThat( mult.getLeft()).isInstanceOf(NumberNode.class);
assertThat( mult.getLeft().getText()).isEqualTo("5");
assertThat( mult.getOperator()).isEqualTo(InfixOperator.MULT);
assertThat( mult.getRight()).isInstanceOf(NumberNode.class);
assertThat( mult.getRight().getText()).isEqualTo("3");
} |
@Override
protected Set<StepField> getUsedFields( ExcelInputMeta meta ) {
Set<StepField> usedFields = new HashSet<>();
if ( meta.isAcceptingFilenames() && StringUtils.isNotEmpty( meta.getAcceptingStepName() ) ) {
StepField stepField = new StepField( meta.getAcceptingStepName(), meta.getAcceptingField() );
usedFields.add( stepField );
}
return usedFields;
} | @Test
public void testGetUsedFields_isNotAcceptingFilenames() throws Exception {
lenient().when( meta.isAcceptingFilenames() ).thenReturn( false );
lenient().when( meta.getAcceptingField() ).thenReturn( "filename" );
lenient().when( meta.getAcceptingStepName() ).thenReturn( "previousStep" );
Set<StepField> usedFields = analyzer.getUsedFields( meta );
assertNotNull( usedFields );
assertEquals( 0, usedFields.size() );
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldHandleNestedUdfs() {
// Given:
givenSourceStreamWithSchema(SINGLE_VALUE_COLUMN_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of());
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(COL0),
ImmutableList.of(
new FunctionCall(
FunctionName.of("SUBSTRING"),
ImmutableList.of(
new FunctionCall(
FunctionName.of("SUBSTRING"),
ImmutableList.of(new StringLiteral("foo"), new IntegerLiteral(2))
),
new IntegerLiteral(2))
))
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(valueSerializer).serialize(TOPIC_NAME, genericRow("o"));
verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
} |
public double currentProductionRate() {
return aggregateStat(ProducerCollector.PRODUCER_MESSAGES_PER_SEC, false);
} | @Test
public void shouldAggregateStatsAcrossAllProducers() {
final MetricCollectors metricCollectors = new MetricCollectors();
final ProducerCollector collector1 = new ProducerCollector();
collector1.configure(
ImmutableMap.of(
ProducerConfig.CLIENT_ID_CONFIG, "client1",
KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors
)
);
final ProducerCollector collector2 = new ProducerCollector();
collector2.configure(
ImmutableMap.of(
ProducerConfig.CLIENT_ID_CONFIG, "client2",
KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors
)
);
for (int i = 0; i < 500; i++) {
collector1.onSend(new ProducerRecord<>(TEST_TOPIC, "key", Integer.toString(i)));
collector2.onSend(new ProducerRecord<>(TEST_TOPIC + "_" + i, "key",
Integer.toString(i * 100)));
}
// The Kafka metrics in MetricCollectors are configured so that sampled stats (like the Rate
// measurable stat) have 100 samples, each with a duration of 1 second. In this test we
// record 1000 events, but only in a single sample since they all belong to the same second.
// So 99 samples are empty. Hence the rate is computed as a tenth of what it should be. This
// won't be a problem for a longer-running program.
assertEquals(10, Math.floor(metricCollectors.currentProductionRate()), 0);
} |
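// Hedged arithmetic behind the assertion above: the two collectors record 500 sends
// each, i.e. 1000 events total, all falling into a single 1-second sample of the
// 100-sample window, so the measured rate works out to roughly 1000 / 100s = 10
// events per second, which is the value the test floors and compares.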
@Override
@CacheEvict(cacheNames = "ai:video:config", key = "#updateReqVO.type")
public void updateAiVideoConfig(AiVideoConfigUpdateReqVO updateReqVO) {
// 校验存在
validateAiVideoConfigExists(updateReqVO.getId());
// 更新
AiVideoConfigDO updateObj = AiVideoConfigConvert.INSTANCE.convert(updateReqVO);
aiVideoConfigMapper.updateById(updateObj);
} | @Test
public void testUpdateAiVideoConfig_notExists() {
// 准备参数
AiVideoConfigUpdateReqVO reqVO = randomPojo(AiVideoConfigUpdateReqVO.class);
// 调用, 并断言异常
assertServiceException(() -> aiVideoConfigService.updateAiVideoConfig(reqVO), AI_VIDEO_CONFIG_NOT_EXISTS);
} |
public void computeCpd(Component component, Collection<Block> originBlocks, Collection<Block> duplicationBlocks) {
CloneIndex duplicationIndex = new PackedMemoryCloneIndex();
populateIndex(duplicationIndex, originBlocks);
populateIndex(duplicationIndex, duplicationBlocks);
List<CloneGroup> duplications = SuffixTreeCloneDetectionAlgorithm.detect(duplicationIndex, originBlocks);
Iterable<CloneGroup> filtered = duplications.stream()
.filter(getNumberOfUnitsNotLessThan(component.getFileAttributes().getLanguageKey()))
.toList();
addDuplications(component, filtered);
} | @Test
public void add_no_duplication_when_not_enough_tokens() {
settings.setProperty("sonar.cpd.xoo.minimumTokens", 10);
Collection<Block> originBlocks = singletonList(
// This block contains 5 tokens -> not enough to consider it as a duplication
new Block.Builder()
.setResourceId(ORIGIN_FILE_KEY)
.setBlockHash(new ByteArray("a8998353e96320ec"))
.setIndexInFile(0)
.setLines(30, 45)
.setUnit(0, 4)
.build());
Collection<Block> duplicatedBlocks = singletonList(
new Block.Builder()
.setResourceId(OTHER_FILE_KEY)
.setBlockHash(new ByteArray("a8998353e96320ec"))
.setIndexInFile(0)
.setLines(40, 55)
.build());
underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks);
assertNoDuplicationAdded(ORIGIN_FILE);
} |
public Matrix submatrix(int i, int j, int k, int l) {
if (i < 0 || i >= m || k < i || k >= m || j < 0 || j >= n || l < j || l >= n) {
throw new IllegalArgumentException(String.format("Invalid submatrix range (%d:%d, %d:%d) of %d x %d", i, k, j, l, m, n));
}
Matrix sub = new Matrix(k - i + 1, l - j + 1);
for (int jj = j; jj <= l; jj++) {
for (int ii = i; ii <= k; ii++) {
sub.set(ii - i, jj - j, get(ii, jj));
}
}
return sub;
} | @Test
public void testSubmatrix() {
Matrix sub = matrix.submatrix(0, 1, 2, 2);
assertEquals(3, sub.nrow());
assertEquals(2, sub.ncol());
assertEquals(0.4f, sub.get(0,0), 1E-6f);
assertEquals(0.8f, sub.get(2,1), 1E-6f);
Matrix sub2 = sub.submatrix(0, 0, 1, 1);
assertEquals(2, sub2.nrow());
assertEquals(2, sub2.ncol());
assertEquals(0.4f, sub2.get(0,0), 1E-6f);
assertEquals(0.3f, sub2.get(1,1), 1E-6f);
} |
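// Hedged illustration of the inclusive-bounds convention: submatrix(i, j, k, l)
// keeps rows i..k and columns j..l inclusive, so the result is (k - i + 1) x (l - j + 1).
Matrix block = matrix.submatrix(1, 0, 2, 2); // rows 1..2, cols 0..2 -> a 2 x 3 block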
public static boolean isValid(String param) {
if (param == null) {
return false;
}
int length = param.length();
for (int i = 0; i < length; i++) {
char ch = param.charAt(i);
if (!Character.isLetterOrDigit(ch) && !isValidChar(ch)) {
return false;
}
}
return true;
} | @Test
void testIsValid() {
assertTrue(ParamUtils.isValid("test"));
assertTrue(ParamUtils.isValid("test1234"));
assertTrue(ParamUtils.isValid("test_-.:"));
assertFalse(ParamUtils.isValid("test!"));
assertFalse(ParamUtils.isValid("test~"));
} |
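// Hedged sketch of the isValidChar helper referenced by isValid (its body is not
// shown here); inferred purely from the assertions above, the extra characters
// allowed besides letters and digits appear to include '_', '-', '.' and ':'.
private static boolean isValidChar(char ch) {
    return ch == '_' || ch == '-' || ch == '.' || ch == ':';
}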
public static InetSocketAddress replaceUnresolvedNumericIp(InetSocketAddress inetSocketAddress) {
requireNonNull(inetSocketAddress, "inetSocketAddress");
if (!inetSocketAddress.isUnresolved()) {
return inetSocketAddress;
}
InetSocketAddress inetAddressForIpString = createForIpString(
inetSocketAddress.getHostString(), inetSocketAddress.getPort());
if (inetAddressForIpString != null) {
return inetAddressForIpString;
}
else {
return inetSocketAddress;
}
} | @Test
void shouldNotReplaceIfAlreadyResolvedWhenCallingReplaceUnresolvedNumericIp() {
InetSocketAddress socketAddress = new InetSocketAddress("127.0.0.1", 80);
InetSocketAddress processedAddress = AddressUtils.replaceUnresolvedNumericIp(socketAddress);
assertThat(processedAddress).isSameAs(socketAddress);
} |
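// Hedged companion test (name hypothetical): an unresolved numeric IP is expected
// to come back resolved, built straight from the literal without a DNS lookup.
@Test
void shouldReplaceIfUnresolvedNumericIp() {
    InetSocketAddress unresolved = InetSocketAddress.createUnresolved("127.0.0.1", 80);
    InetSocketAddress processed = AddressUtils.replaceUnresolvedNumericIp(unresolved);
    assertThat(processed.isUnresolved()).isFalse();
    assertThat(processed.getPort()).isEqualTo(80);
}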
@Override
public void registerService(String serviceName, String groupName, Instance instance) throws NacosException {
NAMING_LOGGER.info("[REGISTER-SERVICE] {} registering service {} with instance {}", namespaceId, serviceName,
instance);
if (instance.isEphemeral()) {
registerServiceForEphemeral(serviceName, groupName, instance);
} else {
doRegisterServiceForPersistent(serviceName, groupName, instance);
}
} | @Test
void testRegisterServiceThrowsNacosException() throws NacosException {
Throwable exception = assertThrows(NacosException.class, () -> {
when(this.rpcClient.request(Mockito.any())).thenReturn(ErrorResponse.build(400, "err args"));
try {
client.registerService(SERVICE_NAME, GROUP_NAME, instance);
} catch (NacosException ex) {
assertNull(ex.getCause());
throw ex;
}
});
assertTrue(exception.getMessage().contains("err args"));
} |
public void initialize(final BlockStore blockStore, final StoredBlock chainHead)
throws BlockStoreException {
StoredBlock versionBlock = chainHead;
final Stack<Long> versions = new Stack<>();
// We don't know how many blocks back we can go, so load what we can first
versions.push(versionBlock.getHeader().getVersion());
for (int headOffset = 0; headOffset < versionWindow.length; headOffset++) {
versionBlock = versionBlock.getPrev(blockStore);
if (null == versionBlock) {
break;
}
versions.push(versionBlock.getHeader().getVersion());
}
// Replay the versions into the tally
while (!versions.isEmpty()) {
add(versions.pop());
}
} | @Test
public void testInitialize() throws BlockStoreException {
Context.propagate(new Context(100, Transaction.DEFAULT_TX_FEE, false, true));
final BlockStore blockStore = new MemoryBlockStore(TESTNET.getGenesisBlock());
final BlockChain chain = new BlockChain(BitcoinNetwork.TESTNET, blockStore);
// Build a historical chain of version 2 blocks
Instant time = Instant.ofEpochSecond(1231006505);
StoredBlock chainHead = null;
for (int height = 0; height < TESTNET.getMajorityWindow(); height++) {
chainHead = FakeTxBuilder.createFakeBlock(blockStore, 2, time, height).storedBlock;
assertEquals(2, chainHead.getHeader().getVersion());
time = time.plus(1, ChronoUnit.MINUTES);
}
VersionTally instance = new VersionTally(TESTNET);
instance.initialize(blockStore, chainHead);
assertEquals(TESTNET.getMajorityWindow(), instance.getCountAtOrAbove(2).intValue());
} |
@Override
protected SchemaTransform from(SchemaTransformConfiguration configuration) {
return new IcebergReadSchemaTransform(configuration);
} | @Test
public void testBuildTransformWithRow() {
Map<String, String> properties = new HashMap<>();
properties.put("type", CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP);
properties.put("warehouse", "test_location");
Row transformConfigRow =
Row.withSchema(new IcebergReadSchemaTransformProvider().configurationSchema())
.withFieldValue("table", "test_table_identifier")
.withFieldValue("catalog_name", "test-name")
.withFieldValue("catalog_properties", properties)
.build();
new IcebergReadSchemaTransformProvider().from(transformConfigRow);
} |
public byte[] serialize() throws Throwable {
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
putInt(outputStream, this.replicaInfoTable.size());
for (Map.Entry<String, BrokerReplicaInfo> entry : replicaInfoTable.entrySet()) {
final byte[] brokerName = entry.getKey().getBytes(StandardCharsets.UTF_8);
byte[] brokerReplicaInfo = hessianSerialize(entry.getValue());
putInt(outputStream, brokerName.length);
outputStream.write(brokerName);
putInt(outputStream, brokerReplicaInfo.length);
outputStream.write(brokerReplicaInfo);
}
putInt(outputStream, this.syncStateSetInfoTable.size());
for (Map.Entry<String, SyncStateInfo> entry : syncStateSetInfoTable.entrySet()) {
final byte[] brokerName = entry.getKey().getBytes(StandardCharsets.UTF_8);
byte[] syncStateInfo = hessianSerialize(entry.getValue());
putInt(outputStream, brokerName.length);
outputStream.write(brokerName);
putInt(outputStream, syncStateInfo.length);
outputStream.write(syncStateInfo);
}
return outputStream.toByteArray();
} catch (Throwable e) {
LOGGER.error("serialize replicaInfoTable or syncStateSetInfoTable error", e);
throw e;
}
} | @Test
public void testSerialize() {
mockMetaData();
byte[] data;
try {
data = this.replicasInfoManager.serialize();
} catch (Throwable e) {
throw new RuntimeException(e);
}
final ReplicasInfoManager newReplicasInfoManager = new ReplicasInfoManager(config);
try {
newReplicasInfoManager.deserializeFrom(data);
} catch (Throwable e) {
throw new RuntimeException(e);
}
Map<String, BrokerReplicaInfo> oldReplicaInfoTable = new TreeMap<>();
Map<String, BrokerReplicaInfo> newReplicaInfoTable = new TreeMap<>();
Map<String/* brokerName */, SyncStateInfo> oldSyncStateTable = new TreeMap<>();
Map<String/* brokerName */, SyncStateInfo> newSyncStateTable = new TreeMap<>();
try {
Field field = ReplicasInfoManager.class.getDeclaredField("replicaInfoTable");
field.setAccessible(true);
oldReplicaInfoTable.putAll((Map<String, BrokerReplicaInfo>) field.get(this.replicasInfoManager));
newReplicaInfoTable.putAll((Map<String, BrokerReplicaInfo>) field.get(newReplicasInfoManager));
field = ReplicasInfoManager.class.getDeclaredField("syncStateSetInfoTable");
field.setAccessible(true);
oldSyncStateTable.putAll((Map<String, SyncStateInfo>) field.get(this.replicasInfoManager));
newSyncStateTable.putAll((Map<String, SyncStateInfo>) field.get(newReplicasInfoManager));
} catch (NoSuchFieldException | IllegalAccessException e) {
throw new RuntimeException(e);
}
assertArrayEquals(oldReplicaInfoTable.keySet().toArray(), newReplicaInfoTable.keySet().toArray());
assertArrayEquals(oldSyncStateTable.keySet().toArray(), newSyncStateTable.keySet().toArray());
for (String brokerName : oldReplicaInfoTable.keySet()) {
BrokerReplicaInfo oldReplicaInfo = oldReplicaInfoTable.get(brokerName);
BrokerReplicaInfo newReplicaInfo = newReplicaInfoTable.get(brokerName);
// Compare every public field of the old and new replica info via reflection.
for (Field infoField : oldReplicaInfo.getClass().getFields()) {
    try {
        assertEquals(infoField.get(oldReplicaInfo), infoField.get(newReplicaInfo));
    } catch (IllegalAccessException e) {
        throw new RuntimeException(e);
    }
}
}
} |
public static DescribeAclsRequest parse(ByteBuffer buffer, short version) {
return new DescribeAclsRequest(new DescribeAclsRequestData(new ByteBufferAccessor(buffer), version), version);
} | @Test
public void shouldRoundTripPrefixedV1() {
final DescribeAclsRequest original = new DescribeAclsRequest.Builder(PREFIXED_FILTER).build(V1);
final DescribeAclsRequest result = DescribeAclsRequest.parse(original.serialize(), V1);
assertRequestEquals(original, result);
} |
public static Pipeline updateTransform(
String urn, Pipeline originalPipeline, TransformReplacement compositeBuilder) {
Components.Builder resultComponents = originalPipeline.getComponents().toBuilder();
for (Map.Entry<String, PTransform> pt :
originalPipeline.getComponents().getTransformsMap().entrySet()) {
if (pt.getValue().getSpec() != null && urn.equals(pt.getValue().getSpec().getUrn())) {
MessageWithComponents updated =
compositeBuilder.getReplacement(pt.getKey(), originalPipeline.getComponents());
if (updated == null) {
continue;
}
checkArgument(
updated.getPtransform().getOutputsMap().equals(pt.getValue().getOutputsMap()),
"A %s must produce all of the outputs of the original %s",
TransformReplacement.class.getSimpleName(),
PTransform.class.getSimpleName());
removeSubtransforms(pt.getValue(), resultComponents);
resultComponents
.mergeFrom(updated.getComponents())
.putTransforms(pt.getKey(), updated.getPtransform());
}
}
return originalPipeline.toBuilder().setComponents(resultComponents).build();
} | @Test
public void replaceExistingCompositeSucceeds() {
Pipeline p =
Pipeline.newBuilder()
.addRootTransformIds("root")
.setComponents(
Components.newBuilder()
.putTransforms(
"root",
PTransform.newBuilder()
.addSubtransforms("sub_first")
.setSpec(FunctionSpec.newBuilder().setUrn("beam:composite"))
.build())
.putTransforms(
"sub_first",
PTransform.newBuilder()
.setSpec(FunctionSpec.newBuilder().setUrn("beam:inner"))
.build()))
.build();
Pipeline pipeline =
ProtoOverrides.updateTransform(
"beam:composite",
p,
new TestReplacer(
PTransform.newBuilder()
.addSubtransforms("foo")
.addSubtransforms("bar")
.setSpec(
FunctionSpec.getDefaultInstance()
.newBuilderForType()
.setUrn("beam:composite"))
.build(),
Components.getDefaultInstance()));
assertThat(
pipeline.getComponents().getTransformsOrThrow("root").getSpec().getUrn(),
equalTo("beam:composite"));
assertThat(
pipeline.getComponents().getTransformsOrThrow("root").getSubtransformsList(),
contains("foo", "bar"));
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<ListQueries> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create(
statement,
sessionProperties,
executionContext,
serviceContext.getKsqlClient()
);
return statement.getStatement().getShowExtended()
? executeExtended(statement, sessionProperties, executionContext, remoteHostExecutor)
: executeSimple(statement, executionContext, remoteHostExecutor);
} | @Test
public void shouldIncludeUnresponsiveIfShowQueriesFutureThrowsException() {
// Given
when(sessionProperties.getInternalRequest()).thenReturn(false);
final ConfiguredStatement<?> showQueries = engine.configure("SHOW QUERIES;");
final PersistentQueryMetadata metadata = givenPersistentQuery("id", RUNNING_QUERY_STATE);
when(mockKsqlEngine.getAllLiveQueries()).thenReturn(ImmutableList.of(metadata));
when(mockKsqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(metadata));
when(ksqlClient.makeKsqlRequest(any(), any(), any())).thenThrow(new KsqlRestClientException("error"));
when(serviceContext.getKsqlClient()).thenReturn(ksqlClient);
queryStatusCount.updateStatusCount(RUNNING_QUERY_STATE, 1);
queryStatusCount.updateStatusCount(KsqlQueryStatus.UNRESPONSIVE, 1);
// When
final Queries queries = (Queries) CustomExecutors.LIST_QUERIES.execute(
showQueries,
sessionProperties,
mockKsqlEngine,
serviceContext
).getEntity().orElseThrow(IllegalStateException::new);
// Then
assertThat(queries.getQueries(), containsInAnyOrder(persistentQueryMetadataToRunningQuery(metadata, queryStatusCount)));
} |
public static UserGroupInformation getUGI(HttpServletRequest request,
Configuration conf) throws IOException {
return getUGI(null, request, conf);
} | @Test
public void testGetProxyUgi() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
ServletContext context = mock(ServletContext.class);
String realUser = "TheDoctor";
String user = "TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
conf.set(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(realUser), "*");
conf.set(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(realUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
// have to be auth-ed with remote user
request = getMockRequest(null, null, user);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Security enabled but user not authenticated by filter",
ioe.getMessage());
}
request = getMockRequest(null, realUser, user);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Security enabled but user not authenticated by filter",
ioe.getMessage());
}
// proxy ugi for user via remote user
request = getMockRequest(realUser, null, user);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
Assert.assertEquals(ugi.getShortUserName(), user);
checkUgiFromAuth(ugi);
// proxy ugi for user via remote user = real user
request = getMockRequest(realUser, realUser, user);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
Assert.assertEquals(ugi.getShortUserName(), user);
checkUgiFromAuth(ugi);
// if there is a remote user via SPNEGO, ignore the user.name and doAs params
request = getMockRequest(realUser, user, user);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
Assert.assertEquals(ugi.getShortUserName(), user);
checkUgiFromAuth(ugi);
// try to get a proxy user with an unauthorized user
try {
request = getMockRequest(user, null, realUser);
JspHelper.getUGI(context, request, conf);
Assert.fail("bad proxy request allowed");
} catch (AuthorizationException ae) {
Assert.assertEquals(
"User: " + user + " is not allowed to impersonate " + realUser,
ae.getMessage());
}
try {
request = getMockRequest(user, user, realUser);
JspHelper.getUGI(context, request, conf);
Assert.fail("bad proxy request allowed");
} catch (AuthorizationException ae) {
Assert.assertEquals(
"User: " + user + " is not allowed to impersonate " + realUser,
ae.getMessage());
}
} |
@Override
protected void onPreExecute() {
super.onPreExecute();
if (dbViewerFragment.databaseViewerActivity.getAppTheme().equals(AppTheme.DARK)
|| dbViewerFragment.databaseViewerActivity.getAppTheme().equals(AppTheme.BLACK)) {
htmlInit = "<html><body><table border='1' style='width:100%;color:#ffffff'>";
} else {
htmlInit = "<html><body><table border='1' style='width:100%;color:#000000'>";
}
stringBuilder.append(htmlInit);
dbViewerFragment.loadingText.setVisibility(View.VISIBLE);
} | @Test
public void testOnPreExecute() {
DbViewerFragment mock = mock(DbViewerFragment.class);
AppCompatTextView loadingText =
new AppCompatTextView(ApplicationProvider.getApplicationContext());
mock.loadingText = loadingText;
mock.databaseViewerActivity = mock(DatabaseViewerActivity.class);
mock.loadingText.setVisibility(View.GONE);
when(mock.databaseViewerActivity.getAppTheme()).thenReturn(AppTheme.DARK);
DbViewerTask task = new DbViewerTask(null, null, webView, mock);
task.onPreExecute();
assertEquals(VISIBLE, mock.loadingText.getVisibility());
assertTrue(task.htmlInit.contains("color:#ffffff"));
assertEquals("utf-8", webView.getSettings().getDefaultTextEncodingName());
when(mock.databaseViewerActivity.getAppTheme()).thenReturn(AppTheme.BLACK);
task = new DbViewerTask(null, null, webView, mock);
task.onPreExecute();
assertEquals(VISIBLE, mock.loadingText.getVisibility());
assertTrue(task.htmlInit.contains("color:#ffffff"));
assertEquals("utf-8", webView.getSettings().getDefaultTextEncodingName());
when(mock.databaseViewerActivity.getAppTheme()).thenReturn(AppTheme.LIGHT);
task = new DbViewerTask(null, null, webView, mock);
task.onPreExecute();
assertEquals(VISIBLE, mock.loadingText.getVisibility());
assertTrue(task.htmlInit.contains("color:#000000"));
assertEquals("utf-8", webView.getSettings().getDefaultTextEncodingName());
} |
@Override
public PMMLRequestData getRequestData() {
return (PMMLRequestData) get(PMML_REQUEST_DATA);
} | @Test
void getRequestData() {
PMMLRequestData requestData = new PMMLRequestData();
PMMLRuntimeContextImpl retrieved = new PMMLRuntimeContextImpl(requestData, fileName, memoryCompilerClassLoader);
assertThat(retrieved.getRequestData()).isEqualTo(requestData);
} |
public static URL socketToUrl(InetSocketAddress socketAddress) {
String hostString = socketAddress.getHostString();
// If the hostString is an IPv6 address, it needs to be enclosed in square brackets
// at the beginning and end.
if (socketAddress.getAddress() != null
&& socketAddress.getAddress() instanceof Inet6Address
&& hostString.equals(socketAddress.getAddress().getHostAddress())) {
hostString = "[" + hostString + "]";
}
String hostPort = hostString + ":" + socketAddress.getPort();
return validateHostPortString(hostPort);
} | @Test
void testSocketToUrl() throws MalformedURLException {
InetSocketAddress socketAddress = new InetSocketAddress("foo.com", 8080);
URL expectedResult = new URL("http://foo.com:8080");
assertThat(socketToUrl(socketAddress)).isEqualTo(expectedResult);
} |
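// Hedged sketch of the IPv6 branch: a resolved IPv6 literal is wrapped in square
// brackets before the port is appended, as URLs require. The expanded host string
// below is an assumption about how the JDK renders ::1.
InetSocketAddress v6 = new InetSocketAddress("::1", 8080);
URL url = socketToUrl(v6); // expected form: http://[0:0:0:0:0:0:0:1]:8080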
@Deprecated
@Override
public Boolean hasAppendsOnly(org.apache.hadoop.hive.ql.metadata.Table hmsTable, SnapshotContext since) {
TableDesc tableDesc = Utilities.getTableDesc(hmsTable);
Table table = IcebergTableUtil.getTable(conf, tableDesc.getProperties());
return hasAppendsOnly(table.snapshots(), since);
} | @Test
public void testHasAppendsOnlyTrueWhenGivenSnapShotIsNull() {
HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler();
Boolean result = storageHandler.hasAppendsOnly(singletonList(appendSnapshot), null);
assertThat(result, is(true));
} |
public void findIntersections(Rectangle query, Consumer<T> consumer)
{
IntArrayList todoNodes = new IntArrayList(levelOffsets.length * degree);
IntArrayList todoLevels = new IntArrayList(levelOffsets.length * degree);
int rootLevel = levelOffsets.length - 1;
int rootIndex = levelOffsets[rootLevel];
if (doesIntersect(query, rootIndex)) {
todoNodes.push(rootIndex);
todoLevels.push(rootLevel);
}
while (!todoNodes.isEmpty()) {
int nodeIndex = todoNodes.popInt();
int level = todoLevels.popInt();
if (level == 0) {
// This is a leaf node
consumer.accept(items[nodeIndex / ENVELOPE_SIZE]);
}
else {
int childrenOffset = getChildrenOffset(nodeIndex, level);
for (int i = 0; i < degree; i++) {
int childIndex = childrenOffset + ENVELOPE_SIZE * i;
if (doesIntersect(query, childIndex)) {
todoNodes.push(childIndex);
todoLevels.push(level - 1);
}
}
}
}
} | @Test
public void testSingletonFlatbush()
{
List<Rectangle> items = ImmutableList.of(new Rectangle(0, 0, 1, 1));
Flatbush<Rectangle> rtree = new Flatbush<>(items.toArray(new Rectangle[] {}));
assertEquals(findIntersections(rtree, EVERYTHING), items);
// hit
assertEquals(findIntersections(rtree, new Rectangle(1, 1, 2, 2)), items);
// miss
assertEquals(findIntersections(rtree, new Rectangle(-1, -1, -0.1, -0.1)), ImmutableList.of());
} |
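// Hedged usage sketch of the raw Consumer-based API (the findIntersections helper
// used in this suite presumably wraps it like this): hits are pushed into a list
// as the traversal visits intersecting leaves.
List<Rectangle> hits = new ArrayList<>();
rtree.findIntersections(new Rectangle(1, 1, 2, 2), hits::add);
assertEquals(items, hits);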
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
SendMessageContext sendMessageContext;
switch (request.getCode()) {
case RequestCode.CONSUMER_SEND_MSG_BACK:
return this.consumerSendMsgBack(ctx, request);
default:
SendMessageRequestHeader requestHeader = parseRequestHeader(request);
if (requestHeader == null) {
return null;
}
TopicQueueMappingContext mappingContext = this.brokerController.getTopicQueueMappingManager().buildTopicQueueMappingContext(requestHeader, true);
RemotingCommand rewriteResult = this.brokerController.getTopicQueueMappingManager().rewriteRequestForStaticTopic(requestHeader, mappingContext);
if (rewriteResult != null) {
return rewriteResult;
}
sendMessageContext = buildMsgContext(ctx, requestHeader, request);
try {
this.executeSendMessageHookBefore(sendMessageContext);
} catch (AbortProcessException e) {
final RemotingCommand errorResponse = RemotingCommand.createResponseCommand(e.getResponseCode(), e.getErrorMessage());
errorResponse.setOpaque(request.getOpaque());
return errorResponse;
}
RemotingCommand response;
clearReservedProperties(requestHeader);
if (requestHeader.isBatch()) {
response = this.sendBatchMessage(ctx, request, sendMessageContext, requestHeader, mappingContext,
(ctx1, response1) -> executeSendMessageHookAfter(response1, ctx1));
} else {
response = this.sendMessage(ctx, request, sendMessageContext, requestHeader, mappingContext,
(ctx12, response12) -> executeSendMessageHookAfter(response12, ctx12));
}
return response;
}
} | @Test
public void testProcessRequest() throws Exception {
when(messageStore.asyncPutMessage(any(MessageExtBrokerInner.class))).
thenReturn(CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_OK, new AppendMessageResult(AppendMessageStatus.PUT_OK))));
assertPutResult(ResponseCode.SUCCESS);
} |
@VisibleForTesting
public ProcessContinuation run(
RestrictionTracker<OffsetRange, Long> tracker,
OutputReceiver<PartitionRecord> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator,
InitialPipelineState initialPipelineState)
throws Exception {
LOG.debug("DNP: Watermark: " + watermarkEstimator.getState());
LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom());
if (tracker.currentRestriction().getFrom() == 0L) {
if (!tracker.tryClaim(0L)) {
LOG.error(
"Could not claim initial DetectNewPartition restriction. No partitions are outputted.");
return ProcessContinuation.stop();
}
watermarkEstimator.setWatermark(initialPipelineState.getStartTime());
if (initialPipelineState.isResume()) {
resumeFromPreviousPipelineAction.run(receiver);
} else {
generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime());
}
return ProcessContinuation.resume();
}
// Create a new partition reconciler every run to reset the state each time.
partitionReconciler = new PartitionReconciler(metadataTableDao, metrics);
orphanedMetadataCleaner = new OrphanedMetadataCleaner();
// Calculating the new value of watermark is a resource intensive process. We have to do a full
// scan of the metadata table and then ensure we're not missing partitions and then calculate
// the low watermark. This is usually a fairly fast process even with thousands of partitions.
// However, sometimes this may take so long that the runner checkpoints before the watermark is
// calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to
// restart, wasting the resources spent calculating the watermark. On restart, we will try to
// calculate the watermark again. The problem causing the slow watermark calculation can persist
// leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate
// the watermark after successful tryClaim. Then we write to the metadata table the new
// watermark. On the start of each run we read the watermark and update the DoFn's watermark.
DetectNewPartitionsState detectNewPartitionsState =
metadataTableDao.readDetectNewPartitionsState();
if (detectNewPartitionsState != null) {
watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark());
}
// Terminate if endTime <= watermark that means all partitions have read up to or beyond
// watermark. We no longer need to manage splits and merges, we can terminate.
if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) {
tracker.tryClaim(tracker.currentRestriction().getTo());
return ProcessContinuation.stop();
}
if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) {
LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction());
return ProcessContinuation.stop();
}
// Read StreamPartitions to calculate watermark.
List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null;
if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) {
streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark();
}
// Process NewPartitions and track the ones successfully outputted.
List<NewPartition> newPartitions = metadataTableDao.readNewPartitions();
List<ByteStringRange> outputtedNewPartitions = new ArrayList<>();
for (NewPartition newPartition : newPartitions) {
if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) {
outputtedNewPartitions.add(newPartition.getPartition());
} else if (streamPartitionsWithWatermark != null) {
// streamPartitionsWithWatermark is only non-null on runs where we update the
// watermark, and reconciliation only runs on those runs, so incomplete
// NewPartitions are tracked only when reconciliation will actually happen.
partitionReconciler.addIncompleteNewPartitions(newPartition);
orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition);
}
}
// Process the watermark using read StreamPartitions and NewPartitions.
if (streamPartitionsWithWatermark != null) {
Optional<Instant> maybeWatermark =
getNewWatermark(streamPartitionsWithWatermark, newPartitions);
maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark);
// Only start reconciling after the pipeline has been running for a while.
if (tracker.currentRestriction().getFrom() > 50) {
// Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being
// streamed. This isn't perfect because there may be partitions moving between
// StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not
// include NewPartitions marked as deleted from a previous DNP run not yet processed by
// RCSP.
List<ByteStringRange> existingPartitions =
streamPartitionsWithWatermark.stream()
.map(StreamPartitionWithWatermark::getPartition)
.collect(Collectors.toList());
existingPartitions.addAll(outputtedNewPartitions);
List<ByteStringRange> missingStreamPartitions =
getMissingPartitionsFromEntireKeySpace(existingPartitions);
orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions);
partitionReconciler.addMissingPartitions(missingStreamPartitions);
processReconcilerPartitions(
receiver, watermarkEstimator, initialPipelineState.getStartTime());
cleanUpOrphanedMetadata();
}
}
return ProcessContinuation.resume().withResumeDelay(Duration.millis(100));
} | @Test
public void testBackToBackReconcile() throws Exception {
// We only start reconciling after 50.
// We advance the watermark on every 2nd restriction tracker advancement.
OffsetRange offsetRange = new OffsetRange(52, Long.MAX_VALUE);
when(tracker.currentRestriction()).thenReturn(offsetRange);
when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
// Write 2 partitions to the table, missing [a, b) because [a, b) is trying to merge into [a, c)
ByteStringRange partitionEmptyA = ByteStringRange.create("", "a");
Instant watermarkEmptyA = endTime.plus(Duration.millis(100));
PartitionRecord partitionRecordEmptyA =
new PartitionRecord(
partitionEmptyA,
watermarkEmptyA,
UniqueIdGenerator.getNextId(),
watermarkEmptyA,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(partitionRecordEmptyA);
ByteStringRange partitionBEmpty = ByteStringRange.create("b", "");
Instant watermarkBEmpty = endTime.plus(Duration.millis(1));
PartitionRecord partitionRecordBEmpty =
new PartitionRecord(
partitionBEmpty,
watermarkBEmpty,
UniqueIdGenerator.getNextId(),
watermarkBEmpty,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(partitionRecordBEmpty);
// NewPartition [a, b) trying to merge into [a, c)
ByteStringRange parentPartitionAB = ByteStringRange.create("a", "b");
Instant watermarkAB = startTime;
ChangeStreamContinuationToken tokenAB =
ChangeStreamContinuationToken.create(parentPartitionAB, "ab");
ByteStringRange childPartitionAC = ByteStringRange.create("a", "c");
NewPartition newPartitionACFromAB =
new NewPartition(childPartitionAC, Collections.singletonList(tokenAB), watermarkAB);
metadataTableDao.writeNewPartition(newPartitionACFromAB);
// Artificially create that partitionAB has been missing for more than 1 minute.
HashMap<ByteStringRange, Instant> missingPartitionDurations = new HashMap<>();
missingPartitionDurations.put(
parentPartitionAB, Instant.now().minus(Duration.standardSeconds(121)));
metadataTableDao.writeDetectNewPartitionMissingPartitions(missingPartitionDurations);
assertEquals(1, metadataTableDao.readNewPartitions().size());
assertEquals(
DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
// AB should be reconciled with token because it's been missing for more than 1 minute
verify(receiver, times(1))
.outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH));
assertEquals(parentPartitionAB, partitionRecordArgumentCaptor.getValue().getPartition());
assertEquals(watermarkAB, partitionRecordArgumentCaptor.getValue().getParentLowWatermark());
assertEquals(endTime, partitionRecordArgumentCaptor.getValue().getEndTime());
assertEquals(
partitionRecordArgumentCaptor.getValue().getChangeStreamContinuationTokens(),
Collections.singletonList(tokenAB));
assertTrue(metadataTableDao.readNewPartitions().isEmpty());
assertTrue(metadataTableDao.readDetectNewPartitionMissingPartitions().isEmpty());
clearInvocations(receiver);
// The reconciled partition was not processed by RCSP, so NewPartition is still marked for
// deletion and the partition is still considered missing. We run DNP again.
assertEquals(
DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
// We don't reconcile the partition again.
verify(receiver, never()).outputWithTimestamp(any(), any());
} |
boolean hasMoreAvailableCapacityThan(final ClientState other) {
if (capacity <= 0) {
throw new IllegalStateException("Capacity of this ClientState must be greater than 0.");
}
if (other.capacity <= 0) {
throw new IllegalStateException("Capacity of other ClientState must be greater than 0");
}
final double otherLoad = (double) other.assignedTaskCount() / other.capacity;
final double thisLoad = (double) assignedTaskCount() / capacity;
if (thisLoad < otherLoad) {
return true;
} else if (thisLoad > otherLoad) {
return false;
} else {
return capacity > other.capacity;
}
} | @Test
public void shouldThrowIllegalStateExceptionIfCapacityOfThisClientStateIsZero() {
assertThrows(IllegalStateException.class, () -> zeroCapacityClient.hasMoreAvailableCapacityThan(client));
} |
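// Hedged worked example of the comparison (client names hypothetical):
// load = assignedTaskCount / capacity
// clientA: 2 tasks, capacity 4 -> load 0.5
// clientB: 1 task,  capacity 2 -> load 0.5
// equal loads fall through to the capacity tie-break, so
// clientA.hasMoreAvailableCapacityThan(clientB) == true because 4 > 2.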
@Override
public char[] convert(String source) {
return isNotEmpty(source) ? source.toCharArray() : null;
} | @Test
void testConvert() {
assertArrayEquals(new char[] {'1', '2', '3'}, converter.convert("123"));
assertNull(converter.convert(null));
} |