focal_method | test_case
---|---|
public static Write write() {
return new Write(null /* Configuration */, "");
} | @Test
public void testWriteValidationFailsMissingTable() {
HBaseIO.Write write = HBaseIO.write().withConfiguration(conf);
thrown.expect(IllegalArgumentException.class);
write.expand(null /* input */);
} |
@Private
public void handleEvent(JobHistoryEvent event) {
synchronized (lock) {
// If this is the AM started event, set up the writer
if (event.getHistoryEvent().getEventType() == EventType.AM_STARTED) {
try {
AMStartedEvent amStartedEvent =
(AMStartedEvent) event.getHistoryEvent();
setupEventWriter(event.getJobID(), amStartedEvent);
} catch (IOException ioe) {
LOG.error("Error JobHistoryEventHandler in handleEvent: " + event,
ioe);
throw new YarnRuntimeException(ioe);
}
}
// For all events
// (1) Write it out
// (2) Process it for JobSummary
// (3) Process it for ATS (if enabled)
MetaInfo mi = fileMap.get(event.getJobID());
try {
HistoryEvent historyEvent = event.getHistoryEvent();
if (!(historyEvent instanceof NormalizedResourceEvent)) {
mi.writeEvent(historyEvent);
}
processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(),
event.getJobID());
if (LOG.isDebugEnabled()) {
LOG.debug("In HistoryEventHandler "
+ event.getHistoryEvent().getEventType());
}
} catch (IOException e) {
LOG.error("Error writing History Event: " + event.getHistoryEvent(),
e);
throw new YarnRuntimeException(e);
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) {
JobSubmittedEvent jobSubmittedEvent =
(JobSubmittedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime());
mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName());
}
// Initialize the launchTime in the JobIndexInfo of MetaInfo
if (event.getHistoryEvent().getEventType() == EventType.JOB_INITED) {
JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime());
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) {
JobQueueChangeEvent jQueueEvent =
(JobQueueChangeEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName());
}
// If this is JobFinishedEvent, close the writer and setup the job-index
if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {
try {
JobFinishedEvent jFinishedEvent =
(JobFinishedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(
jFinishedEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
closeEventWriter(event.getJobID());
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
// In case of JOB_ERROR, only process all the Done files (e.g. job
// summary, job history file etc.) if it is the last AM retry.
if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) {
try {
JobUnsuccessfulCompletionEvent jucEvent =
(JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
closeEventWriter(event.getJobID());
if (context.isLastAMRetry()) {
processDoneFiles(event.getJobID());
}
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED
|| event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) {
try {
JobUnsuccessfulCompletionEvent jucEvent =
(JobUnsuccessfulCompletionEvent) event
.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
closeEventWriter(event.getJobID());
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
}
} | @Test (timeout=50000)
public void testMaxUnflushedCompletionEvents() throws Exception {
TestParams t = new TestParams();
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
60 * 1000L);
conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
conf.setInt(
MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 5);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
mockWriter = jheh.getEventWriter();
verify(mockWriter).write(any(HistoryEvent.class));
for (int i = 0; i < 100; i++) {
queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0)));
}
handleNextNEvents(jheh, 9);
verify(mockWriter, times(0)).flush();
handleNextNEvents(jheh, 1);
verify(mockWriter).flush();
handleNextNEvents(jheh, 50);
verify(mockWriter, times(6)).flush();
} finally {
jheh.stop();
verify(mockWriter).close();
}
} |
@Override
public ObjectNode encode(Intent intent, CodecContext context) {
checkNotNull(intent, "Intent cannot be null");
final ObjectNode result = context.mapper().createObjectNode()
.put(TYPE, intent.getClass().getSimpleName())
.put(ID, intent.id().toString())
.put(KEY, intent.key().toString())
.put(APP_ID, UrlEscapers.urlPathSegmentEscaper()
.escape(intent.appId().name()));
if (intent.resourceGroup() != null) {
result.put(RESOURCE_GROUP, intent.resourceGroup().toString());
}
final ArrayNode jsonResources = result.putArray(RESOURCES);
intent.resources()
.forEach(resource -> {
if (resource instanceof Link) {
jsonResources.add(context.codec(Link.class).encode((Link) resource, context));
} else {
jsonResources.add(resource.toString());
}
});
IntentService service = context.getService(IntentService.class);
IntentState state = service.getIntentState(intent.key());
if (state != null) {
result.put(STATE, state.toString());
}
return result;
} | @Test
public void intentWithTreatmentSelectorAndConstraints() {
ConnectPoint ingress = NetTestTools.connectPoint("ingress", 1);
ConnectPoint egress = NetTestTools.connectPoint("egress", 2);
DeviceId did1 = did("device1");
DeviceId did2 = did("device2");
DeviceId did3 = did("device3");
Lambda ochSignal = Lambda.ochSignal(GridType.DWDM, ChannelSpacing.CHL_100GHZ, 4, 8);
final TrafficSelector selector = DefaultTrafficSelector.builder()
.matchIPProtocol((byte) 3)
.matchMplsLabel(MplsLabel.mplsLabel(4))
.add(Criteria.matchOchSignalType(OchSignalType.FIXED_GRID))
.add(Criteria.matchLambda(ochSignal))
.matchEthDst(MacAddress.BROADCAST)
.matchIPDst(IpPrefix.valueOf("1.2.3.4/24"))
.build();
final TrafficTreatment treatment = DefaultTrafficTreatment.builder()
.setMpls(MplsLabel.mplsLabel(44))
.setOutput(PortNumber.CONTROLLER)
.setEthDst(MacAddress.BROADCAST)
.build();
final List<Constraint> constraints =
ImmutableList.of(
new BandwidthConstraint(Bandwidth.bps(1.0)),
new AnnotationConstraint("key", 33.0),
new AsymmetricPathConstraint(),
new LatencyConstraint(Duration.ofSeconds(2)),
new ObstacleConstraint(did1, did2),
new WaypointConstraint(did3));
final PointToPointIntent intent =
PointToPointIntent.builder()
.appId(appId)
.selector(selector)
.treatment(treatment)
.filteredIngressPoint(new FilteredConnectPoint(ingress))
.filteredEgressPoint(new FilteredConnectPoint(egress))
.constraints(constraints)
.build();
final JsonCodec<PointToPointIntent> intentCodec =
context.codec(PointToPointIntent.class);
assertThat(intentCodec, notNullValue());
final ObjectNode intentJson = intentCodec.encode(intent, context);
assertThat(intentJson, matchesIntent(intent));
} |
public static <C> AsyncBuilder<C> builder() {
return new AsyncBuilder<>();
} | @SuppressWarnings("deprecation")
@Test
void whenReturnTypeIsResponseNoErrorHandling() throws Throwable {
Map<String, Collection<String>> headers = new LinkedHashMap<>();
headers.put("Location", Arrays.asList("http://bar.com"));
final Response response = Response.builder().status(302).reason("Found").headers(headers)
.request(Request.create(HttpMethod.GET, "/", Collections.emptyMap(), null, Util.UTF_8))
.body(new byte[0]).build();
ExecutorService execs = Executors.newSingleThreadExecutor();
// fake client as Client.Default follows redirects.
TestInterfaceAsync api = AsyncFeign.<Void>builder()
.client(new AsyncClient.Default<>((request, options) -> response, execs))
.target(TestInterfaceAsync.class, "http://localhost:" + server.getPort());
assertThat(unwrap(api.response()).headers()).hasEntrySatisfying("Location", value -> {
assertThat(value).contains("http://bar.com");
});
execs.shutdown();
} |
@Override
public void put(V e) throws InterruptedException {
RedissonQueueSemaphore semaphore = createSemaphore(e);
semaphore.acquire();
} | @Test
public void testPut() throws InterruptedException {
final RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("bounded-queue");
queue1.trySetCapacity(3);
queue1.add(1);
queue1.add(2);
queue1.add(3);
ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
final AtomicBoolean executed = new AtomicBoolean();
executor.schedule(new Runnable() {
@Override
public void run() {
RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("bounded-queue");
assertThat(queue1.poll()).isEqualTo(1);
executed.set(true);
}
}, 1, TimeUnit.SECONDS);
queue1.put(4);
await().atMost(5, TimeUnit.SECONDS).untilTrue(executed);
assertThat(queue1).containsExactly(2, 3, 4);
executor.shutdown();
assertThat(executor.awaitTermination(1, TimeUnit.MINUTES)).isTrue();
} |
@Override
public ExecuteContext after(ExecuteContext context) {
if (handler != null) {
handler.doAfter(context);
return context;
}
DefaultLitePullConsumerWrapper wrapper = RocketMqPullConsumerController
.getPullConsumerWrapper((DefaultLitePullConsumer) context.getObject());
// After the subscription is canceled, the consumer's subscription information changes, and consumption
// must be enabled or disabled according to the prohibited-topic configuration
disablePullConsumption(wrapper);
return context;
} | @Test
public void testAfter() {
subscription.remove("test-topic");
interceptor.after(context);
Assert.assertEquals(pullConsumerWrapper.getSubscribedTopics().size(), 0);
} |
@Override
public int getRowCount() {
return _groupByResults.size();
} | @Test
public void testGetRowCount() {
// Run the test
final int result = _groupByResultSetUnderTest.getRowCount();
// Verify the results
assertEquals(2, result);
} |
@Override
public boolean isShutdown() {
return delegate.isShutdown();
} | @Test
public void isShutdown_delegates_to_executorService() {
underTest.isShutdown();
inOrder.verify(executorService).isShutdown();
inOrder.verifyNoMoreInteractions();
} |
public Map<String, String> getAllProperties()
{
ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
return builder.put(CONCURRENT_LIFESPANS_PER_TASK, String.valueOf(getConcurrentLifespansPerTask()))
.put(ENABLE_SERIALIZED_PAGE_CHECKSUM, String.valueOf(isEnableSerializedPageChecksum()))
.put(ENABLE_VELOX_EXPRESSION_LOGGING, String.valueOf(isEnableVeloxExpressionLogging()))
.put(ENABLE_VELOX_TASK_LOGGING, String.valueOf(isEnableVeloxTaskLogging()))
.put(HTTP_SERVER_HTTP_PORT, String.valueOf(getHttpServerPort()))
.put(HTTP_SERVER_REUSE_PORT, String.valueOf(isHttpServerReusePort()))
.put(HTTP_SERVER_BIND_TO_NODE_INTERNAL_ADDRESS_ONLY_ENABLED, String.valueOf(isHttpServerBindToNodeInternalAddressOnlyEnabled()))
.put(REGISTER_TEST_FUNCTIONS, String.valueOf(isRegisterTestFunctions()))
.put(HTTP_SERVER_HTTPS_PORT, String.valueOf(getHttpsServerPort()))
.put(HTTP_SERVER_HTTPS_ENABLED, String.valueOf(isEnableHttpsCommunication()))
.put(HTTPS_CIPHERS, String.valueOf(getHttpsCiphers()))
.put(HTTPS_CERT_PATH, String.valueOf(getHttpsCertPath()))
.put(HTTPS_KEY_PATH, String.valueOf(getHttpsKeyPath()))
.put(HTTP_SERVER_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getHttpServerNumIoThreadsHwMultiplier()))
.put(EXCHANGE_HTTP_CLIENT_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getExchangeHttpClientNumIoThreadsHwMultiplier()))
.put(ASYNC_DATA_CACHE_ENABLED, String.valueOf(getAsyncDataCacheEnabled()))
.put(ASYNC_CACHE_SSD_GB, String.valueOf(getAsyncCacheSsdGb()))
.put(CONNECTOR_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getConnectorNumIoThreadsHwMultiplier()))
.put(PRESTO_VERSION, getPrestoVersion())
.put(SHUTDOWN_ONSET_SEC, String.valueOf(getShutdownOnsetSec()))
.put(SYSTEM_MEMORY_GB, String.valueOf(getSystemMemoryGb()))
.put(QUERY_MEMORY_GB, String.valueOf(getQueryMemoryGb()))
.put(USE_MMAP_ALLOCATOR, String.valueOf(getUseMmapAllocator()))
.put(MEMORY_ARBITRATOR_KIND, String.valueOf(getMemoryArbitratorKind()))
.put(MEMORY_ARBITRATOR_CAPACITY_GB, String.valueOf(getMemoryArbitratorCapacityGb()))
.put(MEMORY_ARBITRATOR_RESERVED_CAPACITY_GB, String.valueOf(getMemoryArbitratorReservedCapacityGb()))
.put(MEMORY_POOL_INIT_CAPACITY, String.valueOf(getMemoryPoolInitCapacity()))
.put(MEMORY_POOL_RESERVED_CAPACITY, String.valueOf(getMemoryPoolReservedCapacity()))
.put(MEMORY_POOL_TRANSFER_CAPACITY, String.valueOf(getMemoryPoolTransferCapacity()))
.put(MEMORY_RECLAIM_WAIT_MS, String.valueOf(getMemoryReclaimWaitMs()))
.put(SPILLER_SPILL_PATH, String.valueOf(getSpillerSpillPath()))
.put(TASK_MAX_DRIVERS_PER_TASK, String.valueOf(getMaxDriversPerTask()))
.put(ENABLE_OLD_TASK_CLEANUP, String.valueOf(getOldTaskCleanupMs()))
.put(SHUFFLE_NAME, getShuffleName())
.put(HTTP_SERVER_ACCESS_LOGS, String.valueOf(isEnableHttpServerAccessLog()))
.put(CORE_ON_ALLOCATION_FAILURE_ENABLED, String.valueOf(isCoreOnAllocationFailureEnabled()))
.build();
} | @Test
public void testNativeExecutionNodeConfig()
{
// Test defaults
assertRecordedDefaults(ConfigAssertions.recordDefaults(NativeExecutionNodeConfig.class)
.setNodeEnvironment("spark-velox")
.setNodeLocation("/dummy/location")
.setNodeInternalAddress("127.0.0.1")
.setNodeId(0)
.setNodeMemoryGb(10));
// Test explicit property mapping. Also makes sure properties returned by getAllProperties() covers full property list.
NativeExecutionNodeConfig expected = new NativeExecutionNodeConfig()
.setNodeEnvironment("next-gen-spark")
.setNodeId(1)
.setNodeLocation("/extra/dummy/location")
.setNodeInternalAddress("1.1.1.1")
.setNodeMemoryGb(40);
Map<String, String> properties = expected.getAllProperties();
assertFullMapping(properties, expected);
} |
public static FileRewriteCoordinator get() {
return INSTANCE;
} | @Test
public void testBinPackRewrite() throws NoSuchTableException, IOException {
sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
Dataset<Row> df = newDF(1000);
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
Table table = validationCatalog.loadTable(tableIdent);
Assert.assertEquals("Should produce 4 snapshots", 4, Iterables.size(table.snapshots()));
Dataset<Row> fileDF =
spark.read().format("iceberg").load(tableName(tableIdent.name() + ".files"));
List<Long> fileSizes = fileDF.select("file_size_in_bytes").as(Encoders.LONG()).collectAsList();
long avgFileSize = fileSizes.stream().mapToLong(i -> i).sum() / fileSizes.size();
try (CloseableIterable<FileScanTask> fileScanTasks = table.newScan().planFiles()) {
String fileSetID = UUID.randomUUID().toString();
ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
taskSetManager.stageTasks(table, fileSetID, Lists.newArrayList(fileScanTasks));
// read and pack original 4 files into 2 splits
Dataset<Row> scanDF =
spark
.read()
.format("iceberg")
.option(SparkReadOptions.SCAN_TASK_SET_ID, fileSetID)
.option(SparkReadOptions.SPLIT_SIZE, Long.toString(avgFileSize * 2))
.option(SparkReadOptions.FILE_OPEN_COST, "0")
.load(tableName);
// write the packed data into new files where each split becomes a new file
scanDF
.writeTo(tableName)
.option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, fileSetID)
.append();
// commit the rewrite
FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get();
Set<DataFile> rewrittenFiles =
taskSetManager.fetchTasks(table, fileSetID).stream()
.map(t -> t.asFileScanTask().file())
.collect(Collectors.toSet());
Set<DataFile> addedFiles = rewriteCoordinator.fetchNewFiles(table, fileSetID);
table.newRewrite().rewriteFiles(rewrittenFiles, addedFiles).commit();
}
table.refresh();
Map<String, String> summary = table.currentSnapshot().summary();
Assert.assertEquals("Deleted files count must match", "4", summary.get("deleted-data-files"));
Assert.assertEquals("Added files count must match", "2", summary.get("added-data-files"));
Object rowCount = scalarSql("SELECT count(*) FROM %s", tableName);
Assert.assertEquals("Row count must match", 4000L, rowCount);
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void createSetAndAddStickerTgs() {
String setName = "test" + System.currentTimeMillis() + "_by_pengrad_test_bot";
InputSticker[] stickers = new InputSticker[]{new InputSticker(stickerFileAnim, Sticker.Format.animated, new String[]{"\uD83D\uDE00"})};
BaseResponse response = bot.execute(
new CreateNewStickerSet(chatId, setName, "test1", stickers));
assertTrue(response.isOk());
} |
@VisibleForTesting
static void configureSslEngineFactory(
final KsqlConfig config,
final SslEngineFactory sslFactory
) {
sslFactory
.configure(config.valuesWithPrefixOverride(KsqlConfig.KSQL_SCHEMA_REGISTRY_PREFIX));
} | @Test
public void shouldPickUpNonPrefixedSslConfig() {
// Given:
final KsqlConfig config = config(
SslConfigs.SSL_PROTOCOL_CONFIG, "SSLv3"
);
final Map<String, Object> expectedConfigs = defaultConfigs();
expectedConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "SSLv3");
// When:
KsqlSchemaRegistryClientFactory.configureSslEngineFactory(config, sslEngineFactory);
// Then:
verify(sslEngineFactory).configure(expectedConfigs);
} |
@Override
public int indexOfValue( String valueName ) {
if ( valueName == null ) {
return -1;
}
lock.writeLock().lock();
try {
Integer index = cache.findAndCompare( valueName, valueMetaList );
for ( int i = 0; ( index == null ) && ( i < valueMetaList.size() ); i++ ) {
if ( valueName.equalsIgnoreCase( valueMetaList.get( i ).getName() ) ) {
index = i;
// It is possible that several threads call this store simultaneously,
// but that does no harm: they will all put the same value,
// because valueMetaList is protected from modification by the lock
cache.storeMapping( valueName, index );
needRealClone = null;
}
}
if ( index == null ) {
return -1;
}
return index;
} finally {
lock.writeLock().unlock();
}
} | @Test
public void testIndexOfValue() {
List<ValueMetaInterface> list = rowMeta.getValueMetaList();
assertEquals( 0, list.indexOf( string ) );
assertEquals( 1, list.indexOf( integer ) );
assertEquals( 2, list.indexOf( date ) );
} |
public static boolean shouldLoadInIsolation(String name) {
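// A note on the predicate below (derived from the expression itself): a class is
// loaded in isolation unless its name matches EXCLUDE and is not re-included by INCLUDE.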
return !(EXCLUDE.matcher(name).matches() && !INCLUDE.matcher(name).matches());
} | @Test
public void testAllowedBasicAuthExtensionClasses() {
List<String> basicAuthExtensionClasses = Arrays.asList(
"org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension"
//"org.apache.kafka.connect.rest.basic.auth.extension.JaasBasicAuthFilter", TODO fix?
//"org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule" TODO fix?
);
for (String clazz : basicAuthExtensionClasses) {
assertTrue(PluginUtils.shouldLoadInIsolation(clazz),
clazz + " from 'basic-auth-extension' is not loaded in isolation but should be");
}
} |
public boolean isAvailable(String featureName) {
String name = FEATURE_PREFIX + featureName;
String sysprop = System.getProperty(name);
if (sysprop != null) {
return Boolean.parseBoolean(sysprop);
} else {
return Boolean.parseBoolean(features.getProperty(name, "true"));
}
} | @Test
public void featureSysOverride() {
Features features = new Features();
assertFalse(features.isAvailable("A"));
System.setProperty("org.infinispan.feature.A", "true");
System.setProperty("org.infinispan.feature.B", "false");
boolean a = features.isAvailable("A");
boolean b = features.isAvailable("B");
System.clearProperty("org.infinispan.feature.A");
System.clearProperty("org.infinispan.feature.B");
assertTrue(a);
assertFalse(b);
} |
public static long toMillis(long day, long hour, long minute, long second, long millis)
{
try {
long value = millis;
value = addExact(value, multiplyExact(day, MILLIS_IN_DAY));
value = addExact(value, multiplyExact(hour, MILLIS_IN_HOUR));
value = addExact(value, multiplyExact(minute, MILLIS_IN_MINUTE));
value = addExact(value, multiplyExact(second, MILLIS_IN_SECOND));
return value;
}
catch (ArithmeticException e) {
throw new IllegalArgumentException(e);
}
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testOverflow()
{
long days = (Long.MAX_VALUE / DAYS.toMillis(1)) + 1;
toMillis(days, 0, 0, 0, 0);
} |
public long timeout()
{
if (tickets.isEmpty()) {
return -1;
}
sortIfNeeded();
// Tickets are sorted, so check first ticket
Ticket first = tickets.get(0);
long time = first.start - now() + first.delay;
if (time > 0) {
return time;
}
else {
return 0;
}
} | @Test
public void testNoTicket()
{
assertThat(tickets.timeout(), is(-1L));
} |
@Override
@SuppressWarnings("MissingDefault")
public boolean offer(final E e) {
if (e == null) {
throw new NullPointerException();
}
long mask;
E[] buffer;
long pIndex;
while (true) {
long producerLimit = lvProducerLimit();
pIndex = lvProducerIndex(this);
// the lower bit indicates a resize in progress; if we see it, spin until it's cleared
if ((pIndex & 1) == 1) {
continue;
}
// pIndex is even (lower bit is 0) -> actual index is (pIndex >> 1)
// mask/buffer may get changed by resizing -> only use for array access after successful CAS.
mask = this.producerMask;
buffer = this.producerBuffer;
// a successful CAS ties the ordering, lv(pIndex)-[mask/buffer]->cas(pIndex)
// assumption behind this optimization is that queue is almost always empty or near empty
if (producerLimit <= pIndex) {
int result = offerSlowPath(mask, pIndex, producerLimit);
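// Result codes, as implied by the switch below: 0 = the limit was stale, retry the CAS;
// 1 = retry the whole loop; 2 = queue is full; 3 = resize the buffer and insert.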
switch (result) {
case 0:
break;
case 1:
continue;
case 2:
return false;
case 3:
resize(mask, buffer, pIndex, e);
return true;
}
}
if (casProducerIndex(this, pIndex, pIndex + 2)) {
break;
}
}
// INDEX visible before ELEMENT, consistent with consumer expectation
final long offset = modifiedCalcElementOffset(pIndex, mask);
soElement(buffer, offset, e);
return true;
} | @Test(dataProvider = "empty")
public void manyProducers_noConsumer(MpscGrowableArrayQueue<Integer> queue) {
var count = new AtomicInteger();
ConcurrentTestHarness.timeTasks(NUM_PRODUCERS, () -> {
for (int i = 0; i < PRODUCE; i++) {
if (queue.offer(i)) {
count.incrementAndGet();
}
}
});
assertThat(queue).hasSize(count.get());
} |
@Override
public VectorIndexConfig setName(String name) {
validateName(name);
this.indexName = name;
return this;
} | @Test
public void constructorNameValidation_failed() {
assertThatThrownBy(() -> new VectorIndexConfig().setName("asd*%6(&"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("The name of the vector index should "
+ "only consist of letters, numbers, and the symbols \"-\" or \"_\".");
} |
public Result toResult(String responseBody) {
try {
Result result = new Result();
Map<String, Object> map;
try {
map = GSON.fromJson(responseBody, new TypeToken<Map<String, Object>>() {}.getType());
} catch (Exception e) {
throw new RuntimeException("Check connection result should be returned as map, with status represented as string and messages represented as list");
}
if (map == null || map.isEmpty()) {
throw new RuntimeException("Empty response body");
}
String status;
try {
status = (String) map.get("status");
} catch (Exception e) {
throw new RuntimeException("Check connection 'status' should be of type string");
}
if (isEmpty(status)) {
throw new RuntimeException("Check connection 'status' is a required field");
}
if ("success".equalsIgnoreCase(status)) {
result.withSuccessMessages(new ArrayList<>());
} else {
result.withErrorMessages(new ArrayList<>());
}
if (map.containsKey("messages") && map.get("messages") != null) {
List<?> messages;
try {
messages = (List<?>) map.get("messages");
} catch (Exception e) {
throw new RuntimeException("Check connection 'messages' should be of type list of string");
}
if (!messages.isEmpty()) {
for (Object message : messages) {
if (!(message instanceof String)) {
throw new RuntimeException("Check connection 'message' should be of type string");
}
}
if (result.isSuccessful()) {
//noinspection unchecked
result.withSuccessMessages((List<String>) messages);
} else {
//noinspection unchecked
result.withErrorMessages((List<String>) messages);
}
}
}
return result;
} catch (Exception e) {
throw new RuntimeException(format("Unable to de-serialize json response. %s", e.getMessage()));
}
} | @Test
public void shouldBuildFailureResultFromResponseBody() {
String responseBody = "{\"status\":\"failure\",messages=[\"message-one\",\"message-two\"]}";
Result result = messageHandler.toResult(responseBody);
assertFailureResult(result, List.of("message-one", "message-two"));
} |
@Override
public OptionalLong apply(OptionalLong previousSendTimeNs) {
long delayNs;
if (previousGlobalFailures > 0) {
// If there were global failures (like a response timeout), we want to wait for the
// full backoff period.
delayNs = backoff.backoff(previousGlobalFailures);
} else if ((numReadyRequests > MAX_ASSIGNMENTS_PER_REQUEST) && !hasInflightRequests) {
// If there were no previous failures, and we have lots of requests, send it as soon
// as possible.
delayNs = 0;
} else {
// Otherwise, use the standard delay period. This helps to promote batching, which
// reduces load on the controller.
delayNs = backoff.initialInterval();
}
long newSendTimeNs = nowNs + delayNs;
if (previousSendTimeNs.isPresent() && previousSendTimeNs.getAsLong() < newSendTimeNs) {
// If the previous send time was before the new one we calculated, go with the
// previous one.
return previousSendTimeNs;
}
// Otherwise, return our new send time.
return OptionalLong.of(newSendTimeNs);
} | @Test
public void applyAfterDispatchInterval() {
assertEquals(OptionalLong.of(BACKOFF.initialInterval()),
new AssignmentsManagerDeadlineFunction(BACKOFF, 0, 0, false, 12).
apply(OptionalLong.empty()));
} |
@InvokeOnHeader(Web3jConstants.ETH_COMPILE_LLL)
void ethCompileLLL(Message message) throws IOException {
String sourceCode = message.getHeader(Web3jConstants.SOURCE_CODE, configuration::getSourceCode, String.class);
Request<?, EthCompileLLL> request = web3j.ethCompileLLL(sourceCode);
setRequestId(message, request);
EthCompileLLL response = request.send();
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.getCompiledSourceCode());
}
} | @Test
public void ethCompileLLLTest() throws Exception {
EthCompileLLL response = Mockito.mock(EthCompileLLL.class);
Mockito.when(mockWeb3j.ethCompileLLL(any())).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.getCompiledSourceCode()).thenReturn("test");
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_COMPILE_LLL);
template.send(exchange);
String body = exchange.getIn().getBody(String.class);
assertEquals("test", body);
} |
@Override
public boolean match(String attributeValue) {
if (attributeValue == null) {
return false;
}
switch (type) {
case Equals:
return attributeValue.equals(value);
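// Presumably 'length' records the exact required string length when the LIKE pattern's
// wildcards are all single-character '_' (e.g. "ab_"); -1 means no length constraint.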
case StartsWith:
return (length == -1 || length == attributeValue.length()) && attributeValue.startsWith(value);
case EndsWith:
return (length == -1 || length == attributeValue.length()) && attributeValue.endsWith(value);
case Contains:
return attributeValue.contains(value);
case Regexp:
return regexPattern.matcher(attributeValue).matches();
default:
throw new IllegalStateException("Unexpected type " + type);
}
} | @Test
public void testDegeneratedStartsWith() {
assertTrue(new LikeCondition("ab%").match("ab"));
assertTrue(new LikeCondition("ab%").match("abx"));
assertTrue(new LikeCondition("ab%").match("abxx"));
assertFalse(new LikeCondition("ab%").match("xab"));
assertFalse(new LikeCondition("ab%").match("axb"));
assertTrue(new LikeCondition("ab_").match("abc"));
assertFalse(new LikeCondition("ab_").match("ab"));
assertFalse(new LikeCondition("ab_").match("abxx"));
assertFalse(new LikeCondition("ab_").match("xab"));
assertFalse(new LikeCondition("ab_").match("xaby"));
} |
@Override
public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment,
InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) {
validateSegmentAssignmentStrategy(instancePartitions);
return SegmentAssignmentUtils.assignSegmentWithoutReplicaGroup(currentAssignment, instancePartitions, _replication);
} | @Test
public void testBootstrapTable() {
Map<String, Map<String, String>> currentAssignment = new TreeMap<>();
for (String segmentName : SEGMENTS) {
List<String> instancesAssigned =
_segmentAssignment.assignSegment(segmentName, currentAssignment, _instancePartitionsMap);
currentAssignment
.put(segmentName, SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE));
}
// Bootstrap table should reassign all segments based on their alphabetical order
RebalanceConfig rebalanceConfig = new RebalanceConfig();
rebalanceConfig.setBootstrap(true);
Map<String, Map<String, String>> newAssignment =
_segmentAssignment.rebalanceTable(currentAssignment, _instancePartitionsMap, null, null, rebalanceConfig);
assertEquals(newAssignment.size(), NUM_SEGMENTS);
List<String> sortedSegments = new ArrayList<>(SEGMENTS);
sortedSegments.sort(null);
for (int i = 0; i < NUM_SEGMENTS; i++) {
assertEquals(newAssignment.get(sortedSegments.get(i)), currentAssignment.get(SEGMENTS.get(i)));
}
} |
@Override
public ObjectNode encode(MastershipRole mastershipRole, CodecContext context) {
checkNotNull(mastershipRole, "MastershipRole cannot be null");
ObjectNode result = context.mapper().createObjectNode()
.put(ROLE, mastershipRole.name());
return result;
} | @Test
public void testMastershipRoleEncode() {
MastershipRole mastershipRole = MASTER;
ObjectNode mastershipRoleJson = mastershipRoleCodec.encode(mastershipRole, context);
assertThat(mastershipRoleJson, MastershipRoleJsonMatcher.matchesMastershipRole(mastershipRole));
} |
public static Builder withMaximumSizeBytes(long maxBloomFilterSizeBytes) {
checkArgument(maxBloomFilterSizeBytes > 0, "Expected Bloom filter size limit to be positive.");
long optimalNumberOfElements =
optimalNumInsertions(maxBloomFilterSizeBytes, DEFAULT_FALSE_POSITIVE_PROBABILITY);
checkArgument(
optimalNumberOfElements <= MAX_ELEMENTS,
"The specified size limit would attempt to create a Bloom filter builder larger than "
+ "the maximum supported size of 2^63.");
return withMaximumNumberOfInsertionsForOptimalBloomFilter(optimalNumberOfElements);
} | @Test
public void testBuilder() throws Exception {
ScalableBloomFilter.Builder builder = ScalableBloomFilter.withMaximumSizeBytes(MAX_SIZE);
assertTrue("Expected Bloom filter to have been modified.", builder.put(BUFFER));
// Re-adding should skip and not record the insertion.
assertFalse("Expected Bloom filter to not have been modified.", builder.put(BUFFER));
// Verify insertion
int maxValue = insertAndVerifyContents(builder, 31);
// Verify that the decoded value contains all the values and that it is much smaller
// than the maximum size.
verifyCoder(builder.build(), maxValue, MAX_SIZE / 50);
} |
public static void trackGeTuiNotificationClicked(String title, String content, String sfData, long time) {
trackNotificationOpenedEvent(sfData, title, content, "GeTui", null, time);
} | @Test
public void trackGeTuiNotificationClicked() {
SensorsDataAPI sensorsDataAPI = SAHelper.initSensors(mApplication);
final CountDownLatch countDownLatch = new CountDownLatch(1);
sensorsDataAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
assertEquals("$AppPushClick", eventName);
assertEquals("mock_title", eventProperties.optString("$app_push_msg_title"));
assertEquals("mock_content", eventProperties.optString("$app_push_msg_content"));
assertEquals("JPush", eventProperties.optString("$app_push_service_name"));
countDownLatch.countDown();
return true;
}
});
try {
Thread.sleep(1000);
Robolectric.getForegroundThreadScheduler().advanceTo(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
PushAutoTrackHelper.trackGeTuiNotificationClicked("mock_title", "mock_content", "", 0L);
try {
countDownLatch.await(1000, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
} |
@Override
public String toString() {
return getClass().getSimpleName() + " barCount: " + barCount;
} | @Test
public void naNValuesInInterval() {
BaseBarSeries series = new BaseBarSeries("NaN test");
for (long i = 0; i <= 10; i++) { // (0, NaN, 2, NaN, 4, NaN, 6, NaN, 8, ...)
Num highPrice = i % 2 == 0 ? series.numOf(i) : NaN;
series.addBar(ZonedDateTime.now().plusDays(i), NaN, highPrice, NaN, NaN, NaN);
}
AroonUpIndicator aroonUpIndicator = new AroonUpIndicator(series, 5);
for (int i = series.getBeginIndex(); i <= series.getEndIndex(); i++) {
if (i % 2 != 0) {
assertEquals(NaN.toString(), aroonUpIndicator.getValue(i).toString());
} else {
assertNumEquals(aroonUpIndicator.getValue(i).toString(), series.numOf(100));
}
}
} |
public static byte[] getMCastMacAddress(byte[] targetIp) {
checkArgument(targetIp.length == Ip6Address.BYTE_LENGTH);
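// Per RFC 2464, the IPv6 multicast MAC address is 33:33 followed by the
// low-order 32 bits of the target IPv6 address.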
return new byte[] {
0x33, 0x33,
targetIp[targetIp.length - 4],
targetIp[targetIp.length - 3],
targetIp[targetIp.length - 2],
targetIp[targetIp.length - 1],
};
} | @Test
public void testMulticastAddress() {
assertArrayEquals(MULTICAST_ADDRESS, getMCastMacAddress(DESTINATION_ADDRESS));
} |
public static COSNumber get( String number ) throws IOException
{
if (number.length() == 1)
{
char digit = number.charAt(0);
if ('0' <= digit && digit <= '9')
{
return COSInteger.get((long) digit - '0');
}
if (digit == '-' || digit == '.')
{
// See https://issues.apache.org/jira/browse/PDFBOX-592
return COSInteger.ZERO;
}
throw new IOException("Not a number: " + number);
}
if (isFloat(number))
{
return new COSFloat(number);
}
try
{
return COSInteger.get(Long.parseLong(number));
}
catch (NumberFormatException e)
{
// check if the given string could be a number at all
String numberString = number.startsWith("+") || number.startsWith("-")
? number.substring(1) : number;
if (!numberString.matches("[0-9]*"))
{
throw new IOException("Not a number: " + number);
}
// return a limited COSInteger value which is marked as invalid
return number.startsWith("-") ? COSInteger.OUT_OF_RANGE_MIN
: COSInteger.OUT_OF_RANGE_MAX;
}
} | @Test
void testInvalidNumber()
{
try
{
COSNumber.get("18446744073307F448448");
fail("Was expecting an IOException");
}
catch (IOException e)
{
}
} |
@SuppressWarnings("unchecked")
@Override
public Result execute(Query query, Target target) {
Query adjustedQuery = adjustQuery(query);
switch (target.mode()) {
case ALL_NODES:
adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getAllPartitionIds()).build();
return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
case LOCAL_NODE:
adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getLocalPartitionIds()).build();
return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.LOCAL_NODE);
case PARTITION_OWNER:
int solePartition = target.partitions().solePartition();
adjustedQuery = Query.of(adjustedQuery).partitionIdSet(target.partitions()).build();
if (solePartition >= 0) {
return runOnGivenPartition(adjustedQuery, solePartition);
} else {
return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
}
default:
throw new IllegalArgumentException("Illegal target " + target);
}
} | @Test
public void runQueryOnLocalPartitions() {
Predicate<Object, Object> predicate = Predicates.equal("this", value);
Query query = Query.of().mapName(map.getName()).predicate(predicate).iterationType(KEY).build();
QueryResult result = queryEngine.execute(query, Target.LOCAL_NODE);
assertEquals(1, result.size());
assertEquals(key, toObject(result.iterator().next().getKey()));
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
} | @Test
public void shouldThrowInconsistentSchemaIdExceptionWithOverrideSchema()
throws IOException, RestClientException {
// Given:
when(schemaRegistryClient.register(anyString(), any(ParsedSchema.class))).thenReturn(2);
final SchemaAndId schemaAndId = SchemaAndId.schemaAndId(SCHEMA.value(), AVRO_SCHEMA, 1);
givenStatement("CREATE STREAM source (id int key, f1 varchar) "
+ "WITH ("
+ "kafka_topic='expectedName', "
+ "key_format='PROTOBUF', "
+ "value_format='AVRO', "
+ "value_schema_id=1, "
+ "partitions=1"
+ ");", Pair.of(null, schemaAndId));
// When:
final Exception e = assertThrows(
KsqlStatementException.class,
() -> injector.inject(statement)
);
// Then:
assertThat(e.getMessage(), containsString("Schema id registered is 2 which is "
+ "different from provided VALUE_SCHEMA_ID 1."
+ System.lineSeparator()
+ "Topic: expectedName"
+ System.lineSeparator()
+ "Subject: expectedName-value"));
} |
@Nullable
@Override
protected SelectionResultsBlock getNextBlock() {
if (_numDocsScanned >= _limit) {
// Already returned enough documents
return null;
}
ValueBlock valueBlock = _projectOperator.nextBlock();
if (valueBlock == null) {
return null;
}
int numExpressions = _expressions.size();
for (int i = 0; i < numExpressions; i++) {
_blockValSets[i] = valueBlock.getBlockValueSet(_expressions.get(i));
}
RowBasedBlockValueFetcher blockValueFetcher = new RowBasedBlockValueFetcher(_blockValSets);
int numDocs = valueBlock.getNumDocs();
int numDocsToReturn = Math.min(_limit - _numDocsScanned, numDocs);
List<Object[]> rows = new ArrayList<>(numDocsToReturn);
if (_nullHandlingEnabled) {
for (int i = 0; i < numExpressions; i++) {
_nullBitmaps[i] = _blockValSets[i].getNullBitmap();
}
for (int docId = 0; docId < numDocsToReturn; docId++) {
Object[] values = blockValueFetcher.getRow(docId);
for (int colId = 0; colId < numExpressions; colId++) {
if (_nullBitmaps[colId] != null && _nullBitmaps[colId].contains(docId)) {
values[colId] = null;
}
}
rows.add(values);
}
} else {
for (int i = 0; i < numDocsToReturn; i++) {
rows.add(blockValueFetcher.getRow(i));
}
}
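// Note that all scanned docs are counted, not just the returned ones, so the
// limit check at the top of the next call short-circuits.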
_numDocsScanned += numDocs;
return new SelectionResultsBlock(_dataSchema, rows, _queryContext);
} | @Test
public void testNullHandling() {
QueryContext queryContext =
QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable WHERE intColumn IS NULL");
queryContext.setNullHandlingEnabled(true);
List<ExpressionContext> expressions =
SelectionOperatorUtils.extractExpressions(queryContext, _segmentWithNullValues);
BaseProjectOperator<?> projectOperator =
new ProjectPlanNode(new SegmentContext(_segmentWithNullValues), queryContext, expressions,
DocIdSetPlanNode.MAX_DOC_PER_CALL).run();
StreamingSelectionOnlyOperator operator = new StreamingSelectionOnlyOperator(
_segmentWithNullValues,
queryContext,
expressions,
projectOperator
);
SelectionResultsBlock block = operator.getNextBlock();
List<Object[]> rows = block.getRows();
assertNull(rows.get(0)[0], "Column value should be 'null' when null handling is enabled");
} |
public void run() throws Exception {
final Terminal terminal = TerminalBuilder.builder()
.nativeSignals(true)
.signalHandler(signal -> {
if (signal == Terminal.Signal.INT || signal == Terminal.Signal.QUIT) {
if (execState == ExecState.RUNNING) {
throw new InterruptShellException();
} else {
exit(0);
}
}
})
.build();
run((providersMap) -> {
String serviceUrl = "";
String adminUrl = "";
for (ShellCommandsProvider provider : providersMap.values()) {
final String providerServiceUrl = provider.getServiceUrl();
if (providerServiceUrl != null) {
serviceUrl = providerServiceUrl;
}
final String providerAdminUrl = provider.getAdminUrl();
if (providerAdminUrl != null) {
adminUrl = providerAdminUrl;
}
}
LineReaderBuilder readerBuilder = LineReaderBuilder.builder()
.terminal(terminal)
.parser(parser)
.completer(systemRegistry.completer())
.variable(LineReader.INDENTATION, 2)
.option(LineReader.Option.INSERT_BRACKET, true);
configureHistory(properties, readerBuilder);
LineReader reader = readerBuilder.build();
final String welcomeMessage =
String.format("Welcome to Pulsar shell!\n %s: %s\n %s: %s\n\n"
+ "Type %s to get started or try the autocompletion (TAB button).\n"
+ "Type %s or %s to end the shell session.\n",
new AttributedStringBuilder().style(AttributedStyle.BOLD).append("Service URL").toAnsi(),
serviceUrl,
new AttributedStringBuilder().style(AttributedStyle.BOLD).append("Admin URL").toAnsi(),
adminUrl,
new AttributedStringBuilder().style(AttributedStyle.BOLD).append("help").toAnsi(),
new AttributedStringBuilder().style(AttributedStyle.BOLD).append("exit").toAnsi(),
new AttributedStringBuilder().style(AttributedStyle.BOLD).append("quit").toAnsi());
output(welcomeMessage, terminal);
String promptMessage;
if (configShell.getCurrentConfig() != null) {
promptMessage = String.format("%s(%s)",
configShell.getCurrentConfig(), getHostFromUrl(serviceUrl));
} else {
promptMessage = getHostFromUrl(serviceUrl);
}
final String prompt = createPrompt(promptMessage);
return new InteractiveLineReader() {
@Override
public String readLine() {
return reader.readLine(prompt);
}
@Override
public List<String> parseLine(String line) {
return reader.getParser().parse(line, 0).words();
}
};
}, () -> terminal);
} | @Test
public void testInteractiveMode() throws Exception {
Terminal terminal = TerminalBuilder.builder().build();
final MockLineReader linereader = new MockLineReader(terminal);
final Properties props = new Properties();
props.setProperty("webServiceUrl", "http://localhost:8080");
linereader.addCmd("admin topics create my-topic --metadata a=b ");
linereader.addCmd("client produce -m msg my-topic");
linereader.addCmd("quit");
final TestPulsarShell testPulsarShell = new TestPulsarShell(new String[]{}, props, pulsarAdmin);
testPulsarShell.run((a) -> linereader, () -> terminal);
verify(topics).createNonPartitionedTopic(eq("persistent://public/default/my-topic"), any(Map.class));
verify(testPulsarShell.cmdProduceHolder.get()).call();
assertEquals((int) testPulsarShell.exitCode, 0);
} |
public void setIgnoreCookies(boolean ignoreCookies) {
kp.put("ignoreCookies",ignoreCookies);
} | @Test
public void testIgnoreCookies() throws Exception {
fetcher().setIgnoreCookies(true);
checkSetCookieURI();
// second request to see if cookie is NOT sent
CrawlURI curi = makeCrawlURI("http://localhost:7777/");
fetcher().process(curi);
runDefaultChecks(curi);
String requestString = httpRequestString(curi);
assertFalse(requestString.contains("Cookie:"));
} |
public static boolean containsSymlink(String path)
{
return firstSymlinkIndex(path) >= 0;
} | @Test
public void testContainsSymlink()
{
Assert.assertTrue(SymlinkUtil.containsSymlink(path1));
Assert.assertTrue(SymlinkUtil.containsSymlink(path2));
Assert.assertTrue(SymlinkUtil.containsSymlink(path3));
Assert.assertFalse(SymlinkUtil.containsSymlink(path4));
Assert.assertFalse(SymlinkUtil.containsSymlink(path5));
} |
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convert(String jsonSchema) {
Preconditions.checkNotNull(jsonSchema, "JSON schema");
final ObjectMapper mapper = JacksonMapperFactory.createObjectMapper();
mapper.getFactory()
.enable(JsonParser.Feature.ALLOW_COMMENTS)
.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES)
.enable(JsonParser.Feature.ALLOW_SINGLE_QUOTES);
final JsonNode node;
try {
node = mapper.readTree(jsonSchema);
} catch (IOException e) {
throw new IllegalArgumentException("Invalid JSON schema.", e);
}
return (TypeInformation<T>) convertType("<root>", node, node);
} | @Test
void testAtomicType() {
final TypeInformation<?> result = JsonRowSchemaConverter.convert("{ type: 'number' }");
assertThat(result).isEqualTo(Types.BIG_DEC);
} |
@Deprecated
public static String updateSerializedOptions(
String serializedOptions, Map<String, String> runtimeValues) {
ObjectNode root, options;
try {
root = PipelineOptionsFactory.MAPPER.readValue(serializedOptions, ObjectNode.class);
options = (ObjectNode) root.get("options");
checkNotNull(options, "Unable to locate 'options' in %s", serializedOptions);
} catch (IOException e) {
throw new RuntimeException(String.format("Unable to parse %s", serializedOptions), e);
}
for (Map.Entry<String, String> entry : runtimeValues.entrySet()) {
options.put(entry.getKey(), entry.getValue());
}
try {
return PipelineOptionsFactory.MAPPER.writeValueAsString(root);
} catch (IOException e) {
throw new RuntimeException("Unable to parse re-serialize options", e);
}
} | @Test
public void testUpdateSerializeEmptyUpdate() throws Exception {
TestOptions submitOptions = PipelineOptionsFactory.as(TestOptions.class);
String serializedOptions = MAPPER.writeValueAsString(submitOptions);
String updatedOptions =
ValueProviders.updateSerializedOptions(serializedOptions, ImmutableMap.of());
TestOptions runtime =
MAPPER.readValue(updatedOptions, PipelineOptions.class).as(TestOptions.class);
assertNull(runtime.getString());
} |
@Override
public void putByPath(String expression, Object value) {
BeanPath.create(expression).set(this, value);
} | @Test
public void putByPathTest() {
final JSONObject json = new JSONObject();
json.putByPath("aa.bb", "BB");
assertEquals("{\"aa\":{\"bb\":\"BB\"}}", json.toString());
} |
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
} | @Test
void testGetIterator3() {
assertEquals("1", CollectionUtils.get(Collections.singleton("1").iterator(), 0));
assertEquals("2", CollectionUtils.get(Arrays.asList("1", "2").iterator(), 1));
} |
public static <T> AvroCoder<T> reflect(TypeDescriptor<T> type) {
return reflect((Class<T>) type.getRawType());
} | @Test
public void testAvroReflectCoderIsSerializable() throws Exception {
AvroCoder<Pojo> coder = AvroCoder.reflect(Pojo.class);
// Check that the coder is serializable using the regular JSON approach.
SerializableUtils.ensureSerializable(coder);
} |
public static byte[] getBytesWithoutClosing(InputStream stream) throws IOException {
if (stream instanceof ExposedByteArrayInputStream) {
// Fast path for the exposed version.
return ((ExposedByteArrayInputStream) stream).readAll();
} else if (stream instanceof ByteArrayInputStream) {
// Fast path for ByteArrayInputStream.
byte[] ret = new byte[stream.available()];
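// A single read suffices here: ByteArrayInputStream#read copies all available bytes in one call.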
stream.read(ret);
return ret;
}
// Falls back to normal stream copying.
SoftReference<byte[]> refBuffer = threadLocalBuffer.get();
byte[] buffer = refBuffer == null ? null : refBuffer.get();
if (buffer == null) {
buffer = new byte[BUF_SIZE];
threadLocalBuffer.set(new SoftReference<>(buffer));
}
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
while (true) {
int r = stream.read(buffer);
if (r == -1) {
break;
}
outStream.write(buffer, 0, r);
}
return outStream.toByteArray();
} | @Test
public void testGetBytesFromInputStream() throws IOException {
// Any stream which is not a ByteArrayInputStream.
InputStream stream = new BufferedInputStream(new ByteArrayInputStream(testData));
byte[] bytes = StreamUtils.getBytesWithoutClosing(stream);
assertArrayEquals(testData, bytes);
assertEquals(0, stream.available());
} |
@Override
public void serialize(Asn1OutputStream out, Class<? extends ASN1Object> type, ASN1Object obj,
Asn1ObjectMapper mapper) throws IOException {
out.write(obj.getEncoded());
} | @Test
public void shouldSerialize() {
assertArrayEquals(
new byte[] { 0x13, 03, 'E', 'U', 'R' },
serialize(new Iso4217CurrencyCodeConverter(), Iso4217CurrencyCode.class, new Iso4217CurrencyCode("EUR"))
);
} |
@Override
public Object getValue() {
try {
return mBeanServerConn.getAttribute(getObjectName(), attributeName);
} catch (IOException | JMException e) {
return null;
}
} | @Test
public void returnsJmxAttribute() throws Exception {
ObjectName objectName = new ObjectName("java.lang:type=ClassLoading");
JmxAttributeGauge gauge = new JmxAttributeGauge(mBeanServer, objectName, "LoadedClassCount");
assertThat(gauge.getValue()).isInstanceOf(Integer.class);
assertThat((Integer) gauge.getValue()).isGreaterThan(0);
} |
@Override
public void close() {
} | @Test
public void shouldSucceed_gapDetectedLocal_disableAlos()
throws ExecutionException, InterruptedException {
// Given:
when(pushRoutingOptions.alosEnabled()).thenReturn(false);
final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>(
ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true);
AtomicReference<TestLocalPublisher> localPublisher = new AtomicReference<>();
AtomicInteger localCount = new AtomicInteger(0);
when(pushPhysicalPlanManager.execute()).thenAnswer(a -> {
localPublisher.set(new TestLocalPublisher(context));
localCount.incrementAndGet();
context.runOnContext(v -> {
localPublisher.get().accept(LOCAL_ROW2);
});
return localPublisher.get();
});
doAnswer(a -> {
final Optional<PushOffsetRange> newOffsetRange = a.getArgument(0);
assertThat(newOffsetRange.isPresent(), is(true));
assertThat(newOffsetRange.get().getEndOffsets(), is(ImmutableList.of(0L, 3L)));
return null;
}).when(pushPhysicalPlanManager).reset(any());
// When:
final PushConnectionsHandle handle = handlePushRouting(routing);
context.runOnContext(v -> {
localPublisher.get().accept(LOCAL_CONTINUATION_TOKEN1);
localPublisher.get().accept(LOCAL_ROW1);
localPublisher.get().accept(LOCAL_CONTINUATION_TOKEN_GAP);
});
Set<List<?>> rows = waitOnRows(2);
handle.close();
// Then:
verify(pushPhysicalPlanManager, times(1)).execute();
assertThat(rows.contains(LOCAL_ROW1.value().values()), is(true));
assertThat(rows.contains(LOCAL_ROW2.value().values()), is(true));
} |
public void resetPositionsIfNeeded() {
Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();
if (offsetResetTimestamps.isEmpty())
return;
resetPositionsAsync(offsetResetTimestamps);
} | @Test
public void testGetOffsetsIncludesLeaderEpoch() {
buildFetcher();
subscriptions.assignFromUser(singleton(tp0));
client.updateMetadata(initialUpdateResponse);
// Metadata update with leader epochs
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
Collections.emptyMap(), Collections.singletonMap(topicName, 4), tp -> 99, topicIds);
client.updateMetadata(metadataResponse);
// Request latest offset
subscriptions.requestOffsetReset(tp0);
offsetFetcher.resetPositionsIfNeeded();
// Check for epoch in outgoing request
MockClient.RequestMatcher matcher = body -> {
if (body instanceof ListOffsetsRequest) {
ListOffsetsRequest offsetRequest = (ListOffsetsRequest) body;
int epoch = offsetRequest.topics().get(0).partitions().get(0).currentLeaderEpoch();
assertTrue(epoch != ListOffsetsResponse.UNKNOWN_EPOCH, "Expected Fetcher to set leader epoch in request");
assertEquals(epoch, 99, "Expected leader epoch to match epoch from metadata update");
return true;
} else {
fail("Should have seen ListOffsetRequest");
return false;
}
};
client.prepareResponse(matcher, listOffsetResponse(Errors.NONE, 1L, 5L));
consumerClient.pollNoWakeup();
} |
@Override
public void checkpointCoordinator(long checkpointId, CompletableFuture<byte[]> result) {
// unfortunately, this method does not run in the scheduler executor, but in the
// checkpoint coordinator time thread.
// we can remove the delegation once the checkpoint coordinator runs fully in the
// scheduler's main thread executor
mainThreadExecutor.execute(() -> checkpointCoordinatorInternal(checkpointId, result));
} | @Test
void checkpointFutureInitiallyNotDone() throws Exception {
final EventReceivingTasks tasks = EventReceivingTasks.createForRunningTasks();
final OperatorCoordinatorHolder holder =
createCoordinatorHolder(tasks, TestingOperatorCoordinator::new);
final CompletableFuture<byte[]> checkpointFuture = new CompletableFuture<>();
holder.checkpointCoordinator(1L, checkpointFuture);
assertThat(checkpointFuture).isNotDone();
} |
public static void checkParam(String dataId, String group, String content) throws NacosException {
checkKeyParam(dataId, group);
if (StringUtils.isBlank(content)) {
throw new NacosException(NacosException.CLIENT_INVALID_PARAM, CONTENT_INVALID_MSG);
}
} | @Test
void testCheckParam2() throws NacosException {
String dataId = "b";
String group = "c";
String datumId = "d";
String content = "a";
ParamUtils.checkParam(dataId, group, datumId, content);
} |
public NshServicePathId nshSpi() {
return nshSpi;
} | @Test
public void testConstruction() {
final NiciraSetNshSpi niciraSetNshSpi = new NiciraSetNshSpi(NshServicePathId.of(10));
assertThat(niciraSetNshSpi, is(notNullValue()));
assertThat(niciraSetNshSpi.nshSpi().servicePathId(), is(10));
} |
public static MetricsReporter combine(MetricsReporter first, MetricsReporter second) {
if (null == first) {
return second;
} else if (null == second || first == second) {
return first;
}
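// An identity-based set deduplicates by instance, so the same reporter object is
// registered only once even when it appears inside nested composites.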
Set<MetricsReporter> reporters = Sets.newIdentityHashSet();
if (first instanceof CompositeMetricsReporter) {
reporters.addAll(((CompositeMetricsReporter) first).reporters());
} else {
reporters.add(first);
}
if (second instanceof CompositeMetricsReporter) {
reporters.addAll(((CompositeMetricsReporter) second).reporters());
} else {
reporters.add(second);
}
return new CompositeMetricsReporter(reporters);
} | @Test
public void reportWithMultipleMetricsReporters() {
AtomicInteger counter = new AtomicInteger();
MetricsReporter combined =
MetricsReporters.combine(
report -> counter.incrementAndGet(), report -> counter.incrementAndGet());
combined.report(new MetricsReport() {});
assertThat(combined).isInstanceOf(MetricsReporters.CompositeMetricsReporter.class);
assertThat(((MetricsReporters.CompositeMetricsReporter) combined).reporters()).hasSize(2);
assertThat(counter.get()).isEqualTo(2);
} |
public static Map<String, PluginConfiguration> swap(final YamlAgentConfiguration yamlConfig) {
YamlPluginCategoryConfiguration plugins = yamlConfig.getPlugins();
if (null == plugins) {
return Collections.emptyMap();
}
Map<String, PluginConfiguration> result = new LinkedHashMap<>();
result.putAll(swap(plugins.getLogging()));
result.putAll(swap(plugins.getMetrics()));
result.putAll(swap(plugins.getTracing()));
return result;
} | @Test
void assertSwapWithNullPlugins() {
YamlAgentConfiguration yamlAgentConfig = new YamlAgentConfiguration();
yamlAgentConfig.setPlugins(new YamlPluginCategoryConfiguration());
assertTrue(YamlPluginsConfigurationSwapper.swap(yamlAgentConfig).isEmpty());
} |
public void start() {
myBatis.start();
} | @Test
void should_start_mybatis_instance() {
var myBatis = mock(MyBatis.class);
var startMyBatis = new StartMyBatis(myBatis);
startMyBatis.start();
verify(myBatis).start();
verifyNoMoreInteractions(myBatis);
} |
@Override
public void doSendMail(MailSendMessage message) {
// 1. Create the sending account
MailAccountDO account = validateMailAccount(message.getAccountId());
MailAccount mailAccount = buildMailAccount(account, message.getNickname());
// 2. Send the mail
try {
String messageId = MailUtil.send(mailAccount, message.getMail(),
message.getTitle(), message.getContent(), true);
// 3. Update the result (success)
mailLogService.updateMailSendResult(message.getLogId(), messageId, null);
} catch (Exception e) {
// 3. Update the result (exception)
mailLogService.updateMailSendResult(message.getLogId(), null, e);
}
} | @Test
public void testDoSendMail_success() {
try (MockedStatic<MailUtil> mailUtilMock = mockStatic(MailUtil.class)) {
// Prepare parameters
MailSendMessage message = randomPojo(MailSendMessage.class, o -> o.setNickname("芋艿"));
// Mock the method (get the mail account)
MailAccountDO account = randomPojo(MailAccountDO.class, o -> o.setMail("7685@qq.com"));
when(mailAccountService.getMailAccountFromCache(eq(message.getAccountId())))
.thenReturn(account);
// Mock the method (send the mail)
String messageId = randomString();
mailUtilMock.when(() -> MailUtil.send(
argThat(mailAccount -> {
assertEquals("芋艿 <7685@qq.com>", mailAccount.getFrom());
assertTrue(mailAccount.isAuth());
assertEquals(account.getUsername(), mailAccount.getUser());
assertEquals(account.getPassword(), mailAccount.getPass());
assertEquals(account.getHost(), mailAccount.getHost());
assertEquals(account.getPort(), mailAccount.getPort());
assertEquals(account.getSslEnable(), mailAccount.isSslEnable());
return true;
}), eq(message.getMail()), eq(message.getTitle()), eq(message.getContent()), eq(true)))
.thenReturn(messageId);
// Invoke
mailSendService.doSendMail(message);
// Assert
verify(mailLogService).updateMailSendResult(eq(message.getLogId()), eq(messageId), isNull());
}
} |
public static Container createCategorie(String categorieNummer, String... elementNummerValues) {
Container categorie = new Container();
categorie.setNummer(categorieNummer);
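    // consume the varargs as (nummer, value) pairs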
    for (int i = 0; i < elementNummerValues.length; i += 2) {
Element element = new Element();
element.setNummer(elementNummerValues[i]);
element.setValue(elementNummerValues[i+1]);
categorie.getElement().add(element);
}
return categorie;
} | @Test
public void testCreateCategorie() {
Container container = CategorieUtil.createCategorie("A", "B", "C", "D", "E");
assertThat(container.getNummer(), is("A"));
assertThat(container.getElement().size(), is(2));
assertThat(container.getElement().get(0).getNummer(), is("B"));
assertThat(container.getElement().get(0).getValue(), is("C"));
assertThat(container.getElement().get(1).getNummer(), is("D"));
assertThat(container.getElement().get(1).getValue(), is("E"));
} |
@Override
public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) {
long currentMaxTimestamp = maxTimestamp();
// We don't need to recompute crc if the timestamp is not updated.
if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp)
return;
byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch(), hasDeleteHorizonMs());
buffer.putShort(ATTRIBUTES_OFFSET, attributes);
buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp);
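    // the batch header changed in place, so the CRC must be recomputed and rewritten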
long crc = computeChecksum();
ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc);
} | @Test
public void testSetNoTimestampTypeNotAllowed() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
Compression.NONE, TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer());
assertThrows(IllegalArgumentException.class, () -> batch.setMaxTimestamp(TimestampType.NO_TIMESTAMP_TYPE, RecordBatch.NO_TIMESTAMP));
} |
public static String cut(String s, String splitChar, int index) {
if (s == null || splitChar == null || index < 0) {
return null;
}
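    // Pattern.quote treats the separator literally, so regex metacharacters in splitChar are safe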
final String[] parts = s.split(Pattern.quote(splitChar));
if (parts.length <= index) {
return null;
}
return emptyToNull(parts[index]);
} | @Test
public void testCutReturnsCorrectPart() throws Exception {
String result = SplitAndIndexExtractor.cut("foobar foobaz quux", " ", 2);
assertEquals("quux", result);
} |
static <T extends Type> String encodeArrayValues(Array<T> value) {
StringBuilder result = new StringBuilder();
for (Type type : value.getValue()) {
result.append(encode(type));
}
return result.toString();
} | @Test
public void testStaticStructStaticArray() {
StaticArray3<Bar> array =
new StaticArray3<>(
Bar.class,
new Bar(BigInteger.ONE, BigInteger.ZERO),
new Bar(BigInteger.ONE, BigInteger.ZERO),
new Bar(BigInteger.ONE, BigInteger.ZERO));
assertEquals(
TypeEncoder.encodeArrayValues(array),
("0000000000000000000000000000000000000000000000000000000000000001"
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ "0000000000000000000000000000000000000000000000000000000000000000"));
} |
@Override
public ProxyInvocationHandler parserInterfaceToProxy(Object target, String objectName) {
// eliminate the bean without two phase annotation.
Set<String> methodsToProxy = this.tccProxyTargetMethod(target);
if (methodsToProxy.isEmpty()) {
return null;
}
// register resource and enhance with interceptor
DefaultResourceRegisterParser.get().registerResource(target, objectName);
return new TccActionInterceptorHandler(target, methodsToProxy);
} | @Test
public void testNestTcc_should_rollback() throws Exception {
//given
RootContext.unbind();
DefaultResourceManager.get();
DefaultResourceManager.mockResourceManager(BranchType.TCC, resourceManager);
TransactionManagerHolder.set(transactionManager);
TccActionImpl tccAction = new TccActionImpl();
TccAction tccActionProxy = ProxyUtil.createProxy(tccAction);
Assertions.assertNotNull(tccActionProxy);
NestTccActionImpl nestTccAction = new NestTccActionImpl();
nestTccAction.setTccAction(tccActionProxy);
//when
ProxyInvocationHandler proxyInvocationHandler = DefaultInterfaceParser.get().parserInterfaceToProxy(nestTccAction, nestTccAction.getClass().getName());
//then
Assertions.assertNotNull(proxyInvocationHandler);
//when
NestTccAction nestTccActionProxy = ProxyUtil.createProxy(nestTccAction);
//then
Assertions.assertNotNull(nestTccActionProxy);
// transaction rollback test
GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
try {
tx.begin(60000, "testBiz");
boolean result = nestTccActionProxy.prepare(null, 1);
Assertions.assertFalse(result);
if (result) {
tx.commit();
} else {
tx.rollback();
}
} catch (Exception exx) {
tx.rollback();
throw exx;
}
Assertions.assertFalse(nestTccAction.isCommit());
Assertions.assertFalse(tccAction.isCommit());
} |
@Override
public synchronized String getUri() {
return String.format(
"jdbc:%s://%s:%d/%s",
getJDBCPrefix(), this.getHost(), this.getPort(), this.getDatabaseName());
} | @Test
public void testGetUriShouldReturnCorrectValue() {
when(container.getHost()).thenReturn(HOST);
when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);
assertThat(testManager.getUri())
.matches("jdbc:" + JDBC_PREFIX + "://" + HOST + ":" + MAPPED_PORT + "/" + DATABASE_NAME);
} |
@VisibleForTesting
DictTypeDO validateDictTypeExists(Long id) {
if (id == null) {
return null;
}
DictTypeDO dictType = dictTypeMapper.selectById(id);
if (dictType == null) {
throw exception(DICT_TYPE_NOT_EXISTS);
}
return dictType;
} | @Test
public void testValidateDictTypeExists_notExists() {
assertServiceException(() -> dictTypeService.validateDictTypeExists(randomLongId()), DICT_TYPE_NOT_EXISTS);
} |
public URLNormalizer encodeNonURICharacters() {
url = toURI().toASCIIString();
return this;
} | @Test
public void testEncodeNonURICharacters() {
s = "http://www.example.com/^a [b]/c?d e=";
t = "http://www.example.com/%5Ea%20%5Bb%5D/c?d+e=";
assertEquals(t, n(s).encodeNonURICharacters().toString());
//Test for https://github.com/Norconex/collector-http/issues/294
//Was failing when HTTP was uppercase
s = "HTTP://www.Example.com/";
t = "HTTP://www.Example.com/";
assertEquals(t, n(s).encodeNonURICharacters().toString());
} |
public final void contains(@Nullable Object element) {
if (!Iterables.contains(checkNotNull(actual), element)) {
List<@Nullable Object> elementList = newArrayList(element);
if (hasMatchingToStringPair(actual, elementList)) {
failWithoutActual(
fact("expected to contain", element),
fact("an instance of", objectToTypeName(element)),
simpleFact("but did not"),
fact(
"though it did contain",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual, /* itemsToCheck= */ elementList))),
fullContents());
} else {
failWithActual("expected to contain", element);
}
}
} | @Test
public void iterableContains() {
assertThat(asList(1, 2, 3)).contains(1);
} |
@Override
public void close() {
close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS));
} | @Test
public void testInterceptorAutoCommitOnClose() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitSyncApplicationEventSuccessfully();
completeUnsubscribeApplicationEventSuccessfully();
consumer.close(Duration.ZERO);
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get());
} |
PubSubMessage rowToMessage(Row row) {
row = castRow(row, row.getSchema(), schema);
PubSubMessage.Builder builder = PubSubMessage.newBuilder();
if (schema.hasField(MESSAGE_KEY_FIELD)) {
byte[] bytes = row.getBytes(MESSAGE_KEY_FIELD);
if (bytes != null) {
builder.setKey(ByteString.copyFrom(bytes));
}
}
if (schema.hasField(EVENT_TIMESTAMP_FIELD)) {
ReadableDateTime time = row.getDateTime(EVENT_TIMESTAMP_FIELD);
if (time != null) {
builder.setEventTime(Timestamps.fromMillis(time.getMillis()));
}
}
if (schema.hasField(ATTRIBUTES_FIELD)) {
Collection<Row> attributes = row.getArray(ATTRIBUTES_FIELD);
if (attributes != null) {
attributes.forEach(
entry -> {
AttributeValues.Builder valuesBuilder = AttributeValues.newBuilder();
Collection<byte[]> values =
checkArgumentNotNull(entry.getArray(ATTRIBUTES_VALUES_FIELD));
values.forEach(bytes -> valuesBuilder.addValues(ByteString.copyFrom(bytes)));
builder.putAttributes(
checkArgumentNotNull(entry.getString(ATTRIBUTES_KEY_FIELD)),
valuesBuilder.build());
});
}
}
if (payloadSerializer == null) {
byte[] payload = row.getBytes(PAYLOAD_FIELD);
if (payload != null) {
builder.setData(ByteString.copyFrom(payload));
}
} else {
Row payload = row.getRow(PAYLOAD_FIELD);
if (payload != null) {
builder.setData(ByteString.copyFrom(payloadSerializer.serialize(payload)));
}
}
return builder.build();
} | @Test
public void fullRowToMessage() {
RowHandler rowHandler = new RowHandler(FULL_WRITE_SCHEMA);
Instant now = Instant.now();
Row row =
Row.withSchema(FULL_WRITE_SCHEMA)
.withFieldValue(RowHandler.MESSAGE_KEY_FIELD, "val1".getBytes(UTF_8))
.withFieldValue(RowHandler.PAYLOAD_FIELD, "val2".getBytes(UTF_8))
.withFieldValue(RowHandler.EVENT_TIMESTAMP_FIELD, now)
.withFieldValue(
RowHandler.ATTRIBUTES_FIELD,
ImmutableList.of(
Row.withSchema(RowHandler.ATTRIBUTES_ENTRY_SCHEMA)
.attachValues(
"key1",
ImmutableList.of("attr1".getBytes(UTF_8), "attr2".getBytes(UTF_8))),
Row.withSchema(RowHandler.ATTRIBUTES_ENTRY_SCHEMA)
.attachValues("key2", ImmutableList.of("attr3".getBytes(UTF_8)))))
.build();
PubSubMessage expected =
PubSubMessage.newBuilder()
.setKey(ByteString.copyFromUtf8("val1"))
.setData(ByteString.copyFromUtf8("val2"))
.setEventTime(Timestamps.fromMillis(now.getMillis()))
.putAttributes(
"key1",
AttributeValues.newBuilder()
.addValues(ByteString.copyFromUtf8("attr1"))
.addValues(ByteString.copyFromUtf8("attr2"))
.build())
.putAttributes(
"key2",
AttributeValues.newBuilder().addValues(ByteString.copyFromUtf8("attr3")).build())
.build();
assertEquals(expected, rowHandler.rowToMessage(row));
} |
public CompletableFuture<Void> storePqLastResort(final UUID identifier, final byte deviceId, final KEMSignedPreKey lastResortKey) {
return pqLastResortKeys.store(identifier, deviceId, lastResortKey);
} | @Test
void testStorePqLastResort() {
assertEquals(0, keysManager.getPqEnabledDevices(ACCOUNT_UUID).join().size());
final ECKeyPair identityKeyPair = Curve.generateKeyPair();
final byte deviceId2 = 2;
final byte deviceId3 = 3;
keysManager.storePqLastResort(ACCOUNT_UUID, DEVICE_ID, KeysHelper.signedKEMPreKey(1, identityKeyPair)).join();
keysManager.storePqLastResort(ACCOUNT_UUID, deviceId2, KeysHelper.signedKEMPreKey(2, identityKeyPair)).join();
assertEquals(2, keysManager.getPqEnabledDevices(ACCOUNT_UUID).join().size());
assertEquals(1L, keysManager.getLastResort(ACCOUNT_UUID, DEVICE_ID).join().get().keyId());
assertEquals(2L, keysManager.getLastResort(ACCOUNT_UUID, deviceId2).join().get().keyId());
assertFalse(keysManager.getLastResort(ACCOUNT_UUID, deviceId3).join().isPresent());
keysManager.storePqLastResort(ACCOUNT_UUID, DEVICE_ID, KeysHelper.signedKEMPreKey(3, identityKeyPair)).join();
keysManager.storePqLastResort(ACCOUNT_UUID, deviceId3, KeysHelper.signedKEMPreKey(4, identityKeyPair)).join();
assertEquals(3, keysManager.getPqEnabledDevices(ACCOUNT_UUID).join().size(), "storing new last-resort keys should not create duplicates");
assertEquals(3L, keysManager.getLastResort(ACCOUNT_UUID, DEVICE_ID).join().get().keyId(),
"storing new last-resort keys should overwrite old ones");
assertEquals(2L, keysManager.getLastResort(ACCOUNT_UUID, deviceId2).join().get().keyId(),
"storing new last-resort keys should leave untouched ones alone");
assertEquals(4L, keysManager.getLastResort(ACCOUNT_UUID, deviceId3).join().get().keyId(),
"storing new last-resort keys should overwrite old ones");
} |
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
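    // query each underlying store in order and return the first non-empty iterator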
for (final ReadOnlySessionStore<K, V> store : stores) {
try {
final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
if (!result.hasNext()) {
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException ise) {
throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
" and may have been migrated to another instance; " +
"please re-discover its location from the state metadata. " +
"Original error message: " + ise);
}
}
return KeyValueIterators.emptyIterator();
} | @Test
public void shouldNotGetValueFromOtherStores() {
final Windowed<String> expectedKey = new Windowed<>("foo", new SessionWindow(0, 0));
otherUnderlyingStore.put(new Windowed<>("foo", new SessionWindow(10, 10)), 10L);
underlyingSessionStore.put(expectedKey, 1L);
try (final KeyValueIterator<Windowed<String>, Long> result = sessionStore.fetch("foo")) {
assertEquals(KeyValue.pair(expectedKey, 1L), result.next());
assertFalse(result.hasNext());
}
} |
@Override
public final Optional<ConfigDefinition> getConfigDefinition(ConfigDefinitionKey defKey) {
if (configDefinitionSuppliers == null) {
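        // lazy init: start with repo-provided definitions, then let application-package definitions override them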
configDefinitionSuppliers = new LinkedHashMap<>();
configDefinitionRepo.ifPresent(definitionRepo -> configDefinitionSuppliers.putAll(createLazyMapping(definitionRepo)));
configDefinitionSuppliers.putAll(applicationPackage.getAllExistingConfigDefs());
}
if ( ! configDefinitionSuppliers.containsKey(defKey)) return Optional.empty();
if (configDefinitionCache.get(defKey) != null)
return Optional.of(configDefinitionCache.get(defKey));
ConfigDefinition def = configDefinitionSuppliers.get(defKey).parse();
configDefinitionCache.put(defKey, def);
return Optional.of(def);
} | @Test
void testGetConfigDefinition() {
Map<ConfigDefinitionKey, com.yahoo.vespa.config.buildergen.ConfigDefinition> defs = new LinkedHashMap<>();
defs.put(new ConfigDefinitionKey("test2", "a.b"), new com.yahoo.vespa.config.buildergen.ConfigDefinition("test2", new String[]{"namespace=a.b", "doubleVal double default=1.0"}));
//defs.put(new ConfigDefinitionKey("test2", "c.d"), new com.yahoo.vespa.config.buildergen.ConfigDefinition("test2", new String[]{"namespace=c.d", "doubleVal double default=1.0"}));
defs.put(new ConfigDefinitionKey("test3", "xyzzy"), new com.yahoo.vespa.config.buildergen.ConfigDefinition("test3", new String[]{"namespace=xyzzy", "message string"}));
ApplicationPackage app = FilesApplicationPackage.fromFile(new File("src/test/cfg/application/app1"));
DeployState state = createDeployState(app, defs);
assertNotNull(state.getConfigDefinition(new ConfigDefinitionKey("test2", "a.b")));
ConfigDefinition test1 = state.getConfigDefinition(new ConfigDefinitionKey("test2", "a.b")).get();
assertNotNull(test1);
assertEquals("test2", test1.getName());
assertEquals("a.b", test1.getNamespace());
} |
public PrettyTime setLocale(Locale locale)
{
if (locale == null)
locale = Locale.getDefault();
this.locale = locale;
for (TimeUnit unit : units.keySet()) {
if (unit instanceof LocaleAware)
((LocaleAware<?>) unit).setLocale(locale);
}
for (TimeFormat format : units.values()) {
if (format instanceof LocaleAware)
((LocaleAware<?>) format).setLocale(locale);
}
cachedUnits = null;
return this;
} | @Test
public void testSetLocale() throws Exception
{
PrettyTime t = new PrettyTime(now);
final LocalDateTime threeDecadesAgo = now.minus(3, ChronoUnit.DECADES);
Assert.assertEquals("3 decades ago", t.format(threeDecadesAgo));
t.setLocale(Locale.GERMAN);
Assert.assertEquals("vor 3 Jahrzehnten", t.format(threeDecadesAgo));
} |
@Nullable static String getPropertyIfString(Message message, String name) {
try {
Object o = message.getObjectProperty(name);
if (o instanceof String) return o.toString();
return null;
} catch (Throwable t) {
propagateIfFatal(t);
log(t, "error getting property {0} from message {1}", name, message);
return null;
}
} | @Test void getPropertyIfString_null() {
assertThat(MessageProperties.getPropertyIfString(message, "b3")).isNull();
} |
public List<Entry> getEntries() {
return new ArrayList<>(actions.values());
} | @Test
public void actions_with_multiple_services() {
List<RefeedActions.Entry> entries = new ConfigChangeActionsBuilder().
refeed(ValidationId.indexModeChange, CHANGE_MSG, DOC_TYPE, CLUSTER, SERVICE_NAME).
refeed(ValidationId.indexModeChange, CHANGE_MSG, DOC_TYPE, CLUSTER, SERVICE_NAME_2).
build().getRefeedActions().getEntries();
assertThat(entries.size(), is(1));
assertThat(toString(entries.get(0)), equalTo("music.foo:[baz,qux][change]"));
} |
public String encodePassword(String password) throws NoSuchAlgorithmException {
// compute digest of the password
final MessageDigest messageDigest = MessageDigest.getInstance(algorithm);
messageDigest.update(password.getBytes(StandardCharsets.UTF_8));
// we need a SALT against rainbow tables
messageDigest.update(SALT);
final byte[] digest = messageDigest.digest();
// hexadecimal encoding of the digest
return "{" + algorithm + '}' + hexa(digest);
} | @Test
public void testEncodePassword() throws NoSuchAlgorithmException {
final String algorithm = "SHA-256";
final String password = "password";
final String hash = new MessageDigestPasswordEncoder(algorithm).encodePassword(password);
final String expectedHash = "{SHA-256}c33d66fe65ffcca1f2260e6982dbf0c614b6ea3ddfdb37d6142fbec0feca5245";
assertEquals("encodePassword", expectedHash, hash);
} |
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
return decoder.decodeFunctionResult(rawInput, outputParameters);
} | @Test
public void testVoidResultFunctionDecode() {
Function function = new Function("test", Collections.emptyList(), Collections.emptyList());
assertEquals(
FunctionReturnDecoder.decode("0x", function.getOutputParameters()),
(Collections.emptyList()));
} |
public static List<FieldSchema> convert(Schema schema) {
return schema.columns().stream()
.map(col -> new FieldSchema(col.name(), convertToTypeString(col.type()), col.doc()))
.collect(Collectors.toList());
} | @Test
public void testNotSupportedTypes() {
for (FieldSchema notSupportedField : getNotSupportedFieldSchemas()) {
assertThatThrownBy(
() ->
HiveSchemaUtil.convert(
Lists.newArrayList(Collections.singletonList(notSupportedField))))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Unsupported Hive type");
}
} |
@Override
public MergeAppend appendFile(DataFile file) {
add(file);
return this;
} | @TestTemplate
public void testDefaultPartitionSummaries() {
table.newFastAppend().appendFile(FILE_A).commit();
Set<String> partitionSummaryKeys =
table.currentSnapshot().summary().keySet().stream()
.filter(key -> key.startsWith(SnapshotSummary.CHANGED_PARTITION_PREFIX))
.collect(Collectors.toSet());
assertThat(partitionSummaryKeys).isEmpty();
assertThat(table.currentSnapshot().summary())
.doesNotContainKey(SnapshotSummary.PARTITION_SUMMARY_PROP)
.containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "1");
} |
public static boolean isConfluentCustomer(final String customerId) {
return customerId != null
&& (CUSTOMER_PATTERN.matcher(customerId.toLowerCase(Locale.ROOT)).matches()
|| NEW_CUSTOMER_CASE_INSENSISTIVE_PATTERN.matcher(customerId).matches()
|| NEW_CUSTOMER_CASE_SENSISTIVE_PATTERN.matcher(customerId).matches());
} | @Test
public void testInvalidCustomer() {
String[] invalidIds = Stream.concat(
CustomerIdExamples.INVALID_CUSTOMER_IDS.stream(),
CustomerIdExamples.VALID_ANONYMOUS_IDS.stream()).
toArray(String[]::new);
for (String invalidCustomerId : invalidIds) {
assertFalse(invalidCustomerId + " is a valid customer identifier",
BaseSupportConfig.isConfluentCustomer(invalidCustomerId));
}
} |
@Override
public boolean match(Message msg, StreamRule rule) {
if(msg.getField(Message.FIELD_GL2_SOURCE_INPUT) == null) {
return rule.getInverted();
}
final String value = msg.getField(Message.FIELD_GL2_SOURCE_INPUT).toString();
return rule.getInverted() ^ value.trim().equalsIgnoreCase(rule.getValue());
} | @Test
public void testUnsuccessfulMatchWhenMissing() {
StreamRule rule = getSampleRule();
rule.setValue("input-id-dead");
Message msg = getSampleMessage();
StreamRuleMatcher matcher = getMatcher(rule);
assertFalse(matcher.match(msg, rule));
} |
public List<MappingField> resolveAndValidateFields(
List<MappingField> userFields,
Map<String, String> options,
NodeEngine nodeEngine
) {
final InternalSerializationService serializationService = (InternalSerializationService) nodeEngine
.getSerializationService();
final AbstractRelationsStorage relationsStorage = ((CalciteSqlOptimizer) nodeEngine.getSqlService().getOptimizer())
.relationsStorage();
// normalize and validate the names and external names
for (MappingField field : userFields) {
String name = field.name();
String externalName = field.externalName();
if (externalName == null) {
if (name.equals(KEY) || name.equals(VALUE)) {
externalName = name;
} else {
externalName = VALUE_PREFIX + name;
}
// store the computed default so later validation sees the effective external name
field.setExternalName(externalName);
}
if ((name.equals(KEY) && !externalName.equals(KEY))
|| (name.equals(VALUE) && !externalName.equals(VALUE))) {
throw QueryException.error("Cannot rename field: '" + name + '\'');
}
if (!EXT_NAME_PATTERN.matcher(externalName).matches()) {
throw QueryException.error("Invalid external name: " + externalName);
}
}
Stream<MappingField> keyFields = resolveAndValidateFields(true, userFields, options,
serializationService, relationsStorage);
Stream<MappingField> valueFields = resolveAndValidateFields(false, userFields, options,
serializationService, relationsStorage);
Map<String, MappingField> fields = Stream.concat(keyFields, valueFields)
.collect(LinkedHashMap::new, (map, field) -> map.putIfAbsent(field.name(), field), Map::putAll);
if (fields.isEmpty()) {
throw QueryException.error("The resolved field list is empty");
}
return new ArrayList<>(fields.values());
} | @Test
public void when_keyAndValueFieldsEmpty_then_throws() {
Map<String, String> options = ImmutableMap.of(
OPTION_KEY_FORMAT, JAVA_FORMAT,
OPTION_VALUE_FORMAT, JAVA_FORMAT
);
given(resolver.resolveAndValidateFields(eq(true), eq(emptyList()), eq(options), eq(ss)))
.willReturn(Stream.empty());
given(resolver.resolveAndValidateFields(eq(false), eq(emptyList()), eq(options), eq(ss)))
.willReturn(Stream.empty());
assertThatThrownBy(() -> resolvers.resolveAndValidateFields(emptyList(), options, nodeEngine))
.isInstanceOf(QueryException.class)
.hasMessageContaining("The resolved field list is empty");
} |
@Override
public SinkRecord newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key,
Schema valueSchema, Object value, Long timestamp,
Iterable<Header> headers) {
return new InternalSinkRecord(context, topic, kafkaPartition, keySchema, key,
valueSchema, value, kafkaOffset(), timestamp, timestampType(), headers);
} | @Test
public void shouldRetainOriginalTopicPartition() {
String transformedTopic = "transformed-test-topic";
SinkRecord sinkRecord = new SinkRecord(transformedTopic, 0, null, null, null, null, 10);
ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>(TOPIC, 0, 10, null, null);
ProcessingContext<ConsumerRecord<byte[], byte[]>> context = new ProcessingContext<>(consumerRecord);
InternalSinkRecord internalSinkRecord = new InternalSinkRecord(context, sinkRecord);
assertEquals(TOPIC, internalSinkRecord.originalTopic());
assertEquals(0, internalSinkRecord.originalKafkaPartition().intValue());
SinkRecord transformedSinkRecord = internalSinkRecord.newRecord(transformedTopic, 1, null, null, null, null, null);
assertEquals(TOPIC, transformedSinkRecord.originalTopic());
assertEquals(0, transformedSinkRecord.originalKafkaPartition().intValue());
} |
@Override
public Mono<Void> withoutFallback(final ServerWebExchange exchange, final Throwable throwable) {
Object error;
if (throwable instanceof DegradeException) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SERVICE_RESULT_ERROR);
} else if (throwable instanceof FlowException) {
exchange.getResponse().setStatusCode(HttpStatus.TOO_MANY_REQUESTS);
error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.TOO_MANY_REQUESTS);
} else if (throwable instanceof BlockException) {
exchange.getResponse().setStatusCode(HttpStatus.TOO_MANY_REQUESTS);
error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SENTINEL_BLOCK_ERROR);
} else if (throwable instanceof SentinelPlugin.SentinelFallbackException) {
return exchange.getAttribute(Constants.RESPONSE_MONO);
} else {
return Mono.error(throwable);
}
return WebFluxResultUtils.result(exchange, error);
} | @Test
public void testBlockException() {
StepVerifier.create(fallbackHandler.withoutFallback(exchange, new AuthorityException("Sentinel"))).expectSubscription().verifyComplete();
} |
@Override
public Num getValue(int index) {
return values.get(index);
} | @Test
public void cashFlowValueWithNoPositions() {
BarSeries sampleBarSeries = new MockBarSeries(numFunction, 3d, 2d, 5d, 4d, 7d, 6d, 7d, 8d, 5d, 6d);
CashFlow cashFlow = new CashFlow(sampleBarSeries, new BaseTradingRecord());
assertNumEquals(1, cashFlow.getValue(4));
assertNumEquals(1, cashFlow.getValue(7));
assertNumEquals(1, cashFlow.getValue(9));
} |
@VisibleForTesting
public static String wildcardToRegexp(String globExp) {
StringBuilder dst = new StringBuilder();
char[] src = globExp.replace("**/*", "**").toCharArray();
int i = 0;
while (i < src.length) {
char c = src[i++];
switch (c) {
case '*':
// One char lookahead for **
if (i < src.length && src[i] == '*') {
dst.append(".*");
++i;
} else {
dst.append("[^/]*");
}
break;
case '?':
dst.append("[^/]");
break;
case '.':
case '+':
case '{':
case '}':
case '(':
case ')':
case '|':
case '^':
case '$':
// These need to be escaped in regular expressions
dst.append('\\').append(c);
break;
case '\\':
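          // escape a literal backslash by doubling it in the regex output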
i = doubleSlashes(dst, src, i);
break;
default:
dst.append(c);
break;
}
}
return dst.toString();
} | @Test
public void testGlobTranslation() {
assertEquals("foo", wildcardToRegexp("foo"));
assertEquals("fo[^/]*o", wildcardToRegexp("fo*o"));
assertEquals("f[^/]*o\\.[^/]", wildcardToRegexp("f*o.?"));
assertEquals("foo-[0-9][^/]*", wildcardToRegexp("foo-[0-9]*"));
assertEquals("foo-[0-9].*", wildcardToRegexp("foo-[0-9]**"));
assertEquals(".*foo", wildcardToRegexp("**/*foo"));
assertEquals(".*foo", wildcardToRegexp("**foo"));
assertEquals("foo/[^/]*", wildcardToRegexp("foo/*"));
assertEquals("foo[^/]*", wildcardToRegexp("foo*"));
assertEquals("foo/[^/]*/[^/]*/[^/]*", wildcardToRegexp("foo/*/*/*"));
assertEquals("foo/[^/]*/.*", wildcardToRegexp("foo/*/**"));
assertEquals("foo.*baz", wildcardToRegexp("foo**baz"));
assertEquals("foo\\\\", wildcardToRegexp("foo\\"));
assertEquals("foo/bar\\\\baz[^/]*", wildcardToRegexp("foo/bar\\baz*"));
} |
public B retries(Integer retries) {
this.retries = retries;
return getThis();
} | @Test
void retries() {
MethodBuilder builder = new MethodBuilder();
builder.retries(3);
Assertions.assertEquals(3, builder.build().getRetries());
} |
public final boolean debug() {
return debug(flags);
} | @Test void sampled_flags() {
assertThat(SamplingFlags.debug(false, SamplingFlags.DEBUG.flags))
.isEqualTo(SamplingFlags.SAMPLED.flags)
.isEqualTo(FLAG_SAMPLED_SET | FLAG_SAMPLED);
} |
public static String idOf(String entityUuid) {
requireNonNull(entityUuid, "entityUuid can't be null");
return ID_PREFIX + entityUuid;
} | @Test
public void idOf_fails_with_NPE_if_argument_is_null() {
assertThatThrownBy(() -> AuthorizationDoc.idOf(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("entityUuid can't be null");
} |
public Set<String> extractPlaceholderKeys(String propertyString) {
Set<String> placeholderKeys = Sets.newHashSet();
if (!isPlaceholder(propertyString)) {
return placeholderKeys;
}
Stack<String> stack = new Stack<>();
stack.push(propertyString);
while (!stack.isEmpty()) {
String strVal = stack.pop();
int startIndex = strVal.indexOf(PLACEHOLDER_PREFIX);
if (startIndex == -1) {
placeholderKeys.add(strVal);
continue;
}
int endIndex = findPlaceholderEndIndex(strVal, startIndex);
if (endIndex == -1) {
// invalid placeholder?
continue;
}
String placeholderCandidate = strVal.substring(startIndex + PLACEHOLDER_PREFIX.length(), endIndex);
// ${some.key:other.key}
if (placeholderCandidate.startsWith(PLACEHOLDER_PREFIX)) {
stack.push(placeholderCandidate);
}
else {
// some.key:${some.other.key:100}
int separatorIndex = placeholderCandidate.indexOf(VALUE_SEPARATOR);
if (separatorIndex == -1) {
stack.push(placeholderCandidate);
}
else {
stack.push(placeholderCandidate.substring(0, separatorIndex));
String defaultValuePart =
normalizeToPlaceholder(placeholderCandidate.substring(separatorIndex + VALUE_SEPARATOR.length()));
if (!Strings.isNullOrEmpty(defaultValuePart)) {
stack.push(defaultValuePart);
}
}
}
// has remaining part, e.g. ${a}.${b}
if (endIndex + PLACEHOLDER_SUFFIX.length() < strVal.length() - 1) {
String remainingPart = normalizeToPlaceholder(strVal.substring(endIndex + PLACEHOLDER_SUFFIX.length()));
if (!Strings.isNullOrEmpty(remainingPart)) {
stack.push(remainingPart);
}
}
}
return placeholderKeys;
} | @Test
public void extractIllegalPlaceholderKeysTest() {
final String placeholderCase = "${some.key";
final String placeholderCase1 = "{some.key}";
final String placeholderCase2 = "some.key";
Set<String> placeholderKeys = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase);
assertThat(placeholderKeys).isEmpty();
Set<String> placeholderKeys1 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase1);
assertThat(placeholderKeys1).isEmpty();
Set<String> placeholderKeys2 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase2);
assertThat(placeholderKeys2).isEmpty();
} |
@Override
public void cleanupExpiredSegments(final long streamTime) {
super.cleanupExpiredSegments(streamTime);
} | @Test
public void shouldCleanupSegmentsThatHaveExpired() {
final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, 0);
final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(2, context, SEGMENT_INTERVAL * 2L);
final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(3, context, SEGMENT_INTERVAL * 3L);
final LogicalKeyValueSegment segment4 = segments.getOrCreateSegmentIfLive(7, context, SEGMENT_INTERVAL * 7L);
segments.cleanupExpiredSegments(SEGMENT_INTERVAL * 7L);
final List<LogicalKeyValueSegment> allSegments = segments.allSegments(true);
assertEquals(2, allSegments.size());
assertEquals(segment3, allSegments.get(0));
assertEquals(segment4, allSegments.get(1));
} |
public XAQueueConnection xaQueueConnection(XAQueueConnection connection) {
return TracingXAConnection.create(connection, this);
} | @Test void xaQueueConnection_wrapsInput() {
assertThat(jmsTracing.xaQueueConnection(mock(XAQueueConnection.class)))
.isInstanceOf(TracingXAConnection.class);
} |
protected boolean userIsOwner() {
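    // subuser credentials take the form "owner/subUser", so only an exact match with the account owner qualifies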
final MantaAccountHomeInfo account = new MantaAccountHomeInfo(host.getCredentials().getUsername(), host.getDefaultPath());
return StringUtils.equals(host.getCredentials().getUsername(),
account.getAccountOwner());
} | @Test
public void testUserOwnerIdentification() {
final MantaSession ownerSession = new MantaSession(
new Host(
new MantaProtocol(),
null,
443,
new Credentials("theOwner")), new DisabledX509TrustManager(), new DefaultX509KeyManager());
assertTrue(ownerSession.userIsOwner());
final MantaSession subuserSession = new MantaSession(
new Host(
new MantaProtocol(),
null,
443,
new Credentials("theOwner/theSubUser")), new DisabledX509TrustManager(), new DefaultX509KeyManager());
assertFalse(subuserSession.userIsOwner());
} |
@SuppressFBWarnings(justification = "try with resource will cleanup the resources", value = {"OBL_UNSATISFIED_OBLIGATION"})
public List<SuppressionRule> parseSuppressionRules(File file) throws SuppressionParseException {
try (FileInputStream fis = new FileInputStream(file)) {
return parseSuppressionRules(fis);
} catch (SAXException | IOException ex) {
LOGGER.debug("", ex);
throw new SuppressionParseException(ex);
}
} | @Test
public void testParseSuppressionRulesV1dot1() throws Exception {
//File file = new File(this.getClass().getClassLoader().getResource("suppressions.xml").getPath());
File file = BaseTest.getResourceAsFile(this, "suppressions_1_1.xml");
SuppressionParser instance = new SuppressionParser();
List<SuppressionRule> result = instance.parseSuppressionRules(file);
Assert.assertEquals(5, result.size());
} |
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
} | @Test
public void testRMapCacheValues() {
final RMapCacheNative<String, String> map = redisson.getMapCacheNative("testRMapCacheValues");
map.put("1234", "5678", Duration.ofMinutes(1));
assertThat(map.values()).containsOnly("5678");
map.destroy();
} |
public static long getTimestampMillis(Binary timestampBinary)
{
if (timestampBinary.length() != 12) {
throw new PrestoException(NOT_SUPPORTED, "Parquet timestamp must be 12 bytes, actual " + timestampBinary.length());
}
byte[] bytes = timestampBinary.getBytes();
// little endian encoding - need to invert byte order
long timeOfDayNanos = Longs.fromBytes(bytes[7], bytes[6], bytes[5], bytes[4], bytes[3], bytes[2], bytes[1], bytes[0]);
int julianDay = Ints.fromBytes(bytes[11], bytes[10], bytes[9], bytes[8]);
return julianDayToMillis(julianDay) + (timeOfDayNanos / NANOS_PER_MILLISECOND);
} | @Test
public void testGetTimestampMillis()
{
assertTimestampCorrect("2011-01-01 00:00:00.000000000");
assertTimestampCorrect("2001-01-01 01:01:01.000000001");
assertTimestampCorrect("2015-12-31 23:59:59.999999999");
} |
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
Map<String, ConfigValue> result = super.validateSourceConnectorConfig(connector, configDef, config);
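        // layer exactly-once and transaction-boundary checks on top of the base validation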
validateSourceConnectorExactlyOnceSupport(config, result, connector);
validateSourceConnectorTransactionBoundary(config, result, connector);
return result;
} | @Test
public void testExactlyOnceSourceSupportValidationOnUnsupportedConnector() {
herder = exactlyOnceHerder();
Map<String, String> config = new HashMap<>();
config.put(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG, REQUIRED.toString());
SourceConnector connectorMock = mock(SourceConnector.class);
when(connectorMock.exactlyOnceSupport(eq(config))).thenReturn(ExactlyOnceSupport.UNSUPPORTED);
Map<String, ConfigValue> validatedConfigs = herder.validateSourceConnectorConfig(
connectorMock, SourceConnectorConfig.configDef(), config);
List<String> errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages();
assertEquals(
Collections.singletonList("The connector does not support exactly-once semantics with the provided configuration."),
errors);
} |
@Override
public void pickAddress() throws Exception {
if (publicAddress != null || bindAddress != null) {
return;
}
try {
AddressDefinition publicAddressDef = getPublicAddressByPortSearch();
if (publicAddressDef != null) {
publicAddress = createAddress(publicAddressDef, publicAddressDef.port);
logger.info("Using public address: " + publicAddress);
} else {
publicAddress = bindAddress;
logger.finest("Using public address the same as the bind address: " + publicAddress);
}
} catch (Exception e) {
ServerSocketChannel serverSocketChannel = getServerSocketChannel(endpointQualifier);
if (serverSocketChannel != null) {
serverSocketChannel.close();
}
logger.severe(e);
throw e;
}
} | @Test
public void testBindAddress_whenAddressAlreadyInUse() throws Exception {
int port = 6789;
config.getNetworkConfig().setPort(port);
config.getNetworkConfig().setPortAutoIncrement(false);
addressPicker = new DefaultAddressPicker(config, logger);
addressPicker.pickAddress();
try {
new DefaultAddressPicker(config, logger).pickAddress();
fail("Should fail with 'java.net.BindException: Address already in use'");
} catch (Exception expected) {
// expected exception
}
} |
@Override
public Response get() throws InterruptedException {
synchronized (this) {
while (!isDone) {
wait();
}
}
return response;
} | @Test
void testSyncGetResponseFailureWithTimeout() throws InterruptedException, TimeoutException {
assertThrows(TimeoutException.class, () -> {
DefaultRequestFuture requestFuture = new DefaultRequestFuture(CONNECTION_ID, REQUEST_ID);
requestFuture.get(100L);
});
} |
public B executes(Integer executes) {
this.executes = executes;
return getThis();
} | @Test
void executes() {
ServiceBuilder builder = new ServiceBuilder();
builder.executes(10);
Assertions.assertEquals(10, builder.build().getExecutes());
} |
public static Map<String, String> parseToMap(String attributesModification) {
if (Strings.isNullOrEmpty(attributesModification)) {
return new HashMap<>();
}
// format: +key1=value1,+key2=value2,-key3,+key4=value4
Map<String, String> attributes = new HashMap<>();
String[] kvs = attributesModification.split(ATTR_ARRAY_SEPARATOR_COMMA);
for (String kv : kvs) {
String key;
String value;
if (kv.contains(ATTR_KEY_VALUE_EQUAL_SIGN)) {
String[] splits = kv.split(ATTR_KEY_VALUE_EQUAL_SIGN);
key = splits[0];
value = splits[1];
if (!key.contains(ATTR_ADD_PLUS_SIGN)) {
throw new RuntimeException("add/alter attribute format is wrong: " + key);
}
} else {
key = kv;
value = "";
if (!key.contains(ATTR_DELETE_MINUS_SIGN)) {
throw new RuntimeException("delete attribute format is wrong: " + key);
}
}
String old = attributes.put(key, value);
if (old != null) {
throw new RuntimeException("key duplication: " + key);
}
}
return attributes;
} | @Test
public void parseToMap_EmptyString_ReturnsEmptyMap() {
String attributesModification = "";
Map<String, String> result = AttributeParser.parseToMap(attributesModification);
assertTrue(result.isEmpty());
} |
public static <T> Point<T> interpolate(Point<T> p1, Point<T> p2, Instant targetTime) {
checkNotNull(p1, "Cannot perform interpolation when the first input points is null");
checkNotNull(p2, "Cannot perform interpolation when the second input points is null");
checkNotNull(targetTime, "Cannot perform interpolation when the targetTime is null");
checkArgument(
p1.time().isBefore(p2.time()) || p1.time().equals(p2.time()),
"The input points must be in chronological order"
);
TimeWindow window = TimeWindow.of(p1.time(), p2.time());
checkArgument(
window.contains(targetTime),
"The targetTime is outside the required time window"
);
if (p1.time().equals(targetTime)) {
return (new PointBuilder<T>(p1)).build();
} else if (p2.time().equals(targetTime)) {
return (new PointBuilder<T>(p2)).build();
} else {
double fraction = window.toFractionOfRange(targetTime);
//build an interpolated point
LatLong interpolatedLatLong = interpolateLatLong(p1.latLong(), p2.latLong(), fraction);
Double interpolatedCourseInDegrees = interpolateCourse(
isNull(p1.course()) ? null : p1.course().inDegrees(),
isNull(p2.course()) ? null : p2.course().inDegrees(),
fraction
);
//correct the interpolated course when one of the input values was null
if (interpolatedCourseInDegrees == null) {
interpolatedCourseInDegrees = Spherical.courseInDegrees(p1.latLong(), p2.latLong());
}
double interpolatedSpeed = interpolateSpeed(p1, p2, fraction);
Distance interpolatedAltitude = interpolate(
p1.altitude(),
p2.altitude(),
fraction
);
//return a copy of the 1st input point but with corrected trajectory data
return (new PointBuilder<T>(p1))
.latLong(interpolatedLatLong)
.course(Course.ofDegrees(interpolatedCourseInDegrees))
.speed(Speed.ofKnots(interpolatedSpeed))
.altitude(interpolatedAltitude)
.time(targetTime)
.build();
}
} | @Test
public void testInterpolatePointWithBadNullCourse() {
//The RH message shown in s1 has no course
String s1 = "[RH],STARS,GEG,07/08/2017,14:09:11.474,,,,1200,0,0,,47.61734,-117.54339,655,0,0.3008,-0.1445,,,,GEG,,,,,,,IFR,,,,,,,,,,,,{RH}\n";
//The RH message shown in s2 has a course of 253
String s2 = "[RH],STARS,GEG,07/08/2017,14:09:16.284,,,,1200,28,88,253,47.61675,-117.54618,655,0,0.1876,-0.1797,,,,GEG,,,,,,,IFR,,,,,,,,,,,,{RH}\n";
Point<NopHit> p1 = NopHit.from(s1);
Point<NopHit> p2 = NopHit.from(s2);
Point<NopHit> interpolated = interpolate(p1, p2, p1.time().plusSeconds(2));
assertThat(
"The course of the interpolated point is set even though the 1st point had no course",
Spherical.courseInDegrees(p1.latLong(), p2.latLong()),
is(interpolated.course().inDegrees())
);
} |
@Override
public PostgreSQLIdentifierTag getIdentifier() {
return PostgreSQLMessagePacketType.DATA_ROW;
} | @Test
void assertGetIdentifier() {
assertThat(new PostgreSQLDataRowPacket(Collections.emptyList()).getIdentifier(), is(PostgreSQLMessagePacketType.DATA_ROW));
} |
public Schema toKsqlSchema(final Schema schema) {
try {
final Schema rowSchema = toKsqlFieldSchema(schema);
if (rowSchema.type() != Schema.Type.STRUCT) {
throw new KsqlException("KSQL stream/table schema must be structured");
}
if (rowSchema.fields().isEmpty()) {
throw new KsqlException("Schema does not include any columns with "
+ "types that ksqlDB supports."
+ System.lineSeparator()
+ "schema: " + FORMATTER.format(schema));
}
return rowSchema;
} catch (final UnsupportedTypeException e) {
throw new KsqlException("Unsupported type at root of schema: " + e.getMessage(), e);
}
} | @Test
public void shouldTranslatePrimitives() {
final Schema connectSchema = SchemaBuilder
.struct()
.field("intField", Schema.INT32_SCHEMA)
.field("longField", Schema.INT64_SCHEMA)
.field("doubleField", Schema.FLOAT64_SCHEMA)
.field("stringField", Schema.STRING_SCHEMA)
.field("booleanField", Schema.BOOLEAN_SCHEMA)
.field("bytesField", Schema.BYTES_SCHEMA)
.build();
final Schema ksqlSchema = translator.toKsqlSchema(connectSchema);
assertThat(ksqlSchema.schema().type(), equalTo(Schema.Type.STRUCT));
assertThat(ksqlSchema.fields().size(), equalTo(connectSchema.fields().size()));
for (int i = 0; i < ksqlSchema.fields().size(); i++) {
assertThat(
ksqlSchema.fields().get(i).name(),
equalTo(nameTranslator.apply(connectSchema.fields().get(i).name())));
assertThat(
ksqlSchema.fields().get(i).schema().type(),
equalTo(connectSchema.fields().get(i).schema().type()));
assertThat(ksqlSchema.fields().get(i).schema().isOptional(), is(true));
}
// Make sure that regular int32/int64 fields do not get converted to date/time/timestamp
assertThat(ksqlSchema.field(nameTranslator.apply("longField")).schema(),
is(Schema.OPTIONAL_INT64_SCHEMA));
assertThat(ksqlSchema.field(nameTranslator.apply("intField")).schema(),
is(Schema.OPTIONAL_INT32_SCHEMA));
} |
public void processOnce() throws IOException {
// set status of query to OK.
ctx.getState().reset();
executor = null;
// reset sequence id of MySQL protocol
final MysqlChannel channel = ctx.getMysqlChannel();
channel.setSequenceId(0);
// read packet from channel
try {
packetBuf = channel.fetchOnePacket();
if (packetBuf == null) {
throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
}
} catch (AsynchronousCloseException e) {
// when this happened, timeout checker close this channel
// killed flag in ctx has been already set, just return
return;
}
// dispatch
dispatch();
// finalize
finalizeCommand();
ctx.setCommand(MysqlCommand.COM_SLEEP);
} | @Test
public void testInitDb() throws IOException {
ConnectContext ctx = initMockContext(mockChannel(initDbPacket), GlobalStateMgr.getCurrentState());
ctx.setCurrentUserIdentity(UserIdentity.ROOT);
ctx.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
ctx.setQualifiedUser(AuthenticationMgr.ROOT_USER);
ConnectProcessor processor = new ConnectProcessor(ctx);
processor.processOnce();
Assert.assertEquals(MysqlCommand.COM_INIT_DB, myContext.getCommand());
Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlOkPacket);
} |