focal_method | test_case
---|---|
@Override
public XmlStringBuilder toXML(org.jivesoftware.smack.packet.XmlEnvironment enclosingNamespace) {
XmlStringBuilder xml = new XmlStringBuilder(this, enclosingNamespace);
xml.rightAngleBracket();
xml.emptyElement(compressFailureError);
xml.optElement(stanzaError);
xml.closeElement(this);
return xml;
} | @Test
public void simpleFailureTest() throws SAXException, IOException {
Failure failure = new Failure(Failure.CompressFailureError.processing_failed);
CharSequence xml = failure.toXML();
final String expectedXml = "<failure xmlns='http://jabber.org/protocol/compress'><processing-failed/></failure>";
assertXmlSimilar(expectedXml, xml.toString());
} |
@Override
public List<PortStatistics> getPortDeltaStatistics(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_NULL);
// TODO not supported at the moment.
return ImmutableList.of();
} | @Test
public void testGetPortDeltaStatistics() {
manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
VirtualDevice virtualDevice = manager.createVirtualDevice(virtualNetwork.id(), DID1);
manager.createVirtualDevice(virtualNetwork.id(), DID2);
DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);
// test the getPortDeltaStatistics() method
assertEquals("The port delta statistics set size did not match.", 0,
deviceService.getPortDeltaStatistics(DID1).size());
} |
@Override
public void serialize(String name, byte[] message, OutputStream out) throws IOException {
byte[] header = new byte[4 + COMMAND_LEN + 4 + 4 /* checksum */];
ByteUtils.writeInt32BE(packetMagic, header, 0);
// The header array is initialized to zero by Java so we don't have to worry about
// NULL terminating the string here.
for (int i = 0; i < name.length() && i < COMMAND_LEN; i++) {
header[4 + i] = (byte) (name.codePointAt(i) & 0xFF);
}
ByteUtils.writeInt32LE(message.length, header, 4 + COMMAND_LEN);
byte[] hash = Sha256Hash.hashTwice(message);
System.arraycopy(hash, 0, header, 4 + COMMAND_LEN + 4, 4);
out.write(header);
out.write(message);
if (log.isDebugEnabled())
log.debug("Sending {} message: {}", name, ByteUtils.formatHex(header) + ByteUtils.formatHex(message));
} | @Test(expected = Error.class)
public void testSerializeUnknownMessage() throws Exception {
MessageSerializer serializer = MAINNET.getDefaultSerializer();
Message unknownMessage = new BaseMessage() {
@Override
protected void bitcoinSerializeToStream(OutputStream stream) {}
};
ByteArrayOutputStream bos = new ByteArrayOutputStream(ADDRESS_MESSAGE_BYTES.length);
serializer.serialize(unknownMessage, bos);
} |
public static SqlQueryBuilder select(String... columns) {
if (columns == null || columns.length == 0) {
throw new IllegalArgumentException("No columns provided with SELECT statement. Please mention column names or '*' to select all columns.");
}
StringBuilder sqlBuilder = new StringBuilder();
sqlBuilder.append("select ");
sqlBuilder.append(String.join(", ", columns));
return new SqlQueryBuilder(sqlBuilder);
} | @Test
public void testSelect() {
String sql = SqlQueryBuilder.select("id", "rider", "time")
.from("trips")
.join("users").on("trips.rider = users.id")
.where("(trips.time > 100 or trips.time < 200)")
.orderBy("id", "time")
.limit(10).toString();
assertEquals("select id, rider, time from trips "
+ "join users on trips.rider = users.id "
+ "where (trips.time > 100 or trips.time < 200) "
+ "order by id, time "
+ "limit 10", sql);
} |
@DeleteMapping("/clean/{timePoint}")
@RequiresPermissions("system:role:delete")
public AdminResult<Boolean> clean(@PathVariable @DateTimeFormat(pattern = DateUtils.DATE_FORMAT_DATETIME) final Date timePoint) {
return ResultUtil.ok(recordLogService.cleanHistory(timePoint));
} | @Test
public void testClean() throws Exception {
given(this.operationRecordLogService.cleanHistory(any())).willReturn(true);
this.mockMvc.perform(MockMvcRequestBuilders.delete("/operation-record/log/clean/" + "2020-10-22 10:10:10")
.contentType(MediaType.APPLICATION_JSON)
.accept(MediaType.APPLICATION_JSON))
.andExpect(status().isOk())
.andReturn();
} |
public <V> Iterable<V> getAll(TupleTag<V> tag) {
int index = schema.getIndex(tag);
if (index < 0) {
throw new IllegalArgumentException("TupleTag " + tag + " is not in the schema");
}
@SuppressWarnings("unchecked")
Iterable<V> unions = (Iterable<V>) valueMap.get(index);
return unions;
} | @Test
@SuppressWarnings("BoxedPrimitiveEquality")
public void testCachedResults() {
// The caching strategies are different for the different implementations.
Assume.assumeTrue(useReiterator);
// Ensure we don't fail below due to a non-default java.lang.Integer.IntegerCache.high setting,
// as we want to test our cache is working as expected, unimpeded by a higher-level cache.
int integerCacheLimit = 128;
assertThat(
Integer.valueOf(integerCacheLimit), not(sameInstance(Integer.valueOf(integerCacheLimit))));
int perTagCache = 10;
int crossTagCache = 2 * integerCacheLimit;
int[] tags = new int[crossTagCache + 8 * perTagCache];
for (int i = 0; i < 2 * perTagCache; i++) {
tags[crossTagCache + 4 * i] = 1;
tags[crossTagCache + 4 * i + 1] = 2;
}
TestUnionValues values = new TestUnionValues(tags);
CoGbkResult result = new CoGbkResult(createSchema(5), values, crossTagCache, perTagCache);
// More than perTagCache values should be cached for the first tag, as they came first.
List<Object> tag0 = Lists.newArrayList(result.getAll("tag0").iterator());
List<Object> tag0again = Lists.newArrayList(result.getAll("tag0").iterator());
assertThat(tag0.get(0), sameInstance(tag0again.get(0)));
assertThat(tag0.get(integerCacheLimit), sameInstance(tag0again.get(integerCacheLimit)));
assertThat(tag0.get(crossTagCache - 1), sameInstance(tag0again.get(crossTagCache - 1)));
// However, not all elements are cached.
assertThat(tag0.get(tag0.size() - 1), not(sameInstance(tag0again.get(tag0.size() - 1))));
// For tag 1 and tag 2, we cache perTagCache elements, plus possibly one more due to peeking
// iterators.
List<Object> tag1 = Lists.newArrayList(result.getAll("tag1").iterator());
List<Object> tag1again = Lists.newArrayList(result.getAll("tag1").iterator());
assertThat(tag1.get(0), sameInstance(tag1again.get(0)));
assertThat(tag1.get(perTagCache - 1), sameInstance(tag1again.get(perTagCache - 1)));
assertThat(tag1.get(perTagCache + 1), not(sameInstance(tag1again.get(perTagCache + 1))));
List<Object> tag2 = Lists.newArrayList(result.getAll("tag2").iterator());
List<Object> tag2again = Lists.newArrayList(result.getAll("tag2").iterator());
assertThat(tag2.get(0), sameInstance(tag2again.get(0)));
assertThat(tag2.get(perTagCache - 1), sameInstance(tag2again.get(perTagCache - 1)));
assertThat(tag2.get(perTagCache + 1), not(sameInstance(tag2again.get(perTagCache + 1))));
} |
@Override
public void execute(GraphModel graphModel) {
Graph graph;
if (isDirected) {
graph = graphModel.getDirectedGraphVisible();
} else {
graph = graphModel.getUndirectedGraphVisible();
}
execute(graph);
} | @Test
public void testColumnCreation() {
GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1);
ClusteringCoefficient cc = new ClusteringCoefficient();
cc.execute(graphModel);
Assert.assertTrue(graphModel.getNodeTable().hasColumn(ClusteringCoefficient.CLUSTERING_COEFF));
} |
@Override
public String toString() {
final String nInfo = n == 1 ? "" : "(" + n + ")";
return getClass().getSimpleName() + nInfo + "[" + this.indicator + "]";
} | @Test
public void testToStringMethodWithNGreaterThan1() {
prevValueIndicator = new PreviousValueIndicator(openPriceIndicator, 2);
final String prevValueIndicatorAsString = prevValueIndicator.toString();
assertTrue(prevValueIndicatorAsString.startsWith("PreviousValueIndicator(2)["));
assertTrue(prevValueIndicatorAsString.endsWith("]"));
} |
public synchronized void refreshTableByEvent(HiveTable updatedHiveTable, HiveCommonStats commonStats, Partition partition) {
String dbName = updatedHiveTable.getDbName();
String tableName = updatedHiveTable.getTableName();
DatabaseTableName databaseTableName = DatabaseTableName.of(dbName, tableName);
tableCache.put(databaseTableName, updatedHiveTable);
if (updatedHiveTable.isUnPartitioned()) {
Map<String, HiveColumnStats> columnStats = get(tableStatsCache, databaseTableName).getColumnStats();
HivePartitionStats updatedPartitionStats = createPartitionStats(commonStats, columnStats);
tableStatsCache.put(databaseTableName, updatedPartitionStats);
partitionCache.put(HivePartitionName.of(dbName, tableName, Lists.newArrayList()), partition);
} else {
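// Partitioned table: invalidate cached partition keys, partitions, and partition stats so they are reloaded on next access.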
partitionKeysCache.asMap().keySet().stream().filter(hivePartitionValue -> hivePartitionValue.getHiveTableName().
equals(databaseTableName)).forEach(partitionKeysCache::invalidate);
List<HivePartitionName> presentPartitions = getPresentPartitionNames(partitionCache, dbName, tableName);
presentPartitions.forEach(p -> partitionCache.invalidate(p));
List<HivePartitionName> presentPartitionStats = getPresentPartitionNames(partitionStatsCache, dbName, tableName);
presentPartitionStats.forEach(p -> partitionStatsCache.invalidate(p));
}
} | @Test
public void testRefreshTableByEvent() {
CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
HiveCommonStats stats = new HiveCommonStats(10, 100);
// partitioned table
{
HiveTable table = (HiveTable) cachingHiveMetastore.getTable("db1", "tbl1");
Partition partition = cachingHiveMetastore.getPartition(
"db1", "tbl1", Lists.newArrayList("par1"));
cachingHiveMetastore.refreshTableByEvent(table, stats, partition);
}
// unpartitioned table
{
HiveTable table = (HiveTable) cachingHiveMetastore.getTable("db1", "unpartitioned_table");
Partition partition = cachingHiveMetastore.getPartition(
"db1", "unpartitioned_table", Lists.newArrayList("col1"));
cachingHiveMetastore.refreshTableByEvent(table, stats, partition);
}
} |
@Override
public int size() {
return list.size();
} | @Test
public void testSize() {
Set<Integer> set = redisson.getSortedSet("set");
set.add(1);
set.add(2);
set.add(3);
set.add(3);
set.add(4);
set.add(5);
set.add(5);
Assertions.assertEquals(5, set.size());
} |
@Override
public boolean equals(final Object obj) {
// Make sure equals() stays in tune with hashCode() so instances behave correctly in a HashSet!
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final DefaultAlarm other = (DefaultAlarm) obj;
// id or timeRaised or timeUpdated may differ
if (!Objects.equals(this.deviceId, other.deviceId)) {
return false;
}
if (!Objects.equals(this.description, other.description)) {
return false;
}
if (!Objects.equals(this.source, other.source)) {
return false;
}
if (!Objects.equals(this.timeCleared, other.timeCleared)) {
return false;
}
if (this.severity != other.severity) {
return false;
}
if (this.isServiceAffecting != other.isServiceAffecting) {
return false;
}
if (this.isAcknowledged != other.isAcknowledged) {
return false;
}
if (this.isManuallyClearable != other.isManuallyClearable) {
return false;
}
if (!Objects.equals(this.assignedUser, other.assignedUser)) {
return false;
}
return true;
} | @Test
public void testEquals() {
final DefaultAlarm a = new DefaultAlarm.Builder(ALARM_ID_2,
DeviceId.NONE, "desc", Alarm.SeverityLevel.MINOR, 3).build();
final DefaultAlarm b = new DefaultAlarm.Builder(ALARM_ID,
DeviceId.NONE, "desc", Alarm.SeverityLevel.MINOR, a.timeRaised() + 1)
.withTimeUpdated(a.timeUpdated() + 1).build();
assertEquals("id or timeRaised or timeUpdated may differ", a, b);
assertNotEquals(a, new DefaultAlarm.Builder(a).withAcknowledged(!a.acknowledged()).build());
assertNotEquals(a, new DefaultAlarm.Builder(a).withManuallyClearable(!a.manuallyClearable()).build());
assertNotEquals(a, new DefaultAlarm.Builder(a).withServiceAffecting(!a.serviceAffecting()).build());
assertNotEquals(a, new DefaultAlarm.Builder(a).withAssignedUser("Changed" + a.assignedUser()).build());
} |
@Override
public List<String> choices() {
if (commandLine.getArguments() == null) {
return Collections.emptyList();
}
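// The argument at index 1 (after the command name) selects the VPLS sub-command that determines the completion choices.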
List<String> argList = Lists.newArrayList(commandLine.getArguments());
String argOne = null;
if (argList.size() > 1) {
argOne = argList.get(1);
}
VplsCommandEnum vplsCommandEnum = VplsCommandEnum.enumFromString(argOne);
if (vplsCommandEnum != null) {
switch (vplsCommandEnum) {
case CREATE:
case LIST:
return Collections.emptyList();
default:
break; // fall through to the full command list below
}
}
return VplsCommandEnum.toStringList();
} | @Test
public void testOptArgCompleter() {
VplsOptArgCompleter completer = new VplsOptArgCompleter();
completer.vpls = new TestVpls();
((TestVpls) completer.vpls).initSampleData();
completer.interfaceService = new TestInterfaceService();
// Add interface to VPLS
commandSession.updateArguments(VPLS_CMD, VplsCommandEnum.ADD_IFACE.toString(), VPLS1);
List<String> choices = completer.choices();
List<String> expected = ImmutableList.of(V300H1.name(),
V300H2.name(),
V400H1.name(),
VNONEH1.name(),
VNONEH2.name(),
VNONEH3.name());
// Cannot guarantee ordering, so compare with containsAll instead of equals
assertEquals(choices.size(), expected.size());
assertTrue(choices.containsAll(expected));
// Removes interface from VPLS
commandSession.updateArguments(VPLS_CMD, VplsCommandEnum.REMOVE_IFACE.toString(), VPLS1);
choices = completer.choices();
expected = completer.vpls.getVpls(VPLS1).interfaces().stream()
.map(Interface::name)
.collect(Collectors.toList());
// Cannot guarantee ordering, so compare with containsAll instead of equals
assertEquals(choices.size(), expected.size());
assertTrue(choices.containsAll(expected));
// Sets encapsulation
commandSession.updateArguments(VPLS_CMD, VplsCommandEnum.SET_ENCAP.toString(), VPLS1);
choices = completer.choices();
expected = Arrays.stream(EncapsulationType.values())
.map(Enum::toString)
.collect(Collectors.toList());
// Cannot guarantee ordering, so compare with containsAll instead of equals
assertEquals(choices.size(), expected.size());
assertTrue(choices.containsAll(expected));
} |
@VisibleForTesting
public void validateSmsTemplateCodeDuplicate(Long id, String code) {
SmsTemplateDO template = smsTemplateMapper.selectByCode(code);
if (template == null) {
return;
}
// If id is null, there is no existing record to compare against, so any template with the same code is a duplicate
if (id == null) {
throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code);
}
if (!template.getId().equals(id)) {
throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code);
}
} | @Test
public void testValidateDictDataValueUnique_success() {
// invoke; expect success
smsTemplateService.validateSmsTemplateCodeDuplicate(randomLongId(), randomString());
} |
@Override
public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) {
RetriableFuture<HttpResponse> result = new RetriableFuture<>(); // Carries the aggregate result of the operation, including retries.
if (destroyed.get()) {
result.complete();
return result;
}
CompletableFuture<HttpResponse> vessel = new CompletableFuture<>(); // Holds the computation of a single dispatch to the HTTP client.
RetriableFuture<HttpResponse> previous = inflightById.put(documentId, result);
if (previous == null) {
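// First operation in flight for this document id: claim a slot and dispatch immediately.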
acquireSlot();
offer(request, vessel);
throttler.sent(inflight.get(), result);
}
else {
result.dependOn(previous); // In case result is aborted, also abort the previous if still inflight.
previous.whenComplete((__, ___) -> offer(request, vessel));
}
handleAttempt(vessel, request, result, 1);
return result.handle((response, error) -> {
if (inflightById.compute(documentId, (__, current) -> current == result ? null : current) == null)
releaseSlot();
if (error != null) {
if (error instanceof FeedException) throw (FeedException) error;
throw new FeedException(documentId, error);
}
return response;
});
} | @Test
void testShutdown() throws IOException {
MockCluster cluster = new MockCluster();
AtomicLong nowNanos = new AtomicLong(0);
CircuitBreaker breaker = new GracePeriodCircuitBreaker(nowNanos::get, Duration.ofSeconds(1), Duration.ofMinutes(10));
HttpRequestStrategy strategy = new HttpRequestStrategy(new FeedClientBuilderImpl(List.of(URI.create("https://dummy.com:123")))
.setRetryStrategy(new FeedClient.RetryStrategy() {
@Override public int retries() { return 1; }
})
.setCircuitBreaker(breaker)
.setConnectionsPerEndpoint(3), // Must be >= 0.5x the number of test ops.
() -> cluster);
DocumentId id1 = DocumentId.of("ns", "type", "1");
DocumentId id2 = DocumentId.of("ns", "type", "2");
DocumentId id3 = DocumentId.of("ns", "type", "3");
DocumentId id4 = DocumentId.of("ns", "type", "4");
DocumentId id5 = DocumentId.of("ns", "type", "5");
HttpRequest failing = new HttpRequest("POST", "/", "", null, null, Duration.ofSeconds(1), nowNanos::get);
HttpRequest partial = new HttpRequest("POST", "/", "", null, null, Duration.ofSeconds(1), nowNanos::get);
HttpRequest request = new HttpRequest("POST", "/", "", null, null, Duration.ofSeconds(1), nowNanos::get);
HttpRequest blocking = new HttpRequest("POST", "/", "", null, null, Duration.ofSeconds(1), nowNanos::get);
// Enqueue some operations to the same id, which are serialised, and then shut down while operations are in flight.
Phaser phaser = new Phaser(2);
Phaser blocker = new Phaser(2);
cluster.expect((req, vessel) -> {
if (req == blocking) {
phaser.arriveAndAwaitAdvance(); // Synchronise with test main thread, and then ...
blocker.arriveAndAwaitAdvance(); // ... block dispatch thread, so we get something in the queue.
throw new RuntimeException("never"); // Dispatch thread should die, tearing down everything.
}
else if (req == partial) {
phaser.arriveAndAwaitAdvance(); // Let test thread enqueue more ops before failing (and retrying) this.
vessel.completeExceptionally(new IOException("failed"));
}
else if (req == failing) {
System.err.println("failing");
vessel.completeExceptionally(new RuntimeException("fatal"));
}
});
// inflight completes dispatch, but causes no response.
CompletableFuture<HttpResponse> inflight = strategy.enqueue(id1, request);
// serialised 1 and 2 are waiting for the above inflight to complete.
CompletableFuture<HttpResponse> serialised1 = strategy.enqueue(id1, request);
CompletableFuture<HttpResponse> serialised2 = strategy.enqueue(id1, request);
CompletableFuture<HttpResponse> retried = strategy.enqueue(id2, partial);
CompletableFuture<HttpResponse> failed = strategy.enqueue(id3, failing);
CompletableFuture<HttpResponse> blocked = strategy.enqueue(id4, blocking);
CompletableFuture<HttpResponse> delayed = strategy.enqueue(id5, request);
phaser.arriveAndAwaitAdvance(); // retried is allowed to dispatch, and will be retried async.
// failed immediately fails, and lets us assert the above retry is indeed enqueued.
assertEquals("ai.vespa.feed.client.FeedException: (id:ns:type::3) java.lang.RuntimeException: fatal",
assertThrows(ExecutionException.class, failed::get).getMessage());
phaser.arriveAndAwaitAdvance(); // blocked starts dispatch, and hangs, blocking dispatch thread.
// Current state: inflight is "inflight to cluster", serialised1/2 are waiting completion of it;
// blocked is blocking dispatch, delayed is enqueued, waiting for dispatch;
// failed has a partial result, and has a retry in the dispatch queue.
assertFalse(inflight.isDone());
assertFalse(serialised1.isDone());
assertFalse(serialised2.isDone());
assertTrue(failed.isDone());
assertFalse(retried.isDone());
assertFalse(blocked.isDone());
assertFalse(delayed.isDone());
// Kill dispatch thread, and see that all enqueued operations, and new ones, complete.
blocker.arriveAndAwaitAdvance();
assertEquals("ai.vespa.feed.client.FeedException: Operation aborted",
assertThrows(ExecutionException.class, inflight::get).getMessage());
assertEquals("ai.vespa.feed.client.FeedException: Operation aborted",
assertThrows(ExecutionException.class, serialised1::get).getMessage());
assertEquals("ai.vespa.feed.client.FeedException: Operation aborted",
assertThrows(ExecutionException.class, serialised2::get).getMessage());
assertEquals("ai.vespa.feed.client.FeedException: Operation aborted",
assertThrows(ExecutionException.class, blocked::get).getMessage());
assertEquals("ai.vespa.feed.client.FeedException: Operation aborted",
assertThrows(ExecutionException.class, delayed::get).getMessage());
assertEquals("ai.vespa.feed.client.FeedException: (id:ns:type::2) java.io.IOException: failed",
assertThrows(ExecutionException.class, retried::get).getMessage());
assertEquals("ai.vespa.feed.client.FeedException: Operation aborted",
assertThrows(ExecutionException.class, strategy.enqueue(id1, request)::get).getMessage());
} |
Set<SourceName> analyzeExpression(
final Expression expression,
final String clauseType
) {
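// Walk the expression tree with a Validator and collect the names of all sources it references.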
final Validator extractor = new Validator(clauseType);
extractor.process(expression, null);
return extractor.referencedSources;
} | @Test
public void shouldGetSourceForQualifiedColumnRef() {
// Given:
final ColumnName column = ColumnName.of("qualified");
final Expression expression = new QualifiedColumnReferenceExp(
SourceName.of("fully"),
column
);
when(sourceSchemas.sourcesWithField(any(), any())).thenReturn(sourceNames("something"));
// When:
analyzer.analyzeExpression(expression, CLAUSE_TYPE);
// Then:
verify(sourceSchemas).sourcesWithField(Optional.of(SourceName.of("fully")), column);
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName("list") List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "the list cannot be null"));
}
if (list.isEmpty()) {
return FEELFnResult.ofResult(null); // DMN spec, Table 75: ...or null if list is empty
}
BigDecimal sum = BigDecimal.ZERO;
for ( Object element : list ) {
if ( element instanceof BigDecimal ) {
sum = sum.add( (BigDecimal) element );
} else if ( element instanceof Number ) {
BigDecimal value = NumberEvalHelper.getBigDecimalOrNull(element );
if (value == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "an element in the list is not suitable for the sum"));
} else {
sum = sum.add( value );
}
} else {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "an element in the list is not a number"));
}
}
return FEELFnResult.ofResult( sum );
} | @Test
void invokeListParamContainsUnsupportedNumber() {
FunctionTestUtil.assertResultError(sumFunction.invoke(Arrays.asList(10, 2, Double.NaN)),
InvalidParametersEvent.class);
} |
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
checkNotNull(actual);
if (!actual.entrySet().contains(entry)) {
List<@Nullable Object> keyList = singletonList(key);
List<@Nullable Object> valueList = singletonList(value);
if (actual.containsKey(key)) {
Object actualValue = actual.get(key);
/*
* In the case of a null expected or actual value, clarify that the key *is* present and
* *is* expected to be present. That is, get() isn't returning null to indicate that the key
* is missing, and the user isn't making an assertion that the key is missing.
*/
StandardSubjectBuilder check = check("get(%s)", key);
if (value == null || actualValue == null) {
check = check.withMessage("key is present but with a different value");
}
// See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
} else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain keys",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (actual.containsValue(value)) {
Set<@Nullable Object> keys = new LinkedHashSet<>();
for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
if (Objects.equal(actualEntry.getValue(), value)) {
keys.add(actualEntry.getKey());
}
}
failWithoutActual(
fact("expected to contain entry", entry),
simpleFact("but did not"),
fact("though it did contain keys with that value", keys),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (hasMatchingToStringPair(actual.values(), valueList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain values",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else {
failWithActual("expected to contain entry", entry);
}
}
} | @Test
public void containsNullEntryValue() {
Map<String, String> actual = Maps.newHashMap();
actual.put(null, null);
expectFailureWhenTestingThat(actual).containsEntry("kurt", null);
assertFailureKeys(
"expected to contain entry",
"but did not",
"though it did contain keys with that value",
"full contents");
assertFailureValue("expected to contain entry", "kurt=null");
assertFailureValue("though it did contain keys with that value", "[null]");
} |
public boolean isTransient() {
return isTransient;
} | @Test
public void testTransient() {
assertTrue(converter(NULL_CONVERTER).isTransient());
assertTrue(converter(INTEGER_CONVERTER, NULL_CONVERTER).isTransient());
assertFalse(converter(INTEGER_CONVERTER).isTransient());
assertFalse(converter(INTEGER_CONVERTER, IDENTITY_CONVERTER).isTransient());
} |
@Override
public NativeQuerySpec<Record> select(String sql, Object... args) {
return new NativeQuerySpecImpl<>(this, sql, args, DefaultRecord::new, false);
} | @Test
public void testAgg() {
DefaultQueryHelper helper = new DefaultQueryHelper(database);
database.dml()
.insert("s_test")
.value("id", "agg-test")
.value("name", "agg")
.value("age", 111)
.execute()
.sync();
helper.select("select sum(age) num from s_test t")
.where(dsl -> dsl.is("name", "agg"))
.fetch()
.doOnNext(v -> System.out.println(JSON.toJSONString(v, SerializerFeature.PrettyFormat)))
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
} |
public void setContract(@Nullable Produce contract)
{
this.contract = contract;
setStoredContract(contract);
handleContractState();
} | @Test
public void redberriesContractRedberriesGrowing()
{
final long unixNow = Instant.now().getEpochSecond();
final long expectedCompletion = unixNow + 60;
// Get the bush patch
final FarmingPatch patch = farmingGuildPatches.get(Varbits.FARMING_4772);
assertNotNull(patch);
// Not ready to check
when(farmingTracker.predictPatch(patch))
.thenReturn(new PatchPrediction(Produce.REDBERRIES, CropState.GROWING, expectedCompletion, 2, 3));
farmingContractManager.setContract(Produce.REDBERRIES);
assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
assertEquals(CropState.GROWING, farmingContractManager.getContractCropState());
assertEquals(expectedCompletion, farmingContractManager.getCompletionTime());
} |
@Override
public void refreshPluginDataSelf(final List<PluginData> pluginDataList) {
LOG.info("start refresh plugin data self");
if (CollectionUtils.isEmpty(pluginDataList)) {
return;
}
BaseDataCache.getInstance().cleanPluginDataSelf(pluginDataList);
} | @Test
public void testRefreshPluginDataSelf() {
baseDataCache.cleanPluginData();
PluginData firstCachedPluginData = PluginData.builder().name(mockName1).build();
PluginData secondCachedPluginData = PluginData.builder().name(mockName2).build();
baseDataCache.cachePluginData(firstCachedPluginData);
baseDataCache.cachePluginData(secondCachedPluginData);
assertNotNull(baseDataCache.obtainPluginData(firstCachedPluginData.getName()));
assertNotNull(baseDataCache.obtainPluginData(secondCachedPluginData.getName()));
commonPluginDataSubscriber.refreshPluginDataSelf(Lists.newArrayList(firstCachedPluginData));
assertNull(baseDataCache.obtainPluginData(firstCachedPluginData.getName()));
assertNotNull(baseDataCache.obtainPluginData(secondCachedPluginData.getName()));
} |
@Override
public MergeAppend appendFile(DataFile file) {
add(file);
return this;
} | @TestTemplate
public void testAppendWithManifestScanExecutor() {
assertThat(listManifestFiles()).isEmpty();
TableMetadata base = readMetadata();
assertThat(base.currentSnapshot()).isNull();
assertThat(base.lastSequenceNumber()).isEqualTo(0);
AtomicInteger scanThreadsIndex = new AtomicInteger(0);
Snapshot snapshot =
commit(
table,
table
.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.scanManifestsWith(
Executors.newFixedThreadPool(
1,
runnable -> {
Thread thread = new Thread(runnable);
thread.setName("scan-" + scanThreadsIndex.getAndIncrement());
thread.setDaemon(true); // daemon threads will be terminated abruptly when the JVM exits
return thread;
})),
branch);
assertThat(scanThreadsIndex.get())
.as("Thread should be created in provided pool")
.isGreaterThan(0);
assertThat(snapshot).isNotNull();
} |
public void setProjectionDataMapSerializer(ProjectionDataMapSerializer projectionDataMapSerializer)
{
RestliRequestOptions existingRequestOptions =
(_requestOptions == null) ? RestliRequestOptions.DEFAULT_OPTIONS : _requestOptions;
// If the desired value is same as existing, this is a no-op.
if (existingRequestOptions.getProjectionDataMapSerializer().equals(projectionDataMapSerializer))
{
return;
}
_requestOptions = new RestliRequestOptionsBuilder(existingRequestOptions)
.setProjectionDataMapSerializer(projectionDataMapSerializer)
.build();
} | @Test
public void testSetProjectionDataMapSerializer()
{
ProjectionDataMapSerializer customSerializer = (paramName, pathSpecs) -> new DataMap();
GetRequest<TestRecord> getRequest = generateDummyRequestBuilder().build();
getRequest.setProjectionDataMapSerializer(customSerializer);
assertEquals(getRequest.getRequestOptions().getProjectionDataMapSerializer(), customSerializer);
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
} | @Test
void invokeZero() {
FunctionTestUtil.assertResultBigDecimal(ceilingFunction.invoke(BigDecimal.ZERO), BigDecimal.ZERO);
} |
@Override
public void onMatch(RelOptRuleCall call) {
final Sort sort = call.rel(0);
final SortExchange exchange = call.rel(1);
final RelMetadataQuery metadataQuery = call.getMetadataQuery();
if (RelMdUtil.checkInputForCollationAndLimit(
metadataQuery,
exchange.getInput(),
sort.getCollation(),
sort.offset,
sort.fetch)) {
// Don't rewrite anything if the input is already sorted AND the
// input node would already return fewer than sort.offset + sort.fetch
// rows (e.g. there is already an inner limit applied)
return;
}
RelCollation collation = sort.getCollation();
Preconditions.checkArgument(
collation.equals(exchange.getCollation()),
"Expected collation on exchange and sort to be the same"
);
final RexNode fetch;
if (sort.fetch == null) {
fetch = null;
} else if (sort.offset == null) {
fetch = sort.fetch;
} else {
int total = RexExpressionUtils.getValueAsInt(sort.fetch) + RexExpressionUtils.getValueAsInt(sort.offset);
fetch = REX_BUILDER.makeLiteral(total, TYPE_FACTORY.createSqlType(SqlTypeName.INTEGER));
}
// Do not transform the sort-exchange pair when there is no fetch limit, or when the fetch amount exceeds the copy threshold.
if (!collation.getFieldCollations().isEmpty()
&& (fetch == null || RexExpressionUtils.getValueAsInt(fetch) > DEFAULT_SORT_EXCHANGE_COPY_THRESHOLD)) {
return;
}
final RelNode newExchangeInput = sort.copy(sort.getTraitSet(), exchange.getInput(), collation, null, fetch);
final RelNode exchangeCopy = exchange.copy(exchange.getTraitSet(), newExchangeInput, exchange.getDistribution());
final RelNode sortCopy = sort.copy(sort.getTraitSet(), exchangeCopy, collation,
sort.offset == null ? REX_ZERO : sort.offset, sort.fetch);
call.transformTo(sortCopy);
} | @Test
public void shouldMatchLimitNoOffsetNoSort() {
// Given:
SortExchange exchange =
PinotLogicalSortExchange.create(_input, RelDistributions.SINGLETON, RelCollations.EMPTY, false, false);
Sort sort = LogicalSort.create(exchange, RelCollations.EMPTY, null, literal(1));
Mockito.when(_call.rel(0)).thenReturn(sort);
Mockito.when(_call.rel(1)).thenReturn(exchange);
// When:
PinotSortExchangeCopyRule.SORT_EXCHANGE_COPY.onMatch(_call);
// Then:
ArgumentCaptor<RelNode> sortCopyCapture = ArgumentCaptor.forClass(LogicalSort.class);
Mockito.verify(_call, Mockito.times(1)).transformTo(sortCopyCapture.capture());
RelNode sortCopy = sortCopyCapture.getValue();
Assert.assertTrue(sortCopy instanceof LogicalSort);
Assert.assertTrue(((LogicalSort) sortCopy).getInput() instanceof PinotLogicalSortExchange);
Assert.assertTrue(((LogicalSort) sortCopy).getInput().getInput(0) instanceof LogicalSort);
LogicalSort innerSort = (LogicalSort) ((LogicalSort) sortCopy).getInput().getInput(0);
Assert.assertEquals(innerSort.getCollation().getKeys().size(), 0);
Assert.assertNull((innerSort).offset);
Assert.assertEquals((innerSort).fetch, literal(1));
} |
@VisibleForTesting
WxMpService getWxMpService(Integer userType) {
// Step 1: look up the client config in the DB and, if enabled, use the corresponding cached WxMpService
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMpServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: no DB config exists, so fall back to the WxMpService configured in application-*.yaml
return wxMpService;
} | @Test
public void testGetWxMpService_clientDisable() {
// prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// mock data
SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())
.setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MP.getType()));
socialClientMapper.insert(client);
// invoke
WxMpService result = socialClientService.getWxMpService(userType);
// assert
assertSame(wxMpService, result);
} |
public PrivateKey convertPrivateKey(final String privatePemKey) {
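// Parse the PEM text with BouncyCastle's PEMParser and convert the key info into a JCA PrivateKey.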
StringReader keyReader = new StringReader(privatePemKey);
try {
PrivateKeyInfo privateKeyInfo = PrivateKeyInfo
.getInstance(new PEMParser(keyReader).readObject());
return new JcaPEMKeyConverter().getPrivateKey(privateKeyInfo);
} catch (IOException exception) {
throw new RuntimeException(exception);
}
} | @Test
void givenEmptyPrivateKey_whenConvertPrivateKey_thenThrowRuntimeException() {
// Given
String emptyPrivatePemKey = "";
// When & Then
assertThatThrownBy(() -> KeyConverter.convertPrivateKey(emptyPrivatePemKey))
.isInstanceOf(RuntimeException.class)
.hasCauseInstanceOf(PEMException.class)
.hasMessageContaining("PEMException");
} |
@ShellMethod(key = "stats wa", value = "Write Amplification. Ratio of how many records were upserted to how many "
+ "records were actually written")
public String writeAmplificationStats(
@ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit,
@ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
@ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
@ShellOption(value = {"--headeronly"}, help = "Print Header Only",
defaultValue = "false") final boolean headerOnly)
throws IOException {
long totalRecordsUpserted = 0;
long totalRecordsWritten = 0;
HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
HoodieTimeline timeline = activeTimeline.getCommitAndReplaceTimeline().filterCompletedInstants();
List<Comparable[]> rows = new ArrayList<>();
DecimalFormat df = new DecimalFormat("#.00");
for (HoodieInstant instantTime : timeline.getInstants()) {
String waf = "0";
HoodieCommitMetadata commit = HoodieCommitMetadata.fromBytes(activeTimeline.getInstantDetails(instantTime).get(),
HoodieCommitMetadata.class);
if (commit.fetchTotalUpdateRecordsWritten() > 0) {
waf = df.format((float) commit.fetchTotalRecordsWritten() / commit.fetchTotalUpdateRecordsWritten());
}
rows.add(new Comparable[] {instantTime.getTimestamp(), commit.fetchTotalUpdateRecordsWritten(),
commit.fetchTotalRecordsWritten(), waf});
totalRecordsUpserted += commit.fetchTotalUpdateRecordsWritten();
totalRecordsWritten += commit.fetchTotalRecordsWritten();
}
String waf = "0";
if (totalRecordsUpserted > 0) {
waf = df.format((float) totalRecordsWritten / totalRecordsUpserted);
}
rows.add(new Comparable[] {"Total", totalRecordsUpserted, totalRecordsWritten, waf});
TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_COMMIT_TIME)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_UPSERTED)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_WRITTEN)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_WRITE_AMPLIFICATION_FACTOR);
return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, rows);
} | @Test
public void testWriteAmplificationStats() throws Exception {
// generate data and metadata
Map<String, Integer[]> data = new LinkedHashMap<>();
data.put("100", new Integer[] {15, 10});
data.put("101", new Integer[] {20, 10});
data.put("102", new Integer[] {15, 15});
for (Map.Entry<String, Integer[]> entry : data.entrySet()) {
String k = entry.getKey();
Integer[] v = entry.getValue();
HoodieTestCommitMetadataGenerator.createCommitFileWithMetadata(tablePath, k, storageConf(),
Option.of(v[0]), Option.of(v[1]));
}
Object result = shell.evaluate(() -> "stats wa");
assertTrue(ShellEvaluationResultUtil.isSuccess(result));
// generate expect
List<Comparable[]> rows = new ArrayList<>();
DecimalFormat df = new DecimalFormat("#.00");
data.forEach((key, value) -> {
// there are two partitions, so the expected counts are doubled
rows.add(new Comparable[] {key, value[1] * 2, value[0] * 2, df.format((float) value[0] / value[1])});
});
int totalWrite = data.values().stream().map(integers -> integers[0] * 2).mapToInt(s -> s).sum();
int totalUpdate = data.values().stream().map(integers -> integers[1] * 2).mapToInt(s -> s).sum();
rows.add(new Comparable[] {"Total", totalUpdate, totalWrite, df.format((float) totalWrite / totalUpdate)});
TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_COMMIT_TIME)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_UPSERTED)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_WRITTEN)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_WRITE_AMPLIFICATION_FACTOR);
String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
expected = removeNonWordAndStripSpace(expected);
String got = removeNonWordAndStripSpace(result.toString());
assertEquals(expected, got);
} |
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
if (remaining.split("/").length > 1) {
throw new IllegalArgumentException("Invalid URI: " + URISupport.sanitizeUri(uri));
}
SplunkHECEndpoint answer = new SplunkHECEndpoint(uri, this, new SplunkHECConfiguration());
setProperties(answer, parameters);
answer.setSplunkURL(remaining);
return answer;
} | @Test
public void testSplunkURLIsNotOverriddenByQuery() throws Exception {
SplunkHECEndpoint endpoint = (SplunkHECEndpoint) component.createEndpoint(
"splunk-hec:192.168.0.1:18808?token=11111111-1111-1111-1111-111111111111&splunkURL=ignored");
assertEquals("192.168.0.1:18808", endpoint.getSplunkURL());
endpoint.init();
assertEquals("192.168.0.1:18808", endpoint.getSplunkURL());
} |
public int accumulateSum(int... nums) {
LOGGER.info(SOURCE_MODULE, VERSION);
return Arrays.stream(nums).reduce(0, Integer::sum);
} | @Test
void testAccumulateSum() {
assertEquals(0, source.accumulateSum(-1, 0, 1));
} |
public long computeTotalDiff() {
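// Consumer lag per queue is broker offset minus consumer offset; sum it across all queues in the offset table.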
long diffTotal = 0L;
for (Entry<MessageQueue, OffsetWrapper> entry : this.offsetTable.entrySet()) {
diffTotal += entry.getValue().getBrokerOffset() - entry.getValue().getConsumerOffset();
}
return diffTotal;
} | @Test
public void testComputeTotalDiff() {
ConsumeStats stats = new ConsumeStats();
MessageQueue messageQueue = Mockito.mock(MessageQueue.class);
OffsetWrapper offsetWrapper = Mockito.mock(OffsetWrapper.class);
Mockito.when(offsetWrapper.getConsumerOffset()).thenReturn(1L);
Mockito.when(offsetWrapper.getBrokerOffset()).thenReturn(2L);
stats.getOffsetTable().put(messageQueue, offsetWrapper);
MessageQueue messageQueue2 = Mockito.mock(MessageQueue.class);
OffsetWrapper offsetWrapper2 = Mockito.mock(OffsetWrapper.class);
Mockito.when(offsetWrapper2.getConsumerOffset()).thenReturn(2L);
Mockito.when(offsetWrapper2.getBrokerOffset()).thenReturn(3L);
stats.getOffsetTable().put(messageQueue2, offsetWrapper2);
Assert.assertEquals(2L, stats.computeTotalDiff());
} |
public String convert(ILoggingEvent event) {
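// Builds the syslog prefix: "<PRI>" (facility plus severity), a timestamp, and the local host name.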
StringBuilder sb = new StringBuilder();
int pri = facility + LevelToSyslogSeverity.convert(event);
sb.append("<");
sb.append(pri);
sb.append(">");
sb.append(computeTimeStampString(event.getTimeStamp()));
sb.append(' ');
sb.append(localHostName);
sb.append(' ');
return sb.toString();
} | @Test
public void multipleConversions() {
LoggingEvent le = createLoggingEvent();
calendar.set(2012, Calendar.OCTOBER, 11, 22, 14, 15);
le.setTimeStamp(calendar.getTimeInMillis());
assertEquals("<191>Oct 11 22:14:15 " + HOSTNAME + " ", converter.convert(le));
assertEquals("<191>Oct 11 22:14:15 " + HOSTNAME + " ", converter.convert(le));
calendar.set(2012, Calendar.OCTOBER, 11, 22, 14, 16);
le.setTimeStamp(calendar.getTimeInMillis());
assertEquals("<191>Oct 11 22:14:16 " + HOSTNAME + " ", converter.convert(le));
} |
@Override
public Map<String, String> getTopicConfig(final String topicName) {
return topicConfig(topicName, true);
} | @Test
public void shouldGetTopicConfig() {
// Given:
givenTopicConfigs(
"fred",
overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
defaultConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
);
// When:
final Map<String, String> config = kafkaTopicClient.getTopicConfig("fred");
// Then:
assertThat(config.get(TopicConfig.RETENTION_MS_CONFIG), is("12345"));
assertThat(config.get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG), is("1"));
} |
@Override
public V put(K key, V value) {
checkNotNull(key, "Key cannot be null.");
checkNotNull(value, "Value cannot be null.");
byte[] prevVal = items.put(serializer.encode(key), serializer.encode(value));
if (prevVal == null) {
return null;
}
return serializer.decode(prevVal);
} | @Test public void testPut() throws Exception {
// Tests insertion behavior (particularly the return of the previous value)
fillMap(10);
for (int i = 0; i < 10; i++) {
assertEquals("Put should return the previous value", Integer.valueOf(i), map.put(i, i + 1));
}
assertNull(map.put(11, 11));
} |
public Mappings getMapping(String tableName) {
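// Fall back to empty properties and a fresh Source when no structure has been cached for this table.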
Map<String, Object> properties =
mappingStructures.containsKey(tableName) ?
mappingStructures.get(tableName).properties : new HashMap<>();
Mappings.Source source =
mappingStructures.containsKey(tableName) ?
mappingStructures.get(tableName).source : new Mappings.Source();
return Mappings.builder()
.type(ElasticSearchClient.TYPE)
.properties(properties)
.source(source)
.build();
} | @Test
public void getMapping() {
IndexStructures structures = new IndexStructures();
HashMap<String, Object> properties = new HashMap<>();
properties.put("a", "b");
properties.put("c", "d");
structures.putStructure(
"test", Mappings.builder()
.type(ElasticSearchClient.TYPE)
.properties(properties)
.build(), new HashMap<>());
Mappings mapping = structures.getMapping("test");
Assertions.assertEquals(mapping.getProperties(), properties);
structures.putStructure(
"test2", Mappings.builder()
.type(ElasticSearchClient.TYPE)
.properties(new HashMap<>())
.build(), new HashMap<>());
mapping = structures.getMapping("test2");
Assertions.assertTrue(mapping.getProperties().isEmpty());
// test with source
IndexStructures structuresSource = new IndexStructures();
Mappings.Source source = new Mappings.Source();
source.getExcludes().add("a");
structuresSource.putStructure(
"test", Mappings.builder()
.type(ElasticSearchClient.TYPE)
.properties(properties)
.source(source)
.build(), new HashMap<>());
Assertions.assertEquals(properties, structuresSource.getMapping("test").getProperties());
Assertions.assertEquals(source.getExcludes(), structuresSource.getMapping("test").getSource().getExcludes());
} |
public Filter parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
if (!filterExpression.contains(FIELD_AND_VALUE_SEPARATOR)) {
throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
}
final String[] split = filterExpression.split(FIELD_AND_VALUE_SEPARATOR, 2);
final String fieldPart = split[0];
if (fieldPart == null || fieldPart.isEmpty()) {
throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
}
final String valuePart = split[1];
if (valuePart == null || valuePart.isEmpty()) {
throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
}
final EntityAttribute attributeMetaData = getAttributeMetaData(attributes, fieldPart);
final SearchQueryField.Type fieldType = attributeMetaData.type();
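// A value containing the range separator is parsed as a range filter; a missing bound on either side maps to null (open-ended).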
if (isRangeValueExpression(valuePart, fieldType)) {
if (valuePart.startsWith(RANGE_VALUES_SEPARATOR)) {
return new RangeFilter(attributeMetaData.id(),
null,
extractValue(fieldType, valuePart.substring(RANGE_VALUES_SEPARATOR.length()))
);
} else if (valuePart.endsWith(RANGE_VALUES_SEPARATOR)) {
return new RangeFilter(attributeMetaData.id(),
extractValue(fieldType, valuePart.substring(0, valuePart.length() - RANGE_VALUES_SEPARATOR.length())),
null
);
} else {
final String[] ranges = valuePart.split(RANGE_VALUES_SEPARATOR);
return new RangeFilter(attributeMetaData.id(),
extractValue(fieldType, ranges[0]),
extractValue(fieldType, ranges[1])
);
}
} else {
return new SingleValueFilter(attributeMetaData.id(), extractValue(fieldType, valuePart));
}
} | @Test
void parsesFilterExpressionCorrectlyForObjectIdType() {
assertEquals(new SingleValueFilter("id", new ObjectId("5f4dfb9c69be46153b9a9a7b")),
toTest.parseSingleExpression("id:5f4dfb9c69be46153b9a9a7b",
List.of(EntityAttribute.builder()
.id("id")
.title("Id")
.type(SearchQueryField.Type.OBJECT_ID)
.filterable(true)
.build())
));
} |
@Override
public KeyGroupsStateHandle getIntersection(KeyGroupRange keyGroupRange) {
KeyGroupRangeOffsets offsets = groupRangeOffsets.getIntersection(keyGroupRange);
if (offsets.getKeyGroupRange().getNumberOfKeyGroups() <= 0) {
return null;
}
return new KeyGroupsStateHandle(offsets, stateHandle, stateHandleId);
} | @Test
void testEmptyIntersection() {
KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(0, 7);
byte[] dummy = new byte[10];
StreamStateHandle streamHandle = new ByteStreamStateHandle("test", dummy);
KeyGroupsStateHandle handle = new KeyGroupsStateHandle(offsets, streamHandle);
// return null if the keygroup intersection is empty.
KeyGroupRange newRange = new KeyGroupRange(8, 11);
assertThat(handle.getIntersection(newRange)).isNull();
} |
public static String getMD5Checksum(File file) throws IOException, NoSuchAlgorithmException {
return getChecksum(MD5, file);
} | @Test
public void testGetMD5Checksum_File() throws Exception {
File file = new File(this.getClass().getClassLoader().getResource("checkSumTest.file").toURI().getPath());
String expResult = "f0915c5f46b8cfa283e5ad67a09b3793";
String result = Checksum.getMD5Checksum(file);
assertEquals(expResult, result);
} |
protected FileStatus[] listStatus(JobConf job) throws IOException {
Path[] dirs = getInputPaths(job);
if (dirs.length == 0) {
throw new IOException("No input paths specified in job");
}
// get tokens for all the required FileSystems..
TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job);
// Whether we need to recursively look into the directory structure
boolean recursive = job.getBoolean(INPUT_DIR_RECURSIVE, false);
// creates a MultiPathFilter with the hiddenFileFilter and the
// user provided one (if any).
List<PathFilter> filters = new ArrayList<PathFilter>();
filters.add(hiddenFileFilter);
PathFilter jobFilter = getInputPathFilter(job);
if (jobFilter != null) {
filters.add(jobFilter);
}
PathFilter inputFilter = new MultiPathFilter(filters);
FileStatus[] result;
int numThreads = job
.getInt(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
StopWatch sw = new StopWatch().start();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
} else {
Iterable<FileStatus> locatedFiles = null;
try {
LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
job, dirs, recursive, inputFilter, false);
locatedFiles = locatedFileStatusFetcher.getFileStatuses();
} catch (InterruptedException e) {
throw (IOException)
new InterruptedIOException("Interrupted while getting file statuses")
.initCause(e);
}
result = Iterables.toArray(locatedFiles, FileStatus.class);
}
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Time taken to get FileStatuses: "
+ sw.now(TimeUnit.MILLISECONDS));
}
LOG.info("Total input files to process : " + result.length);
return result;
} | @Test
public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.configureTestErrorOnNonExistantDir(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
try {
fif.listStatus(jobConf);
Assert.fail("Expecting an IOException for a missing Input path");
} catch (IOException e) {
Path expectedExceptionPath = new Path(TEST_ROOT_DIR, "input2");
expectedExceptionPath = localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals(
"Input path does not exist: " + expectedExceptionPath.toString(),
e.getMessage());
}
} |
@Override
public String toString() {
StringBuilder sb = new StringBuilder(256);
for (Timing type : Timing.values()) {
if (sb.length() > 0) {
sb.append(" ");
}
sb.append(type.name().toLowerCase())
.append("Time=").append(get(type));
}
return sb.toString();
} | @Test
public void testToString() {
ProcessingDetails details = new ProcessingDetails(TimeUnit.MICROSECONDS);
details.set(Timing.ENQUEUE, 10);
details.set(Timing.QUEUE, 20, TimeUnit.MILLISECONDS);
assertEquals("enqueueTime=10 queueTime=20000 handlerTime=0 " +
"processingTime=0 lockfreeTime=0 lockwaitTime=0 locksharedTime=0 " +
"lockexclusiveTime=0 responseTime=0", details.toString());
} |
ControllerResult<CreateTopicsResponseData> createTopics(
ControllerRequestContext context,
CreateTopicsRequestData request,
Set<String> describable
) {
Map<String, ApiError> topicErrors = new HashMap<>();
List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
// Check the topic names.
validateNewTopicNames(topicErrors, request.topics(), topicsWithCollisionChars);
// Identify topics that already exist and mark them with the appropriate error
request.topics().stream().filter(creatableTopic -> topicsByName.containsKey(creatableTopic.name()))
.forEach(t -> topicErrors.put(t.name(), new ApiError(Errors.TOPIC_ALREADY_EXISTS,
"Topic '" + t.name() + "' already exists.")));
// Verify that the configurations for the new topics are OK, and figure out what
// configurations should be created.
Map<ConfigResource, Map<String, Entry<OpType, String>>> configChanges =
computeConfigChanges(topicErrors, request.topics());
// Try to create whatever topics are needed.
Map<String, CreatableTopicResult> successes = new HashMap<>();
for (CreatableTopic topic : request.topics()) {
if (topicErrors.containsKey(topic.name())) continue;
// Figure out what ConfigRecords should be created, if any.
ConfigResource configResource = new ConfigResource(TOPIC, topic.name());
Map<String, Entry<OpType, String>> keyToOps = configChanges.get(configResource);
List<ApiMessageAndVersion> configRecords;
if (keyToOps != null) {
ControllerResult<ApiError> configResult =
configurationControl.incrementalAlterConfig(configResource, keyToOps, true);
if (configResult.response().isFailure()) {
topicErrors.put(topic.name(), configResult.response());
continue;
} else {
configRecords = configResult.records();
}
} else {
configRecords = Collections.emptyList();
}
ApiError error;
try {
error = createTopic(context, topic, records, successes, configRecords, describable.contains(topic.name()));
} catch (ApiException e) {
error = ApiError.fromThrowable(e);
}
if (error.isFailure()) {
topicErrors.put(topic.name(), error);
}
}
// Create responses for all topics.
CreateTopicsResponseData data = new CreateTopicsResponseData();
StringBuilder resultsBuilder = new StringBuilder();
String resultsPrefix = "";
for (CreatableTopic topic : request.topics()) {
ApiError error = topicErrors.get(topic.name());
if (error != null) {
data.topics().add(new CreatableTopicResult().
setName(topic.name()).
setErrorCode(error.error().code()).
setErrorMessage(error.message()));
resultsBuilder.append(resultsPrefix).append(topic).append(": ").
append(error.error()).append(" (").append(error.message()).append(")");
resultsPrefix = ", ";
continue;
}
CreatableTopicResult result = successes.get(topic.name());
data.topics().add(result);
resultsBuilder.append(resultsPrefix).append(topic).append(": ").
append("SUCCESS");
resultsPrefix = ", ";
}
if (request.validateOnly()) {
log.info("Validate-only CreateTopics result(s): {}", resultsBuilder);
return ControllerResult.atomicOf(Collections.emptyList(), data);
} else {
log.info("CreateTopics result(s): {}", resultsBuilder);
return ControllerResult.atomicOf(records, data);
}
} | @Test
public void testCreateTopicsWithMutationQuotaExceeded() {
ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
ReplicationControlManager replicationControl = ctx.replicationControl;
CreateTopicsRequestData request = new CreateTopicsRequestData();
request.topics().add(new CreatableTopic().setName("foo").
setNumPartitions(-1).setReplicationFactor((short) -1));
ctx.registerBrokers(0, 1, 2);
ctx.unfenceBrokers(0, 1, 2);
ControllerRequestContext requestContext =
anonymousContextWithMutationQuotaExceededFor(ApiKeys.CREATE_TOPICS);
ControllerResult<CreateTopicsResponseData> result =
replicationControl.createTopics(requestContext, request, Collections.singleton("foo"));
CreateTopicsResponseData expectedResponse = new CreateTopicsResponseData();
expectedResponse.topics().add(new CreatableTopicResult().setName("foo").
setErrorCode(THROTTLING_QUOTA_EXCEEDED.code()).
setErrorMessage(QUOTA_EXCEEDED_IN_TEST_MSG));
assertEquals(expectedResponse, result.response());
} |
@Override
public ResultSet getBestRowIdentifier(final String catalog, final String schema, final String table, final int scope, final boolean nullable) throws SQLException {
return createDatabaseMetaDataResultSet(getDatabaseMetaData().getBestRowIdentifier(getActualCatalog(catalog), getActualSchema(schema),
getActualTable(getActualCatalog(catalog), table), scope, nullable));
} | @Test
void assertGetBestRowIdentifier() throws SQLException {
when(databaseMetaData.getBestRowIdentifier("test", null, null, 1, true)).thenReturn(resultSet);
assertThat(shardingSphereDatabaseMetaData.getBestRowIdentifier("test", null, null, 1, true), instanceOf(DatabaseMetaDataResultSet.class));
} |
public FlowableHttpClient determineHttpClient() {
if (httpClient != null) {
return httpClient;
} else if (isApacheHttpComponentsPresent) {
// Backwards compatibility: if Apache HttpComponents is present, it takes priority
this.httpClient = new ApacheHttpComponentsFlowableHttpClient(this);
return this.httpClient;
} else if (isSpringWebClientPresent && isReactorHttpClientPresent) {
this.httpClient = new SpringWebClientFlowableHttpClient(this);
return httpClient;
} else if (isApacheHttpComponents5Present) {
ApacheHttpComponents5FlowableHttpClient httpClient = new ApacheHttpComponents5FlowableHttpClient(this);
this.httpClient = httpClient;
this.closeRunnable = httpClient::close;
return this.httpClient;
} else {
throw new FlowableException("Failed to determine FlowableHttpClient");
}
} | @Test
void determineHttpClientWhenNotSet() {
HttpClientConfig config = new HttpClientConfig();
assertThat(config.determineHttpClient()).isInstanceOf(ApacheHttpComponentsFlowableHttpClient.class);
} |
public static byte[] generateKey(ZUCAlgorithm algorithm) {
return KeyUtil.generateKey(algorithm.value).getEncoded();
} | @Test
public void zuc128Test(){
final byte[] secretKey = ZUC.generateKey(ZUC.ZUCAlgorithm.ZUC_128);
byte[] iv = RandomUtil.randomBytes(16);
final ZUC zuc = new ZUC(ZUC.ZUCAlgorithm.ZUC_128, secretKey, iv);
String msg = RandomUtil.randomString(500);
byte[] crypt2 = zuc.encrypt(msg);
String msg2 = zuc.decryptStr(crypt2, CharsetUtil.CHARSET_UTF_8);
assertEquals(msg, msg2);
} |
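A hedged variant sketch of the same round trip for the cipher above; the ZUC_256 enum member and the 25-byte IV length are assumptions about this library, not confirmed by the excerpt.
// Assumption: the enum also exposes ZUC_256; ZUC-256 conventionally takes a
// 32-byte key and a 25-byte IV — adjust if the implementation differs.
final byte[] key256 = ZUC.generateKey(ZUC.ZUCAlgorithm.ZUC_256);
final byte[] iv256 = RandomUtil.randomBytes(25);
final ZUC zuc256 = new ZUC(ZUC.ZUCAlgorithm.ZUC_256, key256, iv256);
assertEquals("msg", zuc256.decryptStr(zuc256.encrypt("msg"), CharsetUtil.CHARSET_UTF_8));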
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
return helper.interpret(session, st, context);
} | @Test
void should_execute_statement_with_request_timeout() {
// Given
String statement = "@requestTimeOut=10000000\n" +
"SELECT * FROM zeppelin.artists;";
// When
final InterpreterResult actual = interpreter.interpret(statement, intrContext);
// Then
assertEquals(Code.SUCCESS, actual.code());
} |
@Override
public List<RoleDO> getRoleListByStatus(Collection<Integer> statuses) {
return roleMapper.selectListByStatus(statuses);
} | @Test
public void testGetRoleListByStatus() {
// Mock data
RoleDO dbRole01 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
roleMapper.insert(dbRole01);
RoleDO dbRole02 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
roleMapper.insert(dbRole02);
// Invoke
List<RoleDO> list = roleService.getRoleListByStatus(
singleton(CommonStatusEnum.ENABLE.getStatus()));
// Assert
assertEquals(1, list.size());
assertPojoEquals(dbRole01, list.get(0));
} |
@Override
public AuthenticationResult authenticate(final ChannelHandlerContext context, final PacketPayload payload) {
if (SSL_REQUEST_PAYLOAD_LENGTH == payload.getByteBuf().markReaderIndex().readInt() && SSL_REQUEST_CODE == payload.getByteBuf().readInt()) {
if (ProxySSLContext.getInstance().isSSLEnabled()) {
SslHandler sslHandler = new SslHandler(ProxySSLContext.getInstance().newSSLEngine(context.alloc()), true);
context.pipeline().addFirst(SslHandler.class.getSimpleName(), sslHandler);
context.writeAndFlush(new PostgreSQLSSLWillingPacket());
} else {
context.writeAndFlush(new PostgreSQLSSLUnwillingPacket());
}
return AuthenticationResultBuilder.continued();
}
payload.getByteBuf().resetReaderIndex();
AuthorityRule rule = ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData().getSingleRule(AuthorityRule.class);
return startupMessageReceived ? processPasswordMessage(context, (PostgreSQLPacketPayload) payload, rule) : processStartupMessage(context, (PostgreSQLPacketPayload) payload, rule);
} | @Test
void assertSSLUnwilling() {
ByteBuf byteBuf = createByteBuf(8, 8);
byteBuf.writeInt(8);
byteBuf.writeInt(80877103);
PacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
ChannelHandlerContext context = mock(ChannelHandlerContext.class);
AuthenticationResult actual = new OpenGaussAuthenticationEngine().authenticate(context, payload);
verify(context).writeAndFlush(any(PostgreSQLSSLUnwillingPacket.class));
assertFalse(actual.isFinished());
} |
@Override
public boolean isReadable() {
return false;
} | @Test
public void testIsReadable() {
EmptyByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT);
assertFalse(empty.isReadable());
assertFalse(empty.isReadable(1));
} |
@SuppressWarnings("unchecked")
public static void addNamedOutput(Job job, String namedOutput, Class<? extends OutputFormat> outputFormatClass,
Schema keySchema) {
addNamedOutput(job, namedOutput, outputFormatClass, keySchema, null);
} | @Test
void avroInput() throws Exception {
Job job = Job.getInstance();
FileInputFormat.setInputPaths(job,
new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
job.setInputFormatClass(AvroKeyInputFormat.class);
AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
AvroMultipleOutputs.addNamedOutput(job, "myavro3", AvroKeyOutputFormat.class, TextStats.SCHEMA$, null);
job.setMapperClass(StatCountMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
job.setReducerClass(SpecificStatsReducer.class);
AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
job.setOutputFormatClass(AvroKeyOutputFormat.class);
Path outputPath = new Path(DIR.getPath() + "/testAvroInput");
FileOutputFormat.setOutputPath(job, outputPath);
assertTrue(job.waitForCompletion(true));
// Check that the results from the MapReduce were as expected.
FileSystem fileSystem = FileSystem.get(job.getConfiguration());
FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/myavro3-*"));
assertEquals(1, outputFiles.length);
Map<String, Integer> counts = new HashMap<>();
try (DataFileReader<TextStats> reader = new DataFileReader<>(
new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
for (TextStats record : reader) {
counts.put(record.getName().toString(), record.getCount());
}
}
assertEquals(3, counts.get("apple").intValue());
assertEquals(2, counts.get("banana").intValue());
assertEquals(1, counts.get("carrot").intValue());
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getLowBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getLowEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
} | @Test
void invokeParamRangeAndSingle() {
FunctionTestUtil.assertResult( startedByFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
"f" ),
Boolean.FALSE );
FunctionTestUtil.assertResult( startedByFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
"a"),
Boolean.TRUE );
FunctionTestUtil.assertResult( startedByFunction.invoke(
new RangeImpl( Range.RangeBoundary.OPEN, "a", "f", Range.RangeBoundary.OPEN ),
"a" ),
Boolean.FALSE );
FunctionTestUtil.assertResult( startedByFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
"g" ),
Boolean.FALSE );
} |
public static <T> List<List<T>> splitBySize(List<T> list, int expectedSize)
throws NullPointerException, IllegalArgumentException {
Preconditions.checkNotNull(list, "list must not be null");
Preconditions.checkArgument(expectedSize > 0, "expectedSize must larger than 0");
if (1 == expectedSize) {
return Collections.singletonList(list);
}
int splitSize = Math.min(expectedSize, list.size());
List<List<T>> result = new ArrayList<List<T>>(splitSize);
for (int i = 0; i < splitSize; i++) {
result.add(new ArrayList<>());
}
int index = 0;
for (T t : list) {
result.get(index).add(t);
index = (index + 1) % splitSize;
}
return result;
} | @Test
public void testSplitBySizeWithNullList() {
List<Integer> lists = null;
int expectSize = 10;
expectedEx.expect(NullPointerException.class);
expectedEx.expectMessage("list must not be null");
ListUtil.splitBySize(lists, expectSize);
} |
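For the happy path not covered by the test above, a minimal usage sketch of the round-robin distribution (the input values are illustrative):
// splitBySize fills min(expectedSize, list.size()) sublists in round-robin
// order, so sublist lengths differ by at most one element.
List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
List<List<Integer>> parts = ListUtil.splitBySize(numbers, 3);
// parts == [[1, 4, 7], [2, 5], [3, 6]]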
@Override
public StorageObject upload(final Path file, Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback prompt) throws BackgroundException {
if(this.threshold(status)) {
try {
return new S3MultipartUploadService(session, writer, acl).upload(file, local, throttle, listener, status, prompt);
}
catch(NotfoundException | InteroperabilityException e) {
log.warn(String.format("Failure %s using multipart upload. Fallback to single upload.", e));
status.append(false);
try {
return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt);
}
catch(BackgroundException f) {
log.warn(String.format("Failure %s using single upload. Throw original multipart failure %s", e, e));
throw e;
}
}
}
// Use single upload service
return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt);
} | @Test
public void testUploadSinglePartUsEast() throws Exception {
final S3ThresholdUploadService service = new S3ThresholdUploadService(session, new S3AccessControlListFeature(session), 5 * 1024L);
final Path container = new Path("test-us-east-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final String name = UUID.randomUUID().toString();
final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final byte[] random = RandomUtils.nextBytes(1000);
IOUtils.write(random, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(random.length);
status.setStorageClass(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY);
final BytecountStreamListener count = new BytecountStreamListener();
service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
count, status, new DisabledLoginCallback());
assertEquals(random.length, count.getSent());
assertTrue(status.isComplete());
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
final PathAttributes attributes = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test);
assertEquals(random.length, attributes.getSize());
assertEquals(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY, new S3StorageClassFeature(session, new S3AccessControlListFeature(session)).getClass(test));
final Map<String, String> metadata = new S3MetadataFeature(session, new S3AccessControlListFeature(session)).getMetadata(test);
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
} |
@VisibleForTesting
static Map<String, Object> forCodec(
Map<String, Object> codec, boolean replaceWithByteArrayCoder) {
String coderType = (String) codec.get(PropertyNames.OBJECT_TYPE_NAME);
// Handle well known coders.
if (LENGTH_PREFIX_CODER_TYPE.equals(coderType)) {
if (replaceWithByteArrayCoder) {
return CloudObjects.asCloudObject(
LENGTH_PREFIXED_BYTE_ARRAY_CODER, /*sdkComponents=*/ null);
}
return codec;
} else if (WELL_KNOWN_CODER_TYPES.contains(coderType)) {
// The runner knows about these types and can instantiate them so handle their
// component encodings.
Map<String, Object> prefixedCodec = new HashMap<>(codec);
// Recursively replace component encodings
if (codec.containsKey(PropertyNames.COMPONENT_ENCODINGS)) {
List<Map<String, Object>> prefixedComponents = new ArrayList<>();
for (Map<String, Object> component :
(Iterable<Map<String, Object>>) codec.get(PropertyNames.COMPONENT_ENCODINGS)) {
prefixedComponents.add(forCodec(component, replaceWithByteArrayCoder));
}
prefixedCodec.put(PropertyNames.COMPONENT_ENCODINGS, prefixedComponents);
}
return prefixedCodec;
}
// Wrap unknown coders with length prefix coder.
if (replaceWithByteArrayCoder) {
return CloudObjects.asCloudObject(LENGTH_PREFIXED_BYTE_ARRAY_CODER, /*sdkComponents=*/ null);
} else {
Map<String, Object> prefixedCodec = new HashMap<>();
prefixedCodec.put(PropertyNames.OBJECT_TYPE_NAME, LENGTH_PREFIX_CODER_TYPE);
prefixedCodec.put(PropertyNames.COMPONENT_ENCODINGS, ImmutableList.of(codec));
return prefixedCodec;
}
} | @Test
public void testLengthPrefixAndReplaceUnknownCoder() throws Exception {
Coder<WindowedValue<KV<String, Integer>>> windowedValueCoder =
WindowedValue.getFullCoder(
KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()), GlobalWindow.Coder.INSTANCE);
Map<String, Object> lengthPrefixedCoderCloudObject =
forCodec(CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null), true);
assertEqualsAsJson(
CloudObjects.asCloudObject(prefixedAndReplacedWindowedValueCoder, /*sdkComponents=*/ null),
lengthPrefixedCoderCloudObject);
} |
@Override
public void prepare() throws ServiceNotProvidedException {
coordinator = new KubernetesCoordinator(getManager(), config);
this.registerServiceImplementation(ClusterRegister.class, coordinator);
this.registerServiceImplementation(ClusterNodesQuery.class, coordinator);
this.registerServiceImplementation(ClusterCoordinator.class, coordinator);
} | @Test
public void prepare() {
provider.prepare();
} |
public static ExtensionInfo load(String className, ClassLoader classLoader) {
try (InputStream input = classLoader.getResourceAsStream(className.replace('.', '/') + ".class")) {
ExtensionInfo info = new ExtensionInfo(className);
new ClassReader(input).accept(new ExtensionVisitor(info), ClassReader.SKIP_DEBUG);
return info;
} catch (IOException e) {
log.error(e.getMessage(), e);
return null;
}
} | @Test
void loadShouldReturnNullWhenClassDoesNotExist() {
ExtensionInfo info = ExtensionInfo.load("non.existent.Class", this.getClass().getClassLoader());
assertNull(info);
} |
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
if (filteredOpenAPI == null) {
return filteredOpenAPI;
}
OpenAPI clone = new OpenAPI();
clone.info(filteredOpenAPI.getInfo());
clone.openapi(filteredOpenAPI.getOpenapi());
clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
clone.setExtensions(filteredOpenAPI.getExtensions());
clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
clone.setSecurity(filteredOpenAPI.getSecurity());
clone.setServers(filteredOpenAPI.getServers());
clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(filteredOpenAPI.getTags()));
final Set<String> allowedTags = new HashSet<>();
final Set<String> filteredTags = new HashSet<>();
Paths clonedPaths = new Paths();
if (filteredOpenAPI.getPaths() != null) {
for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clonedPaths.addPathItem(resourcePath, clonedPathItem);
}
}
}
clone.paths(clonedPaths);
}
filteredTags.removeAll(allowedTags);
final List<Tag> tags = clone.getTags();
if (tags != null && !filteredTags.isEmpty()) {
tags.removeIf(tag -> filteredTags.contains(tag.getName()));
if (clone.getTags().isEmpty()) {
clone.setTags(null);
}
}
if (filteredOpenAPI.getWebhooks() != null) {
for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clone.addWebhooks(resourcePath, clonedPathItem);
}
}
}
}
if (filteredOpenAPI.getComponents() != null) {
clone.components(new Components());
clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
}
if (filter.isRemovingUnreferencedDefinitions()) {
clone = removeBrokenReferenceDefinitions(clone);
}
return clone;
} | @Test
public void shouldRemoveBrokenNestedRefs() throws IOException {
final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH_3303);
openAPI.getPaths().get("/pet/{petId}").getGet().getResponses().getDefault().getHeaders().remove("X-Rate-Limit-Limit");
assertNotNull(openAPI.getComponents().getSchemas().get("PetHeader"));
final RemoveUnreferencedDefinitionsFilter remover = new RemoveUnreferencedDefinitionsFilter();
final OpenAPI filtered = new SpecFilter().filter(openAPI, remover, null, null, null);
assertNull(filtered.getComponents().getSchemas().get("PetHeader"));
assertNull(filtered.getComponents().getSchemas().get("Bar"));
assertNotNull(filtered.getComponents().getSchemas().get("Category"));
assertNotNull(filtered.getComponents().getSchemas().get("Pet"));
assertNotNull(filtered.getComponents().getSchemas().get("Foo"));
assertNotNull(filtered.getComponents().getSchemas().get("allOfChild"));
assertNotNull(filtered.getComponents().getSchemas().get("anyOfChild"));
assertNotNull(filtered.getComponents().getSchemas().get("oneOfChild"));
assertNotNull(filtered.getComponents().getSchemas().get("allOfparentA"));
assertNotNull(filtered.getComponents().getSchemas().get("allOfparentB"));
assertNotNull(filtered.getComponents().getSchemas().get("anyOfparentA"));
assertNotNull(filtered.getComponents().getSchemas().get("anyOfparentB"));
assertNotNull(filtered.getComponents().getSchemas().get("oneOfparentA"));
assertNotNull(filtered.getComponents().getSchemas().get("oneOfparentB"));
assertNotNull(filtered.getComponents().getSchemas().get("oneOfNestedParentA"));
assertNotNull(filtered.getComponents().getSchemas().get("oneOfNestedParentB"));
assertNotNull(filtered.getComponents().getSchemas().get("discriminatorParent"));
assertNotNull(filtered.getComponents().getSchemas().get("discriminatorMatchedChildA"));
assertNotNull(filtered.getComponents().getSchemas().get("discriminatorRefProperty"));
assertNotNull(filtered.getComponents().getSchemas().get("discriminatorParentRefProperty"));
assertNotNull(filtered.getComponents().getSchemas().get("discriminatorMatchedChildB"));
} |
@Override
public int deserializeKV(DataInputStream in, SizedWritable<?> key,
SizedWritable<?> value) throws IOException {
if (!in.hasUnReadData()) {
return 0;
}
key.length = in.readInt();
value.length = in.readInt();
keySerializer.deserialize(in, key.length, key.v);
valueSerializer.deserialize(in, value.length, value.v);
return key.length + value.length + KV_HEAD_LENGTH;
} | @Test
public void testDeserializer() throws IOException {
final DataInputStream in = Mockito.mock(DataInputStream.class);
Mockito.when(in.hasUnReadData()).thenReturn(true);
Assert.assertTrue(serializer.deserializeKV(in, key, value) > 0);
Mockito.verify(in, Mockito.times(4)).readInt();
Mockito.verify(in, Mockito.times(2)).readFully(any(byte[].class),
anyInt(), anyInt());
} |
public List<ListGroupsResponseData.ListedGroup> listGroups(
Set<String> statesFilter,
Set<String> typesFilter,
long committedOffset
) {
// Converts each state filter string to lower case for a case-insensitive comparison.
Set<String> caseInsensitiveFilterSet = statesFilter.stream()
.map(String::toLowerCase)
.map(String::trim)
.collect(Collectors.toSet());
// Converts each type filter string to a value in the GroupType enum while being case-insensitive.
Set<Group.GroupType> enumTypesFilter = typesFilter.stream()
.map(Group.GroupType::parse)
.collect(Collectors.toSet());
Predicate<Group> combinedFilter = group -> {
boolean stateCheck = statesFilter.isEmpty() || group.isInStates(caseInsensitiveFilterSet, committedOffset);
boolean typeCheck = enumTypesFilter.isEmpty() || enumTypesFilter.contains(group.type());
return stateCheck && typeCheck;
};
Stream<Group> groupStream = groups.values(committedOffset).stream();
return groupStream
.filter(combinedFilter)
.map(group -> group.asListedGroup(committedOffset))
.collect(Collectors.toList());
} | @Test
public void testListGroups() {
String consumerGroupId = "consumer-group-id";
String classicGroupId = "classic-group-id";
String shareGroupId = "share-group-id";
String memberId1 = Uuid.randomUuid().toString();
String fooTopicName = "foo";
MockPartitionAssignor assignor = new MockPartitionAssignor("range");
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(assignor))
.withShareGroupAssignor(assignor)
.withConsumerGroup(new ConsumerGroupBuilder(consumerGroupId, 10))
.build();
// Create one classic group record.
context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord(
classicGroupId,
new GroupMetadataValue()
.setMembers(Collections.emptyList())
.setGeneration(2)
.setLeader(null)
.setProtocolType("classic")
.setProtocol("range")
.setCurrentStateTimestamp(context.time.milliseconds()),
MetadataVersion.latestTesting()));
// Create one share group record.
context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(shareGroupId, 6));
context.commit();
ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false);
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(consumerGroupId, new ConsumerGroupMember.Builder(memberId1)
.setSubscribedTopicNames(Collections.singletonList(fooTopicName))
.build()));
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(consumerGroupId, 11));
// Test list group response without a group state or group type filter.
Map<String, ListGroupsResponseData.ListedGroup> actualAllGroupMap =
context.sendListGroups(Collections.emptyList(), Collections.emptyList()).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
Map<String, ListGroupsResponseData.ListedGroup> expectAllGroupMap =
Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(classicGroup.groupId())
.setProtocolType("classic")
.setGroupState(EMPTY.toString())
.setGroupType(Group.GroupType.CLASSIC.toString()),
new ListGroupsResponseData.ListedGroup()
.setGroupId(consumerGroupId)
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setGroupState(ConsumerGroup.ConsumerGroupState.EMPTY.toString())
.setGroupType(Group.GroupType.CONSUMER.toString()),
new ListGroupsResponseData.ListedGroup()
.setGroupId(shareGroupId)
.setProtocolType(ShareGroup.PROTOCOL_TYPE)
.setGroupState(ShareGroup.ShareGroupState.EMPTY.toString())
.setGroupType(Group.GroupType.SHARE.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
// List group with case-insensitive 'empty'.
actualAllGroupMap =
context.sendListGroups(Collections.singletonList("empty"), Collections.emptyList())
.stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
context.commit();
// Test list group response to check assigning state in the consumer group.
actualAllGroupMap = context.sendListGroups(Collections.singletonList("assigning"), Collections.emptyList()).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap =
Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(consumerGroupId)
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString())
.setGroupType(Group.GroupType.CONSUMER.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
// Test list group response with group state filter and no group type filter.
actualAllGroupMap = context.sendListGroups(Collections.singletonList("Empty"), Collections.emptyList()).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(classicGroup.groupId())
.setProtocolType("classic")
.setGroupState(EMPTY.toString())
.setGroupType(Group.GroupType.CLASSIC.toString()),
new ListGroupsResponseData.ListedGroup()
.setGroupId(shareGroupId)
.setProtocolType(ShareGroup.PROTOCOL_TYPE)
.setGroupState(ShareGroup.ShareGroupState.EMPTY.toString())
.setGroupType(Group.GroupType.SHARE.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
// Test list group response with no group state filter and with group type filter.
actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList(Group.GroupType.CLASSIC.toString())).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(classicGroup.groupId())
.setProtocolType("classic")
.setGroupState(EMPTY.toString())
.setGroupType(Group.GroupType.CLASSIC.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
// Test list group response with no group state filter and with group type filter in a different case.
actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Consumer")).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(consumerGroupId)
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString())
.setGroupType(Group.GroupType.CONSUMER.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Share")).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(shareGroupId)
.setProtocolType(ShareGroup.PROTOCOL_TYPE)
.setGroupState(ShareGroup.ShareGroupState.EMPTY.toString())
.setGroupType(Group.GroupType.SHARE.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
actualAllGroupMap = context.sendListGroups(Arrays.asList("empty", "Assigning"), Collections.emptyList()).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Stream.of(
new ListGroupsResponseData.ListedGroup()
.setGroupId(classicGroup.groupId())
.setProtocolType(Group.GroupType.CLASSIC.toString())
.setGroupState(EMPTY.toString())
.setGroupType(Group.GroupType.CLASSIC.toString()),
new ListGroupsResponseData.ListedGroup()
.setGroupId(consumerGroupId)
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString())
.setGroupType(Group.GroupType.CONSUMER.toString()),
new ListGroupsResponseData.ListedGroup()
.setGroupId(shareGroupId)
.setProtocolType(ShareGroup.PROTOCOL_TYPE)
.setGroupState(ShareGroup.ShareGroupState.EMPTY.toString())
.setGroupType(Group.GroupType.SHARE.toString())
).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
assertEquals(expectAllGroupMap, actualAllGroupMap);
// Test list group response with no group state filter and with an invalid group type filter.
actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Invalid")).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Collections.emptyMap();
assertEquals(expectAllGroupMap, actualAllGroupMap);
// Test list group response with an invalid group state filter and with no group type filter.
actualAllGroupMap = context.sendListGroups(Collections.singletonList("Invalid"), Collections.emptyList()).stream()
.collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
expectAllGroupMap = Collections.emptyMap();
assertEquals(expectAllGroupMap, actualAllGroupMap);
} |
Span handleStart(Req request, Span span) {
if (span.isNoop()) return span;
try {
parseRequest(request, span);
} catch (Throwable t) {
propagateIfFatal(t);
Platform.get().log("error parsing request {0}", request, t);
} finally {
// all of the above parsing happened before a timestamp on the span
long timestamp = request.startTimestamp();
if (timestamp == 0L) {
span.start();
} else {
span.start(timestamp);
}
}
return span;
} | @Test void handleStart_nothingOnNoop_success() {
when(span.isNoop()).thenReturn(true);
handler.handleStart(request, span);
verify(span, never()).start();
} |
@Override
public String version() {
return AppInfoParser.getVersion();
} | @Test
public void testRegexRouterRetrievesVersionFromAppInfoParser() {
final RegexRouter<SinkRecord> router = new RegexRouter<>();
assertEquals(AppInfoParser.getVersion(), router.version());
} |
public PlainAccessResource buildPlainAccessResource(PlainAccessConfig plainAccessConfig) throws AclException {
checkPlainAccessConfig(plainAccessConfig);
return PlainAccessResource.build(plainAccessConfig, remoteAddressStrategyFactory.
getRemoteAddressStrategy(plainAccessConfig.getWhiteRemoteAddress()));
} | @Test(expected = AclException.class)
public void accountNullTest() {
plainAccessConfig.setAccessKey(null);
plainPermissionManager.buildPlainAccessResource(plainAccessConfig);
} |
@ConstantFunction(name = "bitand", argTypes = {INT, INT}, returnType = INT)
public static ConstantOperator bitandInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createInt(first.getInt() & second.getInt());
} | @Test
public void bitandInt() {
assertEquals(10, ScalarOperatorFunctions.bitandInt(O_INT_10, O_INT_10).getInt());
} |
@Override
public boolean isEmpty() {
return pseudoHeaders.length == 0 && otherHeaders.length == 0;
} | @Test
public void testIsEmpty() {
Http2Headers headers = ReadOnlyHttp2Headers.trailers(false);
assertTrue(headers.isEmpty());
} |
@Override
protected void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception {
if (ApplicationProtocolNames.HTTP_2.equals(protocol)) {
ctx.channel().attr(PROTOCOL_NAME).set(PROTOCOL_HTTP_2);
configureHttp2(ctx.pipeline());
return;
}
if (ApplicationProtocolNames.HTTP_1_1.equals(protocol)) {
ctx.channel().attr(PROTOCOL_NAME).set(PROTOCOL_HTTP_1_1);
configureHttp1(ctx.pipeline());
return;
}
throw new IllegalStateException("unknown protocol: " + protocol);
} | @Test
void protocolCloseHandlerAddedByDefault() throws Exception {
EmbeddedChannel channel = new EmbeddedChannel();
ChannelConfig channelConfig = new ChannelConfig();
channelConfig.add(new ChannelConfigValue<>(CommonChannelConfigKeys.maxHttp2HeaderListSize, 32768));
Http2OrHttpHandler http2OrHttpHandler =
new Http2OrHttpHandler(new ChannelInboundHandlerAdapter(), channelConfig, cp -> {});
channel.pipeline().addLast("codec_placeholder", new DummyChannelHandler());
channel.pipeline().addLast(Http2OrHttpHandler.class.getSimpleName(), http2OrHttpHandler);
http2OrHttpHandler.configurePipeline(channel.pipeline().lastContext(), ApplicationProtocolNames.HTTP_2);
assertNotNull(channel.pipeline().context(Http2ConnectionErrorHandler.class));
} |
boolean hasReachedDeliveryTimeout(long deliveryTimeoutMs, long now) {
return deliveryTimeoutMs <= now - this.createdMs;
} | @Test
public void testBatchExpiration() {
long deliveryTimeoutMs = 10240;
ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
// Set `now` to 2ms before the create time.
assertFalse(batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now - 2));
// Set `now` to exactly deliveryTimeoutMs after the create time.
assertTrue(batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now + deliveryTimeoutMs));
} |
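A worked check of the boundary in hasReachedDeliveryTimeout, using illustrative values; the predicate first holds exactly at createdMs + deliveryTimeoutMs:
// Sketch only: createdMs and timeoutMs are made-up values.
long createdMs = 1_000L;
long timeoutMs = 10_240L;
// One millisecond early: 10_240 <= 10_239 is false.
assert !(timeoutMs <= (createdMs + timeoutMs - 1) - createdMs);
// Exactly at the deadline: 10_240 <= 10_240 is true.
assert timeoutMs <= (createdMs + timeoutMs) - createdMs;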
public static String intLongToString(Long number) {
return intLongToString(number, DEC_RADIX);
} | @Test
public void numberToString_Test() {
Assertions.assertEquals("00111010", TbUtils.intLongToString(58L, MIN_RADIX));
Assertions.assertEquals("0000000010011110", TbUtils.intLongToString(158L, MIN_RADIX));
Assertions.assertEquals("00000000000000100000001000000001", TbUtils.intLongToString(131585L, MIN_RADIX));
Assertions.assertEquals("0111111111111111111111111111111111111111111111111111111111111111", TbUtils.intLongToString(Long.MAX_VALUE, MIN_RADIX));
Assertions.assertEquals("1000000000000000000000000000000000000000000000000000000000000001", TbUtils.intLongToString(-Long.MAX_VALUE, MIN_RADIX));
Assertions.assertEquals("1111111111111111111111111111111111111111111111111111111110011010", TbUtils.intLongToString(-102L, MIN_RADIX));
Assertions.assertEquals("1111111111111111111111111111111111111111111111111100110010011010", TbUtils.intLongToString(-13158L, MIN_RADIX));
Assertions.assertEquals("777777777777777777777", TbUtils.intLongToString(Long.MAX_VALUE, 8));
Assertions.assertEquals("1000000000000000000000", TbUtils.intLongToString(Long.MIN_VALUE, 8));
Assertions.assertEquals("9223372036854775807", TbUtils.intLongToString(Long.MAX_VALUE));
Assertions.assertEquals("-9223372036854775808", TbUtils.intLongToString(Long.MIN_VALUE));
Assertions.assertEquals("3366", TbUtils.intLongToString(13158L, 16));
Assertions.assertEquals("FFCC9A", TbUtils.intLongToString(-13158L, 16));
Assertions.assertEquals("0xFFCC9A", TbUtils.intLongToString(-13158L, 16, true, true));
Assertions.assertEquals("0x0400", TbUtils.intLongToString(1024L, 16, true, true));
Assertions.assertNotEquals("400", TbUtils.intLongToString(1024L, 16));
Assertions.assertEquals("0xFFFC00", TbUtils.intLongToString(-1024L, 16, true, true));
Assertions.assertNotEquals("0xFC00", TbUtils.intLongToString(-1024L, 16, true, true));
Assertions.assertEquals("hazelnut", TbUtils.intLongToString(1356099454469L, MAX_RADIX));
} |
@Override
public Output run(RunContext runContext) throws Exception {
URI from = new URI(runContext.render(this.from));
final PebbleExpressionPredicate predicate = getExpressionPredication(runContext);
final Path path = runContext.workingDir().createTempFile(".ion");
long processedItemsTotal = 0L;
long droppedItemsTotal = 0L;
try (final BufferedWriter writer = Files.newBufferedWriter(path);
final BufferedReader reader = newBufferedReader(runContext, from)) {
String item;
while ((item = reader.readLine()) != null) {
IllegalVariableEvaluationException exception = null;
Boolean match = null;
try {
match = predicate.apply(item);
} catch (IllegalVariableEvaluationException e) {
exception = e;
}
FilterType action = this.filterType;
if (match == null) {
switch (errorOrNullBehavior) {
case FAIL -> {
if (exception != null) {
throw exception;
} else {
throw new IllegalVariableEvaluationException(String.format(
"Expression `%s` return `null` on item `%s`",
filterCondition,
item
));
}
}
case INCLUDE -> action = FilterType.INCLUDE;
case EXCLUDE -> action = FilterType.EXCLUDE;
}
match = true;
}
if (!match) {
action = action.reverse();
}
switch (action) {
case INCLUDE -> {
writer.write(item);
writer.newLine();
}
case EXCLUDE -> droppedItemsTotal++;
}
processedItemsTotal++;
}
}
URI uri = runContext.storage().putFile(path.toFile());
return Output.builder()
.uri(uri)
.processedItemsTotal(processedItemsTotal)
.droppedItemsTotal(droppedItemsTotal)
.build();
} | @Test
void shouldFilterGivenValidBooleanExpressionForInclude() throws Exception {
// Given
RunContext runContext = runContextFactory.of();
FilterItems task = FilterItems
.builder()
.from(generateKeyValueFile(TEST_VALID_ITEMS, runContext).toString())
.filterCondition(" {{ value % 2 == 0 }} ")
.filterType(FilterItems.FilterType.INCLUDE)
.build();
// When
FilterItems.Output output = task.run(runContext);
// Then
Assertions.assertNotNull(output);
Assertions.assertNotNull(output.getUri());
Assertions.assertEquals(2, output.getDroppedItemsTotal());
Assertions.assertEquals(4, output.getProcessedItemsTotal());
assertFile(runContext, output, List.of(new KeyValue("k2", 2), new KeyValue("k4", 4)), KeyValue.class);
} |
public long getCrc() {
return crc;
} | @Test
public void getCrcOutputZero() {
// Arrange
final LogHeader objectUnderTest = new LogHeader(0);
// Act
final long actual = objectUnderTest.getCrc();
// Assert result
Assert.assertEquals(0L, actual);
} |
@Override
public List<Document> get() {
try (var input = markdownResource.getInputStream()) {
Node node = parser.parseReader(new InputStreamReader(input));
DocumentVisitor documentVisitor = new DocumentVisitor(config);
node.accept(documentVisitor);
return documentVisitor.getDocuments();
}
catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
void testCodeWhenCodeBlockShouldNotBeSeparatedDocument() {
MarkdownDocumentReaderConfig config = MarkdownDocumentReaderConfig.builder()
.withHorizontalRuleCreateDocument(true)
.withIncludeCodeBlock(true)
.build();
MarkdownDocumentReader reader = new MarkdownDocumentReader("classpath:/code.md", config);
List<Document> documents = reader.get();
assertThat(documents).satisfiesExactly(document -> {
assertThat(document.getMetadata()).isEqualTo(Map.of("lang", "java", "category", "code_block"));
assertThat(document.getContent()).startsWith("This is a Java sample application: package com.example.demo")
.contains("SpringApplication.run(DemoApplication.class, args);");
}, document -> {
assertThat(document.getMetadata()).isEqualTo(Map.of("category", "code_inline"));
assertThat(document.getContent()).isEqualTo(
"Markdown also provides the possibility to use inline code formatting throughout the entire sentence.");
}, document -> {
assertThat(document.getMetadata()).isEqualTo(Map.of("lang", "", "category", "code_block"));
assertThat(document.getContent()).isEqualTo(
"Another possibility is to set block code without specific highlighting: ./mvnw spring-javaformat:apply\n");
});
} |
public static Object get(final ConvertedMap data, final FieldReference field) {
final Object target = findParent(data, field);
return target == null ? null : fetch(target, field.getKey());
} | @Test
public void testDeepListGet() throws Exception {
Map<Serializable, Object> data = new HashMap<>();
List<String> inner = new ArrayList<>();
data.put("foo", inner);
inner.add("bar");
String reference = "[foo][0]";
assertEquals(
RubyUtil.RUBY.newString("bar"), get(ConvertedMap.newFromMap(data), reference)
);
} |
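A companion sketch for nested maps, assuming the same test helper overload that accepts a field-reference string (the keys are illustrative):
Map<Serializable, Object> outer = new HashMap<>();
outer.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz")));
// findParent walks "[foo]", then fetch pulls the "bar" key from the inner map.
assertEquals(RubyUtil.RUBY.newString("baz"),
    get(ConvertedMap.newFromMap(outer), "[foo][bar]"));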
public void removeFromFirstWhen(final Predicate<T> predicate) {
Segment<T> firstSeg = getFirst();
while (true) {
if (firstSeg == null) {
this.firstOffset = this.size = 0;
return;
}
int removed = firstSeg.removeFromFirstWhen(predicate);
if (removed == 0) {
break;
}
this.size -= removed;
this.firstOffset = firstSeg.offset;
if (firstSeg.isEmpty()) {
RecycleUtil.recycle(this.segments.pollFirst());
firstSeg = getFirst();
this.firstOffset = 0;
}
}
} | @Test
public void testRemoveFromFirstWhen() {
fillList();
this.list.removeFromFirstWhen(x -> x < 200);
assertEquals(800, this.list.size());
assertEquals(200, (int) this.list.get(0));
for (int i = 0; i < 800; i++) {
assertEquals(200 + i, (int) this.list.get(i));
}
this.list.removeFromFirstWhen(x -> x < 500);
assertEquals(500, this.list.size());
for (int i = 0; i < 500; i++) {
assertEquals(500 + i, (int) this.list.get(i));
}
this.list.removeFromFirstWhen(x -> x < 1000);
assertTrue(this.list.isEmpty());
assertEquals(0, this.list.segmentSize());
fillList();
assertFilledList();
} |
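To make the head-only semantics explicit, a self-contained analogy on a plain deque (not the segment-recycling implementation above):
// Remove from the front while the predicate matches; stop at the first miss.
Deque<Integer> deque = new ArrayDeque<>(Arrays.asList(1, 2, 3, 10, 2));
while (!deque.isEmpty() && deque.peekFirst() < 5) {
    deque.pollFirst();
}
// deque == [10, 2]: the trailing 2 survives because removal never skips ahead.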
@Override
public void deleteProject(Long id) {
// Verify the project exists
validateProjectExists(id);
// Delete it
goViewProjectMapper.deleteById(id);
} | @Test
public void testDeleteProject_success() {
// Mock data
GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class);
goViewProjectMapper.insert(dbGoViewProject); // @Sql: insert an existing record first
// Prepare parameters
Long id = dbGoViewProject.getId();
// Invoke
goViewProjectService.deleteProject(id);
// Verify the record no longer exists
assertNull(goViewProjectMapper.selectById(id));
} |
public static FilterPredicate rewrite(FilterPredicate pred) {
Objects.requireNonNull(pred, "pred cannot be null");
return pred.accept(INSTANCE);
} | @Test
public void testNested() {
Contains<Integer> contains1 = contains(eq(intColumn, 1));
Contains<Integer> contains2 = contains(eq(intColumn, 2));
Contains<Integer> contains3 = contains(eq(intColumn, 3));
Contains<Integer> contains4 = contains(eq(intColumn, 4));
assertEquals(contains1.and(contains2.or(contains3)), rewrite(and(contains1, or(contains2, contains3))));
assertEquals(contains1.and(contains2).or(contains3), rewrite(or(and(contains1, contains2), contains3)));
assertEquals(
contains1.and(contains2).and(contains2.or(contains3)),
rewrite(and(and(contains1, contains2), or(contains2, contains3))));
assertEquals(
contains1.and(contains2).or(contains3.or(contains4)),
rewrite(or(and(contains1, contains2), or(contains3, contains4))));
} |
@NonNull
public static Permutor<FeedItem> getPermutor(@NonNull SortOrder sortOrder) {
Comparator<FeedItem> comparator = null;
Permutor<FeedItem> permutor = null;
switch (sortOrder) {
case EPISODE_TITLE_A_Z:
comparator = (f1, f2) -> itemTitle(f1).compareTo(itemTitle(f2));
break;
case EPISODE_TITLE_Z_A:
comparator = (f1, f2) -> itemTitle(f2).compareTo(itemTitle(f1));
break;
case DATE_OLD_NEW:
comparator = (f1, f2) -> pubDate(f1).compareTo(pubDate(f2));
break;
case DATE_NEW_OLD:
comparator = (f1, f2) -> pubDate(f2).compareTo(pubDate(f1));
break;
case DURATION_SHORT_LONG:
comparator = (f1, f2) -> Integer.compare(duration(f1), duration(f2));
break;
case DURATION_LONG_SHORT:
comparator = (f1, f2) -> Integer.compare(duration(f2), duration(f1));
break;
case EPISODE_FILENAME_A_Z:
comparator = (f1, f2) -> itemLink(f1).compareTo(itemLink(f2));
break;
case EPISODE_FILENAME_Z_A:
comparator = (f1, f2) -> itemLink(f2).compareTo(itemLink(f1));
break;
case FEED_TITLE_A_Z:
comparator = (f1, f2) -> feedTitle(f1).compareTo(feedTitle(f2));
break;
case FEED_TITLE_Z_A:
comparator = (f1, f2) -> feedTitle(f2).compareTo(feedTitle(f1));
break;
case RANDOM:
permutor = Collections::shuffle;
break;
case SMART_SHUFFLE_OLD_NEW:
permutor = (queue) -> smartShuffle(queue, true);
break;
case SMART_SHUFFLE_NEW_OLD:
permutor = (queue) -> smartShuffle(queue, false);
break;
case SIZE_SMALL_LARGE:
comparator = (f1, f2) -> Long.compare(size(f1), size(f2));
break;
case SIZE_LARGE_SMALL:
comparator = (f1, f2) -> Long.compare(size(f2), size(f1));
break;
case COMPLETION_DATE_NEW_OLD:
comparator = (f1, f2) -> f2.getMedia().getPlaybackCompletionDate()
.compareTo(f1.getMedia().getPlaybackCompletionDate());
break;
default:
throw new IllegalArgumentException("Permutor not implemented");
}
if (comparator != null) {
final Comparator<FeedItem> comparator2 = comparator;
permutor = (queue) -> Collections.sort(queue, comparator2);
}
return permutor;
} | @Test
public void testPermutorForRule_FEED_TITLE_DESC() {
Permutor<FeedItem> permutor = FeedItemPermutors.getPermutor(SortOrder.FEED_TITLE_Z_A);
List<FeedItem> itemList = getTestList();
assertTrue(checkIdOrder(itemList, 1, 3, 2)); // before sorting
permutor.reorder(itemList);
assertTrue(checkIdOrder(itemList, 3, 2, 1)); // after sorting
} |
@Override
public long getTotalSpace() {
throw new UnsupportedOperationException("Not implemented");
} | @Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetTotalSpace() {
fs.getFile("nonsuch.txt").getTotalSpace();
} |
@Override
public void write(final OutputStream out) {
// CHECKSTYLE_RULES.ON: CyclomaticComplexity
try {
out.write("[".getBytes(StandardCharsets.UTF_8));
write(out, buildHeader());
final BlockingRowQueue rowQueue = queryMetadata.getRowQueue();
while (!connectionClosed && queryMetadata.isRunning() && !limitReached && !complete) {
final KeyValueMetadata<List<?>, GenericRow> row = rowQueue.poll(
disconnectCheckInterval,
TimeUnit.MILLISECONDS
);
if (row != null) {
write(out, buildRow(row));
} else {
// If no new rows have been written, the user may have terminated the connection without
// us knowing. Check by trying to write a single newline.
out.write("\n".getBytes(StandardCharsets.UTF_8));
out.flush();
}
drainAndThrowOnError(out);
}
if (connectionClosed) {
return;
}
drain(out);
if (limitReached) {
objectMapper.writeValue(out, StreamedRow.finalMessage("Limit Reached"));
} else if (complete) {
objectMapper.writeValue(out, StreamedRow.finalMessage("Query Completed"));
}
out.write("]\n".getBytes(StandardCharsets.UTF_8));
out.flush();
} catch (final EOFException exception) {
// The user has terminated the connection; we can stop writing
log.warn("Query terminated due to exception:" + exception.toString());
} catch (final InterruptedException exception) {
// The most likely cause of this is the server shutting down. Should just try to close
// gracefully, without writing any more to the connection stream.
log.warn("Interrupted while writing to connection stream");
} catch (final Exception exception) {
log.error("Exception occurred while writing to connection stream: ", exception);
outputException(out, exception);
} finally {
close();
}
} | @Test
public void shouldExitAndDrainIfQueryComplete() {
// Given:
doAnswer(streamRows("Row1", "Row2", "Row3"))
.when(rowQueue).drainTo(any());
writer = new QueryStreamWriter(
queryMetadata,
1000,
objectMapper,
new CompletableFuture<>()
);
out = new ByteArrayOutputStream();
verify(queryMetadata).setCompletionHandler(completionHandlerCapture.capture());
completionHandler = completionHandlerCapture.getValue();
completionHandler.complete();
// When:
writer.write(out);
// Then:
final List<String> lines = getOutput(out);
assertThat(lines, is(Arrays.asList(
"[{\"header\":{\"queryId\":\"id\",\"schema\":\"`col1` STRING\"}},",
"{\"row\":{\"columns\":[\"Row1\"]}},",
"{\"row\":{\"columns\":[\"Row2\"]}},",
"{\"row\":{\"columns\":[\"Row3\"]}},",
"{\"finalMessage\":\"Query Completed\"}]"
)));
} |
public static boolean stopTransferLeadership(final ThreadId id) {
final Replicator r = (Replicator) id.lock();
if (r == null) {
return false;
}
r.timeoutNowIndex = 0;
id.unlock();
return true;
} | @Test
public void testStopTransferLeadership() {
testTransferLeadership();
Replicator.stopTransferLeadership(this.id);
final Replicator r = getReplicator();
this.id.unlock();
assertEquals(0, r.getTimeoutNowIndex());
assertNull(r.getTimeoutNowInFly());
} |
Object getCellValue(Cell cell, Schema.FieldType type) {
ByteString cellValue = cell.getValue();
int valueSize = cellValue.size();
switch (type.getTypeName()) {
case BOOLEAN:
checkArgument(valueSize == 1, message("Boolean", 1));
return cellValue.toByteArray()[0] != 0;
case BYTE:
checkArgument(valueSize == 1, message("Byte", 1));
return cellValue.toByteArray()[0];
case INT16:
checkArgument(valueSize == 2, message("Int16", 2));
return Shorts.fromByteArray(cellValue.toByteArray());
case INT32:
checkArgument(valueSize == 4, message("Int32", 4));
return Ints.fromByteArray(cellValue.toByteArray());
case INT64:
checkArgument(valueSize == 8, message("Int64", 8));
return Longs.fromByteArray(cellValue.toByteArray());
case FLOAT:
checkArgument(valueSize == 4, message("Float", 4));
return Float.intBitsToFloat(Ints.fromByteArray(cellValue.toByteArray()));
case DOUBLE:
checkArgument(valueSize == 8, message("Double", 8));
return Double.longBitsToDouble(Longs.fromByteArray(cellValue.toByteArray()));
case DATETIME:
return DateTime.parse(cellValue.toStringUtf8());
case STRING:
return cellValue.toStringUtf8();
case BYTES:
return cellValue.toByteArray();
case LOGICAL_TYPE:
String identifier = checkArgumentNotNull(type.getLogicalType()).getIdentifier();
throw new IllegalStateException("Unsupported logical type: " + identifier);
default:
throw new IllegalArgumentException(
String.format("Unsupported cell value type '%s'.", type.getTypeName()));
}
} | @Test
public void shouldParseBooleanTypeFalse() {
byte[] value = new byte[] {0};
assertEquals(false, PARSER.getCellValue(cell(value), BOOLEAN));
} |
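The fixed-width checks above mirror Guava's big-endian byte helpers; a minimal sketch of the INT32 round trip the parser expects:
// An INT32 cell must hold exactly 4 big-endian bytes.
byte[] encoded = Ints.toByteArray(42);     // {0, 0, 0, 42}
int decoded = Ints.fromByteArray(encoded); // 42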
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldReturnValuesUpperBound() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getIteratorResult());
// When:
final KsMaterializedQueryResult<Row> result = table.get(PARTITION, null, A_KEY2);
// Then:
Iterator<Row> rowIterator = result.getRowIterator();
assertThat(rowIterator.hasNext(), is(true));
assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY, ROW1, TIME1)));
assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY2, ROW2, TIME2)));
assertThat(rowIterator.hasNext(), is(false));
assertThat(result.getPosition(), not(Optional.empty()));
assertThat(result.getPosition().get(), is(POSITION));
} |
public void deletePartitionMetadataTable() {
List<String> ddl = new ArrayList<>();
if (this.isPostgres()) {
ddl.add("DROP INDEX \"" + CREATED_AT_START_TIMESTAMP_INDEX + "\"");
ddl.add("DROP INDEX \"" + WATERMARK_INDEX + "\"");
ddl.add("DROP TABLE \"" + tableName + "\"");
} else {
ddl.add("DROP INDEX " + CREATED_AT_START_TIMESTAMP_INDEX);
ddl.add("DROP INDEX " + WATERMARK_INDEX);
ddl.add("DROP TABLE " + tableName);
}
OperationFuture<Void, UpdateDatabaseDdlMetadata> op =
databaseAdminClient.updateDatabaseDdl(instanceId, databaseId, ddl, null);
try {
// Initiate the request which returns an OperationFuture.
op.get(TIMEOUT_MINUTES, TimeUnit.MINUTES);
} catch (ExecutionException | TimeoutException e) {
// If the operation failed or timed out during execution, expose the cause.
if (e.getCause() != null) {
throw (SpannerException) e.getCause();
} else {
throw SpannerExceptionFactory.asSpannerException(e);
}
} catch (InterruptedException e) {
// Throw when a thread is waiting, sleeping, or otherwise occupied,
// and the thread is interrupted, either before or during the activity.
throw SpannerExceptionFactory.propagateInterrupt(e);
}
} | @Test
public void testDeletePartitionMetadataTable() throws Exception {
when(op.get(TIMEOUT_MINUTES, TimeUnit.MINUTES)).thenReturn(null);
partitionMetadataAdminDao.deletePartitionMetadataTable();
verify(databaseAdminClient, times(1))
.updateDatabaseDdl(eq(INSTANCE_ID), eq(DATABASE_ID), statements.capture(), isNull());
assertEquals(3, ((Collection<?>) statements.getValue()).size());
Iterator<String> it = statements.getValue().iterator();
assertTrue(it.next().contains("DROP INDEX"));
assertTrue(it.next().contains("DROP INDEX"));
assertTrue(it.next().contains("DROP TABLE"));
} |
public SampleMetadata getMetadata() {
return metadata;
} | @Test
public void testGetMetadata() {
try (CsvSampleReader reader = new CsvSampleReader(tempCsv, metadata)) {
assertThat(reader.getMetadata().toString(),
CoreMatchers.is(metadata.toString()));
}
} |
public static <C> Collection<Data> objectToDataCollection(Collection<C> collection,
SerializationService serializationService) {
List<Data> dataCollection = new ArrayList<>(collection.size());
objectToDataCollection(collection, dataCollection, serializationService, null);
return dataCollection;
} | @Test
public void testObjectToDataCollection_size() {
SerializationService serializationService = new DefaultSerializationServiceBuilder().build();
Collection<Object> list = new ArrayList<>();
list.add(1);
list.add("foo");
Collection<Data> dataCollection = objectToDataCollection(list, serializationService);
assertEquals(list.size(), dataCollection.size());
} |
@Override
public boolean isValidHeader(final int readableBytes) {
return readableBytes >= (startupPhase ? 0 : MESSAGE_TYPE_LENGTH) + PAYLOAD_LENGTH;
} | @Test
void assertIsValidHeader() {
assertTrue(new OpenGaussPacketCodecEngine().isValidHeader(50));
} |
public static String getType(String fileStreamHexHead) {
if(StrUtil.isBlank(fileStreamHexHead)){
return null;
}
if (MapUtil.isNotEmpty(FILE_TYPE_MAP)) {
for (final Entry<String, String> fileTypeEntry : FILE_TYPE_MAP.entrySet()) {
if (StrUtil.startWithIgnoreCase(fileStreamHexHead, fileTypeEntry.getKey())) {
return fileTypeEntry.getValue();
}
}
}
byte[] bytes = HexUtil.decodeHex(fileStreamHexHead);
return FileMagicNumber.getMagicNumber(bytes).getExtension();
} | @Test
@Disabled
public void issue3024Test() {
String x = FileTypeUtil.getType(FileUtil.getInputStream("d:/test/TEST_WPS_DOC.doc"),true);
System.out.println(x);
} |
public static long parseLongAscii(final CharSequence cs, final int index, final int length)
{
if (length <= 0)
{
throw new AsciiNumberFormatException("empty string: index=" + index + " length=" + length);
}
final boolean negative = MINUS_SIGN == cs.charAt(index);
int i = index;
if (negative)
{
i++;
if (1 == length)
{
throwParseLongError(cs, index, length);
}
}
final int end = index + length;
if (end - i < LONG_MAX_DIGITS)
{
final long tally = parsePositiveLongAscii(cs, index, length, i, end);
return negative ? -tally : tally;
}
else if (negative)
{
return -parseLongAsciiOverflowCheck(cs, index, length, LONG_MIN_VALUE_DIGITS, i, end);
}
else
{
return parseLongAsciiOverflowCheck(cs, index, length, LONG_MAX_VALUE_DIGITS, i, end);
}
} | @Test
void shouldThrowExceptionWhenParsingLongWhichCanOverFlow()
{
final String maxValuePlusOneDigit = Long.MAX_VALUE + "1";
assertThrows(AsciiNumberFormatException.class,
() -> parseLongAscii(maxValuePlusOneDigit, 0, maxValuePlusOneDigit.length()),
maxValuePlusOneDigit);
final String maxValuePlusOne = "9223372036854775808";
assertThrows(AsciiNumberFormatException.class,
() -> parseLongAscii(maxValuePlusOne, 0, maxValuePlusOne.length()),
maxValuePlusOne);
final String minValuePlusOneDigit = Long.MIN_VALUE + "1";
assertThrows(AsciiNumberFormatException.class,
() -> parseLongAscii(minValuePlusOneDigit, 0, minValuePlusOneDigit.length()),
minValuePlusOneDigit);
final String minValueMinusOne = "-9223372036854775809";
assertThrows(AsciiNumberFormatException.class,
() -> parseLongAscii(minValueMinusOne, 0, minValueMinusOne.length()),
minValueMinusOne);
} |
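A usage sketch for the index/length form, parsing a signed field embedded in a larger buffer (the buffer contents are illustrative):
// "-123" starts at index 6 and spans 4 characters.
String buffer = "price=-123;";
long price = parseLongAscii(buffer, 6, 4); // -123L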
public static String calculateTypeName(CompilationUnit compilationUnit, FullyQualifiedJavaType fqjt) {
if (fqjt.isArray()) {
// if array, then calculate the name of the base (non-array) type
// then add the array indicators back in
String fqn = fqjt.getFullyQualifiedName();
String typeName = calculateTypeName(compilationUnit,
new FullyQualifiedJavaType(fqn.substring(0, fqn.indexOf('['))));
return typeName + fqn.substring(fqn.indexOf('['));
}
if (!fqjt.getTypeArguments().isEmpty()) {
return calculateParameterizedTypeName(compilationUnit, fqjt);
}
if (compilationUnit == null
|| typeDoesNotRequireImport(fqjt)
|| typeIsInSamePackage(compilationUnit, fqjt)
|| typeIsAlreadyImported(compilationUnit, fqjt)) {
return fqjt.getShortName();
} else {
return fqjt.getFullyQualifiedName();
}
} | @Test
void testGenericTypeWithWildCardSomeImported() {
Interface interfaze = new Interface(new FullyQualifiedJavaType("com.foo.UserMapper"));
interfaze.addImportedType(new FullyQualifiedJavaType("java.util.Map"));
interfaze.addImportedType(new FullyQualifiedJavaType("java.util.List"));
interfaze.addImportedType(new FullyQualifiedJavaType("java.math.BigDecimal"));
FullyQualifiedJavaType fqjt = new FullyQualifiedJavaType("java.util.Map<java.math.BigDecimal, java.util.List<? super com.beeant.dto.User>>");
assertEquals("Map<BigDecimal, List<? super com.beeant.dto.User>>",
JavaDomUtils.calculateTypeName(interfaze, fqjt));
} |
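// Sketch of the import-sensitive behaviour (types chosen for illustration):
// an imported type collapses to its short name, an unimported one keeps its FQN.
Interface iface = new Interface(new FullyQualifiedJavaType("com.foo.BarMapper"));
iface.addImportedType(new FullyQualifiedJavaType("java.util.List"));
String shortName = JavaDomUtils.calculateTypeName(iface,
        new FullyQualifiedJavaType("java.util.List"));   // "List"
String longName = JavaDomUtils.calculateTypeName(iface,
        new FullyQualifiedJavaType("java.util.Map"));    // "java.util.Map"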
public static Subject.Factory<Re2jStringSubject, String> re2jString() {
return Re2jStringSubject.FACTORY;
} | @Test
public void containsMatch_pattern_succeeds() {
assertAbout(re2jString()).that("this is a hello world").containsMatch(PATTERN);
} |
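// Sketch: the factory plugs into Truth's assertAbout(...) entry point.
// PATTERN is assumed to be a compiled com.google.re2j.Pattern, e.g.
// Pattern.compile("hello world"); a non-matching input would fail the assertion.
assertAbout(re2jString()).that("another hello world here").containsMatch(PATTERN);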
public DataSchemaParser.ParseResult parseSources(String[] rawSources) throws IOException
{
Set<String> fileExtensions = _parserByFileExtension.keySet();
Map<String, List<String>> byExtension = new HashMap<>(fileExtensions.size());
for (String fileExtension : fileExtensions)
{
byExtension.put(fileExtension, new ArrayList<>());
}
String[] sortedSources = Arrays.copyOf(rawSources, rawSources.length);
Arrays.sort(sortedSources);
// Extract all schema files from the given source paths and group by extension (JARs are handled specially)
for (String source : sortedSources)
{
final File sourceFile = new File(source);
if (sourceFile.exists())
{
if (sourceFile.isDirectory())
{
// Source path is a directory, so recursively find all schema files contained therein
final FileExtensionFilter filter = new FileExtensionFilter(fileExtensions);
final List<File> sourceFilesInDirectory = FileUtil.listFiles(sourceFile, filter);
// Add each schema to the corresponding extension's source list
for (File f : sourceFilesInDirectory)
{
String ext = FilenameUtils.getExtension(f.getName());
List<String> filesForExtension = byExtension.get(ext);
if (filesForExtension != null)
{
filesForExtension.add(f.getAbsolutePath());
}
}
}
else if (sourceFile.getName().endsWith(".jar"))
{
// Source path is a JAR, so add it to each extension's source list.
// The file-based parser for each extension will extract the JAR and process only files matching the extension
byExtension.values().forEach(files -> files.add(sourceFile.getAbsolutePath()));
}
else
{
// Source path is a non-JAR file, so add it to the corresponding extension's source list
String ext = FilenameUtils.getExtension(sourceFile.getName());
List<String> filesForExtension = byExtension.get(ext);
if (filesForExtension != null)
{
filesForExtension.add(sourceFile.getAbsolutePath());
}
}
}
}
// Parse all schema files and JARs using the appropriate file format parser
final ParseResult result = new ParseResult();
for (Map.Entry<String, List<String>> entry : byExtension.entrySet())
{
String ext = entry.getKey();
List<String> files = entry.getValue();
_parserByFileExtension.get(ext).parseSources(files.toArray(new String[files.size()]), result);
}
return result;
} | @Test(dataProvider = "entityRelationshipInputFiles")
public void testSchemaFilesInExtensionPathInJar(String[] files, String[] expectedExtensions) throws Exception
{
String tempDirectoryPath = _tempDir.getAbsolutePath();
String jarFile = tempDirectoryPath + FS + "test.jar";
String schemaDir = TEST_RESOURCES_DIR + FS + "extensionSchemas";
Map<String, String> entryToFileMap = Arrays.stream(files).collect(Collectors.toMap(
filename -> schemaDir + FS + filename,
filename -> filename));
createTempJarFile(entryToFileMap, jarFile);
List<SchemaDirectory> resolverDirectories = Arrays.asList(
SchemaDirectoryName.EXTENSIONS, SchemaDirectoryName.PEGASUS);
List<SchemaDirectory> sourceDirectories = Collections.singletonList(SchemaDirectoryName.EXTENSIONS);
DataSchemaParser parser = new DataSchemaParser.Builder(jarFile)
.setResolverDirectories(resolverDirectories)
.setSourceDirectories(sourceDirectories)
.build();
DataSchemaParser.ParseResult parseResult = parser.parseSources(new String[]{jarFile});
Map<DataSchema, DataSchemaLocation> extensions = parseResult.getExtensionDataSchemaAndLocations();
assertEquals(extensions.size(), expectedExtensions.length);
Set<String> actualNames = extensions
.keySet()
.stream()
.map(dataSchema -> (NamedDataSchema) dataSchema)
.map(NamedDataSchema::getName)
.collect(Collectors.toSet());
assertEquals(actualNames, Arrays.stream(expectedExtensions).collect(Collectors.toSet()));
} |
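// Usage sketch (paths are hypothetical): sources may mix schema directories,
// loose files and JARs; each entry is routed to the parser registered for its
// file extension, and JARs are offered to every extension's parser.
DataSchemaParser parser = new DataSchemaParser.Builder("src/main/pegasus").build();
DataSchemaParser.ParseResult result = parser.parseSources(
        new String[]{"src/main/pegasus", "build/libs/models.jar"});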
public PickTableLayoutWithoutPredicate pickTableLayoutWithoutPredicate()
{
return new PickTableLayoutWithoutPredicate(metadata);
} | @Test
public void doesNotFireIfTableScanHasTableLayout()
{
tester().assertThat(pickTableLayout.pickTableLayoutWithoutPredicate())
.on(p -> p.tableScan(
nationTableHandle,
ImmutableList.of(p.variable("nationkey", BIGINT)),
ImmutableMap.of(p.variable("nationkey", BIGINT), new TpchColumnHandle("nationkey", BIGINT))))
.doesNotFire();
} |
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
} | @Test
public void shouldCreateAsSelectExistingTopicWithWritePermissionsAllowed() {
// Given:
final Statement statement = givenStatement(String.format(
"CREATE STREAM %s AS SELECT * FROM %s;", AVRO_STREAM_TOPIC, KAFKA_STREAM_TOPIC)
);
// When/Then:
authorizationValidator.checkAuthorization(securityContext, metaStore, statement);
} |
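// Sketch: statement types outside the instanceof chain pass through unchecked,
// while e.g. a PRINT statement is routed to the print-topic validation.
// givenStatement(...) is the helper already used by the tests here.
final Statement print = givenStatement("PRINT 'kafka_topic' FROM BEGINNING;");
authorizationValidator.checkAuthorization(securityContext, metaStore, print);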
@Override
public boolean put(File localFile, JobID jobId, BlobKey blobKey) throws IOException {
createBasePathIfNeeded();
String toBlobPath = BlobUtils.getStorageLocationPath(basePath, jobId, blobKey);
try (FSDataOutputStream os =
fileSystem.create(new Path(toBlobPath), FileSystem.WriteMode.OVERWRITE)) {
LOG.debug("Copying from {} to {}.", localFile, toBlobPath);
Files.copy(localFile, os);
os.sync();
}
return true;
} | @Test
void testMissingFilePut() {
assertThatThrownBy(
() ->
testInstance.put(
new File("/not/existing/file"),
new JobID(),
new PermanentBlobKey()))
.isInstanceOf(FileNotFoundException.class);
} |
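// Usage sketch (paths illustrative): the local file is copied to
// <basePath>/<jobId>/<blobKey> and the output stream is synced before returning.
boolean stored = testInstance.put(
        new File("/tmp/job-artifact.jar"), new JobID(), new PermanentBlobKey());
// stored == true once the copy and sync complete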
public static HostAndPort toHostAndPort(NetworkEndpoint networkEndpoint) {
switch (networkEndpoint.getType()) {
case IP:
return HostAndPort.fromHost(networkEndpoint.getIpAddress().getAddress());
case IP_PORT:
return HostAndPort.fromParts(
networkEndpoint.getIpAddress().getAddress(), networkEndpoint.getPort().getPortNumber());
case HOSTNAME:
case IP_HOSTNAME:
return HostAndPort.fromHost(networkEndpoint.getHostname().getName());
case HOSTNAME_PORT:
case IP_HOSTNAME_PORT:
return HostAndPort.fromParts(
networkEndpoint.getHostname().getName(), networkEndpoint.getPort().getPortNumber());
case UNRECOGNIZED:
case TYPE_UNSPECIFIED:
throw new AssertionError("Type for NetworkEndpoint must be specified.");
}
throw new AssertionError(
String.format(
"Should never happen. Unchecked NetworkEndpoint type: %s", networkEndpoint.getType()));
} | @Test
public void toHostAndPort_withHostnameAndPort_returnsHostWithHostnameAndPort() {
NetworkEndpoint hostnameAndPortEndpoint =
NetworkEndpoint.newBuilder()
.setType(NetworkEndpoint.Type.HOSTNAME_PORT)
.setPort(Port.newBuilder().setPortNumber(8888))
.setHostname(Hostname.newBuilder().setName("localhost"))
.build();
assertThat(NetworkEndpointUtils.toHostAndPort(hostnameAndPortEndpoint))
.isEqualTo(HostAndPort.fromParts("localhost", 8888));
} |
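// Sketch of the IP_PORT branch for contrast with the hostname case above;
// IpAddress follows the proto message types already used in this test.
NetworkEndpoint ipPortEndpoint =
    NetworkEndpoint.newBuilder()
        .setType(NetworkEndpoint.Type.IP_PORT)
        .setIpAddress(IpAddress.newBuilder().setAddress("10.0.0.1"))
        .setPort(Port.newBuilder().setPortNumber(443))
        .build();
// NetworkEndpointUtils.toHostAndPort(ipPortEndpoint) -> "10.0.0.1:443"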
public boolean insertGroupCapacity(final GroupCapacity capacity) {
GroupCapacityMapper groupCapacityMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.GROUP_CAPACITY);
MapperResult mapperResult;
MapperContext context = new MapperContext();
context.putUpdateParameter(FieldConstant.GROUP_ID, capacity.getGroup());
context.putUpdateParameter(FieldConstant.QUOTA, capacity.getQuota());
context.putUpdateParameter(FieldConstant.MAX_SIZE, capacity.getMaxSize());
context.putUpdateParameter(FieldConstant.MAX_AGGR_SIZE, capacity.getMaxAggrSize());
context.putUpdateParameter(FieldConstant.MAX_AGGR_COUNT, capacity.getMaxAggrCount());
context.putUpdateParameter(FieldConstant.GMT_CREATE, capacity.getGmtCreate());
context.putUpdateParameter(FieldConstant.GMT_MODIFIED, capacity.getGmtModified());
context.putWhereParameter(FieldConstant.GROUP_ID, capacity.getGroup());
if (CLUSTER.equals(capacity.getGroup())) {
mapperResult = groupCapacityMapper.insertIntoSelect(context);
} else {
// Note: add "tenant_id = ''" condition.
mapperResult = groupCapacityMapper.insertIntoSelectByWhere(context);
}
return jdbcTemplate.update(mapperResult.getSql(), mapperResult.getParamList().toArray()) > 0;
} | @Test
void testInsertGroupCapacity() {
doReturn(1).when(jdbcTemplate).update(anyString(), eq(""), eq(null), eq(null), eq(null), eq(null), eq(null), eq(null));
GroupCapacity capacity = new GroupCapacity();
capacity.setGroup(GroupCapacityPersistService.CLUSTER);
assertTrue(service.insertGroupCapacity(capacity));
capacity.setGroup("test");
doReturn(1).when(jdbcTemplate)
.update(anyString(), eq("test"), eq(null), eq(null), eq(null), eq(null), eq(null), eq(null), eq("test"));
assertTrue(service.insertGroupCapacity(capacity));
} |
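// Sketch: a non-CLUSTER group takes the insertIntoSelectByWhere branch, which
// adds the implicit tenant_id = '' predicate; field values are illustrative.
GroupCapacity capacity = new GroupCapacity();
capacity.setGroup("DEFAULT_GROUP");
capacity.setQuota(200);
boolean inserted = service.insertGroupCapacity(capacity);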
public abstract BuiltIndex<T> build(); | @Test
@UseDataProvider("indexAndTypeMappings")
public void fail_when_nested_with_no_field(NewIndex<?> newIndex, TypeMapping typeMapping) {
NestedFieldBuilder<TypeMapping> nestedFieldBuilder = typeMapping.nestedFieldBuilder("measures");
assertThatThrownBy(() -> nestedFieldBuilder.build())
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("At least one sub-field must be declared in nested property 'measures'");
} |
public String anonymize(final ParseTree tree) {
return build(tree);
} | @Test
public void shouldThrowWhenUnparseableStringProvided() {
// Given:
final String nonsenseUnparsedQuery = "cat";
// Then:
Assert.assertThrows(ParsingException.class, () -> anon.anonymize(nonsenseUnparsedQuery));
} |
@Override
public void showPreviewForKey(
Keyboard.Key key, Drawable icon, View parentView, PreviewPopupTheme previewPopupTheme) {
KeyPreview popup = getPopupForKey(key, parentView, previewPopupTheme);
Point previewPosition =
mPositionCalculator.calculatePositionForPreview(
key, previewPopupTheme, getLocationInWindow(parentView));
popup.showPreviewForKey(key, icon, previewPosition);
} | @Test
public void testCycleThroughPopupQueueWhenAllAreActive() {
KeyPreviewsManager underTest =
new KeyPreviewsManager(getApplicationContext(), mPositionCalculator, 3);
final int[] reuseIndex = new int[] {0, 1, 2, 0, 1, 2, 0};
final List<TextView> usedWindows = new ArrayList<>();
for (int index = 0; index < reuseIndex.length; index++) {
underTest.showPreviewForKey(mTestKeys[index], mTestKeys[index].label, mKeyboardView, mTheme);
usedWindows.add(
getLatestCreatedPopupWindow().getContentView().findViewById(R.id.key_preview_text));
final TextView textView = usedWindows.get(reuseIndex[index]);
Assert.assertEquals(textView.getText().toString(), mTestKeys[index].label);
}
} |
static Entry<ScramMechanism, String> parsePerMechanismArgument(String input) {
input = input.trim();
int equalsIndex = input.indexOf('=');
if (equalsIndex < 0) {
throw new FormatterException("Failed to find equals sign in SCRAM " +
"argument '" + input + "'");
}
String mechanismString = input.substring(0, equalsIndex);
String configString = input.substring(equalsIndex + 1);
ScramMechanism mechanism = ScramMechanism.forMechanismName(mechanismString);
if (mechanism == null) {
throw new FormatterException("The add-scram mechanism " + mechanismString +
" is not supported.");
}
if (!configString.startsWith("[")) {
throw new FormatterException("Expected configuration string to start with [");
}
if (!configString.endsWith("]")) {
throw new FormatterException("Expected configuration string to end with ]");
}
return new AbstractMap.SimpleImmutableEntry<>(mechanism,
configString.substring(1, configString.length() - 1));
} | @Test
public void testParsePerMechanismArgumentWithConfigStringWithoutBraces() {
assertEquals("Expected configuration string to start with [",
assertThrows(FormatterException.class,
() -> ScramParser.parsePerMechanismArgument(
"SCRAM-SHA-256=name=scram-admin,password=scram-user-secret")).getMessage());
} |
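// Sketch of the accepted shape: mechanism name, '=', then a config body wrapped
// in square brackets; the brackets are stripped from the returned value.
Entry<ScramMechanism, String> parsed = ScramParser.parsePerMechanismArgument(
        "SCRAM-SHA-256=[name=alice,password=alice-secret]");
// parsed.getKey()   == ScramMechanism.SCRAM_SHA_256
// parsed.getValue() equals "name=alice,password=alice-secret"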
public String deserialize(String password, String encryptedPassword, Validatable config) {
if (isNotBlank(password) && isNotBlank(encryptedPassword)) {
config.addError(PASSWORD, "You may only specify `password` or `encrypted_password`, not both!");
config.addError(ScmMaterialConfig.ENCRYPTED_PASSWORD, "You may only specify `password` or `encrypted_password`, not both!");
}
if (isNotBlank(password)) {
try {
return goCipher.encrypt(password);
} catch (CryptoException e) {
config.addError(PASSWORD, "Could not encrypt the password. This usually happens when the cipher text is invalid");
}
} else if (isNotBlank(encryptedPassword)) {
try {
goCipher.decrypt(encryptedPassword);
} catch (Exception e) {
config.addError(ENCRYPTED_PASSWORD, "Encrypted value for password is invalid. This usually happens when the cipher text is invalid.");
}
return encryptedPassword;
}
return null;
} | @Test
public void shouldErrorOutWhenEncryptedPasswordIsInvalid() {
SvnMaterialConfig svnMaterialConfig = svn();
PasswordDeserializer passwordDeserializer = new PasswordDeserializer();
passwordDeserializer.deserialize(null, "invalidEncryptedPassword", svnMaterialConfig);
assertThat(svnMaterialConfig.errors().getAllOn("encryptedPassword"), is(List.of("Encrypted value for password is invalid. This usually happens when the cipher text is invalid.")));
} |
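// Sketch (svnMaterialConfig as in the test above): a plain password is encrypted
// on the way in; an encrypted one is decrypt-checked and returned unchanged;
// supplying both adds validation errors to the config.
PasswordDeserializer deserializer = new PasswordDeserializer();
String cipherText = deserializer.deserialize("s3cret", null, svnMaterialConfig);
// cipherText holds the GoCipher-encrypted form of "s3cret"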
static java.sql.Date parseSqlDate(final String value) {
try {
// JDK format in Date.valueOf is compatible with DATE_FORMAT
return java.sql.Date.valueOf(value);
} catch (IllegalArgumentException e) {
return throwRuntimeParseException(value, new ParseException(value, 0), SQL_DATE_FORMAT);
}
} | @Test
public void testSqlDateWithLeadingZerosInMonthAndDay() throws Exception {
// Given
long expectedDateInMillis = new SimpleDateFormat(SQL_DATE_FORMAT)
.parse("2003-01-04")
.getTime();
java.sql.Date expectedDate = new java.sql.Date(expectedDateInMillis);
// When
java.sql.Date actualDate = DateHelper.parseSqlDate(expectedDate.toString());
// Then
assertSqlDatesEqual(expectedDate, actualDate);
} |
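// Sketch: java.sql.Date.valueOf accepts the yyyy-[m]m-[d]d escape form, so
// leading zeros round-trip cleanly; malformed input surfaces as a runtime error.
java.sql.Date date = DateHelper.parseSqlDate("2003-01-04");
// date.toString() -> "2003-01-04"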
@Override
public Object merge(T mergingValue, T existingValue) {
if (existingValue == null) {
return null;
}
return existingValue.getRawValue();
} | @Test
@SuppressWarnings("ConstantConditions")
public void merge_existingValueAbsent() {
MapMergeTypes existing = null;
MapMergeTypes merging = mergingValueWithGivenValue(MERGING);
assertNull(mergePolicy.merge(merging, existing));
} |
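// Sketch of the pass-through semantics (an EXISTING constant alongside MERGING
// is assumed): when an existing entry is present its raw value wins unchanged.
MapMergeTypes existing = mergingValueWithGivenValue(EXISTING);
Object merged = mergePolicy.merge(mergingValueWithGivenValue(MERGING), existing);
// merged == existing.getRawValue()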