focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
---|---|
public TpcEngineBuilder setReactorCount(int reactorCount) {
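// checkPositive rejects zero and negative counts with an IllegalArgumentException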
this.reactorCount = checkPositive(reactorCount, "reactorCount");
return this;
} | @Test
public void test_setReactorCountWhenZero() {
TpcEngineBuilder builder = new TpcEngineBuilder();
assertThrows(IllegalArgumentException.class, () -> builder.setReactorCount(0));
} |
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException {
ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null"));
if (null == value) {
return convertNullValue(convertType);
}
if (value.getClass() == convertType) {
return value;
}
if (value instanceof LocalDateTime) {
return convertLocalDateTimeValue((LocalDateTime) value, convertType);
}
if (value instanceof Timestamp) {
return convertTimestampValue((Timestamp) value, convertType);
}
if (URL.class.equals(convertType)) {
return convertURL(value);
}
if (value instanceof Number) {
return convertNumberValue(value, convertType);
}
if (value instanceof Date) {
return convertDateValue((Date) value, convertType);
}
if (value instanceof byte[]) {
return convertByteArrayValue((byte[]) value, convertType);
}
if (boolean.class.equals(convertType)) {
return convertBooleanValue(value);
}
if (String.class.equals(convertType)) {
return value.toString();
}
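// Last resort: attempt a direct cast; incompatible target types surface as SQLFeatureNotSupportedException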
try {
return convertType.cast(value);
} catch (final ClassCastException ignored) {
throw new SQLFeatureNotSupportedException("getObject with type");
}
} | @Test
void assertConvertNumberValueError() {
assertThrows(UnsupportedDataTypeConversionException.class, () -> ResultSetUtils.convertValue(1, Date.class));
} |
public Map<String, Object> getNodeMetrics(String node) {
Map<String, Object> metrics = new HashMap<>();
Request nodeStatRequest = new Request("GET", "_nodes/" + node + "/stats");
final DocumentContext nodeContext = getNodeContextFromRequest(node, nodeStatRequest);
if (Objects.nonNull(nodeContext)) {
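// Read each metric's configured JSON path from the node stats response; individual read failures are logged and skipped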
Arrays.stream(NodeStatMetrics.values())
.filter(m -> Objects.nonNull(m.getNodeStat()))
.forEach(metric -> {
try {
metrics.put(metric.getFieldName(), metric.mapValue(nodeContext.read(metric.getNodeStat())));
} catch (Exception e) {
log.error("Could not retrieve metric {} for node {}", metric.getFieldName(), node);
}
});
}
return metrics;
} | @Test
public void getNodeMetrics() {
Map<String, Object> nodeMetrics = collector.getNodeMetrics(NODENAME);
assertThat(nodeMetrics.get("cpu_load")).isEqualTo(26.4873046875);
assertThat(nodeMetrics.get("disk_free")).isEqualTo(572.1824f);
String[] allMetrics = Arrays.stream(NodeStatMetrics.values()).map(NodeStatMetrics::getFieldName).toArray(String[]::new);
assertThat(nodeMetrics).containsKeys(allMetrics);
} |
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
var certs = req.getClientCertificateChain();
log.fine(() -> "Certificate chain contains %d elements".formatted(certs.size()));
if (certs.isEmpty()) {
log.fine("Missing client certificate");
return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
}
if (legacyMode) {
log.fine("Legacy mode validation complete");
ClientPrincipal.attachToRequest(req, Set.of(), Set.of(READ, WRITE));
return Optional.empty();
}
var permission = Permission.getRequiredPermission(req).orElse(null);
if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
var clientCert = certs.get(0);
var clientIds = new TreeSet<String>();
var permissions = new TreeSet<Permission>();
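// A client matches when it holds the required permission and registered the presented certificate;
// the principal gets the union of all matching clients' ids and permissions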
for (Client c : allowedClients) {
if (!c.permissions().contains(permission)) continue;
if (!c.certificates().contains(clientCert)) continue;
clientIds.add(c.id());
permissions.addAll(c.permissions());
}
if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
ClientPrincipal.attachToRequest(req, clientIds, permissions);
return Optional.empty();
} | @Test
void accepts_client_with_valid_certificate() {
var req = FilterTestUtils.newRequestBuilder()
.withMethod(Method.POST)
.withClientCertificate(FEED_CERT)
.build();
var responseHandler = new MockResponseHandler();
newFilterWithClientsConfig().filter(req, responseHandler);
assertNull(responseHandler.getResponse());
assertEquals(new ClientPrincipal(Set.of(FEED_CLIENT_ID), Set.of(WRITE)), req.getUserPrincipal());
} |
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
return register(MetricName.build(name), metric);
} | @Test
public void registeringACounterTriggersANotification() throws Exception {
assertThat(registry.register(THING, counter))
.isEqualTo(counter);
verify(listener).onCounterAdded(THING, counter);
} |
public void transmit(final int msgTypeId, final DirectBuffer srcBuffer, final int srcIndex, final int length)
{
checkTypeId(msgTypeId);
checkMessageLength(length);
final AtomicBuffer buffer = this.buffer;
long currentTail = buffer.getLong(tailCounterIndex);
int recordOffset = (int)currentTail & (capacity - 1);
final int recordLength = HEADER_LENGTH + length;
final int recordLengthAligned = BitUtil.align(recordLength, RECORD_ALIGNMENT);
final long newTail = currentTail + recordLengthAligned;
final int toEndOfBuffer = capacity - recordOffset;
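// If the record does not fit contiguously before the end of the buffer, insert a padding record and wrap to offset 0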
if (toEndOfBuffer < recordLengthAligned)
{
signalTailIntent(buffer, newTail + toEndOfBuffer);
insertPaddingRecord(buffer, recordOffset, toEndOfBuffer);
currentTail += toEndOfBuffer;
recordOffset = 0;
}
else
{
signalTailIntent(buffer, newTail);
}
buffer.putInt(lengthOffset(recordOffset), recordLength);
buffer.putInt(typeOffset(recordOffset), msgTypeId);
buffer.putBytes(msgOffset(recordOffset), srcBuffer, srcIndex, length);
buffer.putLongOrdered(latestCounterIndex, currentTail);
buffer.putLongOrdered(tailCounterIndex, currentTail + recordLengthAligned);
} | @Test
void shouldTransmitIntoUsedBuffer()
{
final long tail = RECORD_ALIGNMENT * 3;
final int recordOffset = (int)tail;
final int length = 8;
final int recordLength = length + HEADER_LENGTH;
final int recordLengthAligned = align(recordLength, RECORD_ALIGNMENT);
when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(tail);
final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[1024]);
final int srcIndex = 0;
broadcastTransmitter.transmit(MSG_TYPE_ID, srcBuffer, srcIndex, length);
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer).getLong(TAIL_COUNTER_INDEX);
inOrder.verify(buffer).putLongOrdered(TAIL_INTENT_COUNTER_OFFSET, tail + recordLengthAligned);
inOrder.verify(buffer).putInt(lengthOffset(recordOffset), recordLength);
inOrder.verify(buffer).putInt(typeOffset(recordOffset), MSG_TYPE_ID);
inOrder.verify(buffer).putBytes(msgOffset(recordOffset), srcBuffer, srcIndex, length);
inOrder.verify(buffer).putLongOrdered(LATEST_COUNTER_INDEX, tail);
inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, tail + recordLengthAligned);
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testInflightFetchOnPendingPartitions() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
subscriptions.markPendingRevocation(singleton(tp0));
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
assertNull(fetchRecords().get(tp0));
} |
@Override
public String toString() {
return getClass().getSimpleName() + "[application=" + getApplication() + ", name="
+ getName() + ", storageName=" + getStorageName() + ", startDate=" + getStartDate()
+ ", childCounterName=" + getChildCounterName() + ", " + requests.size()
+ " requests, " + (errors == null ? "" : errors.size() + " errors, ")
+ "maxRequestsCount=" + getMaxRequestsCount() + ", displayed=" + isDisplayed()
+ ']';
} | @Test
public void testToString() {
counter.addRequest("test toString", 100, 50, 50, false, 1000);
final String string = counter.toString();
assertNotNull("toString not null", string);
assertFalse("toString not empty", string.isEmpty());
final String string2 = new Counter(Counter.ERROR_COUNTER_NAME, null).toString();
assertNotNull("toString not null", string2);
assertFalse("toString not empty", string2.isEmpty());
if (createCounterRequest().toString().isEmpty()) {
fail("toString vide");
}
} |
static <T extends Type> String encodeDynamicArray(DynamicArray<T> value) {
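// ABI layout of a dynamic array: 32-byte element count, then per-element offsets (where the element type is dynamic), then the encoded elements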
int size = value.getValue().size();
String encodedLength = encode(new Uint(BigInteger.valueOf(size)));
String valuesOffsets = encodeArrayValuesOffsets(value);
String encodedValues = encodeArrayValues(value);
StringBuilder result = new StringBuilder();
result.append(encodedLength);
result.append(valuesOffsets);
result.append(encodedValues);
return result.toString();
} | @SuppressWarnings("unchecked")
@Test
public void testArrayOfStrings() {
DynamicArray<Utf8String> array =
new DynamicArray<>(
new Utf8String(
"This string value is extra long so that it "
+ "requires more than 32 bytes"),
new Utf8String("abc"),
new Utf8String(""),
new Utf8String("web3j"));
DynamicArray emptyArray = DynamicArray.empty("string");
DynamicArray<Utf8String> arrayOfEmptyStrings =
new DynamicArray<>(new Utf8String(""), new Utf8String(""));
assertEquals(
TypeEncoder.encodeDynamicArray(array),
// array length
("0000000000000000000000000000000000000000000000000000000000000004"
// offset first string
+ "0000000000000000000000000000000000000000000000000000000000000080"
// offset second string
+ "0000000000000000000000000000000000000000000000000000000000000100"
// offset third string
+ "0000000000000000000000000000000000000000000000000000000000000140"
// offset fourth string
+ "0000000000000000000000000000000000000000000000000000000000000160"
// length first string
+ "0000000000000000000000000000000000000000000000000000000000000046"
// first string
+ "5468697320737472696e672076616c7565206973206578747261206c6f6e6720"
// first string continued
+ "736f2074686174206974207265717569726573206d6f7265207468616e203332"
// first string continued
+ "2062797465730000000000000000000000000000000000000000000000000000"
// length second string
+ "0000000000000000000000000000000000000000000000000000000000000003"
// second string
+ "6162630000000000000000000000000000000000000000000000000000000000"
// length third string
+ "0000000000000000000000000000000000000000000000000000000000000000"
// length fourth string
+ "0000000000000000000000000000000000000000000000000000000000000005"
// fourth string
+ "776562336a000000000000000000000000000000000000000000000000000000"));
assertEquals(
TypeEncoder.encodeDynamicArray(emptyArray),
// array length
("0000000000000000000000000000000000000000000000000000000000000000"));
assertEquals(
TypeEncoder.encodeDynamicArray(arrayOfEmptyStrings),
// array length
("0000000000000000000000000000000000000000000000000000000000000002"
// offset first string
+ "0000000000000000000000000000000000000000000000000000000000000040"
// offset second string
+ "0000000000000000000000000000000000000000000000000000000000000060"
// length first string
+ "0000000000000000000000000000000000000000000000000000000000000000"
// length second string
+ "0000000000000000000000000000000000000000000000000000000000000000"));
} |
@Override
public Optional<Integer> extractIndexNumber(final String indexName) {
final int beginIndex = indexPrefixLength(indexName);
if (indexName.length() < beginIndex) {
return Optional.empty();
}
final String suffix = indexName.substring(beginIndex);
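// The suffix after the index prefix must be a plain integer (e.g. "graylog_4" -> 4); anything else yields empty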
try {
return Optional.of(Integer.parseInt(suffix));
} catch (NumberFormatException e) {
return Optional.empty();
}
} | @Test
public void testExtractIndexNumber() {
assertThat(mongoIndexSet.extractIndexNumber("graylog_0")).contains(0);
assertThat(mongoIndexSet.extractIndexNumber("graylog_4")).contains(4);
assertThat(mongoIndexSet.extractIndexNumber("graylog_52")).contains(52);
assertThat(mongoIndexSet.extractIndexNumber("graylog_warm_1")).contains(1);
} |
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
}
LoggerContext rootContext = getRootContext();
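// Apply levels configured by properties first, then hardcoded levels, then force the OFF-unless-TRACE loggers off when TRACE is not enabled globally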
logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
return rootContext;
} | @Test
public void apply_fails_with_IAE_if_LogLevelConfig_does_not_have_ROOT_LOGGER_NAME_of_LogBack() {
LogLevelConfig logLevelConfig = LogLevelConfig.newBuilder(randomAlphanumeric(2)).build();
assertThatThrownBy(() -> underTest.apply(logLevelConfig, props))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Value of LogLevelConfig#rootLoggerName must be \"ROOT\"");
} |
@Nonnull
public static <T> Sink<T> list(@Nonnull String listName) {
return fromProcessor("listSink(" + listName + ')', writeListP(listName));
} | @Test
public void list_byRef() {
// Given
populateList(srcList);
// When
Sink<Object> sink = Sinks.list(sinkList);
// Then
p.readFrom(Sources.list(srcList)).writeTo(sink);
execute();
assertEquals(itemCount, sinkList.size());
} |
@Override
public void run() {
try (DbSession dbSession = dbClient.openSession(false)) {
int size = dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.PENDING);
metrics.setNumberOfPendingTasks(size);
}
} | @Test
public void run_setsValueInMetricsBasedOnValueReturnedFromDatabase() {
NumberOfTasksInQueueTask task = new NumberOfTasksInQueueTask(dbClient, metrics, config);
when(dbClient.ceQueueDao()).thenReturn(ceQueueDao);
when(ceQueueDao.countByStatus(any(), any())).thenReturn(10);
task.run();
verify(metrics, times(1)).setNumberOfPendingTasks(10);
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"));
} | @Test
@Ignore
public void testVersioning() throws Exception {
final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
new SpectraVersioningFeature(session).setConfiguration(container, new DisabledPasswordCallback(), new VersioningConfiguration(true));
final Path folder = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(folder, new TransferStatus());
final Path test = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(1000);
final TransferStatus status = new TransferStatus().withLength(content.length);
status.setChecksum(new CRC32ChecksumCompute().compute(new ByteArrayInputStream(content), status));
// Allocate
final SpectraBulkService bulk = new SpectraBulkService(session);
bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
{
final OutputStream out = new SpectraWriteFeature(session).write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
out.close();
}
assertEquals(content.length, new SpectraAttributesFinderFeature(session).find(test).getSize());
// Overwrite
bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status.exists(true)), new DisabledConnectionCallback());
{
final OutputStream out = new SpectraWriteFeature(session).write(test, status.exists(true), new DisabledConnectionCallback());
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
out.close();
}
assertEquals(content.length, new SpectraAttributesFinderFeature(session).find(test).getSize());
final AttributedList<Path> list = new SpectraObjectListService(session).list(folder, new DisabledListProgressListener());
assertEquals(2, list.size());
for(Path f : list) {
assertTrue(f.attributes().getMetadata().isEmpty());
}
new SpectraDeleteFeature(session).delete(Arrays.asList(test, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
for(Path f : new SpectraObjectListService(session).list(folder, new DisabledListProgressListener())) {
assertTrue(f.attributes().isDuplicate());
if(f.attributes().getSize() == 0L) {
assertTrue(f.attributes().getMetadata().containsKey(SpectraVersioningFeature.KEY_REVERTABLE));
}
else {
assertTrue(f.attributes().getMetadata().isEmpty());
}
}
new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
static SpecificData getModelForSchema(Schema schema) {
final Class<?> clazz;
if (schema != null && (schema.getType() == Schema.Type.RECORD || schema.getType() == Schema.Type.UNION)) {
clazz = SpecificData.get().getClass(schema);
} else {
return null;
}
// If clazz == null, the underlying Avro class for the schema is not on the classpath
if (clazz == null) {
return null;
}
final SpecificData model;
try {
final Field modelField = clazz.getDeclaredField("MODEL$");
modelField.setAccessible(true);
model = (SpecificData) modelField.get(null);
} catch (NoSuchFieldException e) {
LOG.info(String.format("Generated Avro class %s did not contain a MODEL$ field. ", clazz)
+ "Parquet will use default SpecificData model for reading and writing.");
return null;
} catch (IllegalAccessException e) {
LOG.warn(
String.format("Field `MODEL$` in class %s was inaccessible. ", clazz)
+ "Parquet will use default SpecificData model for reading and writing.",
e);
return null;
}
final String avroVersion = getRuntimeAvroVersion();
// Avro 1.7 and 1.8 don't include conversions in the MODEL$ field by default
if (avroVersion != null && (avroVersion.startsWith("1.8.") || avroVersion.startsWith("1.7."))) {
try {
addLogicalTypeConversion(model, schema, new HashSet<>());
} catch (IllegalAccessException e) {
LOG.warn(
String.format("Logical-type conversions were inaccessible for %s", clazz)
+ "Parquet will use default SpecificData model for reading and writing.",
e);
return null;
}
}
return model;
} | @Test
public void testModelForSpecificRecordWithLogicalTypes() {
SpecificData model = AvroRecordConverter.getModelForSchema(LogicalTypesTest.SCHEMA$);
// Test that model is generated correctly
Collection<Conversion<?>> conversions = model.getConversions();
assertEquals(conversions.size(), 3);
assertNotNull(model.getConversionByClass(Instant.class));
assertNotNull(model.getConversionByClass(LocalDate.class));
assertNotNull(model.getConversionByClass(LocalTime.class));
} |
@Override
public EueWriteFeature.Chunk upload(final Path file, Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback prompt) throws BackgroundException {
if(status.getLength() >= threshold) {
if(Vault.DISABLED == registry.find(session, file)) {
// Concurrent chunk writes are only allowed when not uploading to a vault: write multiple 4MB chunks in parallel with the default feature
return new EueLargeUploadService(session, fileid, writer).upload(file, local, throttle, listener, status, prompt);
}
// For uploads into a vault, write 4MB chunks sequentially with the multipart write feature for a known file length
return new EueUploadService(session, fileid, writer).upload(file, local, throttle, listener, status, prompt);
}
// Write single chunk smaller than threshold
return new EueSingleUploadService(session, fileid, writer).upload(file, local, throttle, listener, status, prompt);
} | @Test
public void testUploadLargeFileInChunks() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final EueThresholdUploadService service = new EueThresholdUploadService(session, fileid, VaultRegistry.DISABLED);
final Path container = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
final String name = new AlphanumericRandomStringService().random();
final Path file = new Path(container, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final int length = 5242881;
final byte[] content = RandomUtils.nextBytes(length);
IOUtils.write(content, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final BytecountStreamListener count = new BytecountStreamListener();
service.upload(file, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledConnectionCallback());
assertEquals(content.length, count.getSent());
assertTrue(status.isComplete());
assertTrue(new EueFindFeature(session, fileid).find(file));
assertEquals(content.length, new EueAttributesFinderFeature(session, fileid).find(file).getSize());
final byte[] compare = new byte[length];
IOUtils.readFully(new EueReadFeature(session, fileid).read(file, new TransferStatus().withLength(length), new DisabledConnectionCallback()), compare);
assertArrayEquals(content, compare);
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
} |
public void execute() {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas))
.visit(treeRootHolder.getReportTreeRoot());
} | @Test
public void compute_duplicated_lines_counts_lines_from_original_and_InnerDuplicate_of_a_single_line() {
TextBlock original = new TextBlock(1, 1);
duplicationRepository.addDuplication(FILE_1_REF, original, new TextBlock(2, 2));
underTest.execute();
assertRawMeasureValue(FILE_1_REF, DUPLICATED_LINES_KEY, 2);
} |
public static <T, R extends Type<T>, E extends Type<T>> List<E> typeMap(
List<List<T>> input, Class<E> outerDestType, Class<R> innerType) {
List<E> result = new ArrayList<>();
try {
Constructor<E> constructor =
outerDestType.getDeclaredConstructor(Class.class, List.class);
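// Instantiate each outer element via its (Class, List) constructor, mapping the inner lists recursively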
for (List<T> ts : input) {
E e = constructor.newInstance(innerType, typeMap(ts, innerType));
result.add(e);
}
} catch (NoSuchMethodException
| IllegalAccessException
| InstantiationException
| InvocationTargetException e) {
throw new TypeMappingException(e);
}
return result;
} | @SuppressWarnings("unchecked")
@Test
public void testTypeMapNested() {
List<BigInteger> innerList1 = Arrays.asList(BigInteger.valueOf(1), BigInteger.valueOf(2));
List<BigInteger> innerList2 = Arrays.asList(BigInteger.valueOf(3), BigInteger.valueOf(4));
final List<List<BigInteger>> input = Arrays.asList(innerList1, innerList2);
StaticArray2<Uint256> staticArray1 =
new StaticArray2<>(Uint256.class, new Uint256(1), new Uint256(2));
StaticArray2<Uint256> staticArray2 =
new StaticArray2<>(Uint256.class, new Uint256(3), new Uint256(4));
List<StaticArray2> actual = typeMap(input, StaticArray2.class, Uint256.class);
assertEquals(actual.get(0), (staticArray1));
assertEquals(actual.get(1), (staticArray2));
} |
@Override
public BackgroundException map(final ApiException failure) {
final StringBuilder buffer = new StringBuilder();
if(StringUtils.isNotBlank(failure.getMessage())) {
for(String s : StringUtils.split(failure.getMessage(), ",")) {
this.append(buffer, LocaleFactory.localizedString(s, "EUE"));
}
}
if(null != failure.getResponseHeaders()) {
final List<List<String>> headers = failure.getResponseHeaders().entrySet().stream()
.filter(e -> "X-UI-ENHANCED-STATUS".equalsIgnoreCase(e.getKey())).map(Map.Entry::getValue).collect(Collectors.toList());
for(List<String> header : headers) {
for(String s : header) {
this.append(buffer, LocaleFactory.localizedString(s, "EUE"));
}
}
}
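// Walk the cause chain first so transport-level failures map to specific exceptions before falling back to HTTP status mapping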
for(Throwable cause : ExceptionUtils.getThrowableList(failure)) {
if(cause instanceof ProcessingException) {
return new InteroperabilityException(cause.getMessage(), cause);
}
if(cause instanceof SocketException) {
// Map Connection has been shutdown: javax.net.ssl.SSLException: java.net.SocketException: Broken pipe
return new DefaultSocketExceptionMappingService().map((SocketException) cause);
}
if(cause instanceof HttpResponseException) {
return new DefaultHttpResponseExceptionMappingService().map((HttpResponseException) cause);
}
if(cause instanceof IOException) {
return new DefaultIOExceptionMappingService().map((IOException) cause);
}
if(cause instanceof IllegalStateException) {
// Caused by: ApiException: javax.ws.rs.ProcessingException: java.lang.IllegalStateException: Connection pool shut down
return new ConnectionCanceledException(cause);
}
}
switch(failure.getCode()) {
case HttpStatus.SC_UNPROCESSABLE_ENTITY:
return new LockedException(buffer.toString(), failure);
case HttpStatus.SC_TOO_MANY_REQUESTS:
final Optional<Map.Entry<String, List<String>>> header
= failure.getResponseHeaders().entrySet().stream().filter(e -> HttpHeaders.RETRY_AFTER.equals(e.getKey())).findAny();
if(header.isPresent()) {
final Optional<String> value = header.get().getValue().stream().findAny();
return value.map(s -> new RetriableAccessDeniedException(buffer.toString(),
Duration.ofSeconds(Long.parseLong(s)), failure)).orElseGet(() -> new RetriableAccessDeniedException(buffer.toString(), failure));
}
}
return new DefaultHttpResponseExceptionMappingService().map(failure, buffer, failure.getCode());
} | @Test
public void testEnhancedStatus() {
final BackgroundException failure = new EueExceptionMappingService().map(new ApiException(404, "",
Collections.singletonMap("X-UI-ENHANCED-STATUS", Collections.singletonList("NOT_FOUND")), ""));
assertTrue(failure instanceof NotfoundException);
assertEquals("NOT_FOUND. Please contact your web hosting service provider for assistance.", failure.getDetail());
} |
@Udf
public String chr(@UdfParameter(
description = "Decimal codepoint") final Integer decimalCode) {
if (decimalCode == null) {
return null;
}
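// Codepoints outside the valid Unicode range yield null rather than throwing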
if (!Character.isValidCodePoint(decimalCode)) {
return null;
}
final char[] resultChars = Character.toChars(decimalCode);
return String.valueOf(resultChars);
} | @Test
public void shouldConvertZhFromDecimal() {
final String result = udf.chr(22909);
assertThat(result, is("好"));
} |
@Override
protected ExecuteContext doBefore(ExecuteContext context) {
ClientMessage clientMessage = (ClientMessage) context.getArguments()[0];
String database = getDataBaseInfo(context).getDatabaseName();
handleWriteOperationIfWriteDisabled(clientMessage.description(), database,
DatabaseWriteProhibitionManager.getMySqlProhibitionDatabases(), context);
return context;
} | @Test
public void testDoBefore() throws Exception {
// the database write prohibition switch is disabled
globalConfig.setEnableMySqlWriteProhibition(false);
context = ExecuteContext.forMemberMethod(clientMock, methodMock, argument, null, null);
interceptor.before(context);
Assert.assertNull(context.getThrowableOut());
// The switch is still disabled, but the prohibited-database set now contains the target database
Set<String> databases = new HashSet<>();
databases.add("database-test");
globalConfig.setMySqlDatabases(databases);
interceptor.before(context);
Assert.assertNull(context.getThrowableOut());
// The database write prohibition switch is enabled, and the database set contains the database that is blocked
globalConfig.setEnableMySqlWriteProhibition(true);
interceptor.before(context);
Assert.assertEquals("Database prohibit to write, database: database-test",
context.getThrowableOut().getMessage());
// The switch is enabled and the set contains the target database, but the SQL is a read, not a write
Mockito.when(messageMock.description()).thenReturn("SELECT * FROM table");
context = ExecuteContext.forMemberMethod(clientMock, methodMock, argument, null, null);
interceptor.before(context);
Assert.assertNull(context.getThrowableOut());
// The switch is enabled, but the prohibited-database set no longer contains the target database
Mockito.when(messageMock.description()).thenReturn("INSERT INTO table (name) VALUES ('test')");
globalConfig.setMySqlDatabases(new HashSet<>());
interceptor.before(context);
Assert.assertNull(context.getThrowableOut());
} |
public static Map<String, Set<String>> parseTableExpressionWithSchema(final ShardingSphereDatabase database, final Collection<SchemaTable> schemaTables) {
Collection<String> systemSchemas = DatabaseTypedSPILoader.getService(DialectSystemDatabase.class, database.getProtocolType()).getSystemSchemas();
if (schemaTables.stream().anyMatch(each -> "*".equals(each.getTable()) && ("*".equals(each.getSchema()) || each.getSchema().isEmpty()))) {
return parseTableExpressionWithAllTables(database, systemSchemas);
}
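// Otherwise expand wildcards per dimension: '*' schema, '*' table, or a concrete pair (an empty schema falls back to the dialect's default schema)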
Map<String, Set<String>> result = new HashMap<>();
DialectDatabaseMetaData dialectDatabaseMetaData = new DatabaseTypeRegistry(database.getProtocolType()).getDialectDatabaseMetaData();
for (SchemaTable each : schemaTables) {
if ("*".equals(each.getSchema())) {
result.putAll(parseTableExpressionWithAllSchema(database, systemSchemas, each));
} else if ("*".equals(each.getTable())) {
result.putAll(parseTableExpressionWithAllTable(database, each));
} else {
String schemaName = each.getSchema();
if (dialectDatabaseMetaData.getDefaultSchema().isPresent() && schemaName.isEmpty()) {
schemaName = dialectDatabaseMetaData.getDefaultSchema().get();
}
ShardingSpherePreconditions.checkNotNull(database.getSchema(schemaName).getTable(each.getTable()), () -> new TableNotFoundException(each.getTable()));
result.computeIfAbsent(schemaName, ignored -> new HashSet<>()).add(each.getTable());
}
}
return result;
} | @Test
void assertParseTableExpression() {
Map<String, ShardingSphereSchema> schemas = new HashMap<>(2, 1F);
schemas.put("public", mockedPublicSchema());
schemas.put("test", mockedTestSchema());
ShardingSphereDatabase database = new ShardingSphereDatabase("sharding_db", TypedSPILoader.getService(DatabaseType.class, "openGauss"), null, null, schemas);
List<SchemaTable> schemaTables = Arrays.asList(SchemaTable.newBuilder().setSchema("public").setTable("t_order").build(),
SchemaTable.newBuilder().setSchema("test").setTable("*").build());
Map<String, Set<String>> expected = new HashMap<>(2, 1F);
expected.put("test", new HashSet<>(Arrays.asList("t_order_item", "t_order_item2")));
expected.put("public", Collections.singleton("t_order"));
Map<String, Set<String>> actual = CDCSchemaTableUtils.parseTableExpressionWithSchema(database, schemaTables);
assertThat(actual, is(expected));
schemaTables = Collections.singletonList(SchemaTable.newBuilder().setTable("t_order").build());
actual = CDCSchemaTableUtils.parseTableExpressionWithSchema(database, schemaTables);
expected = Collections.singletonMap("public", Collections.singleton("t_order"));
assertThat(actual, is(expected));
schemaTables = Collections.singletonList(SchemaTable.newBuilder().setSchema("*").setTable("t_order").build());
actual = CDCSchemaTableUtils.parseTableExpressionWithSchema(database, schemaTables);
expected = Collections.singletonMap("public", Collections.singleton("t_order"));
assertThat(actual, is(expected));
} |
@Override
public <K, V> void forward(final K key, final V value) {
throw new UnsupportedOperationException("StateStores can't access forward.");
} | @Test
public void shouldThrowOnForwardWithTo() {
assertThrows(UnsupportedOperationException.class, () -> context.forward("key", "value", To.all()));
} |
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
} | @Test
public void shouldThrowWhenCreateAsSelectExistingStreamWithoutWritePermissionsDenied() {
// Given:
givenTopicAccessDenied(AVRO_TOPIC, AclOperation.WRITE);
final Statement statement = givenStatement(String.format(
"CREATE STREAM %s AS SELECT * FROM %s;", AVRO_STREAM_TOPIC, KAFKA_STREAM_TOPIC)
);
// When:
final Exception e = assertThrows(
KsqlTopicAuthorizationException.class,
() -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement)
);
// Then:
assertThat(e.getMessage(), containsString(String.format(
"Authorization denied to Write on topic(s): [%s]", AVRO_TOPIC
)));
} |
public List<PartitionStatisticsFile> partitionStatisticsFiles() {
return partitionStatisticsFiles;
} | @Test
public void testParsePartitionStatisticsFiles() throws Exception {
String data = readTableMetadataInputFile("TableMetadataPartitionStatisticsFiles.json");
TableMetadata parsed = TableMetadataParser.fromJson(data);
assertThat(parsed.partitionStatisticsFiles())
.hasSize(1)
.first()
.isEqualTo(
ImmutableGenericPartitionStatisticsFile.builder()
.snapshotId(3055729675574597004L)
.path("s3://a/b/partition-stats.parquet")
.fileSizeInBytes(43L)
.build());
} |
@NonNull
@Override
public String getId() {
return ID;
} | @Test
public void getBitbucketScm() throws UnirestException {
Map r = new RequestBuilder(baseUrl)
.crumb(crumb)
.status(200)
.jwtToken(getJwtToken(j.jenkins, authenticatedUser.getId(), authenticatedUser.getId()))
.post("/organizations/jenkins/scm/"+ BitbucketCloudScm.ID + "/" + getApiUrlParam())
.build(Map.class);
assertNotNull(r);
assertEquals(BitbucketCloudScm.ID, r.get("id"));
assertEquals(apiUrl, r.get("uri"));
assertNull(r.get("credentialId"));
} |
@Override
public int run(String[] args) throws Exception {
Options opts = new Options();
opts.addOption("lnl", LIST_LABELS_CMD, false,
"List cluster node-label collection");
opts.addOption("lna", LIST_CLUSTER_ATTRIBUTES, false,
"List cluster node-attribute collection");
opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
"This is DEPRECATED, will be removed in future releases. Directly access node label store, "
+ "with this option, all node label related operations"
+ " will NOT connect RM. Instead, they will"
+ " access/modify stored node labels directly."
+ " By default, it is false (access via RM)."
+ " AND PLEASE NOTE: if you configured "
+ YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR
+ " to a local directory"
+ " (instead of NFS or HDFS), this option will only work"
+ " when the command run on the machine where RM is running."
+ " Also, this option is UNSTABLE, could be removed in future"
+ " releases.");
int exitCode = -1;
CommandLine parsedCli = null;
try {
parsedCli = new GnuParser().parse(opts, args);
} catch (MissingArgumentException ex) {
sysout.println("Missing argument for options");
printUsage(opts);
return exitCode;
}
createAndStartYarnClient();
if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
accessLocal = true;
}
if (parsedCli.hasOption(LIST_LABELS_CMD)) {
printClusterNodeLabels();
} else if(parsedCli.hasOption(LIST_CLUSTER_ATTRIBUTES)){
printClusterNodeAttributes();
} else if (parsedCli.hasOption(HELP_CMD)) {
printUsage(opts);
return 0;
} else {
syserr.println("Invalid Command Usage : ");
printUsage(opts);
}
return 0;
} | @Test
public void testGetClusterNodeLabels() throws Exception {
when(client.getClusterNodeLabels()).thenReturn(
Arrays.asList(NodeLabel.newInstance("label1"),
NodeLabel.newInstance("label2")));
ClusterCLI cli = createAndGetClusterCLI();
int rc =
cli.run(new String[] { ClusterCLI.CMD, "-" + ClusterCLI.LIST_LABELS_CMD });
assertEquals(0, rc);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintWriter pw = new PrintWriter(baos);
pw.print("Node Labels: <label1:exclusivity=true>,<label2:exclusivity=true>");
pw.close();
verify(sysOut).println(baos.toString("UTF-8"));
} |
@SuppressWarnings("rawtypes")
public Collection<RuleConfiguration> swapToRuleConfigurations(final Collection<RepositoryTuple> repositoryTuples) {
if (repositoryTuples.isEmpty()) {
return Collections.emptyList();
}
Collection<RuleConfiguration> result = new LinkedList<>();
YamlRuleConfigurationSwapperEngine yamlSwapperEngine = new YamlRuleConfigurationSwapperEngine();
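// For each swapper discovered via ordered SPI, rebuild its YAML rule configuration from matching tuples, then swap it into a runtime RuleConfiguration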
for (YamlRuleConfigurationSwapper each : OrderedSPILoader.getServices(YamlRuleConfigurationSwapper.class)) {
Class<? extends YamlRuleConfiguration> yamlRuleConfigClass = getYamlRuleConfigurationClass(each);
swapToYamlRuleConfiguration(repositoryTuples, yamlRuleConfigClass).ifPresent(optional -> result.add(yamlSwapperEngine.swapToRuleConfiguration(optional)));
}
return result;
} | @Test
void assertSwapToRuleConfigurations() {
assertTrue(new RepositoryTupleSwapperEngine().swapToRuleConfigurations(Collections.singleton(new RepositoryTuple("/rules/leaf/versions/0", "value: foo"))).isEmpty());
} |
public static <T> TimeLimiterOperator<T> of(TimeLimiter timeLimiter) {
return new TimeLimiterOperator<>(timeLimiter);
} | @Test
public void doNotTimeoutUsingMono() {
given(timeLimiter.getTimeLimiterConfig())
.willReturn(toConfig(Duration.ofMinutes(1)));
given(helloWorldService.returnHelloWorld())
.willReturn("Hello world");
Mono<?> mono = Mono.fromCallable(helloWorldService::returnHelloWorld)
.transformDeferred(TimeLimiterOperator.of(timeLimiter));
StepVerifier.create(mono)
.expectNextCount(1)
.verifyComplete();
then(timeLimiter).should(times(1))
.onSuccess();
} |
public static Expression generateFilterExpression(SearchArgument sarg) {
return translate(sarg.getExpression(), sarg.getLeaves());
} | @Test
public void testAndOperand() {
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
SearchArgument arg =
builder
.startAnd()
.equals("salary", PredicateLeaf.Type.LONG, 3000L)
.equals("salary", PredicateLeaf.Type.LONG, 4000L)
.end()
.build();
And expected =
(And)
Expressions.and(Expressions.equal("salary", 3000L), Expressions.equal("salary", 4000L));
And actual = (And) HiveIcebergFilterFactory.generateFilterExpression(arg);
assertThat(expected.op()).isEqualTo(actual.op());
assertThat(expected.left().op()).isEqualTo(actual.left().op());
assertThat(expected.right().op()).isEqualTo(actual.right().op());
} |
@NonNull
@Override
public String getId() {
return ID;
} | @Test
public void getRepositoriesWithCredentialId() throws IOException, UnirestException {
String credentialId = createCredential(BitbucketCloudScm.ID);
Map repoResp = new RequestBuilder(baseUrl)
.crumb(crumb)
.status(200)
.jwtToken(getJwtToken(j.jenkins, authenticatedUser.getId(), authenticatedUser.getId()))
.post("/organizations/jenkins/scm/"+BitbucketCloudScm.ID+"/organizations/" + BbCloudWireMock.TEAM_UUID + "/repositories/"+getApiUrlParam()+"&credentialId="+credentialId)
.build(Map.class);
List repos = (List) ((Map)repoResp.get("repositories")).get("items");
assertEquals("pipeline-demo-test", ((Map)repos.get(0)).get("name"));
assertEquals("pipeline-demo-test", ((Map)repos.get(0)).get("description"));
assertTrue((Boolean) ((Map)repos.get(0)).get("private"));
assertEquals("master",((Map)repos.get(0)).get("defaultBranch"));
assertEquals(2, repos.size());
assertEquals("emptyrepo", ((Map)repos.get(1)).get("name"));
assertEquals("emptyrepo", ((Map)repos.get(1)).get("description"));
assertTrue((Boolean) ((Map)repos.get(1)).get("private"));
assertNull(((Map)repos.get(1)).get("defaultBranch"));
} |
public static Socket acceptWithoutTimeout(ServerSocket serverSocket) throws IOException {
Preconditions.checkArgument(
serverSocket.getSoTimeout() == 0, "serverSocket SO_TIMEOUT option must be 0");
while (true) {
try {
return serverSocket.accept();
} catch (SocketTimeoutException exception) {
// This should be impossible given that the socket timeout is set to zero
// which indicates an infinite timeout. This is due to the underlying JDK-8237858
// bug. We retry the accept call indefinitely to replicate the expected behavior.
}
}
} | @Test
void testAcceptWithoutTimeoutSuppressesTimeoutException() throws IOException {
// Validates that acceptWithoutTimeout suppresses all SocketTimeoutExceptions
Socket expected = new Socket();
ServerSocket serverSocket =
new ServerSocket() {
private int count = 0;
@Override
public Socket accept() throws IOException {
if (count < 2) {
count++;
throw new SocketTimeoutException();
}
return expected;
}
};
assertThat(NetUtils.acceptWithoutTimeout(serverSocket)).isEqualTo(expected);
} |
@Override
public void indexOnStartup(Set<IndexType> uninitializedIndexTypes) {
// TODO do not load everything in memory. Db rows should be scrolled.
List<IndexPermissions> authorizations = getAllAuthorizations();
Stream<AuthorizationScope> scopes = getScopes(uninitializedIndexTypes);
index(authorizations, scopes, Size.LARGE);
} | @Test
public void indexOnStartup_grants_access_to_anybody_on_view() {
PortfolioDto view = createAndIndexPortfolio();
UserDto user = db.users().insertUser();
GroupDto group = db.users().insertGroup();
indexOnStartup();
verifyAnyoneAuthorized(view);
verifyAuthorized(view, user);
verifyAuthorized(view, user, group);
} |
static ArgumentParser argParser() {
ArgumentParser parser = ArgumentParsers
.newArgumentParser("producer-performance")
.defaultHelp(true)
.description("This tool is used to verify the producer performance. To enable transactions, " +
"you can specify a transaction id or set a transaction duration using --transaction-duration-ms. " +
"There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, " +
"set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>.");
MutuallyExclusiveGroup payloadOptions = parser
.addMutuallyExclusiveGroup()
.required(true)
.description("either --record-size or --payload-file must be specified but not both.");
parser.addArgument("--topic")
.action(store())
.required(true)
.type(String.class)
.metavar("TOPIC")
.help("produce messages to this topic");
parser.addArgument("--num-records")
.action(store())
.required(true)
.type(Long.class)
.metavar("NUM-RECORDS")
.dest("numRecords")
.help("number of messages to produce");
payloadOptions.addArgument("--record-size")
.action(store())
.required(false)
.type(Integer.class)
.metavar("RECORD-SIZE")
.dest("recordSize")
.help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file " +
"or --payload-monotonic.");
payloadOptions.addArgument("--payload-file")
.action(store())
.required(false)
.type(String.class)
.metavar("PAYLOAD-FILE")
.dest("payloadFile")
.help("file to read the message payloads from. This works only for UTF-8 encoded text files. " +
"Payloads will be read from this file and a payload will be randomly selected when sending messages. " +
"Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic.");
payloadOptions.addArgument("--payload-monotonic")
.action(storeTrue())
.type(Boolean.class)
.metavar("PAYLOAD-MONOTONIC")
.dest("payloadMonotonic")
.help("payload is monotonically increasing integer. Note that you must provide exactly one of --record-size " +
"or --payload-file or --payload-monotonic.");
parser.addArgument("--payload-delimiter")
.action(store())
.required(false)
.type(String.class)
.metavar("PAYLOAD-DELIMITER")
.dest("payloadDelimiter")
.setDefault("\\n")
.help("provides delimiter to be used when --payload-file is provided. " +
"Defaults to new line. " +
"Note that this parameter will be ignored if --payload-file is not provided.");
parser.addArgument("--throughput")
.action(store())
.required(true)
.type(Double.class)
.metavar("THROUGHPUT")
.help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.");
parser.addArgument("--producer-props")
.nargs("+")
.required(false)
.metavar("PROP-NAME=PROP-VALUE")
.type(String.class)
.dest("producerConfig")
.help("kafka producer related configuration properties like bootstrap.servers,client.id etc. " +
"These configs take precedence over those passed via --producer.config.");
parser.addArgument("--producer.config")
.action(store())
.required(false)
.type(String.class)
.metavar("CONFIG-FILE")
.dest("producerConfigFile")
.help("producer config properties file.");
parser.addArgument("--print-metrics")
.action(storeTrue())
.type(Boolean.class)
.metavar("PRINT-METRICS")
.dest("printMetrics")
.help("print out metrics at the end of the test.");
parser.addArgument("--transactional-id")
.action(store())
.required(false)
.type(String.class)
.metavar("TRANSACTIONAL-ID")
.dest("transactionalId")
.help("The transactional id to use. This config takes precedence over the transactional.id " +
"specified via --producer.config or --producer-props. Note that if the transactional id " +
"is not specified while --transaction-duration-ms is provided, the default value for the " +
"transactional id will be performance-producer- followed by a random uuid.");
parser.addArgument("--transaction-duration-ms")
.action(store())
.required(false)
.type(Long.class)
.metavar("TRANSACTION-DURATION")
.dest("transactionDurationMs")
.help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. " +
"The value should be greater than 0. If the transactional id is specified via --producer-props, " +
"--producer.config, or --transactional-id but --transaction-duration-ms is not specified, " +
"the default value will be 3000.");
return parser;
} | @Test
public void testNoTransactionRelatedConfigs() throws IOException, ArgumentParserException {
ArgumentParser parser = ProducerPerformance.argParser();
String[] args = new String[]{
"--topic", "Hello-Kafka",
"--num-records", "5",
"--throughput", "100",
"--record-size", "100",
"--producer-props", "bootstrap.servers=localhost:9000"};
ProducerPerformance.ConfigPostProcessor configs = new ProducerPerformance.ConfigPostProcessor(parser, args);
assertFalse(configs.transactionsEnabled);
assertNull(configs.transactionDurationMs);
assertFalse(configs.producerProps.contains(ProducerConfig.TRANSACTIONAL_ID_CONFIG));
} |
@Override
public List<TaskProperty> getPropertiesForDisplay() {
return new ArrayList<>();
} | @Test
public void shouldReturnEmptyPropertiesForDisplay() {
assertThat(new NullTask().getPropertiesForDisplay().isEmpty(), is(true));
} |
@Override
public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record) {
return send(record, null);
} | @Test
@SuppressWarnings("unchecked")
public void shouldThrowClassCastException() {
try (MockProducer<Integer, String> customProducer = new MockProducer<>(true, new IntegerSerializer(), new StringSerializer())) {
assertThrows(ClassCastException.class, () -> customProducer.send(new ProducerRecord(topic, "key1", "value1")));
}
} |
public static void copyBody(Message source, Message target) {
// Preserve the DataType if both messages are DataTypeAware
if (source.hasTrait(MessageTrait.DATA_AWARE)) {
target.setBody(source.getBody());
target.setPayloadForTrait(MessageTrait.DATA_AWARE,
source.getPayloadForTrait(MessageTrait.DATA_AWARE));
return;
}
target.setBody(source.getBody());
} | @Test
void shouldCopyBodyIfBothDataTypeAwareWithoutDataTypeSet() {
Object body = new Object();
DefaultMessage m1 = new DefaultMessage((Exchange) null);
m1.setBody(body, (DataType) null);
DefaultMessage m2 = new DefaultMessage((Exchange) null);
copyBody(m1, m2);
assertSame(body, m2.getBody());
} |
@Override
public Mono<GetCurrencyConversionsResponse> getCurrencyConversions(final GetCurrencyConversionsRequest request) {
AuthenticationUtil.requireAuthenticatedDevice();
final CurrencyConversionEntityList currencyConversionEntityList = currencyManager
.getCurrencyConversions()
.orElseThrow(Status.UNAVAILABLE::asRuntimeException);
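// Conversion rates are serialized as strings, since protobuf has no decimal type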
final List<GetCurrencyConversionsResponse.CurrencyConversionEntity> currencyConversionEntities = currencyConversionEntityList
.getCurrencies()
.stream()
.map(cce -> GetCurrencyConversionsResponse.CurrencyConversionEntity.newBuilder()
.setBase(cce.getBase())
.putAllConversions(transformBigDecimalsToStrings(cce.getConversions()))
.build())
.toList();
return Mono.just(GetCurrencyConversionsResponse.newBuilder()
.addAllCurrencies(currencyConversionEntities).setTimestamp(currencyConversionEntityList.getTimestamp())
.build());
} | @Test
void testUnavailable() {
when(currencyManager.getCurrencyConversions()).thenReturn(Optional.empty());
assertStatusException(Status.UNAVAILABLE, () -> authenticatedServiceStub().getCurrencyConversions(
GetCurrencyConversionsRequest.newBuilder().build()));
} |
@Override
public void subscribe(URL url, NotifyListener listener) {
if (url == null) {
throw new IllegalArgumentException("subscribe url == null");
}
if (listener == null) {
throw new IllegalArgumentException("subscribe listener == null");
}
if (logger.isInfoEnabled()) {
logger.info("Subscribe: " + url);
}
Set<NotifyListener> listeners = subscribed.computeIfAbsent(url, n -> new ConcurrentHashSet<>());
listeners.add(listener);
} | @Test
void testSubscribe() {
// check parameters
try {
abstractRegistry.subscribe(testUrl, null);
Assertions.fail();
} catch (Exception e) {
Assertions.assertTrue(e instanceof IllegalArgumentException);
}
// check parameters
try {
abstractRegistry.subscribe(null, null);
Assertions.fail();
} catch (Exception e) {
Assertions.assertTrue(e instanceof IllegalArgumentException);
}
// check if subscribe successfully
Assertions.assertNull(abstractRegistry.getSubscribed().get(testUrl));
abstractRegistry.subscribe(testUrl, listener);
Assertions.assertNotNull(abstractRegistry.getSubscribed().get(testUrl));
Assertions.assertTrue(abstractRegistry.getSubscribed().get(testUrl).contains(listener));
} |
public static String formatDA(TimeZone tz, Date date) {
return formatDA(tz, date, new StringBuilder(8)).toString();
} | @Test
public void testFormatDA() {
assertEquals("19700101", DateUtils.formatDA(tz, new Date(0)));
} |
public static FormationFilterDefinition parseFormationFilterDefinition(String definition) {
String[] tokens = definition.split(",");
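// Expected form: "<distance in nautical miles>,<time in seconds>,<log flag>", e.g. "0.85,92,true"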
Distance dist = Distance.ofNauticalMiles(parseDouble(tokens[0]));
Duration time = Duration.ofSeconds(parseLong(tokens[1]));
boolean log = Boolean.parseBoolean(tokens[2]);
return new FormationFilterDefinition(dist, time, log);
} | @Test
public void canParseOneDefinition() {
String singleDef = "0.85,92,true";
FormationFilterDefinition def = parseFormationFilterDefinition(singleDef);
assertThat(def.timeRequirement, is(Duration.ofSeconds(92)));
assertThat(def.proximityRequirement, is(Distance.ofNauticalMiles(0.85)));
assertThat(def.logRemovedFilter, is(true));
} |
public static List<String> getFieldNames(Class<? extends Enum<?>> clazz) {
if(null == clazz){
return null;
}
final List<String> names = new ArrayList<>();
final Field[] fields = ReflectUtil.getFields(clazz);
String name;
for (Field field : fields) {
name = field.getName();
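// Skip the enum constants themselves and synthetic members ($VALUES, ordinal)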
if (field.getType().isEnum() || name.contains("$VALUES") || "ordinal".equals(name)) {
continue;
}
if (false == names.contains(name)) {
names.add(name);
}
}
return names;
} | @Test
public void getFieldNamesTest() {
List<String> names = EnumUtil.getFieldNames(TestEnum.class);
assertTrue(names.contains("type"));
assertTrue(names.contains("name"));
} |
public void append(long offset, int position) {
lock.lock();
try {
if (isFull())
throw new IllegalArgumentException("Attempt to append to a full index (size = " + entries() + ").");
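// Offsets must be strictly increasing; out-of-order appends fail with InvalidOffsetException below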
if (entries() == 0 || offset > lastOffset) {
log.trace("Adding index entry {} => {} to {}", offset, position, file().getAbsolutePath());
mmap().putInt(relativeOffset(offset));
mmap().putInt(position);
incrementEntries();
lastOffset = offset;
if (entries() * ENTRY_SIZE != mmap().position())
throw new IllegalStateException(entries() + " entries but file position in index is " + mmap().position());
} else
throw new InvalidOffsetException("Attempt to append an offset " + offset + " to position " + entries() +
" no larger than the last offset appended (" + lastOffset + ") to " + file().getAbsolutePath());
} finally {
lock.unlock();
}
} | @Test
public void appendOutOfOrder() {
index.append(51, 0);
assertThrows(InvalidOffsetException.class, () -> index.append(50, 1));
} |
public static boolean metadataChanged(Cluster previous, Cluster current) {
// Broker has changed.
Set<Node> prevNodeSet = new HashSet<>(previous.nodes());
if (prevNodeSet.size() != current.nodes().size()) {
return true;
}
current.nodes().forEach(prevNodeSet::remove);
if (!prevNodeSet.isEmpty()) {
return true;
}
// Topic has changed
if (!previous.topics().equals(current.topics())) {
return true;
}
// partition has changed.
for (String topic : previous.topics()) {
if (!previous.partitionCountForTopic(topic).equals(current.partitionCountForTopic(topic))) {
return true;
}
for (PartitionInfo prevPartInfo : previous.partitionsForTopic(topic)) {
PartitionInfo currPartInfo = current.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition()));
if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) {
return true;
}
}
}
return false;
} | @Test
public void testMetadataChanged() {
Node[] nodesWithOrder1 = {NODE_0, NODE_1};
Node[] nodesWithOrder2 = {NODE_1, NODE_0};
Node[] nodes2 = {NODE_0, NODE_2};
// Cluster 1 just has one partition
PartitionInfo t0p0 = new PartitionInfo(TOPIC0, 0, NODE_0, nodesWithOrder1, nodesWithOrder2);
PartitionInfo t0p1 = new PartitionInfo(TOPIC0, 1, NODE_1, nodesWithOrder1, nodesWithOrder2);
PartitionInfo t1p0 = new PartitionInfo(TOPIC1, 0, NODE_2, nodesWithOrder1, nodesWithOrder2);
PartitionInfo t1p1 = new PartitionInfo(TOPIC1, 1, NODE_0, nodesWithOrder1, nodesWithOrder2);
Set<PartitionInfo> partitions1 = new HashSet<>(Arrays.asList(t0p0, t0p1, t1p0, t1p1));
Cluster cluster1 = new Cluster("cluster", Arrays.asList(NODE_0, NODE_1, NODE_2), partitions1,
Collections.emptySet(), Collections.emptySet());
Set<Integer> brokersWithReplicas = new HashSet<>();
brokersWithReplicas.add(0);
brokersWithReplicas.add(1);
// Verify number of replicas and brokers with replicas in the cluster
assertEquals(8, MonitorUtils.numReplicas(cluster1));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster1));
// Cluster2 has a new topic
PartitionInfo t2p0 = new PartitionInfo(TOPIC2, 0, NODE_1, nodesWithOrder1, nodesWithOrder2);
Cluster cluster2 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(TOPIC2, 0), t2p0));
// Verify number of replicas and brokers with replicas in the cluster
assertEquals(10, MonitorUtils.numReplicas(cluster2));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster2));
// A new partition.
PartitionInfo t0p2 = new PartitionInfo(TOPIC0, 2, NODE_1, nodesWithOrder1, nodesWithOrder2);
Cluster cluster3 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(TOPIC2, 2), t0p2));
// Verify number of replicas and brokers with replicas in the cluster
assertEquals(10, MonitorUtils.numReplicas(cluster3));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster3));
// An existing partition with different replica orders
PartitionInfo t0p0DifferentOrder = new PartitionInfo(TOPIC0, 0, NODE_0, nodesWithOrder2, nodesWithOrder2);
Cluster cluster4 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(TOPIC0, 0), t0p0DifferentOrder));
// Verify number of replicas and brokers with replicas in the cluster
assertEquals(8, MonitorUtils.numReplicas(cluster4));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster4));
// An existing partition with a different replica assignment
PartitionInfo t0p0DifferentAssignment = new PartitionInfo(TOPIC0, 0, NODE_0, nodes2, nodesWithOrder2);
Cluster cluster5 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(TOPIC0, 0), t0p0DifferentAssignment));
// Verify number of replicas and brokers with replicas in the cluster
brokersWithReplicas.add(2);
assertEquals(8, MonitorUtils.numReplicas(cluster5));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster5));
// An existing partition with a different leader
PartitionInfo t0p0DifferentLeader = new PartitionInfo(TOPIC0, 0, NODE_1, nodesWithOrder1, nodesWithOrder2);
Cluster cluster6 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(TOPIC0, 0), t0p0DifferentLeader));
// Verify number of replicas and brokers with replicas in the cluster
brokersWithReplicas.remove(2);
assertEquals(8, MonitorUtils.numReplicas(cluster6));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster6));
// An existing partition with the same cluster but different ISR
PartitionInfo t0p0DifferentIsr = new PartitionInfo(TOPIC0, 0, NODE_0, nodesWithOrder1, new Node[]{NODE_0});
Cluster cluster7 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(TOPIC0, 0), t0p0DifferentIsr));
// Verify number of replicas and brokers with replicas in the cluster
assertEquals(8, MonitorUtils.numReplicas(cluster7));
assertEquals(brokersWithReplicas, MonitorUtils.brokersWithReplicas(cluster7));
assertTrue(MonitorUtils.metadataChanged(cluster1, cluster2));
assertTrue(MonitorUtils.metadataChanged(cluster1, cluster3));
assertTrue(MonitorUtils.metadataChanged(cluster1, cluster4));
assertTrue(MonitorUtils.metadataChanged(cluster1, cluster5));
assertTrue(MonitorUtils.metadataChanged(cluster1, cluster6));
assertFalse(MonitorUtils.metadataChanged(cluster1, cluster7));
} |
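// The leaderChanged and replicaListChanged helpers used above are not shown;
// a minimal sketch of what they might look like, assuming plain PartitionInfo
// accessors and java.util.Objects/Arrays (hypothetical, not the original code):
private static boolean leaderChanged(PartitionInfo prev, PartitionInfo curr) {
    // A vanished partition or a different leader node counts as a change.
    return curr == null || !Objects.equals(prev.leader(), curr.leader());
}

private static boolean replicaListChanged(PartitionInfo prev, PartitionInfo curr) {
    // Replica order matters (see cluster4 above), so compare as ordered arrays.
    return curr == null || !Arrays.equals(prev.replicas(), curr.replicas());
}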
public void verify(CvCertificate cert) {
final Deque<CvCertificate> chain = getTrustChain(cert);
// Only CVCA has domain parameters
final ECDomainParameters params = chain.getLast().getBody().getPublicKey().getParams();
while (!chain.isEmpty()) {
final CvCertificate signer = chain.pop();
signatureService.verify(cert, signer.getBody().getPublicKey(), params);
cert = signer;
}
} | @Test
public void shouldVerifyIfRootIsTrustedWithIntermediate() throws Exception {
certificateRepo.save(loadCvCertificate("rdw/acc/cvca.cvcert", true));
certificateRepo.save(loadCvCertificate("rdw/acc/dvca.cvcert", false));
certificateRepo.flush();
service.verify(readCvCertificate("rdw/acc/at001.cvcert"));
} |
@Override
public ObjectNode encode(MappingAddress address, CodecContext context) {
EncodeMappingAddressCodecHelper encoder =
new EncodeMappingAddressCodecHelper(address, context);
return encoder.encode();
} | @Test
public void ipv4MappingAddressTest() {
MappingAddress address = MappingAddresses.ipv4MappingAddress(IPV4_PREFIX);
ObjectNode result = addressCodec.encode(address, context);
assertThat(result, matchesMappingAddress(address));
} |
@Override
public synchronized void editSchedule() {
updateConfigIfNeeded();
long startTs = clock.getTime();
CSQueue root = scheduler.getRootQueue();
Resource clusterResources = Resources.clone(scheduler.getClusterResource());
containerBasedPreemptOrKill(root, clusterResources);
if (LOG.isDebugEnabled()) {
LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
}
} | @Test
public void testDeadzone() {
int[][] qData = new int[][]{
// / A B C
{ 100, 40, 40, 20 }, // abs
{ 100, 100, 100, 100 }, // maxCap
{ 100, 39, 43, 21 }, // used
{ 10, 10, 0, 0 }, // pending
{ 0, 0, 0, 0 }, // reserved
{ 3, 1, 1, 1 }, // apps
{ -1, 1, 1, 1 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
conf.setFloat(
CapacitySchedulerConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
(float) 0.1);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// ignore 10% overcapacity to avoid jitter
verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
} |
protected boolean isIpAddress(String field, FieldPresence presence) {
return isIpAddress(object, field, presence);
} | @Test
public void isIpAddress() {
assertTrue("is not proper ip", cfg.isIpAddress(IP, MANDATORY));
assertTrue("is not proper ip", cfg.isIpAddress(IP, OPTIONAL));
assertTrue("is not proper ip", cfg.isIpAddress("none", OPTIONAL));
assertTrue("did not detect missing ip",
expectInvalidField(() -> cfg.isIpAddress("none", MANDATORY)));
assertTrue("did not detect bad ip",
expectInvalidField(() -> cfg.isIpAddress(BAD_IP, MANDATORY)));
} |
public Book get(long bookId) throws BookNotFoundException {
if (!collection.containsKey(bookId)) {
throw new BookNotFoundException("Not found book with id: " + bookId);
}
// return a defensive copy of the book
return new Book(collection.get(bookId));
} | @Test
void testDefaultVersionRemainsZeroAfterAdd() throws BookNotFoundException {
var book = bookRepository.get(bookId);
assertEquals(0, book.getVersion());
} |
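// The copy returned by get() is a defensive copy: callers may mutate it
// without touching the repository's stored state. A minimal illustration
// (setVersion on the copy is assumed here purely for the example):
Book copy = bookRepository.get(bookId);
copy.setVersion(42);                                   // mutates only the copy
assert bookRepository.get(bookId).getVersion() == 0;   // stored book unchanged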
@Override
public ComponentCreationData createProjectAndBindToDevOpsPlatform(DbSession dbSession, CreationMethod creationMethod, Boolean monorepo, @Nullable String projectKey,
@Nullable String projectName) {
String pat = findPersonalAccessTokenOrThrow(dbSession, almSettingDto);
String url = requireNonNull(almSettingDto.getUrl(), "DevOps Platform url cannot be null");
checkArgument(devOpsProjectDescriptor.projectIdentifier() != null, "DevOps Project Identifier cannot be null for Azure DevOps");
GsonAzureRepo repo = fetchAzureDevOpsProject(url, pat, devOpsProjectDescriptor.projectIdentifier(), devOpsProjectDescriptor.repositoryIdentifier());
ComponentCreationData componentCreationData = projectCreator.createProject(
dbSession,
getProjectKey(projectKey, repo),
getProjectName(projectName, repo),
repo.getDefaultBranchName(),
creationMethod);
ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow();
createProjectAlmSettingDto(dbSession, repo, projectDto, almSettingDto, monorepo);
return componentCreationData;
} | @Test
void createProjectAndBindToDevOpsPlatform_whenRepositoryNotFound_shouldThrow() {
mockPatForUser();
when(azureDevOpsHttpClient.getRepo(AZURE_DEVOPS_URL, USER_PAT, DEVOPS_PROJECT_ID, REPOSITORY_NAME))
.thenThrow(new AzureDevopsServerException(404, "Problem fetching repository from AzureDevOps"));
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> underTest.createProjectAndBindToDevOpsPlatform(mock(DbSession.class), CreationMethod.ALM_IMPORT_API, false, null, null))
.withMessage("Failed to fetch AzureDevOps repository 'repositoryName' from project 'project-identifier' from 'http://api.com'");
} |
public void parseStepParameter(
Map<String, Map<String, Object>> allStepOutputData,
Map<String, Parameter> workflowParams,
Map<String, Parameter> stepParams,
Parameter param,
String stepId) {
parseStepParameter(
allStepOutputData, workflowParams, stepParams, param, stepId, new HashSet<>());
} | @Test
public void testParseLiteralStepParameter() {
StringParameter bar = StringParameter.builder().name("bar").value("test ${foo}").build();
paramEvaluator.parseStepParameter(
Collections.emptyMap(),
Collections.emptyMap(),
Collections.singletonMap("foo", StringParameter.builder().value("123").build()),
bar,
"step1");
assertEquals("test 123", bar.getEvaluatedResult());
bar = StringParameter.builder().name("bar").value("test ${foo}").build();
paramEvaluator.parseStepParameter(
Collections.emptyMap(),
Collections.emptyMap(),
Collections.singletonMap(
"foo", StringParameter.builder().evaluatedResult("123").evaluatedTime(123L).build()),
bar,
"step1");
assertEquals("test 123", bar.getEvaluatedResult());
} |
@Override
public <T extends BaseRedisNodes> T getRedisNodes(org.redisson.api.redisnode.RedisNodes<T> nodes) {
if (nodes.getClazz() == RedisSingle.class) {
if (config.isSentinelConfig() || config.isClusterConfig()) {
throw new IllegalArgumentException("Can't be used in non Redis single configuration");
}
return (T) new RedissonSingleNode(connectionManager, commandExecutor);
}
if (nodes.getClazz() == RedisCluster.class) {
if (!config.isClusterConfig()) {
throw new IllegalArgumentException("Can't be used in non Redis Cluster configuration");
}
return (T) new RedissonClusterNodes(connectionManager, commandExecutor);
}
if (nodes.getClazz() == RedisSentinelMasterSlave.class) {
if (!config.isSentinelConfig()) {
throw new IllegalArgumentException("Can't be used in non Redis Sentinel configuration");
}
return (T) new RedissonSentinelMasterSlaveNodes(connectionManager, commandExecutor);
}
if (nodes.getClazz() == RedisMasterSlave.class) {
if (config.isSentinelConfig() || config.isClusterConfig()) {
throw new IllegalArgumentException("Can't be used in non Redis Master Slave configuration");
}
return (T) new RedissonMasterSlaveNodes(connectionManager, commandExecutor);
}
throw new IllegalArgumentException();
} | @Test
public void testSave() throws InterruptedException {
Instant s2 = Instant.now();
Thread.sleep(1000);
RedisSingle nodes = redisson.getRedisNodes(RedisNodes.SINGLE);
RedisMaster node = nodes.getInstance();
Instant time1 = node.getLastSaveTime();
assertThat(time1).isNotNull();
node.save();
Instant time2 = node.getLastSaveTime();
assertThat(time2.isAfter(s2)).isTrue();
node.bgSave();
node.bgRewriteAOF();
} |
@Override
protected IRODSFileSystemAO connect(final ProxyFinder proxy, final HostKeyCallback key, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
try {
final IRODSFileSystem fs = this.configure(IRODSFileSystem.instance());
final IRODSAccessObjectFactory factory = fs.getIRODSAccessObjectFactory();
final String region = this.getRegion();
final String resource = this.getResource();
final Credentials credentials = host.getCredentials();
try {
return factory.getIRODSFileSystemAO(new URIEncodingIRODSAccount(credentials.getUsername(), credentials.getPassword(),
new IRODSHomeFinderService(IRODSSession.this).find().getAbsolute(), region, resource));
}
catch(IllegalArgumentException e) {
throw new LoginFailureException(e.getMessage(), e);
}
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map(e);
}
} | @Test
public void testConnect() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
assertTrue(session.isConnected());
assertNotNull(session.getClient());
session.close();
assertFalse(session.isConnected());
} |
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
String jwt;
RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
// A JWT is a JWS and/or a JWE with JSON claims as the payload.
// In this example it is a plain JWS (no JWE nesting), so we create a
// JsonWebSignature object.
JsonWebSignature jws = new JsonWebSignature();
// The payload of the JWS is JSON content of the JWT Claims
jws.setPayload(claims.toJson());
// The JWT is signed using the sender's private key
jws.setKey(privateKey);
// Get the provider id from the security config file; it should be two digits
// and is used as a prefix for the kid in the token header, for example: 05100.
// If there is no provider id, the kid is used without a prefix.
String provider_id = "";
if (jwtConfig.getProviderId() != null) {
provider_id = jwtConfig.getProviderId();
if (provider_id.length() == 1) {
provider_id = "0" + provider_id;
} else if (provider_id.length() > 2) {
logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
provider_id = provider_id.substring(0, 2);
}
}
jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());
// Set the signature algorithm on the JWT/JWS that will integrity protect the claims
jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
// Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
// representation, which is a string consisting of three dot ('.') separated
// base64url-encoded parts in the form Header.Payload.Signature
jwt = jws.getCompactSerialization();
return jwt;
} | @Test
public void longlivedLightPortalLocalhost() throws Exception {
JwtClaims claims = ClaimsUtil.getTestClaims("stevehu@gmail.com", "EMPLOYEE", "f7d42348-c647-4efb-a52d-4c5787421e73", Arrays.asList("portal.r", "portal.w"), "user lightapi.net admin");
claims.setExpirationTimeMinutesInTheFuture(5256000);
String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
System.out.println("***Long lived token for portal localhost***: " + jwt);
} |
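// The provider-id handling above amounts to a small normalization rule for
// the kid prefix. A sketch of that rule as a standalone helper (hypothetical
// extraction, not part of the original class):
static String normalizeProviderId(String providerId) {
    if (providerId == null) {
        return "";                         // no prefix at all
    }
    if (providerId.length() == 1) {
        return "0" + providerId;           // left-pad to two digits
    }
    if (providerId.length() > 2) {
        return providerId.substring(0, 2); // logged as invalid, then truncated
    }
    return providerId;
}
// normalizeProviderId("5")   -> "05", so the kid header becomes "05" + kid
// normalizeProviderId("051") -> "05"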
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
} | @Test
public void testReplaceValueTTL() throws InterruptedException {
RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simple");
map.put(new SimpleKey("1"), new SimpleValue("2"), Duration.ofSeconds(1));
Thread.sleep(1000);
SimpleValue res = map.replace(new SimpleKey("1"), new SimpleValue("3"));
assertThat(res).isNull();
SimpleValue val1 = map.get(new SimpleKey("1"));
assertThat(val1).isNull();
map.destroy();
} |
@Override
public void put(final Bytes rawBaseKey,
final byte[] value) {
final long timestamp = baseKeySchema.segmentTimestamp(rawBaseKey);
observedStreamTime = Math.max(observedStreamTime, timestamp);
final long segmentId = segments.segmentId(timestamp);
final S segment = segments.getOrCreateSegmentIfLive(segmentId, context, observedStreamTime);
if (segment == null) {
expiredRecordSensor.record(1.0d, context.currentSystemTimeMs());
LOG.warn("Skipping record for expired segment.");
} else {
synchronized (position) {
StoreQueryUtils.updatePosition(position, stateStoreContext);
// Put to the index first so that if the base put fails, iterating the
// index finds no base value. If we put to the base first and the index
// put failed, iterating the index would miss the key while iterating the
// base store would find it, which leads to inconsistency.
if (hasIndex()) {
final KeyValue<Bytes, byte[]> indexKeyValue = getIndexKeyValue(rawBaseKey, value);
segment.put(indexKeyValue.key, indexKeyValue.value);
}
segment.put(rawBaseKey, value);
}
}
} | @Test
public void shouldFetchSessionForTimeRange() {
// Only for TimeFirstSessionKeySchema schema
if (!(getBaseSchema() instanceof TimeFirstSessionKeySchema)) {
return;
}
final String keyA = "a";
final String keyB = "b";
final String keyC = "c";
final Window[] sessionWindows = new Window[3];
sessionWindows[0] = new SessionWindow(100L, 100L);
sessionWindows[1] = new SessionWindow(50L, 200L);
sessionWindows[2] = new SessionWindow(200L, 300L);
bytesStore.put(serializeKey(new Windowed<>(keyA, sessionWindows[0])), serializeValue(10));
bytesStore.put(serializeKey(new Windowed<>(keyB, sessionWindows[1])), serializeValue(100));
bytesStore.put(serializeKey(new Windowed<>(keyC, sessionWindows[2])), serializeValue(200));
// Fetch point
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 100L)) {
final List<KeyValue<Windowed<String>, Long>> expected = Collections.singletonList(
KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L)
);
assertEquals(expected, toList(values));
}
// Fetch partial boundary
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 200L)) {
final List<KeyValue<Windowed<String>, Long>> expected = asList(
KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L),
KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L)
);
assertEquals(expected, toList(values));
}
// Fetch partial
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(99L, 201L)) {
final List<KeyValue<Windowed<String>, Long>> expected = asList(
KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L),
KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L)
);
assertEquals(expected, toList(values));
}
// Fetch partial
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(101L, 199L)) {
assertTrue(toList(values).isEmpty());
}
// Fetch all boundary
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 300L)) {
final List<KeyValue<Windowed<String>, Long>> expected = asList(
KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L),
KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L),
KeyValue.pair(new Windowed<>(keyC, sessionWindows[2]), 200L)
);
assertEquals(expected, toList(values));
}
// Fetch all
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(99L, 301L)) {
final List<KeyValue<Windowed<String>, Long>> expected = asList(
KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L),
KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L),
KeyValue.pair(new Windowed<>(keyC, sessionWindows[2]), 200L)
);
assertEquals(expected, toList(values));
}
// Fetch all
try (final KeyValueIterator<Bytes, byte[]> values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(101L, 299L)) {
final List<KeyValue<Windowed<String>, Long>> expected = Collections.singletonList(
KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L)
);
assertEquals(expected, toList(values));
}
} |
@Override
public String toString() {
// Add the options that we received from deserialization
SortedMap<String, Object> sortedOptions = new TreeMap<>(jsonOptions);
// Override with any programmatically set options.
for (Map.Entry<String, BoundValue> entry : options.entrySet()) {
sortedOptions.put(entry.getKey(), entry.getValue().getValue());
}
StringBuilder b = new StringBuilder();
b.append("Current Settings:\n");
for (Map.Entry<String, Object> entry : sortedOptions.entrySet()) {
b.append(" " + entry.getKey() + ": " + entry.getValue() + "\n");
}
return b.toString();
} | @Test
public void testToString() throws Exception {
// TODO: Java core test failing on windows, https://github.com/apache/beam/issues/20485
assumeFalse(SystemUtils.IS_OS_WINDOWS);
ProxyInvocationHandler handler = new ProxyInvocationHandler(Maps.newHashMap());
StringWithDefault proxy = handler.as(StringWithDefault.class);
proxy.setString("stringValue");
DefaultAnnotations proxy2 = proxy.as(DefaultAnnotations.class);
proxy2.setLong(57L);
assertEquals(
String.format("Current Settings:%n" + " long: 57%n" + " string: stringValue%n"),
proxy.toString());
} |
public static GcsPath fromComponents(@Nullable String bucket, @Nullable String object) {
return new GcsPath(null, bucket, object);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidObject_cr() {
GcsPath.fromComponents(null, "a\rb");
} |
public static <T> MutationDetector forValueWithCoder(T value, Coder<T> coder)
throws CoderException {
if (value == null) {
return noopMutationDetector();
} else {
return new CodedValueMutationDetector<>(value, coder);
}
} | @Test
public void testImmutableIterable() throws Exception {
Iterable<Integer> value = FluentIterable.from(Arrays.asList(1, 2, 3, 4)).cycle().limit(50);
MutationDetector detector =
MutationDetectors.forValueWithCoder(value, IterableCoder.of(VarIntCoder.of()));
detector.verifyUnmodified();
} |
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
// It is possible that the object itself implements java.io.Serializable,
// but its underlying structure does not. In that case we switch to the
// JSON marshaling strategy, which doesn't use Java serialization.
}
}
return jsonClone(object);
} | @Test
public void should_clone_serializable_complex_object_with_non_serializable_nested_object() {
Map<String, List<NonSerializableObject>> map = new LinkedHashMap<>();
map.put("key1", Lists.newArrayList(new NonSerializableObject("name1")));
map.put("key2", Lists.newArrayList(
new NonSerializableObject("name2"),
new NonSerializableObject("name3")
));
Object original = new SerializableComplexObjectWithNonSerializableNestedObject(map);
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
static SortKey[] rangeBounds(
int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) {
// sort the keys first
Arrays.sort(samples, comparator);
int numCandidates = numPartitions - 1;
SortKey[] candidates = new SortKey[numCandidates];
int step = (int) Math.ceil((double) samples.length / numPartitions);
int position = step - 1;
int numChosen = 0;
while (position < samples.length && numChosen < numCandidates) {
SortKey candidate = samples[position];
// skip duplicate values
if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) {
// linear probe for the next distinct value
position += 1;
} else {
candidates[numChosen] = candidate;
position += step;
numChosen += 1;
}
}
return candidates;
} | @Test
public void testRangeBoundsNonDivisible() {
// step is 3 = ceiling(11/4)
assertThat(
SketchUtil.rangeBounds(
4,
SORT_ORDER_COMPARTOR,
new SortKey[] {
CHAR_KEYS.get("a"),
CHAR_KEYS.get("b"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("d"),
CHAR_KEYS.get("e"),
CHAR_KEYS.get("f"),
CHAR_KEYS.get("g"),
CHAR_KEYS.get("h"),
CHAR_KEYS.get("i"),
CHAR_KEYS.get("j"),
CHAR_KEYS.get("k"),
}))
.containsExactly(CHAR_KEYS.get("c"), CHAR_KEYS.get("f"), CHAR_KEYS.get("i"));
} |
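// Once rangeBounds has produced its numPartitions - 1 sorted boundary keys,
// a record can be routed with a binary search where the insertion point is
// the partition index. A small sketch under that assumption (hypothetical
// helper, assuming SortKey implements StructLike):
static int assignPartition(SortKey key, SortKey[] bounds, Comparator<StructLike> comparator) {
    int pos = Arrays.binarySearch(bounds, key, comparator);
    // An exact match maps to the partition that owns that bound; otherwise
    // use the insertion point (-pos - 1) reported by binarySearch.
    return pos >= 0 ? pos : -pos - 1;
}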
@Override
public <T extends Notification<?>> Flowable<T> subscribe(
Request request, String unsubscribeMethod, Class<T> responseType) {
// We can't use a regular Observer since "onError" may be called
// before the first client subscribes, and we need to preserve
// that event.
BehaviorSubject<T> subject = BehaviorSubject.create();
// We need to subscribe synchronously: if we returned a Flowable
// to the client before receiving a reply, the client could
// unsubscribe before we know the subscription id, which would
// cause a race condition.
subscribeToEventsStream(request, subject, responseType);
return subject.doOnDispose(() -> closeSubscription(subject, unsubscribeMethod))
.toFlowable(BackpressureStrategy.BUFFER);
} | @Test
public void testPropagateSubscriptionEvent() throws Exception {
CountDownLatch eventReceived = new CountDownLatch(1);
CountDownLatch disposed = new CountDownLatch(1);
AtomicReference<NewHeadsNotification> actualNotificationRef = new AtomicReference<>();
runAsync(
() -> {
Disposable disposable =
subscribeToEvents()
.subscribe(
newHeadsNotification -> {
actualNotificationRef.set(newHeadsNotification);
eventReceived.countDown();
});
try {
eventReceived.await(2, TimeUnit.SECONDS);
disposable.dispose();
disposed.countDown();
} catch (InterruptedException e) {
e.printStackTrace();
}
});
sendSubscriptionConfirmation();
sendWebSocketEvent();
assertTrue(disposed.await(6, TimeUnit.SECONDS));
assertEquals(
"0xd9263f42a87",
actualNotificationRef.get().getParams().getResult().getDifficulty());
assertEquals(
"0xcd0c3e8af590364c09d0fa6a1210faf5",
actualNotificationRef.get().getParams().getSubscription());
} |
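// BehaviorSubject is used above because it replays its most recent event,
// including a terminal onError, to subscribers that arrive late. A minimal
// illustration of that replay property:
BehaviorSubject<Integer> subject = BehaviorSubject.create();
subject.onNext(1);                      // emitted before anyone subscribes
subject.subscribe(System.out::println); // still prints 1: the latest value is replayed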
@Override
public Num calculate(BarSeries series, Position position) {
return calculateProfit(series, position);
} | @Test
public void calculateWithWinningLongPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
Trade.buyAt(3, series), Trade.sellAt(5, series));
// include base percentage
AnalysisCriterion retWithBase = getCriterion();
assertNumEquals(1.10 * 1.05, retWithBase.calculate(series, tradingRecord));
// exclude base percentage
AnalysisCriterion retWithoutBase = getCriterion(false);
assertNumEquals(1.10 * 1.05 - 1, retWithoutBase.calculate(series, tradingRecord));
} |
@Override
public void upgrade() {
if (clusterConfigService.get(V20161216123500_Succeeded.class) != null) {
return;
}
// The default index set must have been created first.
checkState(clusterConfigService.get(DefaultIndexSetCreated.class) != null, "The default index set hasn't been created yet. This is a bug!");
final IndexSetConfig defaultIndexSet = indexSetService.getDefault();
migrateIndexSet(defaultIndexSet, elasticsearchConfiguration.getDefaultIndexTemplateName());
final List<IndexSetConfig> allWithoutDefault = indexSetService.findAll()
.stream()
.filter(indexSetConfig -> !indexSetConfig.equals(defaultIndexSet))
.collect(Collectors.toList());
for (IndexSetConfig indexSetConfig : allWithoutDefault) {
migrateIndexSet(indexSetConfig, indexSetConfig.indexPrefix() + "-template");
}
clusterConfigService.write(V20161216123500_Succeeded.create());
} | @Test
public void upgradeFailsIfDefaultIndexSetHasNotBeenCreated() {
when(clusterConfigService.get(DefaultIndexSetCreated.class)).thenReturn(null);
expectedException.expect(IllegalStateException.class);
expectedException.expectMessage("The default index set hasn't been created yet. This is a bug!");
migration.upgrade();
} |
public void initDefaultMasterKey() {
String defaultMasterKeySpec = Config.default_master_key;
keysLock.writeLock().lock();
try {
if (defaultMasterKeySpec.isEmpty()) {
if (idToKey.size() != 0) {
LOG.error("default_master_key removed in config");
System.exit(1);
}
} else {
EncryptionKey masterKeyFromConfig = EncryptionKey.createFromSpec(defaultMasterKeySpec);
if (idToKey.size() == 0) {
// setup default master key
masterKeyFromConfig.id = DEFAULT_MASTER_KYE_ID;
addKey(masterKeyFromConfig);
LOG.info("create default master key:" + masterKeyFromConfig);
} else {
// check that the master key has not changed
EncryptionKey masterKey = idToKey.get(DEFAULT_MASTER_KYE_ID);
Preconditions.checkState(masterKey.equals(masterKeyFromConfig),
"default_master_key changed meta:%s config:%s", masterKey.toSpec(),
masterKeyFromConfig.toSpec());
}
if (idToKey.size() == 1) {
// setup first KEK
generateNewKEK();
}
}
} catch (Exception e) {
LOG.fatal("init default master key failed, will exit.", e);
System.exit(-1);
} finally {
keysLock.writeLock().unlock();
}
} | @Test
public void testInitDefaultMasterKey() {
new MockUp<System>() {
@Mock
public void exit(int value) {
throw new RuntimeException(String.valueOf(value));
}
};
String oldConfig = Config.default_master_key;
try {
Config.default_master_key = "plain:aes_128:enwSdCUAiCLLx2Bs9E/neQ==";
KeyMgr keyMgr = new KeyMgr();
keyMgr.initDefaultMasterKey();
Assert.assertEquals(2, keyMgr.numKeys());
Config.default_master_key = "plain:aes_128:eCsM28LaDORFTZDUMz3y4g==";
keyMgr.initDefaultMasterKey();
Assert.fail("should throw exception");
} catch (RuntimeException e) {
Assert.assertEquals("-1", e.getMessage());
} finally {
Config.default_master_key = oldConfig;
}
} |
@Override
public String toString() {
return "ResourceConfig{" +
"url=" + url +
", id='" + id + '\'' +
", resourceType=" + resourceType +
'}';
} | @Test
public void when_addNonexistentResourceWithPathAndId_then_throwsException() {
// Given
String id = "exist";
String path = Paths.get("/i/do/not/" + id).toString();
// Then
expectedException.expect(JetException.class);
expectedException.expectMessage("Not an existing, readable file: " + path);
// When
config.addClasspathResource(path, id);
} |
@Override
public byte[] get(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
} | @Test
public void testGeo() {
RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
redisTemplate.afterPropertiesSet();
String key = "test_geo_key";
Point point = new Point(116.401001, 40.119499);
redisTemplate.opsForGeo().add(key, point, "a");
point = new Point(111.545998, 36.133499);
redisTemplate.opsForGeo().add(key, point, "b");
point = new Point(111.483002, 36.030998);
redisTemplate.opsForGeo().add(key, point, "c");
Circle within = new Circle(116.401001, 40.119499, 80000);
RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates();
GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args);
assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a");
} |
public InnerCNode getTree() throws CodegenRuntimeException {
try {
if (root == null) parse();
} catch (DefParserException | IOException e) {
throw new CodegenRuntimeException("Error parsing or reading config definition. " + e.getMessage(), e);
}
return root;
} | @Test
void testEnum() {
StringBuilder sb = createDefTemplate();
sb.append("enum1 enum {A,B} default=A\n");
sb.append("enum2 enum {A, B} default=A\n");
sb.append("enum3 enum { A, B} default=A\n");
sb.append("enum4 enum { A, B } default=A\n");
sb.append("enum5 enum { A , B } default=A\n");
sb.append("enum6 enum {A , B } default=A\n");
sb.append("enumVal enum { FOO, BAR, FOOBAR }\n");
DefParser parser = createParser(sb.toString());
try {
    parser.getTree();
} catch (Exception e) {
    fail("Parsing should not have thrown: " + e.getMessage());
}
CNode root = parser.getTree();
LeafCNode node = (LeafCNode) root.getChild("enum1");
assertNotNull(node);
assertEquals("A", node.getDefaultValue().getStringRepresentation());
} |
public Flowable<String> getKeys() {
return getKeysByPattern(null);
} | @Test
public void testMassDelete() {
RBucketRx<String> bucket = redisson.getBucket("test");
sync(bucket.set("someValue"));
RMapRx<String, String> map = redisson.getMap("map2");
sync(map.fastPut("1", "2"));
Assertions.assertEquals(2, sync(redisson.getKeys().delete("test", "map2")).intValue());
Assertions.assertEquals(0, sync(redisson.getKeys().delete("test", "map2")).intValue());
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void setMyDescription() {
BaseResponse response = bot.execute(new SetMyDescription().description("desc").languageCode("en"));
assertTrue(response.isOk());
GetMyDescriptionResponse descResponse = bot.execute(new GetMyDescription().languageCode("en"));
assertTrue(descResponse.isOk());
BotDescription desc = descResponse.botDescription();
assertNotNull(desc);
assertEquals("desc", desc.description());
} |
public static <T> Supplier<T> recover(Supplier<T> supplier,
Predicate<T> resultPredicate, UnaryOperator<T> resultHandler) {
return () -> {
T result = supplier.get();
if(resultPredicate.test(result)){
return resultHandler.apply(result);
}
return result;
};
} | @Test
public void shouldRecoverSupplierFromException() {
Supplier<String> supplier = () -> {
throw new RuntimeException("BAM!");
};
Supplier<String> supplierWithRecovery = SupplierUtils.recover(supplier, (ex) -> "Bla");
String result = supplierWithRecovery.get();
assertThat(result).isEqualTo("Bla");
} |
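// The test above exercises the exception-recovery overload; the
// result-predicate overload shown in the focal method is used like this:
Supplier<String> supplier = () -> "FAILURE";
Supplier<String> recovered =
    SupplierUtils.recover(supplier, "FAILURE"::equals, result -> "fallback");
recovered.get(); // "fallback" -- the predicate matched, so the handler ran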
@VisibleForTesting
WxMpService getWxMpService(Integer userType) {
// Step 1: query the DB config entry and obtain the matching WxMpService instance
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMpServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: if no DB config entry exists, fall back to the WxMpService configured via application-*.yaml
return wxMpService;
} | @Test
public void testGetWxMpService_clientEnable() {
// Prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// Mock data
SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())
.setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MP.getType()));
socialClientMapper.insert(client);
// Mock methods
WxMpProperties.ConfigStorage configStorage = mock(WxMpProperties.ConfigStorage.class);
when(wxMpProperties.getConfigStorage()).thenReturn(configStorage);
// Invoke
WxMpService result = socialClientService.getWxMpService(userType);
// Assert
assertNotSame(wxMpService, result);
assertEquals(client.getClientId(), result.getWxMpConfigStorage().getAppId());
assertEquals(client.getClientSecret(), result.getWxMpConfigStorage().getSecret());
} |
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
final SchemaAndValue schemaAndValue = converter.toConnectData(topic, bytes);
final Object val = translator.toKsqlRow(schemaAndValue.schema(), schemaAndValue.value());
return SerdeUtils.castToTargetType(val, targetType);
} catch (final Exception e) {
throw new SerializationException(
"Error deserializing message from topic: " + topic, e);
}
} | @Test
public void shouldDeserializeRecordsCorrectly() {
// When:
final String deserialized = connectDeserializer.deserialize(TOPIC, BYTES);
// Then:
verify(converter, times(1)).toConnectData(TOPIC, BYTES);
verify(dataTranslator, times(1)).toKsqlRow(schema, value);
assertThat(deserialized, is("some-data"));
} |
public void mix() {
LOGGER.info("Mixing the stew we find: {} potatoes, {} carrots, {} meat and {} peppers",
numPotatoes, numCarrots, numMeat, numPeppers);
} | @Test
void testMix() {
final var stew = new ImmutableStew(1, 2, 3, 4);
final var expectedMessage = "Mixing the immutable stew we find: 1 potatoes, "
+ "2 carrots, 3 meat and 4 peppers";
for (var i = 0; i < 20; i++) {
stew.mix();
assertEquals(expectedMessage, appender.getLastMessage());
}
assertEquals(20, appender.getLogSize());
} |
@Nonnull
static String constructLookup(@Nonnull final String service, @Nonnull final String proto, @Nonnull final String name)
{
String lookup = "";
if ( !service.startsWith( "_" ) )
{
lookup += "_";
}
lookup += service;
if ( !lookup.endsWith( "." ) )
{
lookup += ".";
}
if ( !proto.startsWith( "_" ) )
{
lookup += "_";
}
lookup += proto;
if ( !lookup.endsWith( "." ) )
{
lookup += ".";
}
lookup += name;
if ( !lookup.endsWith( "." ) ) {
lookup += ".";
}
return lookup.toLowerCase();
} | @Test
public void testConstructLookup() throws Exception
{
// Setup test fixture.
final String service = "xmpp-client";
final String protocol = "tcp";
final String name = "igniterealtime.org";
// Execute system under test.
final String result = DNSUtil.constructLookup(service, protocol, name);
// Verify results.
assertEquals("_xmpp-client._tcp.igniterealtime.org.", result);
} |
@Override
public Iterable<ConnectorFactory> getConnectorFactories()
{
return ImmutableList.of(new RedisConnectorFactory(tableDescriptionSupplier));
} | @Test
public void testStartup()
{
RedisPlugin plugin = new RedisPlugin();
ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories());
assertInstanceOf(factory, RedisConnectorFactory.class);
Connector c = factory.create(
"test-connector",
ImmutableMap.<String, String>builder()
.put("redis.table-names", "test")
.put("redis.nodes", "localhost:6379")
.build(),
new TestingConnectorContext());
assertNotNull(c);
} |
public Builder asBuilder() {
return new Builder(columns);
} | @Test
public void shouldDuplicateViaAsBuilder() {
// Given:
final Builder builder = SOME_SCHEMA.asBuilder();
// When:
final LogicalSchema clone = builder.build();
// Then:
assertThat(clone, is(SOME_SCHEMA));
} |
@CanIgnoreReturnValue
public <K1 extends K, V1 extends V> Caffeine<K1, V1> evictionListener(
RemovalListener<? super K1, ? super V1> evictionListener) {
requireState(this.evictionListener == null,
"eviction listener was already set to %s", this.evictionListener);
@SuppressWarnings("unchecked")
Caffeine<K1, V1> self = (Caffeine<K1, V1>) this;
self.evictionListener = requireNonNull(evictionListener);
return self;
} | @Test
public void evictionListener() {
RemovalListener<Object, Object> removalListener = (k, v, c) -> {};
var builder = Caffeine.newBuilder().evictionListener(removalListener);
assertThat(builder.evictionListener).isSameInstanceAs(removalListener);
assertThat(builder.build()).isNotNull();
} |
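// Unlike a removalListener, the evictionListener fires only for automatic
// removals (size- or time-based eviction), not for explicit invalidation.
// A small sketch of wiring it into a bounded cache:
Cache<String, String> cache = Caffeine.newBuilder()
    .maximumSize(1)
    .evictionListener((key, value, cause) ->
        System.out.println("evicted " + key + " because " + cause))
    .build();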
public synchronized static MetricRegistry setDefault(String name) {
final MetricRegistry registry = getOrCreate(name);
return setDefault(name, registry);
} | @Test
public void errorsWhenDefaultAlreadySet() throws Exception {
try {
    SharedMetricRegistries.setDefault("foobah");
    SharedMetricRegistries.setDefault("borg");
    fail("Expected an IllegalStateException to be thrown");
} catch (final Exception e) {
assertThat(e).isInstanceOf(IllegalStateException.class);
assertThat(e.getMessage()).isEqualTo("Default metric registry name is already set.");
}
} |
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
if (message == null) {
return null;
}
long messageFormat = 0;
Header header = null;
Properties properties = null;
Map<Symbol, Object> daMap = null;
Map<Symbol, Object> maMap = null;
Map<String,Object> apMap = null;
Map<Object, Object> footerMap = null;
Section body = convertBody(message);
if (message.isPersistent()) {
if (header == null) {
header = new Header();
}
header.setDurable(true);
}
byte priority = message.getPriority();
if (priority != Message.DEFAULT_PRIORITY) {
if (header == null) {
header = new Header();
}
header.setPriority(UnsignedByte.valueOf(priority));
}
String type = message.getType();
if (type != null) {
if (properties == null) {
properties = new Properties();
}
properties.setSubject(type);
}
MessageId messageId = message.getMessageId();
if (messageId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setMessageId(getOriginalMessageId(message));
}
ActiveMQDestination destination = message.getDestination();
if (destination != null) {
if (properties == null) {
properties = new Properties();
}
properties.setTo(destination.getQualifiedName());
if (maMap == null) {
maMap = new HashMap<>();
}
maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
}
ActiveMQDestination replyTo = message.getReplyTo();
if (replyTo != null) {
if (properties == null) {
properties = new Properties();
}
properties.setReplyTo(replyTo.getQualifiedName());
if (maMap == null) {
maMap = new HashMap<>();
}
maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
}
String correlationId = message.getCorrelationId();
if (correlationId != null) {
if (properties == null) {
properties = new Properties();
}
try {
properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
} catch (AmqpProtocolException e) {
properties.setCorrelationId(correlationId);
}
}
long expiration = message.getExpiration();
if (expiration != 0) {
long ttl = expiration - System.currentTimeMillis();
if (ttl < 0) {
ttl = 1;
}
if (header == null) {
header = new Header();
}
header.setTtl(new UnsignedInteger((int) ttl));
if (properties == null) {
properties = new Properties();
}
properties.setAbsoluteExpiryTime(new Date(expiration));
}
long timeStamp = message.getTimestamp();
if (timeStamp != 0) {
if (properties == null) {
properties = new Properties();
}
properties.setCreationTime(new Date(timeStamp));
}
// JMSX Message Properties
int deliveryCount = message.getRedeliveryCounter();
if (deliveryCount > 0) {
if (header == null) {
header = new Header();
}
header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
}
String userId = message.getUserID();
if (userId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
}
String groupId = message.getGroupID();
if (groupId != null) {
if (properties == null) {
properties = new Properties();
}
properties.setGroupId(groupId);
}
int groupSequence = message.getGroupSequence();
if (groupSequence > 0) {
if (properties == null) {
properties = new Properties();
}
properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
}
final Map<String, Object> entries;
try {
entries = message.getProperties();
} catch (IOException e) {
throw JMSExceptionSupport.create(e);
}
for (Map.Entry<String, Object> entry : entries.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
if (key.startsWith(JMS_AMQP_PREFIX)) {
if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
// skip transformer appended properties
continue;
} else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
// skip transformer appended properties
continue;
} else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
continue;
} else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
if (header == null) {
header = new Header();
}
continue;
} else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
continue;
} else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (maMap == null) {
maMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
maMap.put(Symbol.valueOf(name), value);
continue;
} else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
if (header == null) {
header = new Header();
}
header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
continue;
} else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
continue;
} else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
continue;
} else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
if (properties == null) {
properties = new Properties();
}
properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
continue;
} else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (daMap == null) {
daMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
daMap.put(Symbol.valueOf(name), value);
continue;
} else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
if (footerMap == null) {
footerMap = new HashMap<>();
}
String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
footerMap.put(Symbol.valueOf(name), value);
continue;
}
} else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
// strip off the scheduled message properties
continue;
}
// The property didn't map into any other slot so we store it in the
// Application Properties section of the message.
if (apMap == null) {
apMap = new HashMap<>();
}
apMap.put(key, value);
int messageType = message.getDataStructureType();
if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
// Type of command to recognize advisory message
Object data = message.getDataStructure();
if(data != null) {
apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
}
}
}
final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
encoder.setByteBuffer(buffer);
if (header != null) {
encoder.writeObject(header);
}
if (daMap != null) {
encoder.writeObject(new DeliveryAnnotations(daMap));
}
if (maMap != null) {
encoder.writeObject(new MessageAnnotations(maMap));
}
if (properties != null) {
encoder.writeObject(properties);
}
if (apMap != null) {
encoder.writeObject(new ApplicationProperties(apMap));
}
if (body != null) {
encoder.writeObject(body);
}
if (footerMap != null) {
encoder.writeObject(new Footer(footerMap));
}
return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
} | @Test
public void testConvertTextMessageContentNotStoredCreatesAmqpValueStringBody() throws Exception {
String contentString = "myTextMessageContent";
ActiveMQTextMessage outbound = createTextMessage(contentString);
outbound.onSend();
JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
EncodedMessage encoded = transformer.transform(outbound);
assertNotNull(encoded);
Message amqp = encoded.decode();
assertNotNull(amqp.getBody());
assertTrue(amqp.getBody() instanceof AmqpValue);
assertEquals(contentString, ((AmqpValue) amqp.getBody()).getValue());
} |
public static MetricsMode fromString(String mode) {
if ("none".equalsIgnoreCase(mode)) {
return None.get();
} else if ("counts".equalsIgnoreCase(mode)) {
return Counts.get();
} else if ("full".equalsIgnoreCase(mode)) {
return Full.get();
}
Matcher truncateMatcher = TRUNCATE.matcher(mode.toLowerCase(Locale.ENGLISH));
if (truncateMatcher.matches()) {
int length = Integer.parseInt(truncateMatcher.group(1));
return Truncate.withLength(length);
}
throw new IllegalArgumentException("Invalid metrics mode: " + mode);
} | @TestTemplate
public void testInvalidTruncationLength() {
assertThatThrownBy(() -> MetricsModes.fromString("truncate(0)"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Truncate length should be positive");
} |
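// The TRUNCATE pattern referenced above is not shown; it presumably matches
// strings such as "truncate(16)". A compatible definition (an assumption,
// not necessarily the original constant):
private static final Pattern TRUNCATE = Pattern.compile("truncate\\((\\d+)\\)");
// "truncate(16)" -> Truncate.withLength(16)
// "truncate(0)"  -> withLength rejects the non-positive length, per the test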
public static byte[] decodeHex(String hexStr) {
return decodeHex((CharSequence) hexStr);
} | @Test
public void decodeHexTest(){
final String s = HexUtil.encodeHexStr("6");
final String s1 = HexUtil.decodeHexStr(s);
assertEquals("6", s1);
} |
@Override
public Map<String, Map<String, Double>> spellcheck(String indexName, String query, SpellcheckOptions options) {
return commandExecutor.get(spellcheckAsync(indexName, query, options));
} | @Test
public void testSpellcheck() {
RSearch s = redisson.getSearch();
s.createIndex("idx", IndexOptions.defaults()
.on(IndexType.HASH)
.prefix(Arrays.asList("doc:")),
FieldIndex.text("t1"),
FieldIndex.text("t2"));
assertThat(s.addDict("name", "hockey", "stik")).isEqualTo(2);
List<String> tt = s.dumpDict("name");
assertThat(tt).containsExactly("hockey", "stik");
Map<String, Map<String, Double>> res = s.spellcheck("idx", "Hocke sti", SpellcheckOptions.defaults()
.includedTerms("name"));
assertThat(res.get("hocke")).containsExactlyEntriesOf(Collections.singletonMap("hockey", (double) 0));
assertThat(res.get("sti")).containsExactlyEntriesOf(Collections.singletonMap("stik", (double) 0));
Map<String, Map<String, Double>> emptyRes = s.spellcheck("idx", "Hocke sti", SpellcheckOptions.defaults());
assertThat(emptyRes.get("hocke")).isEmpty();
assertThat(emptyRes.get("sti")).isEmpty();
} |
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
return new IcebergEnumeratorState(
enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
} | @Test
public void testDiscoverWhenReaderRegistered() throws Exception {
TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
new TestingSplitEnumeratorContext<>(4);
ScanContext scanContext =
ScanContext.builder()
.streaming(true)
.startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
.build();
ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
ContinuousIcebergEnumerator enumerator =
createEnumerator(enumeratorContext, scanContext, splitPlanner);
// register one reader, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
// make one split available and trigger the periodic discovery
List<IcebergSourceSplit> splits =
SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
splitPlanner.addSplits(splits);
enumeratorContext.triggerAllActions();
assertThat(enumerator.snapshotState(1).pendingSplits()).isEmpty();
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.contains(splits.get(0));
} |
public Map<String, IndexMeta> getAllIndexes() {
return allIndexes;
} | @Test
public void testGetAllIndexes() {
Map<String, IndexMeta> allIndexes = tableMeta.getAllIndexes();
assertEquals(1, allIndexes.size(), "Should return all indexes added");
assertTrue(allIndexes.containsKey("primary"), "Should contain index 'primary'");
} |
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
} | @Test
void testGetList1() {
assertThrows(IndexOutOfBoundsException.class, () -> {
CollectionUtils.get(Collections.emptyList(), -1);
});
} |
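// get() provides positional access over containers that normally lack it.
// A quick illustration across a Map and an Iterator:
Map<String, Integer> map = new LinkedHashMap<>();
map.put("a", 1);
map.put("b", 2);
Map.Entry<?, ?> second = (Map.Entry<?, ?>) CollectionUtils.get(map, 1);   // b=2
Object third = CollectionUtils.get(Arrays.asList(1, 2, 3).iterator(), 2); // 3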
public void subscribeToEvent(
ResultPartitionID partitionId,
EventListener<TaskEvent> eventListener,
Class<? extends TaskEvent> eventType) {
checkNotNull(partitionId);
checkNotNull(eventListener);
checkNotNull(eventType);
TaskEventHandler taskEventHandler;
synchronized (registeredHandlers) {
taskEventHandler = registeredHandlers.get(partitionId);
}
if (taskEventHandler == null) {
throw new IllegalStateException(
"Partition " + partitionId + " not registered at task event dispatcher.");
}
taskEventHandler.subscribe(eventListener, eventType);
} | @Test
void subscribeToEventNotRegistered() {
TaskEventDispatcher ted = new TaskEventDispatcher();
assertThatThrownBy(
() ->
ted.subscribeToEvent(
new ResultPartitionID(),
new ZeroShotEventListener(),
TaskEvent.class))
.hasMessageContaining("not registered at task event dispatcher")
.isInstanceOf(IllegalStateException.class);
} |
public static String removeCharacter(String str, char charToRemove) {
if (str == null || str.indexOf(charToRemove) == -1) {
return str;
}
char[] chars = str.toCharArray();
int pos = 0;
for (int i = 0; i < chars.length; i++) {
if (chars[i] != charToRemove) {
chars[pos++] = chars[i];
}
}
return new String(chars, 0, pos);
} | @Test
void when_removingCharactersFromString_then_properValue() {
assertEquals("", StringUtil.removeCharacter("-------", '-'));
assertEquals("-------", StringUtil.removeCharacter("-------", '0'));
assertEquals("-------", StringUtil.removeCharacter("-0-0-0-0-0-0-", '0'));
assertEquals("-------", StringUtil.removeCharacter("-00000-0-0000-0000-0-0-", '0'));
} |
@Override
public AttributedList<Path> search(final Path workdir, final Filter<Path> regex, final ListProgressListener listener) throws BackgroundException {
try {
return new DriveSearchListService(session, fileid, regex.toString()).list(workdir, listener);
}
catch(NotfoundException e) {
return AttributedList.emptyList();
}
} | @Test
public void testSearchFolder() throws Exception {
final String name = new AlphanumericRandomStringService().random();
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path workdir = new DriveDirectoryFeature(session, fileid).mkdir(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path file = new DriveTouchFeature(session, fileid).touch(new Path(workdir, name, EnumSet.of(Path.Type.file)), new TransferStatus());
final DriveSearchFeature feature = new DriveSearchFeature(session, fileid);
assertTrue(feature.search(workdir, new SearchFilter(name), new DisabledListProgressListener()).contains(file));
assertTrue(feature.search(workdir, new SearchFilter(StringUtils.substring(name, 2)), new DisabledListProgressListener()).contains(file));
final AttributedList<Path> result = feature.search(workdir, new SearchFilter(StringUtils.substring(name, 0, name.length() - 2)), new DisabledListProgressListener());
assertTrue(result.contains(file));
assertEquals(workdir, result.get(result.indexOf(file)).getParent());
final Path subdir = new DriveDirectoryFeature(session, fileid).mkdir(new Path(workdir, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertFalse(feature.search(subdir, new SearchFilter(name), new DisabledListProgressListener()).contains(file));
new DriveDeleteFeature(session, fileid).delete(Arrays.asList(file, subdir, workdir), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
static int parseMajorJavaVersion(String javaVersion) {
int version = parseDotted(javaVersion);
if (version == -1) {
version = extractBeginningInt(javaVersion);
}
if (version == -1) {
return 6; // Choose minimum supported JDK version as default
}
return version;
} | @Test
public void testUnknownVersionFormat() {
assertThat(JavaVersion.parseMajorJavaVersion("Java9")).isEqualTo(6); // unknown format
} |
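// parseDotted and extractBeginningInt are not shown above. A rough sketch of
// the behavior they would need (assumptions, not the original code): parse
// "1.8.0_131"-style versions first, then fall back to a leading integer as
// in "9-ea".
private static int parseDotted(String javaVersion) {
    try {
        String[] parts = javaVersion.split("[._]", 3);
        int firstVer = Integer.parseInt(parts[0]);
        // Legacy "1.x" versions report the second component, e.g. "1.8" -> 8.
        return (firstVer == 1 && parts.length > 1) ? Integer.parseInt(parts[1]) : firstVer;
    } catch (NumberFormatException e) {
        return -1;
    }
}

private static int extractBeginningInt(String javaVersion) {
    int i = 0;
    while (i < javaVersion.length() && Character.isDigit(javaVersion.charAt(i))) {
        i++;
    }
    try {
        return i == 0 ? -1 : Integer.parseInt(javaVersion.substring(0, i));
    } catch (NumberFormatException e) {
        return -1;
    }
}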
public void setRemoteIp(String remoteIp) {
this.remoteIp = remoteIp;
} | @Test
void testSetRemoteIp() {
assertNull(addressContext.getRemoteIp());
addressContext.setRemoteIp("127.0.0.1");
assertEquals("127.0.0.1", addressContext.getRemoteIp());
} |
@Override
public PageResult<JobLogDO> getJobLogPage(JobLogPageReqVO pageReqVO) {
return jobLogMapper.selectPage(pageReqVO);
} | @Test
public void testGetJobPage() {
// Mock data
JobLogDO dbJobLog = randomPojo(JobLogDO.class, o -> {
o.setExecuteIndex(1);
o.setHandlerName("handlerName 单元测试");
o.setStatus(JobLogStatusEnum.SUCCESS.getStatus());
o.setBeginTime(buildTime(2021, 1, 8));
o.setEndTime(buildTime(2021, 1, 8));
});
jobLogMapper.insert(dbJobLog);
// insert a record whose jobId does not match
jobLogMapper.insert(cloneIgnoreId(dbJobLog, o -> o.setJobId(randomLongId())));
// insert a record whose handlerName does not match
jobLogMapper.insert(cloneIgnoreId(dbJobLog, o -> o.setHandlerName(randomString())));
// insert a record whose beginTime does not match
jobLogMapper.insert(cloneIgnoreId(dbJobLog, o -> o.setBeginTime(buildTime(2021, 1, 7))));
// insert a record whose endTime does not match
jobLogMapper.insert(cloneIgnoreId(dbJobLog, o -> o.setEndTime(buildTime(2021, 1, 9))));
// insert a record whose status does not match
jobLogMapper.insert(cloneIgnoreId(dbJobLog, o -> o.setStatus(JobLogStatusEnum.FAILURE.getStatus())));
// Prepare parameters
JobLogPageReqVO reqVo = new JobLogPageReqVO();
reqVo.setJobId(dbJobLog.getJobId());
reqVo.setHandlerName("单元");
reqVo.setBeginTime(dbJobLog.getBeginTime());
reqVo.setEndTime(dbJobLog.getEndTime());
reqVo.setStatus(JobLogStatusEnum.SUCCESS.getStatus());
// Invoke
PageResult<JobLogDO> pageResult = jobLogService.getJobLogPage(reqVo);
// Assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbJobLog, pageResult.getList().get(0));
} |
public Class<?> getSerializedClass() {
return serializedClass;
} | @Test
void testConstructorWithCause() {
NacosSerializationException exception = new NacosSerializationException(new RuntimeException("test"));
assertEquals(Constants.Exception.SERIALIZE_ERROR_CODE, exception.getErrCode());
assertEquals("errCode: 100, errMsg: Nacos serialize failed. ", exception.getMessage());
assertNull(exception.getSerializedClass());
} |
@Override
public Path createTempFile() throws IOException {
return createTempFile(null, null);
} | @Test
void shouldCreatedTempFile() throws IOException {
String workingDirId = IdUtils.create();
TestWorkingDir workingDirectory = new TestWorkingDir(workingDirId, new LocalWorkingDir(Path.of("/tmp/sub/dir/tmp/"), workingDirId));
Path tempFile = workingDirectory.createTempFile();
assertThat(tempFile.toFile().getAbsolutePath().startsWith("/tmp/sub/dir/tmp/"), is(true));
assertThat(workingDirectory.getAllCreatedTempFiles().size(), is(1));
} |
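// Convenience overload that compiles an XLS spreadsheet against a rule template,
// reading decision-table data from the given start row and column.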
public String compile(final String xls,
final String template,
int startRow,
int startCol) {
return compile( xls,
template,
InputType.XLS,
startRow,
startCol );
} | @Test
public void testLoadBasicWithExtraCells() {
final String drl = converter.compile("/data/BasicWorkbook.drl.xls",
"/templates/test_template4.drl",
InputType.XLS,
10,
2);
assertThat(drl).isNotNull();
assertThat(drl).contains("This is a function block");
assertThat(drl).contains("global Class1 obj1;");
assertThat(drl).contains("myObject.getColour().equals(blue)");
assertThat(drl).contains("Foo(myObject.getColour().equals(red), myObject.size() > 12\")");
assertThat(drl).contains("b: Bar()\n eval(myObject.size() < 3)");
assertThat(drl).contains("b: Bar()\n eval(myObject.size() < 9)");
assertThat(drl.indexOf("Foo(myObject.getColour().equals(red), myObject.size() > 1)") < drl.indexOf("b: Bar()\n eval(myObject.size() < 3)")).isTrue();
} |
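// Looks up every partition tracked for the producing job and returns its meta info;
// a tracked partition without a registered PartitionInfo is a bug, hence the null check.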
@Override
public Collection<TaskExecutorPartitionInfo> getTrackedPartitionsFor(JobID producingJobId) {
return partitionTable.getTrackedPartitions(producingJobId).stream()
.map(
partitionId -> {
final PartitionInfo<JobID, TaskExecutorPartitionInfo> partitionInfo =
partitionInfos.get(partitionId);
Preconditions.checkNotNull(partitionInfo);
return partitionInfo.getMetaInfo();
})
.collect(toList());
} | @Test
void testGetTrackedPartitionsFor() {
final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
final JobID jobId = new JobID();
final ResultPartitionID resultPartitionId = new ResultPartitionID();
final TaskExecutorPartitionTracker partitionTracker =
new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
TaskExecutorPartitionInfo partitionInfo =
new TaskExecutorPartitionInfo(
new TestingShuffleDescriptor(resultPartitionId),
new IntermediateDataSetID(),
1);
partitionTracker.startTrackingPartition(jobId, partitionInfo);
Collection<TaskExecutorPartitionInfo> partitions =
partitionTracker.getTrackedPartitionsFor(jobId);
assertThat(partitions).hasSize(1);
assertThat(partitions.iterator().next()).isEqualTo(partitionInfo);
} |
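// Rejects the request unless the logger name resolves to a loadable class and the
// level string maps to a valid log4j Level; only then is the new level applied.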
public void setLoggerLevel(LoggerRequest loggerRequest) throws BadRequestException {
try {
Class.forName(loggerRequest.getName());
} catch (Throwable ignore) {
throw new BadRequestException(
"The class of '" + loggerRequest.getName() + "' doesn't exist");
}
org.apache.log4j.Logger logger = LogManager.getLogger(loggerRequest.getName());
if (null == logger) {
throw new BadRequestException("The name of the logger is wrong");
}
org.apache.log4j.Level level = org.apache.log4j.Level.toLevel(loggerRequest.getLevel(), null);
if (null == level) {
throw new BadRequestException("The level of the logger is wrong");
}
logger.setLevel(level);
} | @Test
void testSetLoggerLevel() {
AdminService adminService = new AdminService();
String testLoggerName = "test";
org.apache.log4j.Logger logger = adminService.getLogger(testLoggerName);
org.apache.log4j.Level level = logger.getLevel();
boolean setInfo = false;
if (org.apache.log4j.Level.INFO == level) {
// if the current level is INFO, set it to DEBUG to check that the change takes effect
logger.setLevel(org.apache.log4j.Level.DEBUG);
} else {
logger.setLevel(org.apache.log4j.Level.INFO);
setInfo = true;
}
logger = adminService.getLogger(testLoggerName);
assertTrue((setInfo && org.apache.log4j.Level.INFO == logger.getLevel())
|| (!setInfo && Level.DEBUG == logger.getLevel()), "Level of logger should be changed");
} |
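// Maps 404/204 responses to an empty JSONObject/JSONArray (or null for String),
// returns null for absent or empty bodies, and unwraps IOExceptions that the
// org.json parser reports as JSONException causes.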
@Override
public Object decode(Response response, Type type) throws IOException, DecodeException {
if (response.status() == 404 || response.status() == 204)
if (JSONObject.class.isAssignableFrom((Class<?>) type))
return new JSONObject();
else if (JSONArray.class.isAssignableFrom((Class<?>) type))
return new JSONArray();
else if (String.class.equals(type))
return null;
else
throw new DecodeException(response.status(),
format("%s is not a type supported by this decoder.", type), response.request());
if (response.body() == null)
return null;
try (Reader reader = response.body().asReader(response.charset())) {
Reader bodyReader = (reader.markSupported()) ? reader : new BufferedReader(reader);
bodyReader.mark(1);
if (bodyReader.read() == -1) {
return null; // Empty body
}
bodyReader.reset();
return decodeBody(response, type, bodyReader);
} catch (JSONException jsonException) {
if (jsonException.getCause() != null && jsonException.getCause() instanceof IOException) {
throw (IOException) jsonException.getCause();
}
throw new DecodeException(response.status(), jsonException.getMessage(), response.request(),
jsonException);
}
} | @Test
void decodesObject() throws IOException {
String json = "{\"a\":\"b\",\"c\":1}";
Response response = Response.builder()
.status(200)
.reason("OK")
.headers(Collections.emptyMap())
.body(json, UTF_8)
.request(request)
.build();
assertThat(jsonObject.similar(new JsonDecoder().decode(response, JSONObject.class))).isTrue();
} |
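// Selects target invokers either from request attachments (when the router config
// enables request routing) or from configured flow rules, then delegates the
// filtered list to the parent handler.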
@Override
public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap,
String serviceInterface) {
if (!shouldHandle(invokers)) {
return invokers;
}
List<Object> targetInvokers;
if (routerConfig.isUseRequestRouter()) {
targetInvokers = getTargetInvokersByRequest(targetService, invokers, invocation);
} else {
targetInvokers = getTargetInvokersByRules(invokers, invocation, queryMap, targetService, serviceInterface);
}
return super.handle(targetService, targetInvokers, invocation, queryMap, serviceInterface);
} | @Test
public void testGetTargetInvokersByRequestWithMismatch() {
config.setUseRequestRouter(true);
config.setRequestTags(Arrays.asList("foo", "bar", "version"));
List<Object> invokers = new ArrayList<>();
ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0",
Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "foo", "bar1"));
invokers.add(invoker1);
ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1",
Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "bar", "bar2"));
invokers.add(invoker2);
ApacheInvoker<Object> invoker3 = new ApacheInvoker<>("1.0.1");
invokers.add(invoker3);
Invocation invocation = new ApacheInvocation();
Map<String, String> queryMap = new HashMap<>();
queryMap.put("side", "consumer");
queryMap.put("group", "fooGroup");
queryMap.put("version", "0.0.1");
queryMap.put("interface", "io.sermant.foo.FooTest");
DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo");
// When no instance matches bar=bar1, match the instances without a bar label
invocation.setAttachment("bar", "bar1");
List<Object> targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
invokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(2, targetInvokers.size());
Assert.assertFalse(targetInvokers.contains(invoker2));
// When no instance matches bar=bar1, prefer instances without a bar label;
// if every instance carries a bar label, return an empty list
List<Object> sameInvokers = new ArrayList<>();
ApacheInvoker<Object> sameInvoker1 = new ApacheInvoker<>("1.0.0",
Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "bar", "bar3"));
sameInvokers.add(sameInvoker1);
ApacheInvoker<Object> sameInvoker2 = new ApacheInvoker<>("1.0.1",
Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "bar", "bar2"));
sameInvokers.add(sameInvoker2);
invocation.getObjectAttachments().clear();
invocation.setAttachment("bar", "bar1");
targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
sameInvokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(0, targetInvokers.size());
// When no instance matches version 1.0.3, return all instances
invocation.getObjectAttachments().clear();
invocation.setAttachment("version", "1.0.3");
targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
invokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(3, targetInvokers.size());
// Match unlabeled instances when no attachment is passed in
invocation.getObjectAttachments().clear();
targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
invokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(1, targetInvokers.size());
Assert.assertEquals(invoker3, targetInvokers.get(0));
// When no attachment is passed in, priority is given to matching unlabeled instances. When there are no
// unlabeled instances, all instances are returned
invocation.getObjectAttachments().clear();
targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
sameInvokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(sameInvokers, targetInvokers);
} |
public static void setUserInfo(String username, String userRole) {
USER_THREAD_LOCAL.set(new User(username, userRole));
} | @Test
public void testSetUserInfo() {
UserContext.setUserInfo(USERNAME, USER_ROLE);
ThreadLocal<UserContext.User> userThreadLocal = (ThreadLocal<UserContext.User>) ReflectUtil.getFieldValue(UserContext.class, "USER_THREAD_LOCAL");
Assert.notNull(userThreadLocal.get());
} |