focal_method | test_case |
---|---|
@Override
public <T> T select(final List<T> elements) {
if (elements == null) {
throw new NullPointerException("elements");
}
final int size = elements.size();
if (size == 1) {
return elements.get(0);
}
final int roundRobinIndex = indexUpdater.getAndIncrement(this) & Integer.MAX_VALUE;
return elements.get(roundRobinIndex % size);
} | @Test
public void selectTest() {
List<Integer> elements = Lists.newArrayList();
elements.add(0);
elements.add(1);
elements.add(2);
elements.add(3);
elements.add(4);
RoundRobinLoadBalancer balancer1 = RoundRobinLoadBalancer.getInstance(1);
Assert.assertEquals(0, balancer1.select(elements).intValue());
Assert.assertEquals(1, balancer1.select(elements).intValue());
RoundRobinLoadBalancer balancer2 = RoundRobinLoadBalancer.getInstance(2);
Assert.assertEquals(0, balancer2.select(elements).intValue());
Assert.assertEquals(1, balancer2.select(elements).intValue());
Assert.assertEquals(2, balancer2.select(elements).intValue());
Assert.assertEquals(3, balancer2.select(elements).intValue());
Assert.assertEquals(4, balancer2.select(elements).intValue());
Assert.assertEquals(0, balancer2.select(elements).intValue());
} |
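A note on the index arithmetic in select above: getAndIncrement eventually wraps past Integer.MAX_VALUE into negative values, and masking with Integer.MAX_VALUE clears the sign bit so the modulo result stays non-negative. A minimal standalone sketch of the same idiom (hypothetical names, not the balancer's actual API):

import java.util.concurrent.atomic.AtomicInteger;

class RoundRobinIndex {
    private final AtomicInteger counter = new AtomicInteger();

    // Returns a non-negative index in [0, size), even after the counter overflows:
    // (Integer.MIN_VALUE & Integer.MAX_VALUE) == 0, so the sign bit is always cleared.
    int next(int size) {
        return (counter.getAndIncrement() & Integer.MAX_VALUE) % size;
    }
}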
public static boolean isValidAddress(String address) {
return ADDRESS_PATTERN.matcher(address).matches();
} | @Test
void testIsValidAddress() {
assertFalse(NetUtils.isValidV4Address((InetAddress) null));
InetAddress address = mock(InetAddress.class);
when(address.isLoopbackAddress()).thenReturn(true);
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("localhost");
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("0.0.0.0");
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("127.0.0.1");
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("1.2.3.4");
assertTrue(NetUtils.isValidV4Address(address));
} |
boolean isQueueEmpty() {
return queue == null || queue.isEmpty();
} | @Test
public void testFlowAutoReadOn() throws Exception {
final CountDownLatch latch = new CountDownLatch(3);
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
ChannelInboundHandlerAdapter handler = new ChannelDuplexHandler() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
peerRef.exchange(ctx.channel(), 1L, SECONDS);
super.channelActive(ctx);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ReferenceCountUtil.release(msg);
latch.countDown();
}
};
final FlowControlHandler flow = new FlowControlHandler();
Channel server = newServer(true, flow, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write the message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// We should receive 3 messages
assertTrue(latch.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
} finally {
client.close();
server.close();
}
} |
@Override
public CompletableFuture<Collection<TaskManagerLocation>> getPreferredLocations(
final ExecutionVertexID executionVertexId,
final Set<ExecutionVertexID> producersToIgnore) {
checkNotNull(executionVertexId);
checkNotNull(producersToIgnore);
final Collection<TaskManagerLocation> preferredLocationsBasedOnState =
getPreferredLocationsBasedOnState(executionVertexId);
if (!preferredLocationsBasedOnState.isEmpty()) {
return CompletableFuture.completedFuture(preferredLocationsBasedOnState);
}
return getPreferredLocationsBasedOnInputs(executionVertexId, producersToIgnore);
} | @Test
void testStateLocationsWillBeReturnedIfExist() {
final TaskManagerLocation stateLocation = new LocalTaskManagerLocation();
final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder =
new TestingInputsLocationsRetriever.Builder();
final ExecutionVertexID consumerId = new ExecutionVertexID(new JobVertexID(), 0);
final ExecutionVertexID producerId = new ExecutionVertexID(new JobVertexID(), 0);
locationRetrieverBuilder.connectConsumerToProducer(consumerId, producerId);
final TestingInputsLocationsRetriever inputsLocationsRetriever =
locationRetrieverBuilder.build();
inputsLocationsRetriever.markScheduled(producerId);
final PreferredLocationsRetriever locationsRetriever =
new DefaultPreferredLocationsRetriever(
id -> Optional.of(stateLocation), inputsLocationsRetriever);
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocations =
locationsRetriever.getPreferredLocations(consumerId, Collections.emptySet());
assertThat(preferredLocations.getNow(null)).containsExactly(stateLocation);
} |
@Override
public List<Document> get() {
try (var input = markdownResource.getInputStream()) {
Node node = parser.parseReader(new InputStreamReader(input));
DocumentVisitor documentVisitor = new DocumentVisitor(config);
node.accept(documentVisitor);
return documentVisitor.getDocuments();
}
catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
void testDocumentDividedViaHorizontalRules() {
MarkdownDocumentReaderConfig config = MarkdownDocumentReaderConfig.builder()
.withHorizontalRuleCreateDocument(true)
.build();
MarkdownDocumentReader reader = new MarkdownDocumentReader("classpath:/horizontal-rules.md", config);
List<Document> documents = reader.get();
assertThat(documents).hasSize(7)
.extracting(Document::getMetadata, Document::getContent)
.containsOnly(tuple(Map.of(),
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec tincidunt velit non bibendum gravida."),
tuple(Map.of(),
"Cras accumsan tincidunt ornare. Donec hendrerit consequat tellus blandit accumsan. Aenean aliquam metus at arcu elementum dignissim."),
tuple(Map.of(),
"Nullam nisi dui, egestas nec sem nec, interdum lobortis enim. Pellentesque odio orci, faucibus eu luctus nec, venenatis et magna."),
tuple(Map.of(),
"Vestibulum nec eros non felis fermentum posuere eget ac risus. Curabitur et fringilla massa. Cras facilisis nec nisl sit amet sagittis."),
tuple(Map.of(),
"Aenean eu leo eu nibh tristique posuere quis quis massa. Nullam lacinia luctus sem ut vehicula."),
tuple(Map.of(),
"Aenean quis vulputate mi. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Nam tincidunt nunc a tortor tincidunt, nec lobortis diam rhoncus."),
tuple(Map.of(), "Nulla facilisi. Phasellus eget tellus sed nibh ornare interdum eu eu mi."));
} |
@Override
public Object convertData( ValueMetaInterface meta2, Object data2 ) throws KettleValueException {
switch ( meta2.getType() ) {
case TYPE_STRING:
return convertStringToInternetAddress( meta2.getString( data2 ) );
case TYPE_INTEGER:
return convertIntegerToInternetAddress( meta2.getInteger( data2 ) );
case TYPE_NUMBER:
return convertNumberToInternetAddress( meta2.getNumber( data2 ) );
case TYPE_BIGNUMBER:
return convertBigNumberToInternetAddress( meta2.getBigNumber( data2 ) );
case TYPE_INET:
return ( (ValueMetaInternetAddress) meta2 ).getInternetAddress( data2 );
default:
throw new KettleValueException( meta2.toStringMeta() + " : can't be converted to an Internet Address" );
}
} | @Test
public void testConvertStringIPv6() throws Exception {
ValueMetaInterface vmia = new ValueMetaInternetAddress( "Test" );
ValueMetaString vms = new ValueMetaString( "aString" );
Object convertedIPv6 = vmia.convertData( vms, SAMPLE_IPV6_AS_STRING );
assertNotNull( convertedIPv6 );
assertTrue( convertedIPv6 instanceof InetAddress );
assertArrayEquals( SAMPLE_IPV6_AS_BYTES, ( (InetAddress) convertedIPv6 ).getAddress() );
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
} | @Test
public void shouldThrowWrongKeyFormatExceptionWithOverrideSchema() throws Exception {
// Given:
final SchemaAndId keySchemaAndId = SchemaAndId.schemaAndId(SCHEMA.value(), AVRO_SCHEMA, 1);
final SchemaAndId valueSchemaAndId = SchemaAndId.schemaAndId(SCHEMA.value(), AVRO_SCHEMA, 2);
givenStatement("CREATE STREAM source (id int key, f1 varchar) "
+ "WITH ("
+ "kafka_topic='expectedName', "
+ "key_format='KAFKA', "
+ "value_format='AVRO', "
+ "key_schema_id=1, "
+ "value_schema_id=1, "
+ "partitions=1"
+ ");", Pair.of(keySchemaAndId, valueSchemaAndId));
// When:
final Exception e = assertThrows(
KsqlStatementException.class,
() -> injector.inject(statement)
);
// Then:
assertThat(e.getMessage(), containsString(
"KEY_SCHEMA_ID is provided but format KAFKA doesn't "
+ "support registering in Schema Registry"));
verify(schemaRegistryClient, never()).register(anyString(), any(ParsedSchema.class));
} |
@SuppressFBWarnings(value = "RV_RETURN_VALUE_OF_PUTIFABSENT_IGNORED")
public int encode(CacheScope scopeInfo) {
if (!mScopeToId.containsKey(scopeInfo)) {
synchronized (this) {
if (!mScopeToId.containsKey(scopeInfo)) {
// NOTE: If we updated mScopeToId ahead of updating mIdToScope,
// we could read a null scope info in decode.
int id = mNextId;
Preconditions.checkArgument(id < mMaxNumScopes, "too many scopes in shadow cache");
mNextId++;
mScopeToId.putIfAbsent(scopeInfo, id);
// If we used mScopeToId.put() here, FindBugs would flag that the
// map update may not be atomic, hence putIfAbsent.
}
}
}
return mScopeToId.get(scopeInfo) & mScopeMask;
} | @Test
public void testConcurrentEncodeDecode() throws Exception {
List<Runnable> runnables = new ArrayList<>();
for (int k = 0; k < DEFAULT_THREAD_AMOUNT; k++) {
runnables.add(() -> {
for (int i = 0; i < NUM_SCOPES * 16; i++) {
int r = ThreadLocalRandom.current().nextInt(NUM_SCOPES);
CacheScope scopeInfo = CacheScope.create("schema1.table" + r);
int id = mScopeEncoder.encode(scopeInfo);
assertEquals(id, mScopeEncoder.encode(scopeInfo));
assertTrue(0 <= id && id < NUM_SCOPES);
}
});
}
ConcurrencyUtils.assertConcurrent(runnables, DEFAULT_TIMEOUT_SECONDS);
} |
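The encode method above uses the check-lock-check idiom: an unsynchronized containsKey fast path once a scope is cached, plus a second check under the lock so each scope is assigned exactly one id. A minimal standalone sketch of the pattern (hypothetical names, assuming a ConcurrentHashMap):

import java.util.concurrent.ConcurrentHashMap;

class IdAssigner {
    private final ConcurrentHashMap<String, Integer> ids = new ConcurrentHashMap<>();
    private int nextId; // guarded by "this"

    int idFor(String key) {
        Integer id = ids.get(key); // lock-free fast path for the common case
        if (id == null) {
            synchronized (this) {
                id = ids.get(key); // re-check: another thread may have assigned it
                if (id == null) {
                    id = nextId++;
                    ids.put(key, id);
                }
            }
        }
        return id;
    }
}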
@Override
public Consumer createConsumer(Processor processor) throws Exception {
DebeziumConsumer consumer = new DebeziumConsumer(this, processor);
configureConsumer(consumer);
return consumer;
} | @Test
void testIfCreatesConsumer() throws Exception {
final Consumer debeziumConsumer = debeziumEndpoint.createConsumer(processor);
assertNotNull(debeziumConsumer);
} |
public List<UserDTO> usersToUserDTOs(List<User> users) {
return users.stream().filter(Objects::nonNull).map(this::userToUserDTO).toList();
} | @Test
void usersToUserDTOsShouldMapOnlyNonNullUsers() {
List<User> users = new ArrayList<>();
users.add(user);
users.add(null);
List<UserDTO> userDTOS = userMapper.usersToUserDTOs(users);
assertThat(userDTOS).isNotEmpty().size().isEqualTo(1);
} |
@VisibleForTesting
public ConfigDO validateConfigExists(Long id) {
if (id == null) {
return null;
}
ConfigDO config = configMapper.selectById(id);
if (config == null) {
throw exception(CONFIG_NOT_EXISTS);
}
return config;
} | @Test
public void testValidateConfigExist_notExists() {
assertServiceException(() -> configService.validateConfigExists(randomLongId()), CONFIG_NOT_EXISTS);
} |
public static int ge0(int value, String name) {
return (int) ge0((long) value, name);
} | @Test
public void checkGEZero() {
assertEquals(Check.ge0(120, "test"), 120);
assertEquals(Check.ge0(0, "test"), 0);
} |
@Override
public void encode(Event event, OutputStream output) throws IOException {
String outputString = (format == null
? event.toString()
: StringInterpolation.evaluate(event, format));
output.write(outputString.getBytes(charset));
} | @Test
public void testEncodeWithUtf8() throws IOException {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
String message = "München 安装中文输入法";
Map<String, Object> config = new HashMap<>();
config.put("format", "%{message}");
Plain codec = new Plain(new ConfigurationImpl(config), new TestContext());
Event e1 = new Event(Collections.singletonMap("message", message));
codec.encode(e1, outputStream);
Assert.assertEquals(message, new String(outputStream.toByteArray(), Charset.forName("UTF-8")));
} |
public static <E> List<E> ensureImmutable(List<E> list) {
if (list.isEmpty()) return Collections.emptyList();
// Faster to make a copy than check the type to see if it is already a singleton list
if (list.size() == 1) return Collections.singletonList(list.get(0));
if (isImmutable(list)) return list;
return Collections.unmodifiableList(new ArrayList<E>(list));
} | @Test void ensureImmutable_singletonListStaysSingleton() {
List<Object> list = Collections.singletonList("foo");
assertThat(Lists.ensureImmutable(list).getClass().getSimpleName())
.isEqualTo("SingletonList");
} |
@Override
public Object toKsqlRow(final Schema connectSchema, final Object connectObject) {
final Object avroCompatibleRow = innerTranslator.toKsqlRow(connectSchema, connectObject);
if (avroCompatibleRow == null) {
return null;
}
return ConnectSchemas.withCompatibleSchema(ksqlSchema, avroCompatibleRow);
} | @Test
public void shouldReturnBytes() {
// When:
final AvroDataTranslator translator =
new AvroDataTranslator(Schema.BYTES_SCHEMA, AvroProperties.DEFAULT_AVRO_SCHEMA_FULL_NAME);
// Then:
assertThat(
translator.toKsqlRow(Schema.BYTES_SCHEMA, new byte[] {123}),
is(ByteBuffer.wrap(new byte[] {123})));
} |
public ProviderBuilder accepts(Integer accepts) {
this.accepts = accepts;
return getThis();
} | @Test
void accepts() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.accepts(35);
Assertions.assertEquals(35, builder.build().getAccepts());
} |
public void createRole( IRole newRole ) throws KettleException {
normalizeRoleInfo( newRole );
if ( !validateRoleInfo( newRole ) ) {
throw new KettleException( BaseMessages.getString( PurRepositorySecurityManager.class,
"PurRepositorySecurityManager.ERROR_0001_INVALID_NAME" ) );
}
userRoleDelegate.createRole( newRole );
} | @Test
public void createRole_NormalizesInfo_PassesIfNoViolations() throws Exception {
IRole info = new EERoleInfo( "role ", "" );
ArgumentCaptor<IRole> captor = ArgumentCaptor.forClass( IRole.class );
manager.createRole( info );
verify( roleDelegate ).createRole( captor.capture() );
info = captor.getValue();
assertEquals( "Spaces should be trimmed", "role", info.getName() );
} |
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
throws IOException
{
if (isGrayImage(image))
{
return createFromGrayImage(image, document);
}
// We try to encode the image with predictor
if (USE_PREDICTOR_ENCODER)
{
PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
if (pdImageXObject != null)
{
if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
pdImageXObject.getBitsPerComponent() < 16 &&
image.getWidth() * image.getHeight() <= 50 * 50)
{
// also create classic compressed image, compare sizes
PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
if (pdImageXObjectClassic.getCOSObject().getLength() <
pdImageXObject.getCOSObject().getLength())
{
pdImageXObject.getCOSObject().close();
return pdImageXObjectClassic;
}
else
{
pdImageXObjectClassic.getCOSObject().close();
}
}
return pdImageXObject;
}
}
// Fallback: We export the image as 8-bit sRGB and might lose color information
return createFromRGBImage(image, document);
} | @Test
void testCreateLosslessFromImageINT_BGR() throws IOException
{
PDDocument document = new PDDocument();
BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png"));
BufferedImage imgBgr = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_INT_BGR);
Graphics2D graphics = imgBgr.createGraphics();
graphics.drawImage(image, 0, 0, null);
PDImageXObject ximage = LosslessFactory.createFromImage(document, imgBgr);
validate(ximage, 8, imgBgr.getWidth(), imgBgr.getHeight(), "png", PDDeviceRGB.INSTANCE.getName());
checkIdent(image, ximage.getImage());
} |
public static GSBlobIdentifier getTemporaryBlobIdentifier(
GSBlobIdentifier finalBlobIdentifier,
UUID temporaryObjectId,
GSFileSystemOptions options) {
String temporaryBucketName = BlobUtils.getTemporaryBucketName(finalBlobIdentifier, options);
String temporaryObjectName =
options.isFileSinkEntropyEnabled()
? BlobUtils.getTemporaryObjectNameWithEntropy(
finalBlobIdentifier, temporaryObjectId)
: BlobUtils.getTemporaryObjectName(finalBlobIdentifier, temporaryObjectId);
return new GSBlobIdentifier(temporaryBucketName, temporaryObjectName);
} | @Test
public void shouldProperlyConstructTemporaryBlobIdentifierWithDefaultBucket() {
Configuration flinkConfig = new Configuration();
GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
GSBlobIdentifier identifier = new GSBlobIdentifier("foo", "bar");
UUID temporaryObjectId = UUID.fromString("f09c43e5-ea49-4537-a406-0586f8f09d47");
GSBlobIdentifier temporaryBlobIdentifier =
BlobUtils.getTemporaryBlobIdentifier(identifier, temporaryObjectId, options);
assertEquals("foo", temporaryBlobIdentifier.bucketName);
assertEquals(
".inprogress/foo/bar/f09c43e5-ea49-4537-a406-0586f8f09d47",
temporaryBlobIdentifier.objectName);
} |
@GET
@Path("/conversions")
@Produces(MediaType.APPLICATION_JSON)
public CurrencyConversionEntityList getConversions(final @ReadOnly @Auth AuthenticatedDevice auth) {
return currencyManager.getCurrencyConversions().orElseThrow();
} | @Test
void testGetCurrencyConversions() {
CurrencyConversionEntityList conversions =
resources.getJerseyTest()
.target("/v1/payments/conversions")
.request()
.header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
.get(CurrencyConversionEntityList.class);
assertThat(conversions.getCurrencies().size()).isEqualTo(2);
assertThat(conversions.getCurrencies().get(0).getBase()).isEqualTo("FOO");
assertThat(conversions.getCurrencies().get(0).getConversions().get("USD")).isEqualTo(new BigDecimal("2.35"));
} |
@Override
public Set<K> keySet() {
Set<IdentityObject<K>> set = mInternalMap.keySet();
return new Set<K>() {
@Override
public int size() {
return set.size();
}
@Override
public boolean isEmpty() {
return set.isEmpty();
}
@Override
public boolean contains(Object o) {
return set.contains(new IdentityObject<>(o));
}
@Override
public Iterator<K> iterator() {
return set.stream().map(IdentityObject::get).iterator();
}
@Override
public Object[] toArray() {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "toArray"));
}
@Override
public <T> T[] toArray(T[] a) {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "toArray"));
}
@Override
public boolean add(K k) {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "add"));
}
@Override
public boolean remove(Object o) {
return set.remove(new IdentityObject<>(o));
}
/*
* This method could be implemented by wrapping each element of the input
* collection in an IdentityObject<T>, but it has been left unimplemented for now.
*/
@Override
public boolean containsAll(Collection<?> c) {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "containsAll"));
}
@Override
public boolean addAll(Collection<? extends K> c) {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "addAll"));
}
/*
* This method could be implemented by wrapping each element of the input
* collection in an IdentityObject<T>, but it has been left unimplemented for now.
*/
@Override
public boolean retainAll(Collection<?> c) {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "retainAll"));
}
/*
* This method could be implemented by wrapping each element of the input
* collection in an IdentityObject<T>, but it has been left unimplemented for now.
*/
@Override
public boolean removeAll(Collection<?> c) {
throw new UnsupportedOperationException(String.format(UNSUPPORTED_OP_FMT, "removeAll"));
}
@Override
public void clear() {
set.clear();
}
};
} | @Test
public void keySet() {
String x = new String("x");
String xx = new String("x");
assertNull(mMap.put(x, "x"));
assertNull(mMap.put(xx, "x2"));
assertEquals(2, mMap.size());
Set<String> km = mMap.keySet();
assertEquals(2, km.size());
assertTrue(km.contains(x));
assertTrue(km.contains(xx));
assertEquals("x", mMap.remove(x));
assertEquals(1, km.size());
assertTrue(km.remove(xx));
assertEquals(0, km.size());
assertEquals(0, mMap.size());
} |
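The keySet test relies on reference identity: x and xx are equals()-equal but distinct objects, so an identity-keyed map keeps both entries. A minimal illustration of that distinction using the JDK's IdentityHashMap (used here for illustration only; the map under test wraps keys in IdentityObject instead):

import java.util.IdentityHashMap;
import java.util.Map;

class IdentityExample {
    public static void main(String[] args) {
        String x = new String("x");
        String xx = new String("x"); // equal by value, distinct by reference
        Map<String, String> m = new IdentityHashMap<>();
        m.put(x, "x");
        m.put(xx, "x2");
        System.out.println(x.equals(xx)); // true: value equality
        System.out.println(m.size());     // 2: identity keying keeps both
    }
}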
public EvaluationResult evaluate(Condition condition, Measure measure) {
checkArgument(SUPPORTED_METRIC_TYPE.contains(condition.getMetric().getType()), "Conditions on MetricType %s are not supported", condition.getMetric().getType());
Comparable measureComparable = parseMeasure(measure);
if (measureComparable == null) {
return new EvaluationResult(Measure.Level.OK, null);
}
return evaluateCondition(condition, measureComparable)
.orElseGet(() -> new EvaluationResult(Measure.Level.OK, measureComparable));
} | @Test
public void test_condition_on_rating() {
Metric metric = createMetric(RATING);
Measure measure = newMeasureBuilder().create(4, "D");
assertThat(underTest.evaluate(new Condition(metric, GREATER_THAN.getDbValue(), "4"), measure)).hasLevel(OK).hasValue(4);
assertThat(underTest.evaluate(new Condition(metric, GREATER_THAN.getDbValue(), "2"), measure)).hasLevel(ERROR).hasValue(4);
} |
@Override
public boolean serverHealthy() {
return grpcClientProxy.serverHealthy() || httpClientProxy.serverHealthy();
} | @Test
void testServerHealthy() {
Mockito.when(mockGrpcClient.serverHealthy()).thenReturn(true);
assertTrue(delegate.serverHealthy());
} |
@Override
public Iterator<Object> iterateObjects() {
return new CompositeObjectIterator(concreteStores, true);
} | @Test
public void isOkayToReinsertSameTypeThenQuery() throws Exception {
insertObjectWithFactHandle(new SubClass());
insertObjectWithFactHandle(new SubClass());
Collection<Object> result = collect(underTest.iterateObjects(SuperClass.class));
assertThat(result).hasSize(2);
// Check there's no duplication of results
assertThat(new HashSet<Object>(result)).hasSize(2);
} |
public static Optional<File> getFileFromFileNameOrFilePath(String fileName, String filePath) {
Optional<File> fromClassloader = getFileByFileNameFromClassloader(fileName, Thread.currentThread().getContextClassLoader());
return fromClassloader.isPresent() ? fromClassloader : getFileByFilePath(filePath);
} | @Test
void getFileFromFileNameOrFilePathExisting() {
Optional<File> retrieved = MemoryFileUtils.getFileFromFileNameOrFilePath(TEST_FILE, TEST_FILE);
assertThat(retrieved).isNotNull().isNotEmpty();
String path = String.format("target%1$stest-classes%1$s%2$s", File.separator, TEST_FILE);
retrieved = MemoryFileUtils.getFileFromFileNameOrFilePath(NOT_EXISTING_FILE, path);
assertThat(retrieved).isNotNull().isNotEmpty();
retrieved = MemoryFileUtils.getFileFromFileNameOrFilePath(path, NOT_EXISTING_FILE);
assertThat(retrieved).isNotNull().isNotEmpty();
} |
static ManifestFile fromJson(JsonNode jsonNode) {
Preconditions.checkArgument(jsonNode != null, "Invalid JSON node for manifest file: null");
Preconditions.checkArgument(
jsonNode.isObject(), "Invalid JSON node for manifest file: non-object (%s)", jsonNode);
String path = JsonUtil.getString(PATH, jsonNode);
long length = JsonUtil.getLong(LENGTH, jsonNode);
int specId = JsonUtil.getInt(SPEC_ID, jsonNode);
ManifestContent manifestContent = null;
if (jsonNode.has(CONTENT)) {
manifestContent = ManifestContent.fromId(JsonUtil.getInt(CONTENT, jsonNode));
}
long sequenceNumber = JsonUtil.getLong(SEQUENCE_NUMBER, jsonNode);
long minSequenceNumber = JsonUtil.getLong(MIN_SEQUENCE_NUMBER, jsonNode);
Long addedSnapshotId = null;
if (jsonNode.has(ADDED_SNAPSHOT_ID)) {
addedSnapshotId = JsonUtil.getLong(ADDED_SNAPSHOT_ID, jsonNode);
}
Integer addedFilesCount = null;
if (jsonNode.has(ADDED_FILES_COUNT)) {
addedFilesCount = JsonUtil.getInt(ADDED_FILES_COUNT, jsonNode);
}
Integer existingFilesCount = null;
if (jsonNode.has(EXISTING_FILES_COUNT)) {
existingFilesCount = JsonUtil.getInt(EXISTING_FILES_COUNT, jsonNode);
}
Integer deletedFilesCount = null;
if (jsonNode.has(DELETED_FILES_COUNT)) {
deletedFilesCount = JsonUtil.getInt(DELETED_FILES_COUNT, jsonNode);
}
Long addedRowsCount = null;
if (jsonNode.has(ADDED_ROWS_COUNT)) {
addedRowsCount = JsonUtil.getLong(ADDED_ROWS_COUNT, jsonNode);
}
Long existingRowsCount = null;
if (jsonNode.has(EXISTING_ROWS_COUNT)) {
existingRowsCount = JsonUtil.getLong(EXISTING_ROWS_COUNT, jsonNode);
}
Long deletedRowsCount = null;
if (jsonNode.has(DELETED_ROWS_COUNT)) {
deletedRowsCount = JsonUtil.getLong(DELETED_ROWS_COUNT, jsonNode);
}
List<ManifestFile.PartitionFieldSummary> partitionFieldSummaries = null;
if (jsonNode.has(PARTITION_FIELD_SUMMARY)) {
JsonNode summaryArray = JsonUtil.get(PARTITION_FIELD_SUMMARY, jsonNode);
Preconditions.checkArgument(
summaryArray.isArray(),
"Invalid JSON node for partition field summaries: non-array (%s)",
summaryArray);
ImmutableList.Builder<ManifestFile.PartitionFieldSummary> builder = ImmutableList.builder();
for (JsonNode summaryNode : summaryArray) {
ManifestFile.PartitionFieldSummary summary =
PartitionFieldSummaryParser.fromJson(summaryNode);
builder.add(summary);
}
partitionFieldSummaries = builder.build();
}
ByteBuffer keyMetadata = JsonUtil.getByteBufferOrNull(KEY_METADATA, jsonNode);
return new GenericManifestFile(
path,
length,
specId,
manifestContent,
sequenceNumber,
minSequenceNumber,
addedSnapshotId,
partitionFieldSummaries,
keyMetadata,
addedFilesCount,
addedRowsCount,
existingFilesCount,
existingRowsCount,
deletedFilesCount,
deletedRowsCount);
} | @Test
public void invalidJsonNode() throws Exception {
String jsonStr = "{\"str\":\"1\", \"arr\":[]}";
ObjectMapper mapper = new ObjectMapper();
JsonNode rootNode = mapper.reader().readTree(jsonStr);
assertThatThrownBy(() -> ManifestFileParser.fromJson(rootNode.get("str")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Invalid JSON node for manifest file: non-object ");
assertThatThrownBy(() -> ManifestFileParser.fromJson(rootNode.get("arr")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Invalid JSON node for manifest file: non-object ");
} |
public Set<String> exposedHeaders() {
return Collections.unmodifiableSet(exposeHeaders);
} | @Test
public void exposeHeaders() {
final CorsConfig cors = forAnyOrigin().exposeHeaders("custom-header1", "custom-header2").build();
assertThat(cors.exposedHeaders(), hasItems("custom-header1", "custom-header2"));
} |
@Override
public void destroy() {
super.destroy();
cache.clear();
listener.remove();
} | @Test
public void testSubscriptionTimeout() {
Config config = new Config();
config.useSingleServer()
.setSubscriptionsPerConnection(2)
.setSubscriptionConnectionPoolSize(1)
.setAddress(redisson.getConfig().useSingleServer().getAddress());
RedissonClient redisson = Redisson.create(config);
RLocalCachedMap<Object, Object> m1 = redisson.getLocalCachedMap(LocalCachedMapOptions.name("pubsub_test1"));
ScheduledExecutorService e = Executors.newSingleThreadScheduledExecutor();
e.schedule(() -> {
m1.destroy();
}, 1, TimeUnit.SECONDS);
RLocalCachedMap<Object, Object> m2 = redisson.getLocalCachedMap(LocalCachedMapOptions.name("pubsub_test2"));
redisson.shutdown();
} |
public static TraceTransferBean encoderFromContextBean(TraceContext ctx) {
if (ctx == null) {
return null;
}
//build the transfer entity's content from the trace context and its trace beans
TraceTransferBean transferBean = new TraceTransferBean();
StringBuilder sb = new StringBuilder(256);
switch (ctx.getTraceType()) {
case Pub: {
TraceBean bean = ctx.getTraceBeans().get(0);
//append the content of context and traceBean to transferBean's TransData
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTopic()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTags()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getStoreHost()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getBodyLength()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getCostTime()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgType().ordinal()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getOffsetMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.isSuccess()).append(TraceConstants.FIELD_SPLITOR);//
}
break;
case SubBefore: {
for (TraceBean bean : ctx.getTraceBeans()) {
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRequestId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getRetryTimes()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.FIELD_SPLITOR);//
}
}
break;
case SubAfter: {
for (TraceBean bean : ctx.getTraceBeans()) {
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRequestId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getCostTime()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.isSuccess()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getContextCode()).append(TraceConstants.CONTENT_SPLITOR);
if (!ctx.getAccessChannel().equals(AccessChannel.CLOUD)) {
sb.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR);
sb.append(ctx.getGroupName());
}
sb.append(TraceConstants.FIELD_SPLITOR);
}
}
break;
case EndTransaction: {
TraceBean bean = ctx.getTraceBeans().get(0);
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTopic()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTags()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getStoreHost()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgType().ordinal()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTransactionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTransactionState().name()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.isFromTransactionCheck()).append(TraceConstants.FIELD_SPLITOR);
}
break;
default:
}
transferBean.setTransData(sb.toString());
for (TraceBean bean : ctx.getTraceBeans()) {
transferBean.getTransKey().add(bean.getMsgId());
if (bean.getKeys() != null && bean.getKeys().length() > 0) {
String[] keys = bean.getKeys().split(MessageConst.KEY_SEPARATOR);
transferBean.getTransKey().addAll(Arrays.asList(keys));
}
}
return transferBean;
} | @Test
public void testTraceKeys() {
TraceContext endTrxContext = new TraceContext();
endTrxContext.setTraceType(TraceType.EndTransaction);
endTrxContext.setGroupName("PID-test");
endTrxContext.setRegionId("DefaultRegion");
endTrxContext.setTimeStamp(time);
TraceBean endTrxTraceBean = new TraceBean();
endTrxTraceBean.setTopic("topic-test");
endTrxTraceBean.setKeys("Keys Keys2");
endTrxTraceBean.setTags("Tags");
endTrxTraceBean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000");
endTrxTraceBean.setStoreHost("127.0.0.1:10911");
endTrxTraceBean.setMsgType(MessageType.Trans_msg_Commit);
endTrxTraceBean.setTransactionId("transactionId");
endTrxTraceBean.setTransactionState(LocalTransactionState.COMMIT_MESSAGE);
endTrxTraceBean.setFromTransactionCheck(false);
List<TraceBean> traceBeans = new ArrayList<>();
traceBeans.add(endTrxTraceBean);
endTrxContext.setTraceBeans(traceBeans);
TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(endTrxContext);
Set<String> keys = traceTransferBean.getTransKey();
assertThat(keys).contains("Keys");
assertThat(keys).contains("Keys2");
} |
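Why the test expects both "Keys" and "Keys2": encoderFromContextBean splits each bean's keys on MessageConst.KEY_SEPARATOR before adding them to the transfer keys. Assuming that separator is a single space (suggested by the "Keys Keys2" fixture, not confirmed here), a minimal illustration:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class TraceKeySplitExample {
    public static void main(String[] args) {
        String keySeparator = " "; // assumption: MessageConst.KEY_SEPARATOR is a single space
        Set<String> transKey = new HashSet<>();
        transKey.add("AC1415116D1418B4AAC217FE1B4E0000"); // the msgId is always added
        transKey.addAll(Arrays.asList("Keys Keys2".split(keySeparator)));
        System.out.println(transKey); // contains "Keys" and "Keys2", as the test asserts
    }
}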
@PublicEvolving
public static AIMDScalingStrategyBuilder builder(int rateThreshold) {
return new AIMDScalingStrategyBuilder(rateThreshold);
} | @Test
void testInvalidRateThreshold() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(
() ->
AIMDScalingStrategy.builder(0)
.setIncreaseRate(1)
.setDecreaseFactor(0.5)
.build())
.withMessageContaining("rateThreshold must be a positive integer.");
} |
@Operation(summary = "delete", description = "Delete a host")
@DeleteMapping("/{id}")
public ResponseEntity<Boolean> delete(@PathVariable Long clusterId, @PathVariable Long id) {
return ResponseEntity.success(hostService.delete(id));
} | @Test
void deleteReturnsFalseForInvalidHostId() {
Long clusterId = 1L;
Long hostId = 999L;
when(hostService.delete(hostId)).thenReturn(false);
ResponseEntity<Boolean> response = hostController.delete(clusterId, hostId);
assertTrue(response.isSuccess());
assertFalse(response.getData());
} |
@Override
public String rpcType() {
return RpcTypeEnum.SOFA.getName();
} | @Test
public void testRpcType() {
String rpcType = shenyuClientRegisterSofaService.rpcType();
assertEquals(RpcTypeEnum.SOFA.getName(), rpcType);
} |
@Override
public CompletableFuture<Void> authorizeConfigRequest(Request request) {
return doAsyncAuthorization(request, this::doConfigRequestAuthorization);
} | @Test
public void tenant_node_cannot_access_lbservice_config() throws ExecutionException, InterruptedException {
RpcAuthorizer authorizer = createAuthorizer(new NodeIdentity.Builder(NodeType.tenant).build(), new HostRegistry());
Request configRequest = createConfigRequest(
new ConfigKey<>(LbServicesConfig.CONFIG_DEF_NAME, "*", LbServicesConfig.CONFIG_DEF_NAMESPACE),
HOSTNAME);
exceptionRule.expectMessage("Node with type 'tenant' is not allowed to access global config [name=cloud.config.lb-services,configId=*]");
exceptionRule.expectCause(instanceOf(AuthorizationException.class));
authorizer.authorizeConfigRequest(configRequest)
.get();
} |
public static BadRequestException clusterAlreadyExists(String clusterName) {
return new BadRequestException("cluster already exists for clusterName:%s", clusterName);
} | @Test
public void testClusterAlreadyExists(){
BadRequestException clusterAlreadyExists = BadRequestException.clusterAlreadyExists(clusterName);
assertEquals("cluster already exists for clusterName:test", clusterAlreadyExists.getMessage());
} |
private void announceBackgroundJobServer() {
final BackgroundJobServerStatus serverStatus = backgroundJobServer.getServerStatus();
storageProvider.announceBackgroundJobServer(serverStatus);
determineIfCurrentBackgroundJobServerIsMaster();
lastSignalAlive = serverStatus.getLastHeartbeat();
} | @Test
void onStartServerAnnouncesItselfAndDoesNotBecomeMasterIfItIsNotTheFirstToBeOnline() {
final BackgroundJobServerStatus master = anotherServer();
storageProvider.announceBackgroundJobServer(master);
backgroundJobServer.start();
await().untilAsserted(() -> assertThat(storageProvider.getBackgroundJobServers()).hasSize(2));
assertThat(backgroundJobServer.isMaster()).isFalse();
} |
@Override
public Usuario getUsuarioByUsername(String username) {
Usuario usuario = usuarioRepository.findByUsername(username).orElseThrow(
() -> new ResourceNotFoundException("User not found with id: " + username));
// Fetch the user's ratings via RestTemplate
Calificacion[] calificacionesUsuario = restTemplate.getForObject(
"http://calificacion-service/calificaciones/usuarios/" + usuario.getId(), Calificacion[].class);
if (calificacionesUsuario != null && calificacionesUsuario.length > 0) {
logger.info("El usuario {} tiene las siguientes calificaciones:", usuario.getName());
// Iterate over the ratings, take each one's hotel id, and fetch the corresponding hotel
for (Calificacion calificacion : calificacionesUsuario) {
String hotelId = calificacion.getHotelId();
// Fetch the hotel via the Feign client
Hotel hotel = hotelFeignService.obtenerHotel(hotelId);
calificacion.setHotel(hotel);
logger.info("Calificación ID: {}, Calificación: {}, Hotel: {}", calificacion.getId(), calificacion.getCalificacion(), hotel.toString());
}
} else {
logger.info("El usuario {} no tiene calificaciones.", usuario.getName());
}
usuario.setCalificaciones(calificacionesUsuario);
return usuario;
} | @Test
void testGetUsuarioByUsername() {
when(usuarioRepository.findByUsername(anyString())).thenReturn(Optional.of(usuario));
when(restTemplate.getForObject(anyString(), eq(Calificacion[].class))).thenReturn(new Calificacion[0]);
Usuario foundUsuario = usuarioService.getUsuarioByUsername("johndoe");
assertThat(foundUsuario).isNotNull();
assertThat(foundUsuario.getUsername()).isEqualTo("johndoe");
} |
public OffsetRange[] getNextOffsetRanges(Option<String> lastCheckpointStr, long sourceLimit, HoodieIngestionMetrics metrics) {
// Come up with final set of OffsetRanges to read (account for new partitions, limit number of events)
long maxEventsToReadFromKafka = getLongWithAltKeys(props, KafkaSourceConfig.MAX_EVENTS_FROM_KAFKA_SOURCE);
long numEvents;
if (sourceLimit == Long.MAX_VALUE) {
numEvents = maxEventsToReadFromKafka;
LOG.info("SourceLimit not configured, set numEvents to default value : {}", maxEventsToReadFromKafka);
} else {
numEvents = sourceLimit;
}
long minPartitions = getLongWithAltKeys(props, KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS);
LOG.info("getNextOffsetRanges set config {} to {}", KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), minPartitions);
return getNextOffsetRanges(lastCheckpointStr, numEvents, minPartitions, metrics);
} | @Test
public void testGetNextOffsetRangesFromSingleOffsetCheckpoint() {
HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator();
testUtils.createTopic(testTopicName, 1);
testUtils.sendMessages(testTopicName, Helpers.jsonifyRecords(dataGenerator.generateInserts("000", 1000)));
KafkaOffsetGen kafkaOffsetGen = new KafkaOffsetGen(getConsumerConfigs("latest", KAFKA_CHECKPOINT_TYPE_SINGLE_OFFSET));
// long positive value of offset => get it
String lastCheckpointString = "250";
OffsetRange[] nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.of(lastCheckpointString), 500, metrics);
assertEquals(1, nextOffsetRanges.length);
assertEquals(250, nextOffsetRanges[0].fromOffset());
assertEquals(750, nextOffsetRanges[0].untilOffset());
// negative offset value => get by autoOffsetReset config
lastCheckpointString = "-2";
nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.of(lastCheckpointString), 500, metrics);
assertEquals(1, nextOffsetRanges.length);
assertEquals(1000, nextOffsetRanges[0].fromOffset());
assertEquals(1000, nextOffsetRanges[0].untilOffset());
// incorrect offset value => get by autoOffsetReset config
kafkaOffsetGen = new KafkaOffsetGen(getConsumerConfigs("earliest", KAFKA_CHECKPOINT_TYPE_SINGLE_OFFSET));
lastCheckpointString = "garbage";
nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.of(lastCheckpointString), 5000, metrics);
assertEquals(1, nextOffsetRanges.length);
assertEquals(0, nextOffsetRanges[0].fromOffset());
assertEquals(1000, nextOffsetRanges[0].untilOffset());
} |
@Override
public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData,
ChannelPromise promise) {
return lifecycleManager.goAway(ctx, lastStreamId, errorCode, debugData, promise);
} | @Test
public void encoderDelegatesGoAwayToLifeCycleManager() {
ChannelPromise promise = newPromise();
encoder.writeGoAway(ctx, STREAM_ID, Http2Error.INTERNAL_ERROR.code(), null, promise);
verify(lifecycleManager).goAway(eq(ctx), eq(STREAM_ID), eq(Http2Error.INTERNAL_ERROR.code()),
eq((ByteBuf) null), eq(promise));
verifyNoMoreInteractions(writer);
} |
int sendBackups0(BackupAwareOperation backupAwareOp) {
int requestedSyncBackups = requestedSyncBackups(backupAwareOp);
int requestedAsyncBackups = requestedAsyncBackups(backupAwareOp);
int requestedTotalBackups = requestedTotalBackups(backupAwareOp);
if (requestedTotalBackups == 0) {
return 0;
}
Operation op = (Operation) backupAwareOp;
PartitionReplicaVersionManager versionManager = node.getPartitionService().getPartitionReplicaVersionManager();
ServiceNamespace namespace = versionManager.getServiceNamespace(op);
long[] replicaVersions = versionManager.incrementPartitionReplicaVersions(op.getPartitionId(), namespace,
requestedTotalBackups);
boolean syncForced = backpressureRegulator.isSyncForced(backupAwareOp);
int syncBackups = syncBackups(requestedSyncBackups, requestedAsyncBackups, syncForced);
int asyncBackups = asyncBackups(requestedSyncBackups, requestedAsyncBackups, syncForced);
// TODO: This could cause a problem with back pressure
if (!op.returnsResponse()) {
asyncBackups += syncBackups;
syncBackups = 0;
}
if (syncBackups + asyncBackups == 0) {
return 0;
}
return makeBackups(backupAwareOp, op.getPartitionId(), replicaVersions, syncBackups, asyncBackups);
} | @Test(expected = IllegalArgumentException.class)
public void backup_whenTooLargeSyncBackupCount() {
setup(BACKPRESSURE_ENABLED);
BackupAwareOperation op = makeOperation(MAX_BACKUP_COUNT + 1, 0);
backupHandler.sendBackups0(op);
} |
public int getTargetDocsPerChunk() {
return _targetDocsPerChunk;
} | @Test
public void withNegativeTargetDocsPerChunk()
throws JsonProcessingException {
String confStr = "{\"targetDocsPerChunk\": \"-1\"}";
ForwardIndexConfig config = JsonUtils.stringToObject(confStr, ForwardIndexConfig.class);
assertEquals(config.getTargetDocsPerChunk(), -1, "Unexpected defaultTargetDocsPerChunk");
} |
@Override
public void addListener(String key, String group, ConfigurationListener listener) {
ApolloListener apolloListener = listeners.computeIfAbsent(group + key, k -> createTargetListener(key, group));
apolloListener.addListener(listener);
dubboConfig.addChangeListener(apolloListener, Collections.singleton(key));
} | @Test
void testAddListener() throws Exception {
String mockKey = "mockKey3";
String mockValue = String.valueOf(new Random().nextInt());
final SettableFuture<org.apache.dubbo.common.config.configcenter.ConfigChangedEvent> future =
SettableFuture.create();
apolloDynamicConfiguration = new ApolloDynamicConfiguration(url, applicationModel);
apolloDynamicConfiguration.addListener(mockKey, DEFAULT_NAMESPACE, new ConfigurationListener() {
@Override
public void process(org.apache.dubbo.common.config.configcenter.ConfigChangedEvent event) {
future.set(event);
}
});
putData(mockKey, mockValue);
org.apache.dubbo.common.config.configcenter.ConfigChangedEvent result = future.get(3000, TimeUnit.MILLISECONDS);
assertEquals(mockValue, result.getContent());
assertEquals(mockKey, result.getKey());
assertEquals(ConfigChangeType.MODIFIED, result.getChangeType());
} |
protected List<MavenArtifact> processResponse(Dependency dependency, HttpURLConnection conn) throws IOException {
final List<MavenArtifact> result = new ArrayList<>();
try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8);
JsonParser parser = objectReader.getFactory().createParser(streamReader)) {
if (init(parser) && parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT) {
// at least one result
do {
final FileImpl file = objectReader.readValue(parser);
checkHashes(dependency, file.getChecksums());
final Matcher pathMatcher = PATH_PATTERN.matcher(file.getPath());
if (!pathMatcher.matches()) {
throw new IllegalStateException("Cannot extract the Maven information from the path "
+ "retrieved in Artifactory " + file.getPath());
}
final String groupId = pathMatcher.group("groupId").replace('/', '.');
final String artifactId = pathMatcher.group("artifactId");
final String version = pathMatcher.group("version");
result.add(new MavenArtifact(groupId, artifactId, version, file.getDownloadUri(),
MavenArtifact.derivePomUrl(artifactId, version, file.getDownloadUri())));
} while (parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT);
} else {
throw new FileNotFoundException("Artifact " + dependency + " not found in Artifactory");
}
}
return result;
} | @Test
public void shouldProcessCorrectlyArtifactoryAnswerMisMatchMd5() throws IOException {
// Given
Dependency dependency = new Dependency();
dependency.setSha1sum("c5b4c491aecb72e7c32a78da0b5c6b9cda8dee0f");
dependency.setSha256sum("512b4bf6927f4864acc419b8c5109c23361c30ed1f5798170248d33040de068e");
dependency.setMd5sum("2d1dd0fc21ee96bccfab4353d5379640");
final HttpURLConnection urlConnection = mock(HttpURLConnection.class);
final byte[] payload = payloadWithSha256().getBytes(StandardCharsets.UTF_8);
when(urlConnection.getInputStream()).thenReturn(new ByteArrayInputStream(payload));
// When
try {
searcher.processResponse(dependency, urlConnection);
fail("MD5 mismatching should throw an exception!");
} catch (FileNotFoundException e) {
// Then
assertEquals("Artifact found by API is not matching the md5 of the artifact (repository hash is 2d1dd0fc21ee96bccfab4353d5379649 while actual is 2d1dd0fc21ee96bccfab4353d5379640) !", e.getMessage());
}
} |
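PATH_PATTERN itself is not shown above, but the replace('/', '.') applied to the matched groupId group implies the conventional repository layout in which the group segment uses '/' separators. A hypothetical illustration (the path and names are assumptions, not the searcher's actual regex):

class ArtifactoryPathExample {
    public static void main(String[] args) {
        // hypothetical group segment from a path like /org/apache/commons/commons-lang3/3.12.0/...
        String groupSegment = "org/apache/commons";
        String groupId = groupSegment.replace('/', '.');
        System.out.println(groupId); // org.apache.commons
    }
}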
public Lifecycle getLifecycle() {
return lifecycle;
} | @Test
public void testGetLifecycle() throws Exception {
assertEquals(Lifecycle.UNINITIALIZED, status.getLifecycle());
} |
public static TemplateEngine createEngine() {
return TemplateFactory.create();
} | @Test
public void enjoyEngineTest() {
// String template
TemplateEngine engine = TemplateUtil.createEngine(
new TemplateConfig("templates").setCustomEngine(EnjoyEngine.class));
Template template = engine.getTemplate("#(x + 123)");
String result = template.render(Dict.create().set("x", 1));
assertEquals("124", result);
// Classpath template
engine = new EnjoyEngine(
new TemplateConfig("templates", ResourceMode.CLASSPATH).setCustomEngine(EnjoyEngine.class));
template = engine.getTemplate("enjoy_test.etl");
result = template.render(Dict.create().set("x", 1));
assertEquals("124", result);
} |
@Override
public boolean setAlertFilter(String severity) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return false;
}
if (!SEVERITYLEVELS.contains(severity)) {
log.error("Invalid severity level: {}", severity);
return false;
}
try {
StringBuilder request = new StringBuilder();
request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
request.append(ANGLE_RIGHT + NEW_LINE);
request.append(buildStartTag(VOLT_ALERTS))
.append(buildStartTag(ALERT_FILTER, false))
.append(severity)
.append(buildEndTag(ALERT_FILTER))
.append(buildEndTag(VOLT_ALERTS))
.append(VOLT_NE_CLOSE);
controller.getDevicesMap().get(ncDeviceId).getSession().
editConfig(RUNNING, null, request.toString());
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
return false;
}
return true;
} | @Test
public void testInvalidSetAlertFilterInput() throws Exception {
String target;
boolean result;
for (int i = ZERO; i < INVALID_SET_TCS.length; i++) {
target = INVALID_SET_TCS[i];
result = voltConfig.setAlertFilter(target);
assertFalse("Incorrect response for ", result);
}
} |
@Override
protected long getEncodedElementByteSize(BigInteger value) throws Exception {
checkNotNull(value, String.format("cannot encode a null %s", BigInteger.class.getSimpleName()));
return BYTE_ARRAY_CODER.getEncodedElementByteSize(value.toByteArray());
} | @Test
public void testGetEncodedElementByteSize() throws Exception {
TestElementByteSizeObserver observer = new TestElementByteSizeObserver();
for (BigInteger value : TEST_VALUES) {
TEST_CODER.registerByteSizeObserver(value, observer);
observer.advance();
assertThat(
observer.getSumAndReset(),
equalTo(
(long) CoderUtils.encodeToByteArray(TEST_CODER, value, Coder.Context.NESTED).length));
}
} |
@Override
public void populateCxfRequestFromExchange(
org.apache.cxf.message.Exchange cxfExchange, Exchange camelExchange,
Map<String, Object> requestContext) {
// propagate request context
Map<String, Object> camelHeaders = camelExchange.getIn().getHeaders();
extractInvocationContextFromCamel(camelExchange, camelHeaders,
requestContext, CxfConstants.REQUEST_CONTEXT);
// propagate headers
propagateHeadersFromCamelToCxf(camelExchange, camelHeaders, cxfExchange,
requestContext);
String overrideAddress = camelExchange.getIn().getHeader(CxfConstants.DESTINATION_OVERRIDE_URL, String.class);
if (overrideAddress != null) {
LOG.trace("Client address is overridden by header '{}' to value '{}'",
CxfConstants.DESTINATION_OVERRIDE_URL, overrideAddress);
requestContext.put(Message.ENDPOINT_ADDRESS, overrideAddress);
}
// propagate attachments
propagateAttachments(camelExchange, requestContext);
} | @Test
public void testPopulateCxfRequestFromExchange() {
DefaultCxfBinding cxfBinding = new DefaultCxfBinding();
cxfBinding.setHeaderFilterStrategy(new DefaultHeaderFilterStrategy());
Exchange exchange = new DefaultExchange(context);
org.apache.cxf.message.Exchange cxfExchange = new org.apache.cxf.message.ExchangeImpl();
exchange.setProperty(CxfConstants.DATA_FORMAT_PROPERTY, DataFormat.PAYLOAD);
Map<String, Object> requestContext = new HashMap<>();
exchange.getIn().setHeader("soapAction", "urn:hello:world");
exchange.getIn().setHeader("MyFruitHeader", "peach");
exchange.getIn().setHeader("MyBrewHeader", Arrays.asList("cappuccino", "espresso"));
exchange.getIn(AttachmentMessage.class).addAttachment("att-1", new DataHandler(new FileDataSource("pom.xml")));
exchange.getIn(AttachmentMessage.class).getAttachmentObject("att-1").setHeader("attachment-header", "value 1");
cxfBinding.populateCxfRequestFromExchange(cxfExchange, exchange, requestContext);
// check the protocol headers
Map<String, List<String>> headers = CastUtils.cast((Map<?, ?>) requestContext.get(Message.PROTOCOL_HEADERS));
assertNotNull(headers);
assertEquals(3, headers.size());
verifyHeader(headers, "soapaction", "\"urn:hello:world\"");
verifyHeader(headers, "SoapAction", "\"urn:hello:world\"");
verifyHeader(headers, "SOAPAction", "\"urn:hello:world\"");
verifyHeader(headers, "myfruitheader", "peach");
verifyHeader(headers, "myFruitHeader", "peach");
verifyHeader(headers, "MYFRUITHEADER", "peach");
verifyHeader(headers, "MyBrewHeader", Arrays.asList("cappuccino", "espresso"));
Set<Attachment> attachments
= CastUtils.cast((Set<?>) requestContext.get(CxfConstants.CAMEL_CXF_ATTACHMENTS));
assertNotNull(attachments);
assertEquals(1, attachments.size());
Attachment att = attachments.iterator().next();
assertEquals("att-1", att.getId());
assertEquals("value 1", att.getHeader("attachment-header"));
} |
@Override
public HttpHeaders add(HttpHeaders headers) {
if (headers instanceof DefaultHttpHeaders) {
this.headers.add(((DefaultHttpHeaders) headers).headers);
return this;
} else {
return super.add(headers);
}
} | @Test
public void nullHeaderNameNotAllowed() {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() {
new DefaultHttpHeaders().add(null, "foo");
}
});
} |
private KafkaPool(
Reconciliation reconciliation,
Kafka kafka,
KafkaNodePool pool,
String componentName,
OwnerReference ownerReference,
NodeIdAssignment idAssignment,
SharedEnvironmentProvider sharedEnvironmentProvider
) {
super(
reconciliation,
kafka.getMetadata().getName(),
kafka.getMetadata().getNamespace(),
componentName,
Labels.fromResource(pool)
// Strimzi labels
.withStrimziKind(kafka.getKind())
// This needs to be selectable through the KafkaCluster selector. So we intentionally use <clusterName>-kafka
// as the strimzi.io/name. strimzi.io/pool-name can be used to select by node pool.
.withStrimziName(KafkaResources.kafkaComponentName(kafka.getMetadata().getName()))
.withStrimziCluster(kafka.getMetadata().getName())
.withStrimziComponentType(COMPONENT_TYPE)
.withStrimziPoolName(pool.getMetadata().getName())
// Kubernetes labels
.withKubernetesName(COMPONENT_TYPE)
.withKubernetesInstance(kafka.getMetadata().getName())
.withKubernetesPartOf(kafka.getMetadata().getName())
.withKubernetesManagedBy(STRIMZI_CLUSTER_OPERATOR_NAME),
ownerReference,
sharedEnvironmentProvider
);
this.poolName = pool.getMetadata().getName();
this.idAssignment = idAssignment;
} | @Test
public void testKafkaPool() {
KafkaPool kp = KafkaPool.fromCrd(
Reconciliation.DUMMY_RECONCILIATION,
KAFKA,
POOL,
new NodeIdAssignment(Set.of(10, 11, 13), Set.of(10, 11, 13), Set.of(), Set.of(), Set.of()),
new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build(),
OWNER_REFERENCE,
SHARED_ENV_PROVIDER
);
assertThat(kp, is(notNullValue()));
assertThat(kp.componentName, is(CLUSTER_NAME + "-pool"));
assertThat(kp.processRoles, is(Set.of(ProcessRoles.BROKER)));
assertThat(kp.isBroker(), is(true));
assertThat(kp.isController(), is(false));
assertThat(kp.storage, is(new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build()));
assertThat(kp.resources, is(nullValue()));
assertThat(kp.jvmOptions, is(nullValue()));
assertThat(kp.gcLoggingEnabled, is(false));
assertThat(kp.templateContainer, is(nullValue()));
assertThat(kp.templateInitContainer, is(nullValue()));
assertThat(kp.templatePod, is(nullValue()));
assertThat(kp.templatePerBrokerIngress, is(nullValue()));
assertThat(kp.templatePodSet, is(nullValue()));
assertThat(kp.templatePerBrokerRoute, is(nullValue()));
assertThat(kp.templatePerBrokerService, is(nullValue()));
assertThat(kp.templatePersistentVolumeClaims, is(nullValue()));
Set<NodeRef> nodes = kp.nodes();
assertThat(nodes.size(), is(3));
assertThat(nodes, hasItems(new NodeRef(CLUSTER_NAME + "-pool-10", 10, "pool", false, true),
new NodeRef(CLUSTER_NAME + "-pool-11", 11, "pool", false, true),
new NodeRef(CLUSTER_NAME + "-pool-13", 13, "pool", false, true)));
assertThat(kp.containsNodeId(10), is(true));
assertThat(kp.containsNodeId(11), is(true));
assertThat(kp.containsNodeId(12), is(false));
assertThat(kp.containsNodeId(13), is(true));
assertThat(kp.nodeRef(11), is(new NodeRef(CLUSTER_NAME + "-pool-11", 11, "pool", false, true)));
KafkaNodePoolStatus status = kp.generateNodePoolStatus("my-cluster-id");
assertThat(status.getClusterId(), is("my-cluster-id"));
assertThat(status.getReplicas(), is(3));
assertThat(status.getLabelSelector(), is("strimzi.io/cluster=my-cluster,strimzi.io/name=my-cluster-kafka,strimzi.io/kind=Kafka,strimzi.io/pool-name=pool"));
assertThat(status.getNodeIds().size(), is(3));
assertThat(status.getNodeIds(), hasItems(10, 11, 13));
assertThat(status.getRoles().size(), is(1));
assertThat(status.getRoles(), hasItems(ProcessRoles.BROKER));
} |
@Override
public Stream<ColumnName> resolveSelectStar(
final Optional<SourceName> sourceName
) {
return getSource().resolveSelectStar(sourceName)
.map(name -> aliases.getOrDefault(name, name));
} | @Test
public void shouldAddAliasOnResolveSelectStarWhenAliased() {
// Given:
when(source.resolveSelectStar(any()))
.thenReturn(ImmutableList.of(COL_1, COL_0, COL_2).stream());
// When:
final Stream<ColumnName> result = projectNode.resolveSelectStar(Optional.empty());
// Then:
final List<ColumnName> columns = result.collect(Collectors.toList());
assertThat(columns, contains(
ColumnNames.generatedJoinColumnAlias(ALIAS, COL_1),
ColumnNames.generatedJoinColumnAlias(ALIAS, COL_0),
ColumnNames.generatedJoinColumnAlias(ALIAS, COL_2)
));
} |
static Map<String, String> getSparkConf(String configFile, List<String> variables) {
Config appConfig = ConfigBuilder.of(configFile, variables);
return appConfig.getConfig("env").entrySet().stream()
.collect(
Collectors.toMap(
Map.Entry::getKey, e -> e.getValue().unwrapped().toString()));
} | @Test
public void testGetSparkConf() throws URISyntaxException, FileNotFoundException {
URI uri = ClassLoader.getSystemResource("spark_application.conf").toURI();
String file = new File(uri).toString();
Map<String, String> sparkConf = SparkStarter.getSparkConf(file, null);
assertEquals("SeaTunnel", sparkConf.get("job.name"));
assertEquals("1", sparkConf.get("spark.executor.cores"));
} |
@Override
public String toString() {
return String.format(
Locale.US,
"%s '%s' from %s (id %s), API-%d",
getClass().getName(),
mName,
mPackageName,
mId,
mApiVersion);
} | @Test
public void testToString() {
String toString = new TestableAddOn("id1", "name111", 8).toString();
Assert.assertTrue(toString.contains("name111"));
Assert.assertTrue(toString.contains("id1"));
Assert.assertTrue(toString.contains("TestableAddOn"));
Assert.assertTrue(toString.contains(getApplicationContext().getPackageName()));
Assert.assertTrue(toString.contains("API-8"));
} |
static void encodeFlowControlReceiver(
final UnsafeBuffer encodingBuffer,
final int offset,
final int captureLength,
final int length,
final long receiverId,
final int sessionId,
final int streamId,
final String channel,
final int receiverCount)
{
int encodedLength = encodeLogHeader(encodingBuffer, offset, captureLength, length);
encodingBuffer.putInt(offset + encodedLength, receiverCount, LITTLE_ENDIAN);
encodedLength += SIZE_OF_INT;
encodingBuffer.putLong(offset + encodedLength, receiverId, LITTLE_ENDIAN);
encodedLength += SIZE_OF_LONG;
encodingBuffer.putInt(offset + encodedLength, sessionId, LITTLE_ENDIAN);
encodedLength += SIZE_OF_INT;
encodingBuffer.putInt(offset + encodedLength, streamId, LITTLE_ENDIAN);
encodedLength += SIZE_OF_INT;
encodeTrailingString(
encodingBuffer, offset + encodedLength, captureLength - SIZE_OF_INT * 3 - SIZE_OF_LONG, channel);
} | @Test
void encodeFlowControlReceiverShouldWriteChannelLast()
{
final int offset = 48;
final long receiverId = 1947384623864823283L;
final int sessionId = 219;
final int streamId = 3;
final String channel = "my channel";
final int receiverCount = 17;
final int length = 4 * SIZE_OF_INT + SIZE_OF_LONG + channel.length();
final int captureLength = captureLength(length);
encodeFlowControlReceiver(
buffer, offset, captureLength, length, receiverId, sessionId, streamId, channel, receiverCount);
assertEquals(captureLength, buffer.getInt(offset, LITTLE_ENDIAN));
assertEquals(length, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN));
assertNotEquals(0, buffer.getLong(offset + SIZE_OF_INT * 2, LITTLE_ENDIAN));
assertEquals(receiverCount, buffer.getInt(offset + LOG_HEADER_LENGTH, LITTLE_ENDIAN));
assertEquals(receiverId, buffer.getLong(offset + LOG_HEADER_LENGTH + SIZE_OF_INT, LITTLE_ENDIAN));
assertEquals(sessionId, buffer.getInt(offset + LOG_HEADER_LENGTH + SIZE_OF_INT + SIZE_OF_LONG, LITTLE_ENDIAN));
assertEquals(streamId,
buffer.getInt(offset + LOG_HEADER_LENGTH + SIZE_OF_INT * 2 + SIZE_OF_LONG, LITTLE_ENDIAN));
assertEquals(channel,
buffer.getStringAscii(offset + LOG_HEADER_LENGTH + SIZE_OF_INT * 3 + SIZE_OF_LONG, LITTLE_ENDIAN));
} |
public ClientAuth getClientAuth() {
String clientAuth = getString(SSL_CLIENT_AUTHENTICATION_CONFIG);
if (originals().containsKey(SSL_CLIENT_AUTH_CONFIG)) {
if (originals().containsKey(SSL_CLIENT_AUTHENTICATION_CONFIG)) {
log.warn(
"The {} configuration is deprecated. Since a value has been supplied for the {} "
+ "configuration, that will be used instead",
SSL_CLIENT_AUTH_CONFIG,
SSL_CLIENT_AUTHENTICATION_CONFIG
);
} else {
log.warn(
"The configuration {} is deprecated and should be replaced with {}",
SSL_CLIENT_AUTH_CONFIG,
SSL_CLIENT_AUTHENTICATION_CONFIG
);
clientAuth = getBoolean(SSL_CLIENT_AUTH_CONFIG)
? SSL_CLIENT_AUTHENTICATION_REQUIRED
: SSL_CLIENT_AUTHENTICATION_NONE;
}
}
return getClientAuth(clientAuth);
} | @Test
public void shouldUseClientAuthenticationIfClientAuthProvidedRequested() {
// Given:
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.put(KsqlRestConfig.SSL_CLIENT_AUTH_CONFIG, false)
.put(KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_CONFIG,
KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_REQUESTED)
.build());
// When:
final ClientAuth clientAuth = config.getClientAuth();
// Then:
assertThat(clientAuth, is(ClientAuth.REQUEST));
} |
public String format() {
return dataSourceName + DELIMITER + tableName;
} | @Test
void assertFormat() {
String expected = "ds_0.tbl_0";
DataNode dataNode = new DataNode(expected);
assertThat(dataNode.format(), is(expected));
} |
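
For context, DataNode's one-argument constructor in the test parses the same dot-delimited text that format() emits. Below is a minimal, hypothetical round-trip sketch; the class name and validation rules are assumptions for illustration, not ShardingSphere's actual implementation.

// Minimal sketch: parse "dataSource.table" on construction, re-emit via format().
public final class DataNodeSketch {
    private static final String DELIMITER = ".";
    private final String dataSourceName;
    private final String tableName;

    public DataNodeSketch(final String dataNode) {
        int index = dataNode.indexOf(DELIMITER);
        // hypothetical validation: both segments must be non-empty
        if (index <= 0 || index == dataNode.length() - 1) {
            throw new IllegalArgumentException("Invalid data node: " + dataNode);
        }
        dataSourceName = dataNode.substring(0, index);
        tableName = dataNode.substring(index + 1);
    }

    public String format() {
        return dataSourceName + DELIMITER + tableName;
    }

    public static void main(String[] args) {
        System.out.println(new DataNodeSketch("ds_0.tbl_0").format()); // ds_0.tbl_0
    }
}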
public static FunctionDetails convert(SourceConfig sourceConfig, ExtractedSourceDetails sourceDetails)
throws IllegalArgumentException {
FunctionDetails.Builder functionDetailsBuilder = FunctionDetails.newBuilder();
boolean isBuiltin = !StringUtils.isEmpty(sourceConfig.getArchive()) && sourceConfig.getArchive()
.startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN);
if (sourceConfig.getTenant() != null) {
functionDetailsBuilder.setTenant(sourceConfig.getTenant());
}
if (sourceConfig.getNamespace() != null) {
functionDetailsBuilder.setNamespace(sourceConfig.getNamespace());
}
if (sourceConfig.getName() != null) {
functionDetailsBuilder.setName(sourceConfig.getName());
}
if (sourceConfig.getLogTopic() != null) {
functionDetailsBuilder.setLogTopic(sourceConfig.getLogTopic());
}
functionDetailsBuilder.setRuntime(FunctionDetails.Runtime.JAVA);
if (sourceConfig.getParallelism() != null) {
functionDetailsBuilder.setParallelism(sourceConfig.getParallelism());
} else {
functionDetailsBuilder.setParallelism(1);
}
functionDetailsBuilder.setClassName(IdentityFunction.class.getName());
functionDetailsBuilder.setAutoAck(true);
if (sourceConfig.getProcessingGuarantees() != null) {
functionDetailsBuilder.setProcessingGuarantees(
convertProcessingGuarantee(sourceConfig.getProcessingGuarantees()));
}
// set source spec
Function.SourceSpec.Builder sourceSpecBuilder = Function.SourceSpec.newBuilder();
if (sourceDetails.getSourceClassName() != null) {
sourceSpecBuilder.setClassName(sourceDetails.getSourceClassName());
}
if (isBuiltin) {
String builtin = sourceConfig.getArchive().replaceFirst("^builtin://", "");
sourceSpecBuilder.setBuiltin(builtin);
}
Map<String, Object> configs = new HashMap<>();
if (sourceConfig.getConfigs() != null) {
configs.putAll(sourceConfig.getConfigs());
}
// Batch source handling
if (sourceConfig.getBatchSourceConfig() != null) {
configs.put(BatchSourceConfig.BATCHSOURCE_CONFIG_KEY,
new Gson().toJson(sourceConfig.getBatchSourceConfig()));
configs.put(BatchSourceConfig.BATCHSOURCE_CLASSNAME_KEY, sourceSpecBuilder.getClassName());
sourceSpecBuilder.setClassName("org.apache.pulsar.functions.source.batch.BatchSourceExecutor");
}
sourceSpecBuilder.setConfigs(new Gson().toJson(configs));
if (sourceConfig.getSecrets() != null && !sourceConfig.getSecrets().isEmpty()) {
functionDetailsBuilder.setSecretsMap(new Gson().toJson(sourceConfig.getSecrets()));
}
if (sourceDetails.getTypeArg() != null) {
sourceSpecBuilder.setTypeClassName(sourceDetails.getTypeArg());
}
functionDetailsBuilder.setSource(sourceSpecBuilder);
// set up sink spec.
// Sink spec classname should be empty so that the default pulsar sink will be used
Function.SinkSpec.Builder sinkSpecBuilder = Function.SinkSpec.newBuilder();
if (!org.apache.commons.lang3.StringUtils.isEmpty(sourceConfig.getSchemaType())) {
sinkSpecBuilder.setSchemaType(sourceConfig.getSchemaType());
}
if (!org.apache.commons.lang3.StringUtils.isEmpty(sourceConfig.getSerdeClassName())) {
sinkSpecBuilder.setSerDeClassName(sourceConfig.getSerdeClassName());
}
if (!isEmpty(sourceConfig.getTopicName())) {
sinkSpecBuilder.setTopic(sourceConfig.getTopicName());
}
if (sourceDetails.getTypeArg() != null) {
sinkSpecBuilder.setTypeClassName(sourceDetails.getTypeArg());
}
if (sourceConfig.getProducerConfig() != null) {
sinkSpecBuilder.setProducerSpec(convertProducerConfigToProducerSpec(sourceConfig.getProducerConfig()));
}
if (sourceConfig.getBatchBuilder() != null) {
Function.ProducerSpec.Builder builder = sinkSpecBuilder.getProducerSpec() != null
? sinkSpecBuilder.getProducerSpec().toBuilder()
: Function.ProducerSpec.newBuilder();
sinkSpecBuilder.setProducerSpec(builder.setBatchBuilder(sourceConfig.getBatchBuilder()).build());
}
sinkSpecBuilder.setForwardSourceMessageProperty(true);
functionDetailsBuilder.setSink(sinkSpecBuilder);
// use default resources if resources not set
Resources resources = Resources.mergeWithDefault(sourceConfig.getResources());
Function.Resources.Builder bldr = Function.Resources.newBuilder();
bldr.setCpu(resources.getCpu());
bldr.setRam(resources.getRam());
bldr.setDisk(resources.getDisk());
functionDetailsBuilder.setResources(bldr);
if (!org.apache.commons.lang3.StringUtils.isEmpty(sourceConfig.getRuntimeFlags())) {
functionDetailsBuilder.setRuntimeFlags(sourceConfig.getRuntimeFlags());
}
functionDetailsBuilder.setComponentType(FunctionDetails.ComponentType.SOURCE);
if (!StringUtils.isEmpty(sourceConfig.getCustomRuntimeOptions())) {
functionDetailsBuilder.setCustomRuntimeOptions(sourceConfig.getCustomRuntimeOptions());
}
return FunctionConfigUtils.validateFunctionDetails(functionDetailsBuilder.build());
} | @Test
public void testSupportsBatchBuilderWhenProducerConfigExists() {
SourceConfig sourceConfig = createSourceConfig();
sourceConfig.setBatchBuilder("KEY_BASED");
sourceConfig.getProducerConfig().setMaxPendingMessages(123456);
Function.FunctionDetails functionDetails =
SourceConfigUtils.convert(sourceConfig, new SourceConfigUtils.ExtractedSourceDetails(null, null));
assertEquals(functionDetails.getSink().getProducerSpec().getBatchBuilder(), "KEY_BASED");
assertEquals(functionDetails.getSink().getProducerSpec().getMaxPendingMessages(), 123456);
} |
public PrimitiveType encodingType()
{
return encodingType;
} | @Test
void shouldHandleEncodingTypesWithNamedTypes() throws Exception
{
try (InputStream in = Tests.getLocalResource("encoding-types-schema.xml"))
{
final MessageSchema schema = parse(in, ParserOptions.DEFAULT);
final List<Field> fields = schema.getMessage(1).fields();
assertNotNull(fields);
EnumType type = (EnumType)fields.get(1).type();
assertThat(type.encodingType(), is(PrimitiveType.CHAR));
type = (EnumType)fields.get(2).type();
assertThat(type.encodingType(), is(PrimitiveType.UINT8));
}
} |
public static <Key extends Comparable, Value, ListType extends List<Value>> MultiMap<Key, Value, ListType> make(final boolean updatable,
final NewSubMapProvider<Value, ListType> newSubMapProvider) {
if (updatable) {
return new ChangeHandledMultiMap<>(new RawMultiMap<>(newSubMapProvider));
} else {
return new RawMultiMap<>(newSubMapProvider);
}
} | @Test
void changeHandled() throws Exception {
assertThat(MultiMapFactory.make(true) instanceof ChangeHandledMultiMap).isTrue();
} |
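
The factory above is a small decorator switch: the updatable flag decides whether the raw map is wrapped in a change-tracking layer, which is exactly what the instanceof assertion checks. A self-contained sketch of the same shape with hypothetical stand-in types (the real MultiMap hierarchy is generic over keys and sub-list providers):

import java.util.ArrayList;
import java.util.List;

// Tiny sketch of a "decorate when updatable" factory; all types here are illustrative.
public final class DecoratingFactoryDemo {
    interface Store { void add(String value); List<String> values(); }

    static final class RawStore implements Store {
        private final List<String> values = new ArrayList<>();
        public void add(String value) { values.add(value); }
        public List<String> values() { return values; }
    }

    static final class ChangeTrackingStore implements Store {
        private final Store delegate;
        private int changes;
        ChangeTrackingStore(Store delegate) { this.delegate = delegate; }
        public void add(String value) { changes++; delegate.add(value); } // count mutations
        public List<String> values() { return delegate.values(); }
        int changes() { return changes; }
    }

    static Store make(boolean updatable) {
        return updatable ? new ChangeTrackingStore(new RawStore()) : new RawStore();
    }

    public static void main(String[] args) {
        System.out.println(make(true) instanceof ChangeTrackingStore);  // true
        System.out.println(make(false) instanceof ChangeTrackingStore); // false
    }
}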
@Deprecated
static Optional<GlobalStreamExchangeMode> getGlobalStreamExchangeMode(ReadableConfig config) {
return config.getOptional(ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE)
.map(
value -> {
try {
return GlobalStreamExchangeMode.valueOf(
convertLegacyShuffleMode(value).toUpperCase());
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
String.format(
"Unsupported value %s for config %s.",
value,
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE
.key()));
}
});
} | @Test
void testLegacyShuffleMode() {
final Configuration configuration = new Configuration();
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
GlobalStreamExchangeMode.ALL_EDGES_BLOCKING.toString());
assertThat(getGlobalStreamExchangeMode(configuration).orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
GlobalStreamExchangeMode.FORWARD_EDGES_PIPELINED.toString());
assertThat(getGlobalStreamExchangeMode(configuration).orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.FORWARD_EDGES_PIPELINED);
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
GlobalStreamExchangeMode.POINTWISE_EDGES_PIPELINED.toString());
assertThat(getGlobalStreamExchangeMode(configuration).orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.POINTWISE_EDGES_PIPELINED);
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
GlobalStreamExchangeMode.ALL_EDGES_PIPELINED.toString());
assertThat(getGlobalStreamExchangeMode(configuration).orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.ALL_EDGES_PIPELINED);
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
StreamExchangeModeUtils.ALL_EDGES_BLOCKING_LEGACY);
assertThat(getGlobalStreamExchangeMode(configuration).orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
StreamExchangeModeUtils.ALL_EDGES_PIPELINED_LEGACY);
assertThat(getGlobalStreamExchangeMode(configuration).orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.ALL_EDGES_PIPELINED);
configuration.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE, "Forward_edges_PIPELINED");
assertThat(
StreamExchangeModeUtils.getGlobalStreamExchangeMode(configuration)
.orElseThrow(AssertionError::new))
.isEqualTo(GlobalStreamExchangeMode.FORWARD_EDGES_PIPELINED);
} |
@Override
public Object read(final MySQLPacketPayload payload, final boolean unsigned) {
return payload.readStringLenenc();
} | @Test
void assertRead() {
when(payload.readStringLenenc()).thenReturn("value");
assertThat(new MySQLStringLenencBinaryProtocolValue().read(payload, false), is("value"));
} |
public static Schema mergeWideningNullable(Schema schema1, Schema schema2) {
if (schema1.getFieldCount() != schema2.getFieldCount()) {
throw new IllegalArgumentException(
"Cannot merge schemas with different numbers of fields. "
+ "schema1: "
+ schema1
+ " schema2: "
+ schema2);
}
Schema.Builder builder = Schema.builder();
for (int i = 0; i < schema1.getFieldCount(); ++i) {
String name = schema1.getField(i).getName();
builder.addField(
name, widenNullableTypes(schema1.getField(i).getType(), schema2.getField(i).getType()));
}
return builder.build();
} | @Test
public void testWidenMap() {
Schema schema1 =
Schema.builder().addMapField("field1", FieldType.INT32, FieldType.INT32).build();
Schema schema2 =
Schema.builder()
.addMapField(
"field1", FieldType.INT32.withNullable(true), FieldType.INT32.withNullable(true))
.build();
Schema expected =
Schema.builder()
.addMapField(
"field1", FieldType.INT32.withNullable(true), FieldType.INT32.withNullable(true))
.build();
assertEquals(expected, SchemaUtils.mergeWideningNullable(schema1, schema2));
} |
Object[] getOneRow() throws KettleException {
if ( !openNextFile() ) {
return null;
}
// Build an empty row based on the meta-data
Object[] outputRowData = buildEmptyRow();
try {
// Create new row or clone
if ( meta.getIsInFields() ) {
outputRowData = copyOrCloneArrayFromLoadFile( outputRowData, data.readrow );
}
// Read fields...
for ( int i = 0; i < data.nrInputFields; i++ ) {
// Get field
LoadFileInputField loadFileInputField = meta.getInputFields()[i];
Object o = null;
int indexField = data.totalpreviousfields + i;
ValueMetaInterface targetValueMeta = data.outputRowMeta.getValueMeta( indexField );
ValueMetaInterface sourceValueMeta = data.convertRowMeta.getValueMeta( indexField );
switch ( loadFileInputField.getElementType() ) {
case LoadFileInputField.ELEMENT_TYPE_FILECONTENT:
// DO Trimming!
switch ( loadFileInputField.getTrimType() ) {
case LoadFileInputField.TYPE_TRIM_LEFT:
if ( meta.getEncoding() != null ) {
data.filecontent = Const.ltrim( new String( data.filecontent, meta.getEncoding() ) ).getBytes();
} else {
data.filecontent = Const.ltrim( new String( data.filecontent ) ).getBytes();
}
break;
case LoadFileInputField.TYPE_TRIM_RIGHT:
if ( meta.getEncoding() != null ) {
data.filecontent = Const.rtrim( new String( data.filecontent, meta.getEncoding() ) ).getBytes();
} else {
data.filecontent = Const.rtrim( new String( data.filecontent ) ).getBytes();
}
break;
case LoadFileInputField.TYPE_TRIM_BOTH:
if ( meta.getEncoding() != null ) {
data.filecontent = Const.trim( new String( data.filecontent, meta.getEncoding() ) ).getBytes();
} else {
data.filecontent = Const.trim( new String( data.filecontent ) ).getBytes();
}
break;
default:
break;
}
if ( targetValueMeta.getType() != ValueMetaInterface.TYPE_BINARY ) {
// handle as a String
if ( meta.getEncoding() != null ) {
o = new String( data.filecontent, meta.getEncoding() );
} else {
o = new String( data.filecontent );
}
} else {
// save as byte[] without any conversion
o = data.filecontent;
}
break;
case LoadFileInputField.ELEMENT_TYPE_FILESIZE:
o = String.valueOf( data.fileSize );
break;
default:
break;
}
if ( targetValueMeta.getType() == ValueMetaInterface.TYPE_BINARY ) {
// save as byte[] without any conversion
outputRowData[indexField] = o;
} else {
// convert string (processing type) to the target type
outputRowData[indexField] = targetValueMeta.convertData( sourceValueMeta, o );
}
// Do we need to repeat this field if it is null?
if ( loadFileInputField.isRepeated() ) {
if ( data.previousRow != null && o == null ) {
outputRowData[indexField] = data.previousRow[indexField];
}
}
} // End of loop over fields...
int rowIndex = data.totalpreviousfields + data.nrInputFields;
// See if we need to add the filename to the row...
if ( meta.includeFilename() && meta.getFilenameField() != null && meta.getFilenameField().length() > 0 ) {
outputRowData[rowIndex++] = data.filename;
}
// See if we need to add the row number to the row...
if ( meta.includeRowNumber() && meta.getRowNumberField() != null && meta.getRowNumberField().length() > 0 ) {
outputRowData[rowIndex++] = new Long( data.rownr );
}
// Possibly add short filename...
if ( meta.getShortFileNameField() != null && meta.getShortFileNameField().length() > 0 ) {
outputRowData[rowIndex++] = data.shortFilename;
}
// Add Extension
if ( meta.getExtensionField() != null && meta.getExtensionField().length() > 0 ) {
outputRowData[rowIndex++] = data.extension;
}
// add path
if ( meta.getPathField() != null && meta.getPathField().length() > 0 ) {
outputRowData[rowIndex++] = data.path;
}
// add Hidden
if ( meta.isHiddenField() != null && meta.isHiddenField().length() > 0 ) {
outputRowData[rowIndex++] = new Boolean( data.hidden );
}
// Add modification date
if ( meta.getLastModificationDateField() != null && meta.getLastModificationDateField().length() > 0 ) {
outputRowData[rowIndex++] = data.lastModificationDateTime;
}
// Add Uri
if ( meta.getUriField() != null && meta.getUriField().length() > 0 ) {
outputRowData[rowIndex++] = data.uriName;
}
// Add RootUri
if ( meta.getRootUriField() != null && meta.getRootUriField().length() > 0 ) {
outputRowData[rowIndex++] = data.rootUriName;
}
RowMetaInterface irow = getInputRowMeta();
data.previousRow = irow == null ? outputRowData : irow.cloneRow( outputRowData ); // copy it to make sure
// the next step doesn't change it in between...
incrementLinesInput();
data.rownr++;
} catch ( Exception e ) {
throw new KettleException( "Error during processing a row", e );
}
return outputRowData;
} | @Test
public void testGetOneRow() throws Exception {
// string without specified encoding
stepInputFiles.addFile( getFile( "input1.txt" ) );
assertNotNull( stepLoadFileInput.getOneRow() );
assertEquals( "input1 - not empty", new String( stepLoadFileInput.data.filecontent ) );
} |
@Udf
public String concat(@UdfParameter final String... jsonStrings) {
if (jsonStrings == null) {
return null;
}
final List<JsonNode> nodes = new ArrayList<>(jsonStrings.length);
boolean allObjects = true;
for (final String jsonString : jsonStrings) {
if (jsonString == null) {
return null;
}
final JsonNode node = UdfJsonMapper.parseJson(jsonString);
if (node.isMissingNode()) {
return null;
}
if (allObjects && !node.isObject()) {
allObjects = false;
}
nodes.add(node);
}
JsonNode result = nodes.get(0);
if (allObjects) {
for (int i = 1; i < nodes.size(); i++) {
result = concatObjects((ObjectNode) result, (ObjectNode) nodes.get(i));
}
} else {
for (int i = 1; i < nodes.size(); i++) {
result = concatArrays(toArrayNode(result), toArrayNode(nodes.get(i)));
}
}
return UdfJsonMapper.writeValueAsJson(result);
} | @Test
public void shouldMerge2Arrays() {
// When:
final String result = udf.concat("[1, 2]", "[3, 4]");
// Then:
assertEquals("[1,2,3,4]", result);
} |
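
The concat UDF falls back to array semantics whenever any argument is not a JSON object, wrapping non-arrays before concatenating. A plain-Jackson sketch of that branch, assuming only jackson-databind on the classpath; UdfJsonMapper from the UDF above is replaced here by a bare ObjectMapper:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;

// Sketch of the "not all objects" branch: wrap non-arrays, then concatenate.
public final class JsonConcatSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    static String concatAsArrays(String left, String right) throws Exception {
        ArrayNode result = MAPPER.createArrayNode();
        result.addAll(toArray(MAPPER.readTree(left)));
        result.addAll(toArray(MAPPER.readTree(right)));
        return MAPPER.writeValueAsString(result);
    }

    private static ArrayNode toArray(JsonNode node) {
        if (node.isArray()) {
            return (ArrayNode) node;
        }
        return MAPPER.createArrayNode().add(node); // wrap scalar/object into [node]
    }

    public static void main(String[] args) throws Exception {
        System.out.println(concatAsArrays("[1, 2]", "[3, 4]")); // [1,2,3,4]
    }
}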
public int size() {
return stream.size();
} | @Test
public void testSize() {
writer.write(byteBuffer.asReadOnlyBuffer());
int size = writer.size();
Assertions.assertEquals(size, 13);
} |
public static <E> E findStaticFieldValue(Class clazz, String fieldName) {
try {
Field field = clazz.getField(fieldName);
return (E) field.get(null);
} catch (Exception ignore) {
return null;
}
} | @Test
public void test_whenClassAndFieldExist() {
Integer value = findStaticFieldValue(ClassWithStaticField.class, "staticField");
assertEquals(ClassWithStaticField.staticField, value);
} |
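
Since the helper swallows every reflection failure and returns null, callers get a quiet miss for absent or inaccessible fields rather than an exception. A self-contained, JDK-only sketch of the same lookup; the example class and field are illustrative:

import java.lang.reflect.Field;

// Same null-on-failure static field lookup, demonstrated end to end.
public final class StaticFieldLookupDemo {
    public static final class ClassWithStaticField {
        public static final Integer staticField = 42;
    }

    @SuppressWarnings("unchecked")
    static <E> E findStaticFieldValue(Class<?> clazz, String fieldName) {
        try {
            Field field = clazz.getField(fieldName); // public fields only
            return (E) field.get(null);              // null receiver => static field
        } catch (Exception ignore) {
            return null;                             // quiet miss on any failure
        }
    }

    public static void main(String[] args) {
        Integer value = findStaticFieldValue(ClassWithStaticField.class, "staticField");
        System.out.println(value); // 42
        // Missing or non-public fields yield null instead of throwing:
        System.out.println((Object) findStaticFieldValue(ClassWithStaticField.class, "nope")); // null
    }
}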
@SuppressWarnings("ConstantConditions")
public boolean addOrReplaceAction(@NonNull Action a) {
if (a == null) {
throw new IllegalArgumentException("Action must be non-null");
}
// CopyOnWriteArrayList does not support Iterator.remove, so need to do it this way:
List<Action> old = new ArrayList<>(1);
List<Action> current = getActions();
boolean found = false;
for (Action a2 : current) {
if (!found && a.equals(a2)) {
found = true;
} else if (a2.getClass() == a.getClass()) {
old.add(a2);
}
}
current.removeAll(old);
if (!found) {
addAction(a);
}
return !found || !old.isEmpty();
} | @SuppressWarnings("deprecation")
@Test
public void addOrReplaceAction() {
CauseAction a1 = new CauseAction();
ParametersAction a2 = new ParametersAction();
thing.addAction(a1);
thing.addAction(a2);
CauseAction a3 = new CauseAction();
assertTrue(thing.addOrReplaceAction(a3));
assertEquals(Arrays.asList(a2, a3), thing.getActions());
assertFalse(thing.addOrReplaceAction(a3));
assertEquals(Arrays.asList(a2, a3), thing.getActions());
thing.addAction(a1);
assertEquals(Arrays.asList(a2, a3, a1), thing.getActions());
assertTrue(thing.addOrReplaceAction(a3));
assertEquals(Arrays.asList(a2, a3), thing.getActions());
} |
@Override
public void execute(GraphModel graphModel) {
Graph graph;
if (isDirected) {
graph = graphModel.getDirectedGraphVisible();
} else {
graph = graphModel.getUndirectedGraphVisible();
}
execute(graph);
} | @Test
public void testColumnReplace() {
GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1);
graphModel.getNodeTable().addColumn(ClusteringCoefficient.CLUSTERING_COEFF, String.class);
ClusteringCoefficient cc = new ClusteringCoefficient();
cc.execute(graphModel);
} |
@Override
public void dropRuleItemConfiguration(final DropRuleItemEvent event, final MaskRuleConfiguration currentRuleConfig) {
currentRuleConfig.getTables().removeIf(each -> each.getName().equals(((DropNamedRuleItemEvent) event).getItemName()));
} | @Test
void assertDropRuleItemConfiguration() {
MaskRuleConfiguration currentRuleConfig = new MaskRuleConfiguration(
new LinkedList<>(Collections.singleton(new MaskTableRuleConfiguration("foo_tbl", Collections.emptyList()))), Collections.emptyMap());
new MaskTableChangedProcessor().dropRuleItemConfiguration(new DropNamedRuleItemEvent("foo_db", "foo_tbl", ""), currentRuleConfig);
assertTrue(currentRuleConfig.getTables().isEmpty());
} |
@VisibleForTesting
static void initAddrUseFqdn(List<InetAddress> addrs) {
useFqdn = true;
analyzePriorityCidrs();
String fqdn = null;
if (PRIORITY_CIDRS.isEmpty()) {
// Get FQDN from local host by default.
try {
InetAddress localHost = InetAddress.getLocalHost();
fqdn = localHost.getCanonicalHostName();
String ip = localHost.getHostAddress();
LOG.info("Get FQDN from local host by default, FQDN: {}, ip: {}, v6: {}", fqdn, ip,
localHost instanceof Inet6Address);
} catch (UnknownHostException e) {
LOG.error("failed to get FQDN from local host, will exit", e);
System.exit(-1);
}
if (fqdn == null) {
LOG.error("priority_networks is not set and we cannot get FQDN from local host");
System.exit(-1);
}
// Try to resolve addr from FQDN
InetAddress uncheckedInetAddress = null;
try {
uncheckedInetAddress = InetAddress.getByName(fqdn);
} catch (UnknownHostException e) {
LOG.error("failed to parse FQDN: {}, message: {}", fqdn, e.getMessage(), e);
System.exit(-1);
}
if (null == uncheckedInetAddress) {
LOG.error("failed to parse FQDN: {}", fqdn);
System.exit(-1);
}
// Check whether the InetAddress obtained via FQDN is bound to some network interface
boolean hasInetAddr = false;
for (InetAddress addr : addrs) {
LOG.info("Try to match addr in fqdn mode, ip: {}, FQDN: {}",
addr.getHostAddress(), addr.getCanonicalHostName());
if (addr.getCanonicalHostName()
.equals(uncheckedInetAddress.getCanonicalHostName())) {
hasInetAddr = true;
break;
}
}
if (hasInetAddr) {
localAddr = uncheckedInetAddress;
LOG.info("Using FQDN from local host by default, FQDN: {}, ip: {}, v6: {}",
localAddr.getCanonicalHostName(),
localAddr.getHostAddress(),
localAddr instanceof Inet6Address);
} else {
LOG.error("Cannot find a network interface matching FQDN: {}", fqdn);
System.exit(-1);
}
} else {
LOG.info("using priority_networks in fqdn mode to decide whether ipv6 or ipv4 is preferred");
for (InetAddress addr : addrs) {
String hostAddr = addr.getHostAddress();
String canonicalHostName = addr.getCanonicalHostName();
LOG.info("Try to match addr in fqdn mode, ip: {}, FQDN: {}", hostAddr, canonicalHostName);
if (isInPriorNetwork(hostAddr)) {
localAddr = addr;
fqdn = canonicalHostName;
LOG.info("Using FQDN from matched addr, FQDN: {}, ip: {}, v6: {}",
fqdn, hostAddr, addr instanceof Inet6Address);
break;
}
LOG.info("skip addr {} not belonged to priority networks in FQDN mode", addr);
}
if (fqdn == null) {
LOG.error("priority_networks has been set and we cannot find matched addr, will exit");
System.exit(-1);
}
}
// double-check the reverse resolution
String canonicalHostName = localAddr.getCanonicalHostName();
if (!canonicalHostName.equals(fqdn)) {
LOG.error("The FQDN of the parsed address [{}] is not the same as " +
"the FQDN obtained from the host [{}]", canonicalHostName, fqdn);
System.exit(-1);
}
} | @Test(expected = IllegalAccessException.class)
public void testGetStartWithFQDNThrowUnknownHostException() {
String oldVal = Config.priority_networks;
Config.priority_networks = "";
testInitAddrUseFqdnCommonMock();
List<InetAddress> hosts = NetUtils.getHosts();
new MockUp<InetAddress>() {
@Mock
public InetAddress getLocalHost() throws UnknownHostException {
throw new UnknownHostException();
}
};
FrontendOptions.initAddrUseFqdn(hosts);
Config.priority_networks = oldVal;
} |
public Mutex mutexFor(Object mutexKey) {
Mutex mutex;
synchronized (mainMutex) {
mutex = mutexMap.computeIfAbsent(mutexKey, Mutex::new);
mutex.referenceCount++;
}
return mutex;
} | @Test
public void testConcurrentMutexOperation() {
final String[] keys = new String[]{"a", "b", "c"};
final Map<String, Integer> timesAcquired = new HashMap<>();
int concurrency = RuntimeAvailableProcessors.get() * 3;
final CyclicBarrier cyc = new CyclicBarrier(concurrency + 1);
for (int i = 0; i < concurrency; i++) {
new Thread(() -> {
await(cyc);
for (String key : keys) {
try (ContextMutexFactory.Mutex mutex = contextMutexFactory.mutexFor(key)) {
synchronized (mutex) {
Integer value = timesAcquired.get(key);
if (value == null) {
timesAcquired.put(key, 1);
} else {
timesAcquired.put(key, value + 1);
}
}
}
}
await(cyc);
}).start();
}
// start threads, wait for them to finish
await(cyc);
await(cyc);
// assert each key's lock was acquired by each thread
for (String key : keys) {
assertEquals(concurrency, timesAcquired.get(key).longValue());
}
// assert there are no mutexes leftover
assertEquals(0, contextMutexFactory.mutexMap.size());
if (testFailed.get()) {
fail("Failure due to exception while waiting on cyclic barrier.");
}
} |
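
mutexFor pairs with a close() that decrements the reference count under the same lock, which is why the test can assert the map drains back to empty. A minimal sketch of that full lifecycle, assuming the Mutex type is AutoCloseable as the test's try-with-resources usage suggests:

import java.util.HashMap;
import java.util.Map;

// Reference-counted per-key mutex factory: the last holder removes the entry.
public final class RefCountedMutexFactory {
    private final Object mainMutex = new Object();
    final Map<Object, Mutex> mutexMap = new HashMap<>();

    public final class Mutex implements AutoCloseable {
        private final Object key;
        private int referenceCount;

        private Mutex(Object key) {
            this.key = key;
        }

        @Override
        public void close() {
            synchronized (mainMutex) {
                if (--referenceCount == 0) {
                    mutexMap.remove(key); // last holder cleans up the entry
                }
            }
        }
    }

    public Mutex mutexFor(Object mutexKey) {
        synchronized (mainMutex) {
            Mutex mutex = mutexMap.computeIfAbsent(mutexKey, Mutex::new);
            mutex.referenceCount++;
            return mutex;
        }
    }
}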
@Nullable
public static ValueReference of(Object value) {
if (value instanceof Boolean) {
return of((Boolean) value);
} else if (value instanceof Double) {
return of((Double) value);
} else if (value instanceof Float) {
return of((Float) value);
} else if (value instanceof Integer) {
return of((Integer) value);
} else if (value instanceof Long) {
return of((Long) value);
} else if (value instanceof String) {
return of((String) value);
} else if (value instanceof Enum) {
return of((Enum) value);
} else if (value instanceof EncryptedValue encryptedValue) {
return of(encryptedValue);
} else {
return null;
}
} | @Test
public void serializeInteger() throws IOException {
assertJsonEqualsNonStrict(objectMapper.writeValueAsString(ValueReference.of(1)), "{\"@type\":\"integer\",\"@value\":1}");
assertJsonEqualsNonStrict(objectMapper.writeValueAsString(ValueReference.of(42)), "{\"@type\":\"integer\",\"@value\":42}");
} |
@Udf(description = "Returns the inverse (arc) cosine of an INT value")
public Double acos(
@UdfParameter(
value = "value",
description = "The value to get the inverse cosine of."
) final Integer value
) {
return acos(value == null ? null : value.doubleValue());
} | @Test
public void shouldHandlePositive() {
assertThat(udf.acos(0.43), closeTo(1.1263035498590777, 0.000000000000001));
assertThat(udf.acos(0.5), closeTo(1.0471975511965979, 0.000000000000001));
assertThat(udf.acos(1.0), closeTo(0.0, 0.000000000000001));
assertThat(udf.acos(1), closeTo(0.0, 0.000000000000001));
assertThat(udf.acos(1L), closeTo(0.0, 0.000000000000001));
} |
@Override
@Transactional(rollbackFor = Exception.class)
public void syncCodegenFromDB(Long tableId) {
// Validate that the table already exists
CodegenTableDO table = codegenTableMapper.selectById(tableId);
if (table == null) {
throw exception(CODEGEN_TABLE_NOT_EXISTS);
}
// Get the table structure from the database
TableInfo tableInfo = databaseTableService.getTable(table.getDataSourceConfigId(), table.getTableName());
// Perform the synchronization
syncCodegen0(tableId, tableInfo);
} | @Test
@Disabled // TODO @芋艿: this unit test fails randomly and needs to be investigated;
public void testSyncCodegenFromDB() {
// mock data (CodegenTableDO)
CodegenTableDO table = randomPojo(CodegenTableDO.class, o -> o.setTableName("t_yunai")
.setDataSourceConfigId(1L).setScene(CodegenSceneEnum.ADMIN.getScene()));
codegenTableMapper.insert(table);
CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())
.setColumnName("id"));
codegenColumnMapper.insert(column01);
CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())
.setColumnName("name"));
codegenColumnMapper.insert(column02);
// prepare parameters
Long tableId = table.getId();
// mock method (TableInfo)
TableInfo tableInfo = mock(TableInfo.class);
when(databaseTableService.getTable(eq(1L), eq("t_yunai")))
.thenReturn(tableInfo);
when(tableInfo.getComment()).thenReturn("芋艿");
// mock method (TableInfo fields)
TableField field01 = mock(TableField.class);
when(field01.getComment()).thenReturn("主键");
TableField field03 = mock(TableField.class);
when(field03.getComment()).thenReturn("分类");
List<TableField> fields = Arrays.asList(field01, field03);
when(tableInfo.getFields()).thenReturn(fields);
when(databaseTableService.getTable(eq(1L), eq("t_yunai")))
.thenReturn(tableInfo);
// mock method (CodegenTableDO)
List<CodegenColumnDO> newColumns = randomPojoList(CodegenColumnDO.class);
when(codegenBuilder.buildColumns(eq(table.getId()), argThat(tableFields -> {
assertEquals(2, tableFields.size());
assertSame(tableInfo.getFields(), tableFields);
return true;
}))).thenReturn(newColumns);
// invoke
codegenService.syncCodegenFromDB(tableId);
// assert
List<CodegenColumnDO> dbColumns = codegenColumnMapper.selectList();
assertEquals(newColumns.size(), dbColumns.size());
assertPojoEquals(newColumns.get(0), dbColumns.get(0));
assertPojoEquals(newColumns.get(1), dbColumns.get(1));
} |
public void removeAttribute(String name) {
parent.context().remove(name);
} | @Test
void testRemoveAttribute() {
URI uri = URI.create("http://localhost:8080/test");
HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
DiscFilterRequest request = new DiscFilterRequest(httpReq);
request.setAttribute("some_attr", "some_value");
assertTrue(request.containsAttribute("some_attr"));
request.removeAttribute("some_attr");
assertFalse(request.containsAttribute("some_attr"));
} |
public FEELFnResult<List> invoke(@ParameterName( "list" ) Object list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
// spec requires us to return a new list
final List<Object> result = new ArrayList<>();
flattenList( list, result );
return FEELFnResult.ofResult( result );
} | @Test
void invokeParamNotCollection() {
FunctionTestUtil.assertResult(flattenFunction.invoke(BigDecimal.valueOf(10.2)),
Collections.singletonList(BigDecimal.valueOf(10.2)));
FunctionTestUtil.assertResult(flattenFunction.invoke("test"), Collections.singletonList("test"));
} |
public void completeDefaults(Props props) {
// init string properties
for (Map.Entry<Object, Object> entry : defaults().entrySet()) {
props.setDefault(entry.getKey().toString(), entry.getValue().toString());
}
boolean clusterEnabled = props.valueAsBoolean(CLUSTER_ENABLED.getKey(), false);
if (!clusterEnabled) {
props.setDefault(SEARCH_HOST.getKey(), InetAddress.getLoopbackAddress().getHostAddress());
props.setDefault(SEARCH_PORT.getKey(), "9001");
fixPortIfZero(props, Property.SEARCH_HOST.getKey(), SEARCH_PORT.getKey());
fixEsTransportPortIfNull(props);
}
} | @Test
public void defaults_throws_exception_on_same_property_defined_more_than_once_in_extensions() {
Props p = new Props(new Properties());
when(serviceLoaderWrapper.load()).thenReturn(ImmutableSet.of(new FakeExtension1(), new FakeExtension2()));
assertThatThrownBy(() -> processProperties.completeDefaults(p))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Configuration error: property definition named 'sonar.some.property2' found in multiple extensions.");
} |
public static boolean safeCollectionEquals(final Collection<Comparable<?>> sources, final Collection<Comparable<?>> targets) {
List<Comparable<?>> all = new ArrayList<>(sources);
all.addAll(targets);
Optional<Class<?>> clazz = getTargetNumericType(all);
if (!clazz.isPresent()) {
return sources.equals(targets);
}
List<Comparable<?>> sourceClasses = sources.stream().map(each -> parseNumberByClazz(each.toString(), clazz.get())).collect(Collectors.toList());
List<Comparable<?>> targetClasses = targets.stream().map(each -> parseNumberByClazz(each.toString(), clazz.get())).collect(Collectors.toList());
return sourceClasses.equals(targetClasses);
} | @Test
void assertSafeCollectionEqualsForBigInteger() {
List<Comparable<?>> sources = Arrays.asList(10, 12);
List<Comparable<?>> targets = Arrays.asList(BigInteger.valueOf(10L), BigInteger.valueOf(12L));
assertTrue(SafeNumberOperationUtils.safeCollectionEquals(sources, targets));
} |
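
The key step is re-parsing both collections into one target numeric type before comparing, since Integer.valueOf(10) is never equals() to BigInteger.valueOf(10). A simplified sketch that hard-codes BigInteger as the widened type; the real utility first scans both collections to pick the widest numeric type present:

import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Widen both sides to one numeric type, then compare element by element.
public final class WideningEqualsSketch {
    static boolean widenedEquals(List<? extends Number> sources, List<? extends Number> targets) {
        List<BigInteger> left = sources.stream()
                .map(each -> new BigInteger(each.toString()))
                .collect(Collectors.toList());
        List<BigInteger> right = targets.stream()
                .map(each -> new BigInteger(each.toString()))
                .collect(Collectors.toList());
        return left.equals(right);
    }

    public static void main(String[] args) {
        List<Integer> sources = Arrays.asList(10, 12);
        List<BigInteger> targets = Arrays.asList(BigInteger.valueOf(10L), BigInteger.valueOf(12L));
        System.out.println(sources.equals(targets));         // false: mixed boxed types
        System.out.println(widenedEquals(sources, targets)); // true after widening
    }
}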
public boolean checkIfEnabled() {
try {
this.gitCommand = locateDefaultGit();
MutableString stdOut = new MutableString();
this.processWrapperFactory.create(null, l -> stdOut.string = l, gitCommand, "--version").execute();
return stdOut.string != null && stdOut.string.startsWith("git version") && isCompatibleGitVersion(stdOut.string);
} catch (Exception e) {
LOG.debug("Failed to find git native client", e);
return false;
}
} | @Test
public void git_should_be_enabled_if_version_is_equal_or_greater_than_required_minimum() {
Stream.of(
"git version 2.24.0",
"git version 2.25.2.1",
"git version 2.24.1.1.windows.2",
"git version 2.25.1.msysgit.2"
).forEach(output -> {
ProcessWrapperFactory mockedCmd = mockGitVersionCommand(output);
mockGitWhereOnWindows(mockedCmd);
when(mockedCmd.create(isNull(), any(), eq("C:\\mockGit.exe"), eq("--version"))).then(invocation -> {
var argument = (Consumer<String>) invocation.getArgument(1);
argument.accept(output);
return mock(ProcessWrapper.class);
});
NativeGitBlameCommand blameCommand = new NativeGitBlameCommand(System2.INSTANCE, mockedCmd);
assertThat(blameCommand.checkIfEnabled()).isTrue();
});
} |
@VisibleForTesting
public SmsChannelDO validateSmsChannel(Long channelId) {
SmsChannelDO channelDO = smsChannelService.getSmsChannel(channelId);
if (channelDO == null) {
throw exception(SMS_CHANNEL_NOT_EXISTS);
}
if (CommonStatusEnum.isDisable(channelDO.getStatus())) {
throw exception(SMS_CHANNEL_DISABLE);
}
return channelDO;
} | @Test
public void testValidateSmsChannel_disable() {
// prepare parameters
Long channelId = randomLongId();
// mock method
SmsChannelDO channelDO = randomPojo(SmsChannelDO.class, o -> {
o.setId(channelId);
o.setStatus(CommonStatusEnum.DISABLE.getStatus()); // ensure status is disabled to trigger the failure
});
when(smsChannelService.getSmsChannel(eq(channelId))).thenReturn(channelDO);
// invoke and verify the exception
assertServiceException(() -> smsTemplateService.validateSmsChannel(channelId),
SMS_CHANNEL_DISABLE);
} |
public static String jaasConfig(String moduleName, Map<String, String> options) {
StringJoiner joiner = new StringJoiner(" ");
for (Entry<String, String> entry : options.entrySet()) {
String key = Objects.requireNonNull(entry.getKey());
String value = Objects.requireNonNull(entry.getValue());
if (key.contains("=") || key.contains(";")) {
throw new IllegalArgumentException("Keys must not contain '=' or ';'");
}
if (moduleName.isEmpty() || moduleName.contains(";") || moduleName.contains("=")) {
throw new IllegalArgumentException("module name must be not empty and must not contain '=' or ';'");
} else {
joiner.add(key + "=\"" + value + "\"");
}
}
return moduleName + " required " + joiner + ";";
} | @Test
public void testEmptyModuleName() {
Map<String, String> options = new HashMap<>();
options.put("key1", "value1");
String moduleName = "";
assertThrows(IllegalArgumentException.class, () -> AuthenticationUtils.jaasConfig(moduleName, options));
} |
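
For reference, a small driver showing the string shape the happy path produces. Note this sketch hoists the module-name validation out of the options loop, so it also fires when the options map is empty; the method above only checks it once per entry. The demo class and module name are hypothetical:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.StringJoiner;

// Options become key="value" pairs after "<module> required", terminated by ';'.
public final class JaasConfigDemo {
    static String jaasConfig(String moduleName, Map<String, String> options) {
        if (moduleName.isEmpty() || moduleName.contains(";") || moduleName.contains("=")) {
            throw new IllegalArgumentException("module name must be not empty and must not contain '=' or ';'");
        }
        StringJoiner joiner = new StringJoiner(" ");
        for (Map.Entry<String, String> entry : options.entrySet()) {
            joiner.add(entry.getKey() + "=\"" + entry.getValue() + "\"");
        }
        return moduleName + " required " + joiner + ";";
    }

    public static void main(String[] args) {
        Map<String, String> options = new LinkedHashMap<>();
        options.put("username", "alice");
        options.put("password", "secret");
        // Prints: MyLoginModule required username="alice" password="secret";
        System.out.println(jaasConfig("MyLoginModule", options));
    }
}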
public void transitionTo(ClassicGroupState groupState) {
assertValidTransition(groupState);
previousState = state;
state = groupState;
currentStateTimestamp = Optional.of(time.milliseconds());
metrics.onClassicGroupStateTransition(previousState, state);
} | @Test
public void testEmptyToStableIllegalTransition() {
assertThrows(IllegalStateException.class, () -> group.transitionTo(STABLE));
} |
@Override
public TCreatePartitionResult createPartition(TCreatePartitionRequest request) throws TException {
LOG.info("Receive create partition: {}", request);
TCreatePartitionResult result;
try {
if (partitionRequestNum.incrementAndGet() >= Config.thrift_server_max_worker_threads / 4) {
result = new TCreatePartitionResult();
TStatus errorStatus = new TStatus(SERVICE_UNAVAILABLE);
errorStatus.setError_msgs(Lists.newArrayList(
String.format("Too many create partition requests, please try again later txn_id=%d",
request.getTxn_id())));
result.setStatus(errorStatus);
return result;
}
result = createPartitionProcess(request);
} catch (Exception t) {
LOG.warn(DebugUtil.getStackTrace(t));
result = new TCreatePartitionResult();
TStatus errorStatus = new TStatus(RUNTIME_ERROR);
errorStatus.setError_msgs(Lists.newArrayList(String.format("txn_id=%d failed. %s",
request.getTxn_id(), t.getMessage())));
result.setStatus(errorStatus);
} finally {
partitionRequestNum.decrementAndGet();
}
return result;
} | @Test
public void testCreatePartitionExceedLimit() throws TException {
Database db = GlobalStateMgr.getCurrentState().getDb("test");
Table table = db.getTable("site_access_day");
List<List<String>> partitionValues = Lists.newArrayList();
List<String> values = Lists.newArrayList();
values.add("1990-04-24");
partitionValues.add(values);
FrontendServiceImpl impl = new FrontendServiceImpl(exeEnv);
TCreatePartitionRequest request = new TCreatePartitionRequest();
request.setDb_id(db.getId());
request.setTable_id(table.getId());
request.setPartition_values(partitionValues);
Config.thrift_server_max_worker_threads = 4;
TCreatePartitionResult partition = impl.createPartition(request);
Config.thrift_server_max_worker_threads = 4096;
Assert.assertEquals(partition.getStatus().getStatus_code(), TStatusCode.SERVICE_UNAVAILABLE);
} |
@Override
public void removeAll(Set<? extends K> keys) {
RFuture<Void> future = removeAllAsync(keys);
sync(future);
} | @Test
public void testRemoveAll() throws Exception {
URL configUrl = getClass().getResource("redisson-jcache.yaml");
Config cfg = Config.fromYAML(configUrl);
Configuration<String, String> config = RedissonConfiguration.fromConfig(cfg);
Cache<String, String> cache = Caching.getCachingProvider().getCacheManager()
.createCache("test", config);
cache.put("1", "2");
cache.put("3", "4");
cache.put("4", "4");
cache.put("5", "5");
Set<? extends String> keys = new HashSet<String>(Arrays.asList("1", "3", "4", "5"));
cache.removeAll(keys);
assertThat(cache.containsKey("1")).isFalse();
assertThat(cache.containsKey("3")).isFalse();
assertThat(cache.containsKey("4")).isFalse();
assertThat(cache.containsKey("5")).isFalse();
cache.close();
} |
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) {
return drlxParse(patternType, bindingId, expression, false);
} | @Test
public void testBigDecimalLiteralWithBindVariable() {
SingleDrlxParseSuccess result = (SingleDrlxParseSuccess) parser.drlxParse(Person.class, "$p", "$bd : 10.3B");
assertThat(result.getExpr().toString()).isEqualTo("new java.math.BigDecimal(\"10.3\")");
} |
public static InetSocketAddress toInetSocketAddress(String address) {
String[] ipPortStr = splitIPPortStr(address);
String host;
int port;
if (null != ipPortStr) {
host = ipPortStr[0];
port = Integer.parseInt(ipPortStr[1]);
} else {
host = address;
port = 0;
}
return new InetSocketAddress(host, port);
} | @Test
public void testToInetSocketAddress1() {
assertThat(NetUtil.toInetSocketAddress("kadfskl").getHostName()).isEqualTo("kadfskl");
} |
public static String getShortestTableStringFormat(List<List<String>> table)
{
if (table.isEmpty()) {
throw new IllegalArgumentException("Table must include at least one row");
}
int tableWidth = table.get(0).size();
int[] lengthTracker = new int[tableWidth];
for (List<String> row : table) {
if (row.size() != tableWidth) {
String errorString = format("All rows in the table are expected to have exactly same number of columns: %s != %s",
tableWidth, row.size());
throw new IllegalArgumentException(errorString);
}
for (int i = 0; i < row.size(); i++) {
lengthTracker[i] = max(row.get(i).length(), lengthTracker[i]);
}
}
StringBuilder sb = new StringBuilder();
sb.append('|');
for (int maxLen : lengthTracker) {
sb.append(" %-")
.append(maxLen)
.append("s |");
}
return sb.toString();
} | @Test
public void testGetShortestTableStringFormatSimpleSuccess()
{
List<List<String>> table = Arrays.asList(
Arrays.asList("Header1", "Header2", "Headr3"),
Arrays.asList("Value1", "Value2", "Value3"),
Arrays.asList("LongValue1", "SVal2", "SVal3"));
assertEquals(
StringTableUtils.getShortestTableStringFormat(table),
"| %-10s | %-7s | %-6s |");
} |
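
The returned pattern is a row template, not the rendered table: each column gets a left-justified %-Ns conversion sized to its widest cell ("Headr3" in the test data is 6 characters, hence %-6s). A short driver applying the template from the test with String.format:

import java.util.Arrays;
import java.util.List;

// Render the table by applying the computed row format once per row.
public final class TableFormatDemo {
    public static void main(String[] args) {
        List<List<String>> table = Arrays.asList(
                Arrays.asList("Header1", "Header2", "Headr3"),
                Arrays.asList("Value1", "Value2", "Value3"),
                Arrays.asList("LongValue1", "SVal2", "SVal3"));
        String rowFormat = "| %-10s | %-7s | %-6s |"; // what the utility computes above
        for (List<String> row : table) {
            System.out.println(String.format(rowFormat, row.toArray()));
        }
        // | Header1    | Header2 | Headr3 |
        // | Value1     | Value2  | Value3 |
        // | LongValue1 | SVal2   | SVal3  |
    }
}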
public static WorkflowInstanceAggregatedInfo computeAggregatedView(
WorkflowInstance workflowInstance, boolean statusKnown) {
if (workflowInstance == null) {
// returning empty object since cannot access state of the current instance run
return new WorkflowInstanceAggregatedInfo();
}
WorkflowInstanceAggregatedInfo instanceAggregated =
computeAggregatedViewNoStatus(workflowInstance);
if (statusKnown || workflowInstance.getAggregatedInfo() == null) {
instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus());
} else {
computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated);
}
return instanceAggregated;
} | @Test
public void testAggregatedViewSomeNotStarted() {
WorkflowInstance run1 =
getGenericWorkflowInstance(
1, WorkflowInstance.Status.FAILED, RunPolicy.START_FRESH_NEW_RUN, null);
Workflow runtimeWorkflow = mock(Workflow.class);
Map<String, StepRuntimeState> decodedOverview = new LinkedHashMap<>();
decodedOverview.put("step1", generateStepState(StepInstance.Status.SUCCEEDED, 1L, 2L));
decodedOverview.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 3L, 4L));
decodedOverview.put("step3", generateStepState(StepInstance.Status.FATALLY_FAILED, 5L, 6L));
decodedOverview.put("step4", generateStepState(StepInstance.Status.NOT_CREATED, null, null));
decodedOverview.put("step5", generateStepState(StepInstance.Status.NOT_CREATED, null, null));
WorkflowRuntimeOverview wro = mock(WorkflowRuntimeOverview.class);
doReturn(decodedOverview).when(wro).decodeStepOverview(run1.getRuntimeDag());
run1.setRuntimeOverview(wro);
run1.setRuntimeWorkflow(runtimeWorkflow);
run1.getRuntimeDag().put("step4", new StepTransition());
run1.getRuntimeDag().put("step5", new StepTransition());
WorkflowInstanceAggregatedInfo aggregated =
AggregatedViewHelper.computeAggregatedView(run1, false);
assertEquals(1L, aggregated.getStepAggregatedViews().get("step1").getStartTime().longValue());
assertEquals(3L, aggregated.getStepAggregatedViews().get("step2").getStartTime().longValue());
assertEquals(5L, aggregated.getStepAggregatedViews().get("step3").getStartTime().longValue());
assertEquals(WorkflowInstance.Status.FAILED, aggregated.getWorkflowInstanceStatus());
WorkflowInstance run2 =
getGenericWorkflowInstance(
2,
WorkflowInstance.Status.SUCCEEDED,
RunPolicy.RESTART_FROM_INCOMPLETE,
RestartPolicy.RESTART_FROM_INCOMPLETE);
Map<String, StepRuntimeState> decodedOverview2 = new LinkedHashMap<>();
decodedOverview2.put("step3", generateStepState(StepInstance.Status.SUCCEEDED, 11L, 12L));
decodedOverview2.put("step4", generateStepState(StepInstance.Status.SUCCEEDED, 14L, 15L));
decodedOverview2.put("step5", generateStepState(StepInstance.Status.SUCCEEDED, 16L, 17L));
Map<String, StepTransition> run2Dag = new LinkedHashMap<>();
run2Dag.put("step3", new StepTransition());
run2Dag.put("step4", new StepTransition());
run2Dag.put("step5", new StepTransition());
run2.setRuntimeDag(run2Dag);
doReturn(run1)
.when(workflowInstanceDao)
.getWorkflowInstanceRun(run2.getWorkflowId(), run2.getWorkflowInstanceId(), 1L);
run2.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(run1, false));
assertEquals(5, run2.getAggregatedInfo().getStepAggregatedViews().size());
assertEquals(
StepInstance.Status.SUCCEEDED,
run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStatus());
assertEquals(
StepInstance.Status.SUCCEEDED,
run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStatus());
assertEquals(
StepInstance.Status.FATALLY_FAILED,
run2.getAggregatedInfo().getStepAggregatedViews().get("step3").getStatus());
assertEquals(
StepInstance.Status.NOT_CREATED,
run2.getAggregatedInfo().getStepAggregatedViews().get("step4").getStatus());
assertEquals(
StepInstance.Status.NOT_CREATED,
run2.getAggregatedInfo().getStepAggregatedViews().get("step5").getStatus());
assertEquals(
1L,
run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStartTime().longValue());
assertEquals(
3L,
run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStartTime().longValue());
assertEquals(
5L,
run2.getAggregatedInfo().getStepAggregatedViews().get("step3").getStartTime().longValue());
WorkflowRuntimeOverview wro2 = mock(WorkflowRuntimeOverview.class);
doReturn(decodedOverview2).when(wro2).decodeStepOverview(run2.getRuntimeDag());
run2.setRuntimeOverview(wro2);
run2.setRuntimeWorkflow(runtimeWorkflow);
WorkflowInstanceAggregatedInfo aggregated2 =
AggregatedViewHelper.computeAggregatedView(run2, false);
assertEquals(5, aggregated2.getStepAggregatedViews().size());
assertEquals(
StepInstance.Status.SUCCEEDED,
aggregated2.getStepAggregatedViews().get("step1").getStatus());
assertEquals(
StepInstance.Status.SUCCEEDED,
aggregated2.getStepAggregatedViews().get("step2").getStatus());
assertEquals(
StepInstance.Status.SUCCEEDED,
aggregated2.getStepAggregatedViews().get("step3").getStatus());
assertEquals(
StepInstance.Status.SUCCEEDED,
aggregated2.getStepAggregatedViews().get("step4").getStatus());
assertEquals(
StepInstance.Status.SUCCEEDED,
aggregated2.getStepAggregatedViews().get("step5").getStatus());
assertEquals(1L, aggregated2.getStepAggregatedViews().get("step1").getStartTime().longValue());
assertEquals(3L, aggregated2.getStepAggregatedViews().get("step2").getStartTime().longValue());
assertEquals(11L, aggregated2.getStepAggregatedViews().get("step3").getStartTime().longValue());
assertEquals(14L, aggregated2.getStepAggregatedViews().get("step4").getStartTime().longValue());
assertEquals(16L, aggregated2.getStepAggregatedViews().get("step5").getStartTime().longValue());
assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated2.getWorkflowInstanceStatus());
} |
@Override
public Set<Host> getHostsByMac(MacAddress mac) {
checkNotNull(mac, "MAC address cannot be null");
return filter(getHostsColl(), host -> Objects.equals(host.mac(), mac));
} | @Test(expected = NullPointerException.class)
public void testGetHostsByNullMac() {
VirtualNetwork vnet = setupEmptyVnet();
HostService hostService = manager.get(vnet.id(), HostService.class);
hostService.getHostsByMac(null);
} |
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
} | @Test
void default_wip() {
RuntimeOptions options = parser
.parse()
.build();
assertThat(options.isWip(), is(false));
} |
@Operation(summary = "createSchedule", description = "CREATE_SCHEDULE_NOTES")
@Parameters({
@Parameter(name = "processDefinitionCode", description = "PROCESS_DEFINITION_CODE", required = true, schema = @Schema(implementation = long.class, example = "100")),
@Parameter(name = "schedule", description = "SCHEDULE", schema = @Schema(implementation = String.class, example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','timezoneId':'America/Phoenix','crontab':'0 0 3/6 * * ? *'}")),
@Parameter(name = "warningType", description = "WARNING_TYPE", schema = @Schema(implementation = WarningType.class)),
@Parameter(name = "warningGroupId", description = "WARNING_GROUP_ID", schema = @Schema(implementation = int.class, example = "100")),
@Parameter(name = "failureStrategy", description = "FAILURE_STRATEGY", schema = @Schema(implementation = FailureStrategy.class)),
@Parameter(name = "workerGroup", description = "WORKER_GROUP", schema = @Schema(implementation = String.class, example = "default")),
@Parameter(name = "tenantCode", description = "TENANT_CODE", schema = @Schema(implementation = String.class, example = "default")),
@Parameter(name = "environmentCode", description = "ENVIRONMENT_CODE", schema = @Schema(implementation = long.class)),
@Parameter(name = "processInstancePriority", description = "PROCESS_INSTANCE_PRIORITY", schema = @Schema(implementation = Priority.class)),
})
@PostMapping()
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_SCHEDULE_ERROR)
public Result createSchedule(@Parameter(hidden = true) @RequestAttribute(value = SESSION_USER) User loginUser,
@Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam(value = "processDefinitionCode") long processDefinitionCode,
@RequestParam(value = "schedule") String schedule,
@RequestParam(value = "warningType", required = false, defaultValue = DEFAULT_WARNING_TYPE) WarningType warningType,
@RequestParam(value = "warningGroupId", required = false, defaultValue = DEFAULT_NOTIFY_GROUP_ID) int warningGroupId,
@RequestParam(value = "failureStrategy", required = false, defaultValue = DEFAULT_FAILURE_POLICY) FailureStrategy failureStrategy,
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
@RequestParam(value = "tenantCode", required = false, defaultValue = "default") String tenantCode,
@RequestParam(value = "environmentCode", required = false, defaultValue = "-1") Long environmentCode,
@RequestParam(value = "processInstancePriority", required = false, defaultValue = DEFAULT_PROCESS_INSTANCE_PRIORITY) Priority processInstancePriority) {
Map<String, Object> result = schedulerService.insertSchedule(
loginUser,
projectCode,
processDefinitionCode,
schedule,
warningType,
warningGroupId,
failureStrategy,
processInstancePriority,
workerGroup,
tenantCode,
environmentCode);
return returnDataList(result);
} | @Test
public void testCreateSchedule() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("processDefinitionCode", "40");
paramsMap.add("schedule",
"{'startTime':'2019-12-16 00:00:00','endTime':'2019-12-17 00:00:00','crontab':'0 0 6 * * ? *'}");
paramsMap.add("warningType", String.valueOf(WarningType.NONE));
paramsMap.add("warningGroupId", "1");
paramsMap.add("failureStrategy", String.valueOf(FailureStrategy.CONTINUE));
paramsMap.add("receivers", "");
paramsMap.add("receiversCc", "");
paramsMap.add("workerGroupId", "1");
paramsMap.add("tenantCode", "root");
paramsMap.add("processInstancePriority", String.valueOf(Priority.HIGH));
Mockito.when(schedulerService.insertSchedule(isA(User.class), isA(Long.class), isA(Long.class),
isA(String.class), isA(WarningType.class), isA(int.class), isA(FailureStrategy.class),
isA(Priority.class), isA(String.class), isA(String.class), isA(Long.class))).thenReturn(success());
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectCode}/schedules/", 123)
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
} |
public static InetSocketAddress parseAddress(String address, int defaultPort) {
return parseAddress(address, defaultPort, false);
} | @Test
void shouldNotParseAddressForIPv6WithNotNumericPort_Strict() {
assertThatIllegalArgumentException()
.isThrownBy(() -> AddressUtils.parseAddress("[1abc:2abc:3abc:0:0:0:5abc:6abc]:abc42", 80, true))
.withMessage("Failed to parse a port from [1abc:2abc:3abc:0:0:0:5abc:6abc]:abc42");
} |
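
The expected message implies a splitting rule: for a bracketed IPv6 literal, everything after "]:" must parse as a number. A simplified, hypothetical sketch of that rule; Reactor's real parseAddress also handles unbracketed IPv6 literals and other edge cases omitted here:

import java.net.InetSocketAddress;

// Split "host:port" text, treating "[...]" as an IPv6 host literal.
public final class HostPortSplitSketch {
    static InetSocketAddress parse(String address, int defaultPort) {
        String host;
        String portText = null;
        if (address.startsWith("[")) {
            int end = address.indexOf(']');
            if (end < 0) {
                throw new IllegalArgumentException("Unclosed IPv6 literal: " + address);
            }
            host = address.substring(1, end);
            if (end + 1 < address.length() && address.charAt(end + 1) == ':') {
                portText = address.substring(end + 2); // text after "]:"
            }
        } else {
            int colon = address.lastIndexOf(':');
            host = colon < 0 ? address : address.substring(0, colon);
            portText = colon < 0 ? null : address.substring(colon + 1);
        }
        int port = defaultPort;
        if (portText != null) {
            try {
                port = Integer.parseInt(portText);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Failed to parse a port from " + address);
            }
        }
        return InetSocketAddress.createUnresolved(host, port);
    }

    public static void main(String[] args) {
        System.out.println(parse("[1abc:2abc:3abc:0:0:0:5abc:6abc]:42", 80)); // port 42
        System.out.println(parse("example.com", 80));                         // default port 80
    }
}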
public void verifyBucketAccessible(GcsPath path) throws IOException {
verifyBucketAccessible(path, createBackOff(), Sleeper.DEFAULT);
} | @Test
public void testVerifyBucketAccessible() throws IOException {
GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
GcsUtil gcsUtil = pipelineOptions.getGcsUtil();
Storage mockStorage = Mockito.mock(Storage.class);
gcsUtil.setStorageClient(mockStorage);
Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
Storage.Buckets.Get mockStorageGet = Mockito.mock(Storage.Buckets.Get.class);
BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());
when(mockStorage.buckets()).thenReturn(mockStorageObjects);
when(mockStorageObjects.get("testbucket")).thenReturn(mockStorageGet);
when(mockStorageGet.execute())
.thenThrow(new SocketTimeoutException("SocketException"))
.thenReturn(new Bucket());
gcsUtil.verifyBucketAccessible(
GcsPath.fromComponents("testbucket", "testobject"),
mockBackOff,
new FastNanoClockAndSleeper()::sleep);
} |
@Override
public void onEvent(ReplicaMigrationEvent event) {
switch (event.getPartitionId()) {
case MIGRATION_STARTED_PARTITION_ID:
migrationListener.migrationStarted(event.getMigrationState());
break;
case MIGRATION_FINISHED_PARTITION_ID:
migrationListener.migrationFinished(event.getMigrationState());
break;
default:
if (event.isSuccess()) {
migrationListener.replicaMigrationCompleted(event);
} else {
migrationListener.replicaMigrationFailed(event);
}
}
} | @Test
public void test_migrationProcessCompleted() {
MigrationState migrationSchedule = new MigrationStateImpl();
ReplicaMigrationEvent event = new ReplicaMigrationEventImpl(migrationSchedule, MIGRATION_FINISHED_PARTITION_ID, 0, null, null, true, 0L);
adapter.onEvent(event);
verify(listener, never()).migrationStarted(any(MigrationState.class));
verify(listener).migrationFinished(migrationSchedule);
verify(listener, never()).replicaMigrationCompleted(any(ReplicaMigrationEvent.class));
verify(listener, never()).replicaMigrationFailed(any(ReplicaMigrationEvent.class));
} |
@Override
public void set(V value) {
get(setAsync(value));
} | @Test
public void testIdleTime() throws InterruptedException {
RBucket<Integer> al = redisson.getBucket("test");
al.set(1234);
Thread.sleep(5000);
assertThat(al.getIdleTime()).isBetween(4L, 6L);
} |
public static SeaTunnelRowType convert(RowType rowType, int[] projectionIndex) {
String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
SeaTunnelDataType<?>[] dataTypes =
rowType.getFields().stream()
.map(field -> field.type().accept(PaimonToSeaTunnelTypeVisitor.INSTANCE))
.toArray(SeaTunnelDataType<?>[]::new);
if (projectionIndex != null) {
String[] projectionFieldNames =
Arrays.stream(projectionIndex)
.filter(index -> index >= 0 && index < fieldNames.length)
.mapToObj(index -> fieldNames[index])
.toArray(String[]::new);
SeaTunnelDataType<?>[] projectionDataTypes =
Arrays.stream(projectionIndex)
.filter(index -> index >= 0 && index < fieldNames.length)
.mapToObj(index -> dataTypes[index])
.toArray(SeaTunnelDataType<?>[]::new);
return new SeaTunnelRowType(projectionFieldNames, projectionDataTypes);
}
return new SeaTunnelRowType(fieldNames, dataTypes);
} | @Test
public void paimonToSeaTunnelWithProjection() {
int[] projection = {7, 2};
SeaTunnelRowType convert = RowTypeConverter.convert(rowType, projection);
Assertions.assertEquals(convert, seaTunnelProjectionRowType);
} |
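The projection branch above selects fields by index and silently drops out-of-range entries. The same step in isolation, on plain arrays with hypothetical field names:

import java.util.Arrays;

String[] fieldNames = {"id", "name", "age", "email"};
int[] projection = {3, 1}; // select "email" then "name"
String[] projected = Arrays.stream(projection)
        .filter(i -> i >= 0 && i < fieldNames.length) // out-of-range indexes are silently dropped
        .mapToObj(i -> fieldNames[i])
        .toArray(String[]::new);
// projected -> [email, name]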
public static ExecutableModelClassesContainer drlToExecutableModel(DrlFileSetResource resources, DrlCompilationContext context) {
KnowledgeBuilderConfigurationImpl conf = context.newKnowledgeBuilderConfiguration().as(KnowledgeBuilderConfigurationImpl.KEY);
return pkgDescrToExecModel(buildCompositePackageDescrs(resources, conf), resources.getModelLocalUriId().basePath(), conf, context);
} | @Test
void getDrlCallableClassesContainerFromFileResource() {
String basePath = UUID.randomUUID().toString();
DrlFileSetResource toProcess = new DrlFileSetResource(drlFiles, basePath);
EfestoCompilationOutput retrieved = DrlCompilerHelper.drlToExecutableModel(toProcess, context);
commonVerifyEfestoCompilationOutput(retrieved, basePath);
} |
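The method above is a two-phase compile: sources are first parsed into composite package descriptors, then lowered to executable-model classes. A fully hypothetical, runnable sketch of that shape follows; none of these types are Drools APIs.

import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;

// Hypothetical two-phase pipeline: parse sources -> descriptors -> model.
public class CompilePipelineDemo {
    record Descr(String pkg) {}
    record Model(List<String> classNames) {}

    static List<Descr> parse(List<String> drlSources) {
        return drlSources.stream().map(Descr::new).collect(Collectors.toList());
    }

    static Model lower(List<Descr> descrs, String basePath) {
        return new Model(descrs.stream()
                .map(d -> basePath + "." + d.pkg() + "Model")
                .collect(Collectors.toList()));
    }

    public static void main(String[] args) {
        String basePath = UUID.randomUUID().toString();
        Model m = lower(parse(List.of("rules.core", "rules.extra")), basePath);
        System.out.println(m.classNames());
    }
}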
public boolean shouldOverwriteHeaderWithName(String headerName) {
notNull(headerName, "Header name");
return headersToOverwrite.get(headerName.toUpperCase());
} | @Test public void
content_type_header_is_overwritable_by_default() {
HeaderConfig headerConfig = new HeaderConfig();
assertThat(headerConfig.shouldOverwriteHeaderWithName("content-type"), is(true));
} |
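A configuration sketch around the lookup above; the builder method names assume rest-assured's `HeaderConfig` API.

import io.restassured.config.HeaderConfig;

// Choosing which headers are overwritten vs merged
// (overwriteHeadersWithName/mergeHeadersWithName assume rest-assured's builder API).
HeaderConfig config = HeaderConfig.headerConfig()
        .overwriteHeadersWithName("Authorization") // last value wins
        .mergeHeadersWithName("Cookie");           // values accumulate
boolean overwrite = config.shouldOverwriteHeaderWithName("authorization"); // case-insensitive lookup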
@SuppressWarnings("unchecked")
public static <S, F> S visit(final SqlType type, final SqlTypeWalker.Visitor<S, F> visitor) {
final BiFunction<SqlTypeWalker.Visitor<?, ?>, SqlType, Object> handler = HANDLER
.get(type.baseType());
if (handler == null) {
throw new UnsupportedOperationException("Unsupported schema type: " + type.baseType());
}
return (S) handler.apply(visitor, type);
} | @Test
public void shouldVisitPrimitives() {
// Given:
visitor = new Visitor<String, Integer>() {
@Override
public String visitPrimitive(final SqlPrimitiveType type) {
return "Expected";
}
};
primitiveTypes().forEach(type -> {
// When:
final String result = SqlTypeWalker.visit(type, visitor);
// Then:
assertThat(result, is("Expected"));
});
} |
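The walker above is table-driven: a static map from base type to handler replaces a chain of instanceof checks. A compact standalone version of that dispatch pattern, with a hypothetical two-type system:

import java.util.Map;
import java.util.function.Function;

// Standalone sketch of map-based visitor dispatch (hypothetical mini type system).
public class DispatchDemo {
    enum BaseType { INT, STRING }

    static final Map<BaseType, Function<String, String>> HANDLERS = Map.of(
            BaseType.INT, v -> "visited int: " + v,
            BaseType.STRING, v -> "visited string: " + v);

    static String visit(BaseType type, String value) {
        Function<String, String> handler = HANDLERS.get(type);
        if (handler == null) {
            throw new UnsupportedOperationException("Unsupported type: " + type);
        }
        return handler.apply(value);
    }

    public static void main(String[] args) {
        System.out.println(visit(BaseType.INT, "42")); // visited int: 42
    }
}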
@Nonnull
public static <C> SourceBuilder<C>.Batch<Void> batch(
@Nonnull String name,
@Nonnull FunctionEx<? super Processor.Context, ? extends C> createFn
) {
return new SourceBuilder<C>(name, createFn).new Batch<>();
} | @Test
public void batch_socketSource_withTimestamps() throws IOException {
// Given
try (ServerSocket serverSocket = new ServerSocket(0)) {
startServer(serverSocket);
// When
int localPort = serverSocket.getLocalPort();
ToLongFunctionEx<String> timestampFn = line -> Long.parseLong(line.substring(LINE_PREFIX.length()));
BatchSource<String> socketSource = SourceBuilder
.batch("socket-source-with-timestamps", ctx -> socketReader(localPort))
.<String>fillBufferFn((in, buf) -> {
String line = in.readLine();
if (line != null) {
buf.add(line);
} else {
buf.close();
}
})
.destroyFn(BufferedReader::close)
.build();
// Then
Pipeline p = Pipeline.create();
p.readFrom(socketSource)
.addTimestamps(timestampFn, 0)
.window(tumbling(1))
.aggregate(AggregateOperations.counting())
.writeTo(sinkList());
hz().getJet().newJob(p).join();
List<WindowResult<Long>> expected = LongStream
.range(1, itemCount + 1)
.mapToObj(i -> new WindowResult<>(i - 1, i, 1L))
.collect(toList());
assertEquals(expected, new ArrayList<>(sinkList));
}
} |
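The `fillBufferFn` contract exercised above is the heart of `SourceBuilder`: add available items to the buffer, and call `buf.close()` once the source is exhausted. A smaller batch source built with the same Jet builder API; the int-array counter state is illustrative.

// Emits 0..99, one item per fillBufferFn call, then closes the buffer.
BatchSource<Integer> counter = SourceBuilder
        .batch("counter", ctx -> new int[]{0})   // mutable cursor as the per-processor state
        .<Integer>fillBufferFn((state, buf) -> {
            if (state[0] < 100) {
                buf.add(state[0]++);
            } else {
                buf.close();                     // signals the batch is done
            }
        })
        .build();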
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyArray() {
String[] stringArray = {"a", "b"};
ImmutableList<String[]> iterable = ImmutableList.of(stringArray);
// This test fails w/o the explicit cast
assertThat(iterable).containsExactly((Object) stringArray);
} |
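The `varargs[0] instanceof Iterable` guard above exists because of exactly the ambiguity this test exercises: a single array (or iterable) argument can mean either "these elements" or "this one element". The two call shapes side by side, using Truth assertions as in the test:

import static com.google.common.truth.Truth.assertThat;
import com.google.common.collect.ImmutableList;

String[] array = {"a", "b"};
// Without a cast, the array is spread into varargs: expects elements "a" and "b".
assertThat(ImmutableList.of("a", "b")).containsExactly("a", "b");
// With the cast, the array itself is the single expected element.
assertThat(ImmutableList.of(array)).containsExactly((Object) array);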